%Aigaion2 BibTeX export from Idiap Publications
%Tuesday 14 May 2024 06:13:26 PM

@PHDTHESIS{Hermann_THESIS_2023,
         author = {Hermann, Enno},
       keywords = {Automatic Speech Recognition, Data Augmentation, Dysarthria, Lattice-Free MMI, Pathological Speech Processing},
       projects = {Idiap, TAPAS, IICT},
          title = {On matching data and model in {LF-MMI}-based dysarthric speech recognition},
           year = {2023},
         school = {{\'{E}}cole polytechnique f{\'{e}}d{\'{e}}rale de Lausanne},
            url = {https://infoscience.epfl.ch/record/303171},
             doi = {10.5075/epfl-thesis-9681},
       abstract = {In light of steady progress in machine learning, automatic speech recognition (ASR) is entering more and more areas of our daily life, but people with dysarthria and other speech pathologies are left behind. Their voices are underrepresented in the training data and so different from typical speech that ASR systems fail to recognise them. This thesis aims to adapt both acoustic models and training data of ASR systems in order to better handle dysarthric speech. We first build state-of-the-art acoustic models based on sequence-discriminative lattice-free maximum mutual information (LF-MMI) training that serve as baselines for the following experiments. We propose the dynamic combination of models trained on either control, dysarthric, or both groups of speakers to address the acoustic variability of dysarthric speech. Furthermore, we combine models trained with either phoneme or grapheme acoustic units in order to implicitly handle pronunciation variants. Second, we develop a framework to analyse the acoustic space of ASR training data and its discriminability. We observe that these discriminability measures are strongly linked to subjective intelligibility ratings of dysarthric speakers and ASR performance. Finally, we compare a range of data augmentation methods, including voice conversion and speech synthesis, for creating artificial dysarthric training data for ASR systems. With our analysis framework, we find that these methods reproduce characteristics of natural dysarthric speech.},
            pdf = {https://publications.idiap.ch/attachments/papers/2023/Hermann_THESIS_2023.pdf}
}