%Aigaion2 BibTeX export from Idiap Publications
%Wednesday 01 May 2024 06:18:50 PM

@INPROCEEDINGS{Dighe_IEEESLT2018_2018,
         author = {Dighe, Pranay and Bourlard, Herv{\'{e}} and Asaei, Afsaneh},
       keywords = {deep neural networks, far-field ASR, low-rank sparsity models, soft targets},
       projects = {PHASER-QUAD},
          month = dec,
          title = {Far-field ASR Using Low-rank and Sparse Soft Targets from Parallel Data},
      booktitle = {IEEE Spoken Language Technology Workshop (SLT)},
           year = {2018},
          pages = {581--587},
      publisher = {IEEE},
       location = {Athens, Greece},
           issn = {2639-5479},
           isbn = {978-1-5386-4334-1},
       abstract = {Far-field automatic speech recognition (ASR) of conversational speech is often considered a very challenging task due to the poor quality of the alignments available for training DNN acoustic models. A common way to alleviate this problem is to use clean alignments obtained from close-talk speech data recorded in parallel. In this work, we advance the parallel data approach by obtaining enhanced low-rank and sparse soft targets from a close-talk ASR system and using them to train more accurate far-field acoustic models. Specifically, we (i) exploit eigenposteriors and Compressive Sensing dictionaries to learn low-dimensional senone subspaces in the DNN posterior space, and (ii) enhance close-talk DNN posteriors to obtain high-quality soft targets for training far-field DNN acoustic models. We show that the enhanced soft targets encode the structural and temporal interrelationships among senone classes, which are easily accessible in the DNN posterior space of close-talk speech but not in its noisy far-field counterpart. We exploit the enhanced soft targets to improve the mapping of far-field acoustics to close-talk senone classes. The experiments are performed on the AMI meeting corpus, where our approach improves DNN-based acoustic modeling by a 4.4\% absolute ({\textasciitilde}8\% relative) reduction in WER compared to a system that does not use parallel data. Finally, the approach is also validated on state-of-the-art recurrent and time-delay neural network architectures.},
            pdf = {https://publications.idiap.ch/attachments/papers/2018/Dighe_IEEESLT2018_2018.pdf}
}