% Aigaion2 BibTeX export from Idiap Publications
% Wednesday 20 November 2024 07:32:38 PM
@inproceedings{Motlicek_INTERSPEECH2014_2014,
  author    = {Motlicek, Petr and Imseng, David and Cernak, Milos and Kim, Namhoon},
  projects  = {Idiap, SAMSUNG, DBOX},
  month     = sep,
  title     = {Development of Bilingual {ASR} System for {MediaParl} Corpus},
  booktitle = {Proceedings of the 15th Annual Conference of the International Speech Communication Association ({Interspeech} 2014)},
  year      = {2014},
  publisher = {ISCA},
  location  = {Singapore},
  crossref  = {Motlicek_Idiap-RR-21-2014},
  abstract  = {The development of an Automatic Speech Recognition (ASR) system for the bilingual MediaParl corpus is challenging for several reasons: (1) reverberant recordings, (2) accented speech, and (3) no prior information about the language. In that context, we employ frequency domain linear prediction-based (FDLP) features to reduce the effect of reverberation, exploit bilingual deep neural networks applied in Tandem and hybrid acoustic modeling approaches to significantly improve ASR for accented speech and develop a fully bilingual ASR system using entropy-based decoding-graph selection. Our experiments indicate that the proposed bilingual ASR system performs similar to a language-specific ASR system if approximately five seconds of speech are available.},
  pdf       = {https://publications.idiap.ch/attachments/papers/2014/Motlicek_INTERSPEECH2014_2014.pdf},
}

% Crossreferenced publications.
% NOTE: the parent (@techreport) must appear AFTER the child entry above for
% classic BibTeX crossref resolution to work; keep this ordering.
@techreport{Motlicek_Idiap-RR-21-2014,
  author      = {Motlicek, Petr and Imseng, David and Cernak, Milos and Kim, Namhoon},
  keywords    = {language identification, Multilingual automatic speech recognition, non-native speech},
  projects    = {Idiap, SAMSUNG, DBOX},
  month       = dec,
  title       = {Development of Bilingual {ASR} System for {MediaParl} Corpus},
  type        = {Idiap-RR},
  number      = {Idiap-RR-21-2014},
  year        = {2014},
  institution = {Idiap},
  address     = {Rue Marconi 19},
  abstract    = {The development of an Automatic Speech Recognition (ASR) system for the bilingual MediaParl corpus is challenging for several reasons: (1) reverberant recordings, (2) accented speech, and (3) no prior information about the language. In that context, we employ frequency domain linear prediction-based (FDLP) features to reduce the effect of reverberation, exploit bilingual deep neural networks applied in Tandem and hybrid acoustic modeling approaches to significantly improve ASR for accented speech and develop a fully bilingual ASR system using entropy-based decoding-graph selection. Our experiments indicate that the proposed bilingual ASR system performs similar to a language-specific ASR system if approximately five seconds of speech are available.},
  pdf         = {https://publications.idiap.ch/attachments/reports/2014/Motlicek_Idiap-RR-21-2014.pdf},
}