%Aigaion2 BibTeX export from Idiap Publications
%Monday 30 December 2024 06:33:32 PM

@ARTICLE{Himawan_SPEECHCOMMUNICATION_2016,
  author = {Himawan, Ivan and Motlicek, Petr and Imseng, David and Sridharan, Sridha},
  projects = {Idiap, DBOX, SIIP, MALORCA},
  month = oct,
  title = {Feature mapping using far-field microphones for distant speech recognition},
  journal = {Speech Communication},
  volume = {83},
  year = {2016},
  pages = {1-9},
  note = {A publication of the European Association for Signal Processing (EURASIP) and of the International Speech Communication Association (ISCA)},
  url = {http://authors.elsevier.com/a/1TSFec7UHvWV5},
  doi = {10.1016/j.specom.2016.07.003},
  crossref = {Himawan_Idiap-RR-20-2016},
  abstract = {Acoustic modeling based on deep architectures has recently gained remarkable success, with substantial improvements in speech recognition accuracy on several automatic speech recognition (ASR) tasks. For distant speech recognition, multi-channel deep neural network (DNN) based approaches rely on the powerful modeling capability of the DNN to learn a suitable representation of distant speech directly from its multi-channel source. In this model-based combination of multiple microphones, features from each channel are concatenated and used together as input to the DNN. This allows the multi-channel audio to be integrated for acoustic modeling without any pre-processing steps. Despite the powerful modeling capabilities of DNNs, an environmental mismatch due to noise and reverberation may result in severe performance degradation when features are simply fed to a DNN without a feature enhancement step. In this paper, we introduce a nonlinear bottleneck feature mapping approach using a DNN to transform noisy and reverberant features into their clean version. The bottleneck features derived from the DNN are used as a teacher signal because they contain information relevant to phoneme classification, and the mapping is performed with the objective of suppressing noise and reverberation. The individual and combined impacts of beamforming and speaker adaptation techniques, along with the feature mapping, are examined for distant large-vocabulary speech recognition using single and multiple far-field microphones. As an alternative to beamforming, experiments with concatenating multiple-channel features are conducted. The experimental results on the AMI meeting corpus show that the feature mapping, used in combination with beamforming and speaker adaptation, yields a distant speech recognition performance below 50\% word error rate (WER), using a DNN for acoustic modeling.},
  pdf = {https://publications.idiap.ch/attachments/papers/2016/Himawan_SPEECHCOMMUNICATION_2016.pdf}
}

crossreferenced publications:

@TECHREPORT{Himawan_Idiap-RR-20-2016,
  author = {Himawan, Ivan and Motlicek, Petr and Imseng, David and Sridharan, Sridha},
  projects = {Idiap, SIIP, MALORCA, DBOX},
  month = aug,
  title = {Feature mapping using far-field microphones for distant speech recognition},
  type = {Idiap-RR},
  number = {Idiap-RR-20-2016},
  year = {2016},
  institution = {Idiap},
  address = {Rue Marconi 19},
  abstract = {Acoustic modeling based on deep architectures has recently gained remarkable success, with substantial improvements in speech recognition accuracy on several automatic speech recognition (ASR) tasks. For distant speech recognition, multi-channel deep neural network (DNN) based approaches rely on the powerful modeling capability of the DNN to learn a suitable representation of distant speech directly from its multi-channel source. In this model-based combination of multiple microphones, features from each channel are concatenated and used together as input to the DNN. This allows the multi-channel audio to be integrated for acoustic modeling without any pre-processing steps. Despite the powerful modeling capabilities of DNNs, an environmental mismatch due to noise and reverberation may result in severe performance degradation when features are simply fed to a DNN without a feature enhancement step. In this paper, we introduce a nonlinear bottleneck feature mapping approach using a DNN to transform noisy and reverberant features into their clean version. The bottleneck features derived from the DNN are used as a teacher signal because they contain information relevant to phoneme classification, and the mapping is performed with the objective of suppressing noise and reverberation. The individual and combined impacts of beamforming and speaker adaptation techniques, along with the feature mapping, are examined for distant large-vocabulary speech recognition using single and multiple far-field microphones. As an alternative to beamforming, experiments with concatenating multiple-channel features are conducted. The experimental results on the AMI meeting corpus show that the feature mapping, used in combination with beamforming and speaker adaptation, yields a distant speech recognition performance below 50\% word error rate (WER), using a DNN for acoustic modeling.},
  pdf = {https://publications.idiap.ch/attachments/reports/2016/Himawan_Idiap-RR-20-2016.pdf}
}
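%The abstract above describes the bottleneck feature mapping only at a high level. The sketch below
%is a rough illustration of that idea, not the authors' implementation: a teacher DNN with a
%bottleneck layer is assumed to have been trained on clean close-talk features, and a mapping
%network is trained with an MSE objective to predict the clean bottleneck features from noisy
%far-field features. Feature dimensions, layer sizes, and training details are illustrative assumptions.

# Hedged PyTorch sketch of DNN-based bottleneck feature mapping (illustrative only;
# all dimensions and hyperparameters are assumptions, not taken from the paper).
import torch
import torch.nn as nn

FEAT_DIM = 40        # assumed acoustic feature dimension (e.g., filterbank)
BOTTLENECK_DIM = 40  # assumed bottleneck width

class BottleneckDNN(nn.Module):
    """Teacher network, assumed trained on clean features; its bottleneck
    activations serve as the regression target (teacher signal)."""
    def __init__(self, n_phone_targets=120):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(FEAT_DIM, 1024), nn.Sigmoid(),
            nn.Linear(1024, 1024), nn.Sigmoid(),
            nn.Linear(1024, BOTTLENECK_DIM),          # bottleneck layer
        )
        self.classifier = nn.Sequential(
            nn.Sigmoid(),
            nn.Linear(BOTTLENECK_DIM, n_phone_targets),
        )

    def forward(self, x):
        bn = self.encoder(x)
        return self.classifier(bn), bn

class FeatureMapper(nn.Module):
    """Mapping network: transforms noisy/reverberant far-field features
    toward the clean bottleneck representation."""
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(FEAT_DIM, 1024), nn.Sigmoid(),
            nn.Linear(1024, 1024), nn.Sigmoid(),
            nn.Linear(1024, BOTTLENECK_DIM),
        )

    def forward(self, x):
        return self.net(x)

if __name__ == "__main__":
    teacher = BottleneckDNN().eval()
    mapper = FeatureMapper()
    opt = torch.optim.Adam(mapper.parameters(), lr=1e-3)
    mse = nn.MSELoss()

    # Stand-in for parallel clean / far-field feature pairs (random here).
    clean = torch.randn(256, FEAT_DIM)
    noisy = torch.randn(256, FEAT_DIM)

    with torch.no_grad():
        _, target_bn = teacher(clean)   # teacher signal: clean bottleneck features

    opt.zero_grad()
    pred_bn = mapper(noisy)             # mapped features from the noisy channel
    loss = mse(pred_bn, target_bn)      # suppress noise/reverberation via regression
    loss.backward()
    opt.step()
    print(f"feature-mapping MSE: {loss.item():.4f}")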