%Aigaion2 BibTeX export from Idiap Publications
%Saturday 04 May 2024 04:20:34 AM

@INPROCEEDINGS{Lecorve_INTERSPEECH-2_2012,
         author = {Lecorv{\'{e}}, Gw{\'{e}}nol{\'{e}} and Motlicek, Petr},
       keywords = {ASR, Automatic Speech Recognition, Language Models, recurrent neural network, speech decoding, weighted finite state transducer, WFST},
       projects = {Idiap},
          month = sep,
          title = {Conversion of Recurrent Neural Network Language Models to Weighted Finite State Transducers for Automatic Speech Recognition},
      booktitle = {Proceedings of Interspeech},
           year = {2012},
          pages = {to appear},
       location = {Portland, Oregon, USA},
       crossref = {Lecorve_Idiap-RR-21-2012},
        abstract = {Recurrent neural network language models (RNNLMs) have recently been shown to outperform the venerable n-gram language models (LMs). However, in automatic speech recognition (ASR), RNNLMs have not yet been used to decode a speech signal directly; instead, they are applied to rescore N-best lists generated from word lattices. To use RNNLMs in earlier stages of speech recognition, our work proposes to transform RNNLMs into weighted finite state transducers that approximate their underlying probability distribution. The main idea is to discretize continuous representations of word histories; we present a first implementation of this approach using clustering techniques and entropy-based pruning. Experimental results on LM perplexity and ASR word error rate are encouraging, as the performance of the discretized RNNLMs is comparable to that of n-gram LMs.},
            pdf = {https://publications.idiap.ch/attachments/papers/2012/Lecorve_INTERSPEECH-2_2012.pdf}
}
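
% The sketch below is not from the paper; it only illustrates the conversion
% idea the abstract describes: cluster the RNNLM's continuous word-history
% vectors into discrete states, average the next-word distributions within
% each cluster, prune negligible arcs, and read off weighted arcs for a
% finite-state model. All names are hypothetical, and the simple probability
% threshold merely stands in for the entropy-based pruning used in the paper.

import numpy as np
from collections import Counter, defaultdict
from sklearn.cluster import KMeans

def rnnlm_to_arcs(histories, probs, words, n_states=64, min_prob=1e-4):
    """Hypothetical helper, not the authors' code.

    histories: (T, d) array of RNN hidden states observed on a corpus.
    probs:     (T, V) array of RNNLM next-word distributions at each step.
    words:     length-T sequence of the word id actually emitted at each step.
    Returns arcs (src_state, word_id, dst_state, -log prob).
    """
    # 1. Discretize continuous histories: each k-means cluster becomes a state.
    labels = KMeans(n_clusters=n_states, n_init=10).fit_predict(histories)

    # 2. Approximate each state's next-word distribution by averaging the
    #    RNNLM distributions of the histories that fell into that cluster.
    state_probs = np.vstack([probs[labels == k].mean(axis=0)
                             for k in range(n_states)])

    # 3. Destination of arc (state, word): the cluster most often reached
    #    right after emitting that word, estimated from the same corpus.
    dest = defaultdict(Counter)
    for t in range(len(words) - 1):
        dest[(labels[t], words[t])][labels[t + 1]] += 1

    # 4. Prune negligible arcs (a crude stand-in for entropy-based pruning),
    #    renormalize the surviving mass, and emit weighted arcs.
    arcs = []
    for k in range(n_states):
        p = np.where(state_probs[k] >= min_prob, state_probs[k], 0.0)
        p /= p.sum()
        for w in np.nonzero(p)[0]:
            successors = dest[(k, w)].most_common(1)
            dst = successors[0][0] if successors else k  # fallback: self-loop
            arcs.append((k, int(w), int(dst), float(-np.log(p[w]))))
    return arcs

% The resulting (src, word, dst, weight) arcs could then be compiled with an
% FST toolkit such as OpenFst for use in a WFST-based decoder.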



%Cross-referenced publications:
@TECHREPORT{Lecorve_Idiap-RR-21-2012,
         author = {Lecorv{\'{e}}, Gw{\'{e}}nol{\'{e}} and Motlicek, Petr},
       keywords = {ASR, Automatic Speech Recognition, Language Models, recurrent neural network, speech decoding, weighted finite state transducer, WFST},
       projects = {Idiap},
           month = jul,
          title = {Conversion of Recurrent Neural Network Language Models to Weighted Finite State Transducers for Automatic Speech Recognition},
           type = {Idiap-RR},
         number = {Idiap-RR-21-2012},
           year = {2012},
    institution = {Idiap},
        abstract = {Recurrent neural network language models (RNNLMs) have recently been shown to outperform the venerable n-gram language models (LMs). However, in automatic speech recognition (ASR), RNNLMs have not yet been used to decode a speech signal directly; instead, they are applied to rescore N-best lists generated from word lattices. To use RNNLMs in earlier stages of speech recognition, our work proposes to transform RNNLMs into weighted finite state transducers that approximate their underlying probability distribution. The main idea is to discretize continuous representations of word histories; we present a first implementation of this approach using clustering techniques and entropy-based pruning. Experimental results on LM perplexity and ASR word error rate are encouraging, as the performance of the discretized RNNLMs is comparable to that of n-gram LMs.},
            pdf = {https://publications.idiap.ch/attachments/reports/2012/Lecorve_Idiap-RR-21-2012.pdf}
}