%Aigaion2 BibTeX export from Idiap Publications
%Wednesday 16 October 2024 10:08:12 AM

@INPROCEEDINGS{Thorbecke_EMNLP_2024,
         author = {Thorbecke, Iuliia and Zuluaga-Gomez, Juan and Villatoro-Tello, Esa{\'{u}} and Kumar, Shashi and Rangappa, Pradeep and Burdisso, Sergio and Motlicek, Petr and S, Karthik Pandia D and Ganapathiraju, Aravind},
       keywords = {pseudo-labelling, shallow fusion, streaming transducer},
       projects = {UNIPHORE},
          title = {Fast Streaming Transducer ASR Prototyping via Knowledge Distillation with Whisper},
      booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing},
           year = {2024},
       publisher = {Association for Computational Linguistics},
       crossref = {Iuliia_Idiap-RR-10-2024},
        abstract = {The training of automatic speech recognition (ASR) with little to no supervised data remains an open question. In this work, we demonstrate that streaming Transformer-Transducer (TT) models can be trained from scratch, in their entirety, on consumer-grade and accessible GPUs with pseudo-labeled (PL) speech from foundational speech models (FSM). This allows training a robust ASR model in just one stage and does not require the large data and computational budgets of the two-step scenario with pre-training and fine-tuning. We perform a comprehensive ablation on different aspects of PL-based streaming TT models, such as the impact of (1) shallow fusion of n-gram LMs, (2) contextual biasing with named entities, (3) chunk-wise decoding for low-latency streaming applications, and (4) overall TT performance as a function of the FSM size. Our results demonstrate that TT can be trained from scratch without supervised data, even with very noisy PLs. We validate the proposed framework on 6 languages from CommonVoice and propose multiple heuristics to filter out hallucinated PLs.},
            pdf = {https://publications.idiap.ch/attachments/papers/2024/Thorbecke_EMNLP_2024.pdf}
}
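
% Editor's note: the abstract above describes training the streaming
% Transformer-Transducer on pseudo-labels (PLs) produced by a foundational
% speech model such as Whisper, with heuristics to filter out hallucinated
% PLs. Below is a minimal Python sketch of that PL-generation step, assuming
% the openai-whisper package; the model size and all filter thresholds are
% placeholder assumptions, not the paper's published values.
%
%     import whisper
%
%     # Transcribe unlabeled audio with a foundational speech model.
%     model = whisper.load_model("large-v2")
%     result = model.transcribe("utt_0001.wav")
%
%     # Keep only segments that pass simple hallucination filters:
%     # low decoder confidence, repetitive output, or probable silence
%     # are all common signs of a hallucinated pseudo-label.
%     pseudo_labels = []
%     for seg in result["segments"]:
%         if seg["avg_logprob"] < -1.0:        # low-confidence decode
%             continue
%         if seg["compression_ratio"] > 2.4:   # highly repetitive text
%             continue
%         if seg["no_speech_prob"] > 0.6:      # likely non-speech audio
%             continue
%         pseudo_labels.append((seg["start"], seg["end"], seg["text"].strip()))
%
% The surviving (start, end, text) triples would then serve as the only
% supervision for training the streaming transducer from scratch.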



cross-referenced publications:
@TECHREPORT{Iuliia_Idiap-RR-10-2024,
         author = {Thorbecke, Iuliia and Zuluaga-Gomez, Juan and Villatoro-Tello, Esa{\'{u}} and Kumar, Shashi and Rangappa, Pradeep and Burdisso, Sergio and Motlicek, Petr and S, Karthik Pandia D and Ganapathiraju, Aravind},
       projects = {Idiap, UNIPHORE},
          month = {10},
          title = {Fast Streaming Transducer ASR Prototyping via Knowledge Distillation with Whisper},
           type = {Idiap-RR},
         number = {Idiap-RR-10-2024},
           year = {2024},
    institution = {Idiap},
            note = {accepted to EMNLP 2024},
        abstract = {The training of automatic speech recognition (ASR) with little to no supervised data remains an open question. In this work, we demonstrate that streaming Transformer-Transducer (TT) models can be trained from scratch, in their entirety, on consumer-grade and accessible GPUs with pseudo-labeled (PL) speech from foundational speech models (FSM). This allows training a robust ASR model in just one stage and does not require the large data and computational budgets of the two-step scenario with pre-training and fine-tuning. We perform a comprehensive ablation on different aspects of PL-based streaming TT models, such as the impact of (1) shallow fusion of n-gram LMs, (2) contextual biasing with named entities, (3) chunk-wise decoding for low-latency streaming applications, and (4) overall TT performance as a function of the FSM size. Our results demonstrate that TT can be trained from scratch without supervised data, even with very noisy PLs. We validate the proposed framework on 6 languages from CommonVoice and propose multiple heuristics to filter out hallucinated PLs.},
            pdf = {https://publications.idiap.ch/attachments/reports/2024/Iuliia_Idiap-RR-10-2024.pdf}
}
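
% Editor's note: ablation point (1) in the abstract, shallow fusion, combines
% the transducer's token scores with an external n-gram LM at decoding time.
% Below is a minimal, self-contained Python sketch of the fused scoring rule;
% the interpolation weight `lam` and the toy bigram table are placeholder
% assumptions (a real system would query e.g. a KenLM model instead).
%
%     import math
%
%     # score(y) = log P_TT(y | x, y_<t) + lam * log P_LM(y | y_<t)
%     def fused_score(tt_logprob, lm_logprob, lam=0.3):
%         return tt_logprob + lam * lm_logprob
%
%     # Toy bigram LM lookup standing in for a real n-gram model.
%     lm = {("call", "john"): 0.2, ("call", "jon"): 0.05}
%
%     def lm_logprob(prev_token, token):
%         return math.log(lm.get((prev_token, token), 1e-6))
%
%     # At each beam-search step, rescore candidate tokens with the fusion.
%     candidates = {"john": -0.9, "jon": -0.8}   # transducer log-probs
%     best = max(candidates,
%                key=lambda t: fused_score(candidates[t], lm_logprob("call", t)))
%
% Here the LM pulls the beam toward "john" even though the transducer slightly
% prefers "jon". Contextual biasing with named entities (ablation point 2)
% fits the same decoding loop by boosting tokens that extend an entity match.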