%Aigaion2 BibTeX export from Idiap Publications
%Thursday 04 December 2025 06:19:21 PM

@INPROCEEDINGS{Iuliia_TSD2025_2025,
                      author = {Thorbecke, Iuliia and Villatoro-Tello, Esa{\'{u}} and Zuluaga-Gomez, Juan and Kumar, Shashi and Burdisso, Sergio and Rangappa, Pradeep and Carofilis, Andr{\'{e}}s and Madikeri, Srikanth and Motlicek, Petr and S, Karthik Pandia D and Hacio{\u{g}}lu, Kadri and Stolcke, Andreas},
                    keywords = {Aho-Corasick algorithm, Contextualisation and adaptation of ASR, real-time ASR, transformer transducer},
                    projects = {Idiap},
         mainresearchprogram = {Human-AI Teaming},
  additionalresearchprograms = {AI for Everyone},
                       month = aug,
                       title = {Unifying Global and Near-Context Biasing in a Single Trie Pass},
                   booktitle = {Text, Speech, and Dialogue (TSD 2025), Lecture Notes in Computer Science},
                      volume = {16029},
                        year = {2025},
                   publisher = {Springer},
                        isbn = {978-3-032-02547-0},
                         url = {https://link.springer.com/chapter/10.1007/978-3-032-02548-7_15},
                         doi = {10.1007/978-3-032-02548-7_15},
                    abstract = {Despite the success of end-to-end automatic speech recognition (ASR) models, challenges persist in recognizing rare, out-of-vocabulary words, including named entities (NE), and in adapting to new domains using only text data. This work presents a practical approach to address these challenges through a previously unexplored combination of an NE bias list and a word-level n-gram language model (LM). The solution balances simplicity and effectiveness, improving entity recognition while maintaining or even enhancing overall ASR performance. We efficiently integrate this enriched biasing method into a transducer-based ASR system, enabling context adaptation with almost no computational overhead. We present results on three datasets spanning four languages and compare them to state-of-the-art biasing strategies. We demonstrate that the proposed combination of keyword biasing and an n-gram LM improves entity recognition by up to 32\% relative and reduces overall WER by up to 12\% relative.},
                         pdf = {https://publications.idiap.ch/attachments/papers/2025/Iuliia_TSD2025_2025.pdf}
}