% Aigaion2 BibTeX export from Idiap Publications
% Saturday 21 December 2024 04:52:56 PM

@TECHREPORT{Popescu-Belis_Idiap-RR-26-2010,
         author = {Popescu-Belis, Andrei and Kilgour, Jonathan and Nanchen, Alexandre and Poller, Peter},
       projects = {Idiap, AMIDA, IM2},
          month = {7},
          title = {The ACLD: Speech-based Just-in-Time Retrieval of Multimedia Documents and Websites},
           type = {Idiap-RR},
         number = {Idiap-RR-26-2010},
           year = {2010},
    institution = {Idiap},
       crossref = {Popescu-Belis_SSCS2010_2010},
       abstract = {The Automatic Content Linking Device (ACLD) is a just-in-time retrieval system that monitors an ongoing conversation or a monologue and enriches it with potentially related documents, including transcripts of past meetings, from local repositories or from the Internet. The linked content is displayed in real-time to the participants in the conversation, or to users watching a recorded conversation or talk. The system can be demonstrated in both settings, using real-time automatic speech recognition (ASR) or replaying offline ASR, via a flexible user interface that displays results and provides access to the content of past meetings and documents.},
            pdf = {https://publications.idiap.ch/attachments/reports/2010/Popescu-Belis_Idiap-RR-26-2010.pdf}
}

crossreferenced publications:

@INPROCEEDINGS{Popescu-Belis_SSCS2010_2010,
         author = {Popescu-Belis, Andrei and Kilgour, Jonathan and Nanchen, Alexandre and Poller, Peter},
       keywords = {just-in-time retrieval, multimedia IR, speech-based IR},
       projects = {Idiap, AMIDA, IM2},
          month = {10},
          title = {The ACLD: Speech-based Just-in-Time Retrieval of Meeting Transcripts, Documents and Websites},
      booktitle = {ACM Multimedia Workshop on Searching Spontaneous Conversational Speech},
           year = {2010},
       location = {Florence, Italy},
       crossref = {Popescu-Belis_Idiap-RR-26-2010},
       abstract = {The Automatic Content Linking Device (ACLD) is a just-in-time retrieval system that monitors an ongoing conversation or a monologue and enriches it with potentially related documents, including transcripts of past meetings, from local repositories or from the Internet. The linked content is displayed in real-time to the participants in the conversation, or to users watching a recorded conversation or talk. The system can be demonstrated in both settings, using real-time automatic speech recognition (ASR) or replaying offline ASR, via a flexible user interface that displays results and provides access to the content of past meetings and documents.},
            pdf = {https://publications.idiap.ch/attachments/papers/2010/Popescu-Belis_SSCS2010_2010.pdf}
}