%Aigaion2 BibTeX export from Idiap Publications
%Sunday 22 December 2024 04:28:02 AM

@inproceedings{gatica03a-conf,
  author        = {Gatica-Perez, Daniel and McCowan, Iain A. and Barnard, Mark and Bengio, Samy and Bourlard, Herv{\'{e}}},
  title         = {On automatic annotation of meeting databases},
  booktitle     = {IEEE International Conference on Image Processing (ICIP)},
  year          = {2003},
  crossref      = {gatica03a},
  projects      = {Idiap},
  abstract      = {In this paper, we discuss meetings as an application domain for multimedia content analysis. Meeting databases are a rich data source suitable for a variety of audio, visual and multi-modal tasks, including speech recognition, people and action recognition, and information retrieval. We specifically focus on the task of semantic annotation of audio-visual (AV) events, where annotation consists of assigning labels (event names) to the data. In order to develop an automatic annotation system in a principled manner, it is essential to have a well-defined task, a standard corpus and an objective performance measure. In this work we address each of these issues to automatically annotate events based on participant interactions.},
  pdf           = {https://publications.idiap.ch/attachments/reports/2003/rr03-06.pdf},
  postscript    = {ftp://ftp.idiap.ch/pub/reports/2003/rr03-06.ps.gz},
  ipdmembership = {speech, vision},
}



%crossreferenced publications:
@techreport{gatica03a,
  author        = {Gatica-Perez, Daniel and McCowan, Iain A. and Barnard, Mark and Bengio, Samy and Bourlard, Herv{\'{e}}},
  title         = {On automatic annotation of meeting databases},
  type          = {Idiap-RR},
  number        = {Idiap-RR-06-2003},
  institution   = {IDIAP},
  year          = {2003},
  projects      = {Idiap},
  abstract      = {In this paper, we discuss meetings as an application domain for multimedia content analysis. Meeting databases are a rich data source suitable for a variety of audio, visual and multi-modal tasks, including speech recognition, people and action recognition, and information retrieval. We specifically focus on the task of semantic annotation of audio-visual (AV) events, where annotation consists of assigning labels (event names) to the data. In order to develop an automatic annotation system in a principled manner, it is essential to have a well-defined task, a standard corpus and an objective performance measure. In this work we address each of these issues to automatically annotate events based on participant interactions.},
  pdf           = {https://publications.idiap.ch/attachments/reports/2003/rr03-06.pdf},
  postscript    = {ftp://ftp.idiap.ch/pub/reports/2003/rr03-06.ps.gz},
  ipdmembership = {speech, vision},
}