%Aigaion2 BibTeX export from Idiap Publications
%Tuesday 30 April 2024 02:35:06 AM

@INPROCEEDINGS{hung:MM:2007,
         author = {Hung, Hayley and Jayagopi, Dinesh Babu and Yeo, Chuohao and Friedland, Gerald and Ba, Sil{\`{e}}ye O. and Odobez, Jean-Marc and Ramchandran, Kannan and Mirghafori, Nikki and Gatica-Perez, Daniel},
       projects = {Idiap},
          title = {Using Audio and Video Features to Classify the Most Dominant Person in a Group Meeting},
       booktitle = {Association for Computing Machinery - Multimedia (ACM-MM)},
           year = {2007},
           note = {IDIAP-RR 07-29},
       crossref = {hung:rr07-29},
       abstract = {The automated extraction of semantically meaningful information from multi-modal data is becoming increasingly necessary due to the escalation of captured data for archival. A novel area of multi-modal data labelling, which has received relatively little attention, is the automatic estimation of the most dominant person in a group meeting. In this paper, we provide a framework for detecting dominance in group meetings using different audio and video cues. We show that by using a simple model for dominance estimation we can obtain promising results.},
            pdf = {https://publications.idiap.ch/attachments/papers/2007/hung-MM-2007.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/papers/2007/hung-MM-2007.ps.gz},
   ipdmembership = {vision},
}



% crossreferenced publications:
@TECHREPORT{hung:rr07-29,
         author = {Hung, Hayley and Jayagopi, Dinesh Babu and Yeo, Chuohao and Friedland, Gerald and Ba, Sil{\`{e}}ye O. and Odobez, Jean-Marc and Ramchandran, Kannan and Mirghafori, Nikki and Gatica-Perez, Daniel},
       projects = {Idiap},
          title = {Using Audio and Video Features to Classify the Most Dominant Person in a Group Meeting},
           type = {Idiap-RR},
         number = {Idiap-RR-29-2007},
           year = {2007},
    institution = {IDIAP},
           note = {To appear in Association for Computing Machinery - Multimedia (ACM-MM), September 23--28, 2007, Augsburg, Bavaria, Germany.},
       abstract = {The automated extraction of semantically meaningful information from multi-modal data is becoming increasingly necessary due to the escalation of captured data for archival. A novel area of multi-modal data labelling, which has received relatively little attention, is the automatic estimation of the most dominant person in a group meeting. In this paper, we provide a framework for detecting dominance in group meetings using different audio and video cues. We show that by using a simple model for dominance estimation we can obtain promising results.},
            pdf = {https://publications.idiap.ch/attachments/reports/2007/hung-idiap-rr-07-29.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/reports/2007/hung-idiap-rr-07-29.ps.gz},
  ipdmembership = {vision},
}