%Aigaion2 BibTeX export from Idiap Publications
%Thursday 21 November 2024 12:32:41 PM

@ARTICLE{mccowan-rr-03-27b,
         author = {McCowan, Iain A. and Gatica-Perez, Daniel and Bengio, Samy and Lathoud, Guillaume and Barnard, Mark and Zhang, Dong},
       projects = {Idiap},
          title = {Automatic Analysis of Multimodal Group Actions in Meetings},
        journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
           year = {2004},
           note = {To appear.},
       crossref = {mccowan-rr-03-27},
       abstract = {This paper investigates the recognition of group actions in meetings. A statistical framework is proposed in which group actions result from the interactions of the individual participants. The group actions are modelled using different HMM-based approaches, where the observations are provided by a set of audio-visual features monitoring the actions of individuals. Experiments demonstrate the importance of taking interactions into account in modelling the group actions. It is also shown that the visual modality contains useful information, even for predominantly audio-based events, motivating a multimodal approach to meeting analysis.},
            pdf = {https://publications.idiap.ch/attachments/reports/2003/mccowan-03-27.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/reports/2003/mccowan-03-27.ps.gz},
  ipdmembership = {speech, learning, vision},
       language = {English},
}



% crossreferenced publications:
@TECHREPORT{mccowan-rr-03-27,
         author = {McCowan, Iain A. and Gatica-Perez, Daniel and Bengio, Samy and Lathoud, Guillaume and Barnard, Mark and Zhang, Dong},
       projects = {Idiap},
          title = {Automatic Analysis of Multimodal Group Actions in Meetings},
           type = {Idiap-RR},
         number = {Idiap-RR-27-2003},
           year = {2003},
    institution = {IDIAP},
        address = {Martigny, Switzerland},
           note = {To appear in IEEE Transactions of Pattern Analysis and Machine Intelligence},
       abstract = {This paper investigates the recognition of group actions in meetings. A statistical framework is proposed in which group actions result from the interactions of the individual participants. The group actions are modelled using different HMM-based approaches, where the observations are provided by a set of audio-visual features monitoring the actions of individuals. Experiments demonstrate the importance of taking interactions into account in modelling the group actions. It is also shown that the visual modality contains useful information, even for predominantly audio-based events, motivating a multimodal approach to meeting analysis.},
            pdf = {https://publications.idiap.ch/attachments/reports/2003/mccowan-03-27.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/reports/2003/mccowan-03-27.ps.gz},
  ipdmembership = {speech, learning, vision},
       language = {English},
}