%Aigaion2 BibTeX export from Idiap Publications
%Saturday 21 December 2024 04:10:59 PM

@inproceedings{Ba_ICME_2009,
  author    = {Ba, Sil{\`{e}}ye O. and Hung, Hayley and Odobez, Jean-Marc},
  projects  = {Idiap, AMIDA, IM2},
  month     = jun,
  title     = {Visual Activity Context for Focus of Attention Estimation in Dynamic Meetings},
  booktitle = {International Conference on Multimedia \& Expo},
  year      = {2009},
  crossref  = {Ba_Idiap-RR-02-2009},
  abstract  = {We address the problem of recognizing, in dynamic meetings in which people do not remain seated all the time, the visual focus of attention (VFOA) of seated people from their head pose and contextual activity cues. We propose a model that comprises the VFOA of a meeting participant as the hidden state, and his head pose as the observation. To account for the presence of moving visual targets due to the dynamic nature of the meeting, the locations of the visual targets are used as input variables to the head pose observation model. Contextual information is introduced in the VFOA dynamics through a slide activity variable and speaking or visual activity variables that relate people's focus to the meeting activity context. The main novelty of this paper is the introduction of visual activity context for FOA recognition to account for the correlation between a person's focus and the other people's gestures, hand and body motions. We evaluate our model on a large dataset of 5 hours. Our results show that, for VFOA estimation in meetings, visual activity contextual information can be as effective as speaking context.},
  pdf       = {https://publications.idiap.ch/attachments/papers/2009/Ba_ICME_2009.pdf},
}

% crossreferenced publications:
% NOTE: with classic BibTeX, a crossref parent must appear AFTER all entries
% that reference it — this ordering (child above, parent below) is required.

@techreport{Ba_Idiap-RR-02-2009,
  author      = {Ba, Sil{\`{e}}ye O. and Hung, Hayley and Odobez, Jean-Marc},
  projects    = {Idiap, AMIDA, IM2},
  month       = jan,
  title       = {Visual Activity Context for Focus of Attention Estimation in Dynamic Meetings},
  type        = {Idiap-RR},
  number      = {Idiap-RR-02-2009},
  year        = {2009},
  institution = {Idiap},
  address     = {Rue Marconi 19, 1920 Martigny, Switzerland},
  note        = {idiap-rr},
  abstract    = {We address the problem of recognizing, in dynamic meetings in which people do not remain seated all the time, the visual focus of attention (VFOA) of seated people from their head pose and contextual activity cues. We propose a model that comprises the VFOA of a meeting participant as the hidden state, and his head pose as the observation. To account for the presence of moving visual targets due to the dynamic nature of the meeting, the locations of the visual targets are used as input variables to the head pose observation model. Contextual information is introduced in the VFOA dynamics through a slide activity variable and speaking or visual activity variables that relate people's focus to the meeting activity context. The main novelty of this paper is the introduction of visual activity context for FOA recognition to account for the correlation between a person's focus and the other people's gestures, hand and body motions. We evaluate our model on a large dataset of 5 hours. Our results show that, for VFOA estimation in meetings, visual activity contextual information can be as effective as speaking context.},
  pdf         = {https://publications.idiap.ch/attachments/reports/2009/Ba_Idiap-RR-02-2009.pdf},
}