<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
	<record>
		<datafield tag="980" ind1=" " ind2=" ">
			<subfield code="a">CONF</subfield>
		</datafield>
		<datafield tag="970" ind1=" " ind2=" ">
			<subfield code="a">Anemueller_ICMI2008_2008/IDIAP</subfield>
		</datafield>
		<datafield tag="245" ind1=" " ind2=" ">
			<subfield code="a">The DIRAC AWEAR Audio-Visual Platform for Detection of Unexpected and Incongruent Events</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Anemueller, Joern</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Back, Joerg-Henrik</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Caputo, Barbara</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Havlena, Michal</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Luo, Jie</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Kayser, Hendrik</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Leibe, Bastian</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Motlicek, Petr</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Pajdla, Tomas</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Pavel, Misha</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Torii, Akihiko</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Gool, Luc Van</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Zweig, Alon</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Hermansky, Hynek</subfield>
		</datafield>
		<datafield tag="856" ind1="4" ind2="0">
			<subfield code="i">EXTERNAL</subfield>
			<subfield code="u">http://publications.idiap.ch/attachments/papers/2008/Anemueller_ICMI2008_2008.pdf</subfield>
			<subfield code="x">PUBLIC</subfield>
		</datafield>
		<datafield tag="856" ind1="4" ind2=" ">
			<subfield code="u">http://publications.idiap.ch/index.php/publications/showcite/Anemueller_Idiap-RR-41-2010</subfield>
			<subfield code="z">Related documents</subfield>
		</datafield>
		<datafield tag="711" ind1="2" ind2=" ">
			<subfield code="a">Proceedings of the International Conference on Multimodal Interfaces</subfield>
		</datafield>
		<datafield tag="260" ind1=" " ind2=" ">
			<subfield code="c">2008</subfield>
		</datafield>
		<datafield tag="520" ind1=" " ind2=" ">
			<subfield code="a">It is of prime importance in everyday human life to cope with and
respond appropriately to events that are not foreseen by prior
experience. Machines to a large extent lack the ability to respond
appropriately to such inputs. An important class of unexpected
events is defined by incongruent combinations of inputs from
different modalities and therefore multimodal information
provides a crucial cue for the identification of such events, e.g.,
the sound of a voice is being heard while the person in the field-of-
view does not move her lips. In the project DIRAC (“Detection
and Identification of Rare Audio-visual Cues”) we have been
developing algorithmic approaches to the detection of such
events, as well as an experimental hardware platform to test it. An
audio-visual platform (“AWEAR” – audio-visual wearable
device) has been constructed with the goal to help users with
disabilities or a high cognitive load to deal with unexpected
events. Key hardware components include stereo panoramic
vision sensors and 6-channel worn-behind-the-ear (hearing aid)
microphone arrays. Data have been recorded to study audio-visual
tracking, a/v scene/object classification and a/v detection of
incongruencies.</subfield>
		</datafield>
	</record>
	<record>
		<datafield tag="980" ind1=" " ind2=" ">
			<subfield code="a">REPORT</subfield>
		</datafield>
		<datafield tag="970" ind1=" " ind2=" ">
			<subfield code="a">Anemueller_Idiap-RR-41-2010/IDIAP</subfield>
		</datafield>
		<datafield tag="245" ind1=" " ind2=" ">
			<subfield code="a">The DIRAC AWEAR Audio-Visual Platform for Detection of Unexpected and Incongruent Events</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Anemueller, Joern</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Back, Joerg-Henrik</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Caputo, Barbara</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Havlena, Michal</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Luo, Jie</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Kayser, Hendrik</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Leibe, Bastian</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Motlicek, Petr</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Pajdla, Tomas</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Pavel, Misha</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Torii, Akihiko</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Gool, Luc Van</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Hermansky, Hynek</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Zweig, Alon</subfield>
		</datafield>
		<datafield tag="856" ind1="4" ind2="0">
			<subfield code="i">EXTERNAL</subfield>
			<subfield code="u">http://publications.idiap.ch/attachments/reports/2008/Anemueller_Idiap-RR-41-2010.pdf</subfield>
			<subfield code="x">PUBLIC</subfield>
		</datafield>
		<datafield tag="088" ind1=" " ind2=" ">
			<subfield code="a">Idiap-RR-41-2010</subfield>
		</datafield>
		<datafield tag="260" ind1=" " ind2=" ">
			<subfield code="c">2010</subfield>
			<subfield code="b">Idiap</subfield>
		</datafield>
		<datafield tag="771" ind1="2" ind2=" ">
			<subfield code="d">November 2010</subfield>
		</datafield>
		<datafield tag="520" ind1=" " ind2=" ">
			<subfield code="a">It is of prime importance in everyday human life to cope with and
respond appropriately to events that are not foreseen by prior
experience. Machines to a large extent lack the ability to respond
appropriately to such inputs. An important class of unexpected
events is defined by incongruent combinations of inputs from
different modalities and therefore multimodal information
provides a crucial cue for the identification of such events, e.g.,
the sound of a voice is being heard while the person in the field-of-
view does not move her lips. In the project DIRAC (“Detection
and Identification of Rare Audio-visual Cues”) we have been
developing algorithmic approaches to the detection of such
events, as well as an experimental hardware platform to test it. An
audio-visual platform (“AWEAR” – audio-visual wearable
device) has been constructed with the goal to help users with
disabilities or a high cognitive load to deal with unexpected
events. Key hardware components include stereo panoramic
vision sensors and 6-channel worn-behind-the-ear (hearing aid)
microphone arrays. Data have been recorded to study audio-visual
tracking, a/v scene/object classification and a/v detection of
incongruencies.</subfield>
		</datafield>
	</record>
</collection>