<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
	<record>
		<datafield tag="980" ind1=" " ind2=" ">
			<subfield code="a">CONF</subfield>
		</datafield>
		<datafield tag="970" ind1=" " ind2=" ">
			<subfield code="a">Hung_ECCVM2SFA2_2008/IDIAP</subfield>
		</datafield>
		<datafield tag="245" ind1=" " ind2=" ">
			<subfield code="a">Towards Audio-Visual On-line Diarization Of Participants In Group Meetings</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Hung, Hayley</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Friedland, Gerald</subfield>
		</datafield>
		<datafield tag="856" ind1="4" ind2="0">
			<subfield code="i">EXTERNAL</subfield>
			<subfield code="u">http://publications.idiap.ch/attachments/papers/2008/Hung_ECCVM2SFA2_2008.pdf</subfield>
			<subfield code="x">PUBLIC</subfield>
		</datafield>
		<datafield tag="711" ind1="2" ind2=" ">
			<subfield code="a">European Conference on Computer Vision Workshop on Multi-camera and Multi-modal Sensor Fusion</subfield>
		</datafield>
		<datafield tag="260" ind1=" " ind2=" ">
			<subfield code="c">2008</subfield>
		</datafield>
		<datafield tag="771" ind1="2" ind2=" ">
			<subfield code="d">October 2008</subfield>
		</datafield>
		<datafield tag="520" ind1=" " ind2=" ">
			<subfield code="a">We propose a fully automated, unsupervised, and non-intrusive method
of identifying the current speaker audio-visually in a group
conversation. This is achieved without specialized hardware, user
interaction, or prior assignment of microphones to participants.
Speakers are identified acoustically using a novel on-line speaker
diarization approach. The output is then used to find the
corresponding person in a four-camera video stream by approximating
individual activity with computationally efficient features. We
present results showing the robustness of the association on over 4.5
hours of non-scripted audio-visual meeting data.</subfield>
		</datafield>
	</record>
</collection>