<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
	<record>
		<datafield tag="980" ind1=" " ind2=" ">
			<subfield code="a">CONF</subfield>
		</datafield>
		<datafield tag="970" ind1=" " ind2=" ">
			<subfield code="a">Siegfried_ECEM_2017/IDIAP</subfield>
		</datafield>
		<datafield tag="245" ind1=" " ind2=" ">
			<subfield code="a">Supervised Gaze Bias Correction for Gaze Coding in Interactions</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Siegfried, Remy</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Odobez, Jean-Marc</subfield>
		</datafield>
		<datafield tag="856" ind1="4" ind2="0">
			<subfield code="i">EXTERNAL</subfield>
			<subfield code="u">http://publications.idiap.ch/attachments/papers/2018/Siegfried_ECEM_2017.pdf</subfield>
			<subfield code="x">PUBLIC</subfield>
		</datafield>
		<datafield tag="856" ind1="4" ind2=" ">
			<subfield code="u">http://publications.idiap.ch/index.php/publications/showcite/Siegfried_Idiap-RR-23-2017</subfield>
			<subfield code="z">Related documents</subfield>
		</datafield>
		<datafield tag="711" ind1="2" ind2=" ">
			<subfield code="a">ECEM COGAIN Symposium</subfield>
		</datafield>
		<datafield tag="260" ind1=" " ind2=" ">
			<subfield code="c">2017</subfield>
		</datafield>
		<datafield tag="773" ind1=" " ind2=" ">
			<subfield code="c">3</subfield>
		</datafield>
		<datafield tag="520" ind1=" " ind2=" ">
			<subfield code="a">Understanding the role of gaze in conversations and social interactions or exploiting it for HRI applications is an ongoing research subject. In these contexts, vision-based eye trackers are preferred as they are non-invasive and allow people to behave more naturally. In particular, appearance-based methods (ABM) are very promising, as they can perform online gaze estimation and have the potential to be head pose and person invariant, accommodate more situations as well as user mobility and the resulting low-resolution images. However, they may also suffer from a lack of robustness when several of these challenges are jointly present. In this work, we address gaze coding in human-human interactions and present a simple method based on a few manually annotated frames that is able to much reduce the error of a head pose invariant ABM method, as shown on a dataset of 6 interactions.</subfield>
		</datafield>
	</record>
	<record>
		<datafield tag="980" ind1=" " ind2=" ">
			<subfield code="a">REPORT</subfield>
		</datafield>
		<datafield tag="970" ind1=" " ind2=" ">
			<subfield code="a">Siegfried_Idiap-RR-23-2017/IDIAP</subfield>
		</datafield>
		<datafield tag="245" ind1=" " ind2=" ">
			<subfield code="a">Supervised Gaze Bias Correction for Gaze Coding in Interactions</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Siegfried, Remy</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Odobez, Jean-Marc</subfield>
		</datafield>
		<datafield tag="653" ind1="1" ind2=" ">
			<subfield code="a">appearance model</subfield>
		</datafield>
		<datafield tag="653" ind1="1" ind2=" ">
			<subfield code="a">attention</subfield>
		</datafield>
		<datafield tag="653" ind1="1" ind2=" ">
			<subfield code="a">bias correction</subfield>
		</datafield>
		<datafield tag="653" ind1="1" ind2=" ">
			<subfield code="a">eye tracking</subfield>
		</datafield>
		<datafield tag="653" ind1="1" ind2=" ">
			<subfield code="a">Gaze</subfield>
		</datafield>
		<datafield tag="653" ind1="1" ind2=" ">
			<subfield code="a">usability</subfield>
		</datafield>
		<datafield tag="856" ind1="4" ind2="0">
			<subfield code="i">EXTERNAL</subfield>
			<subfield code="u">http://publications.idiap.ch/attachments/reports/2017/Siegfried_Idiap-RR-23-2017.pdf</subfield>
			<subfield code="x">PUBLIC</subfield>
		</datafield>
		<datafield tag="088" ind1=" " ind2=" ">
			<subfield code="a">Idiap-RR-23-2017</subfield>
		</datafield>
		<datafield tag="260" ind1=" " ind2=" ">
			<subfield code="c">2017</subfield>
			<subfield code="b">Idiap</subfield>
		</datafield>
		<datafield tag="771" ind1="2" ind2=" ">
			<subfield code="d">September 2017</subfield>
		</datafield>
		<datafield tag="520" ind1=" " ind2=" ">
			<subfield code="a">Understanding the role of gaze in conversations and social interactions or exploiting it for HRI applications is an ongoing research subject. In these contexts, vision-based eye trackers are preferred as they are non-invasive and allow people to behave more naturally. In particular, appearance-based methods (ABM) are very promising, as they can perform online gaze estimation and have the potential to be head pose and person invariant, accommodate more situations as well as user mobility and the resulting low-resolution images. However, they may also suffer from a lack of robustness when several of these challenges are jointly present. In this work, we address gaze coding in human-human interactions, and present a simple method based on a few manually annotated frames that is able to much reduce the error of a head pose invariant ABM method, as shown on a dataset of 6 interactions.</subfield>
		</datafield>
	</record>
</collection>