%Aigaion2 BibTeX export from Idiap Publications
%Friday 11 October 2024 12:45:51 AM

@INPROCEEDINGS{Gupta_CVPRW_2024,
         author = {Gupta, Anshul and Vuillecard, Pierre and Farkhondeh, Arya and Odobez, Jean-Marc},
       projects = {Idiap, AI4Autism},
          month = jun,
          title = {Exploring the Zero-Shot Capabilities of Vision-Language Models for Improving Gaze Following},
      booktitle = {Int. Conf. Computer Vision and Pattern Recognition (CVPR), Workshop on Gaze Estimation and Prediction in the Wild},
           year = {2024},
       abstract = {Contextual cues related to a person’s pose and interactions with objects and other people in the scene can provide valuable information for gaze following. While existing methods have focused on dedicated cue extraction methods, in this work we investigate the zero-shot capabilities of Vision-Language Models (VLMs) for extracting a wide array of contextual cues to improve gaze following performance. We first evaluate various VLMs, prompting strategies, and in-context learning (ICL) techniques for zero-shot cue recognition performance. We then use these insights to extract contextual cues for gaze following, and investigate their impact when incorporated into a state-of-the-art model for the task. Our analysis indicates that BLIP-2 is the overall top performing VLM and that ICL can improve performance. We also observe that VLMs are sensitive to the choice of the text prompt although ensembling over multiple text prompts can provide more robust performance. Additionally, we discover that using the entire image along with an ellipse drawn around the target person is the most effective strategy for visual prompting. For gaze following, incorporating the extracted cues results in better generalization performance, especially when considering a larger set of cues, highlighting the potential of this approach.},
            pdf = {https://publications.idiap.ch/attachments/papers/2024/Gupta_CVPRW_2024.pdf}
}