%Aigaion2 BibTeX export from Idiap Publications
%Saturday 21 December 2024 05:37:21 PM
@INPROCEEDINGS{Burdisso_CLINICALNLP@NAACL2024_2024,
  author = {Burdisso, Sergio and Reyes-Ram{\'{\i}}rez, Ernesto A. and Villatoro-Tello, Esa{\'{u}} and S{\'{a}}nchez-Vega, Fernando and L{\'{o}}pez-Monroy, A. Pastor and Motlicek, Petr},
  keywords = {bias, depression detection, explainability},
  projects = {Idiap},
  month = jun,
  title = {DAIC-WOZ: On the Validity of Using the Therapist's prompts in Automatic Depression Detection from Clinical Interviews},
  booktitle = {Proceedings of the 6th Clinical Natural Language Processing Workshop},
  year = {2024},
  pages = {82--90},
  publisher = {Association for Computational Linguistics},
  location = {Mexico City, Mexico},
  url = {https://aclanthology.org/2024.clinicalnlp-1.8},
  doi = {10.18653/v1/2024.clinicalnlp-1.8},
  abstract = {Automatic depression detection from conversational data has gained significant interest in recent years. The DAIC-WOZ dataset, interviews conducted by a human-controlled virtual agent, has been widely used for this task. Recent studies have reported enhanced performance when incorporating interviewer's prompts into the model. In this work, we hypothesize that this improvement might be mainly due to a bias present in these prompts, rather than the proposed architectures and methods. Through ablation experiments and qualitative analysis, we discover that models using interviewer's prompts learn to focus on a specific region of the interviews, where questions about past experiences with mental health issues are asked, and use them as discriminative shortcuts to detect depressed participants. In contrast, models using participant responses gather evidence from across the entire interview. Finally, to highlight the magnitude of this bias, we achieve a 0.90 F1 score by intentionally exploiting it, the highest result reported to date on this dataset using only textual information. Our findings underline the need for caution when incorporating interviewers' prompts into models, as they may inadvertently learn to exploit targeted prompts, rather than learning to characterize the language and behavior that are genuinely indicative of the patient's mental health condition.},
  pdf = {https://publications.idiap.ch/attachments/papers/2024/Burdisso_CLINICALNLP@NAACL2024_2024.pdf}
}