%Aigaion2 BibTeX export from Idiap Publications
%Thursday 04 December 2025 08:39:10 PM

@INPROCEEDINGS{Cespedes-Sarrias_ACMMM25_2025,
                      author = {C{\'{e}}spedes-Sarrias, Berta and Collado-Capell, Carlos and Rodenas-Ruiz, Pablo and Hrynenko, Olena and Cavallaro, Andrea},
                    keywords = {deep learning, hate speech, multimodal fusion, social media},
                    projects = {Idiap},
         mainresearchprogram = {Sustainable \& Resilient Societies},
                       title = {{MM-HSD}: Multi-Modal Hate Speech Detection in Videos},
                   booktitle = {Proceedings of the 33rd ACM International Conference on Multimedia (MM'25), October 27-31, 2025, Dublin, Ireland},
                        year = {2025},
                         doi = {10.1145/3746027.3754558},
                    abstract = {While hate speech detection (HSD) has been extensively studied in text, existing multi-modal approaches remain limited, particularly in videos. As modalities are not always individually informative, simple fusion methods fail to fully capture inter-modal dependencies. Moreover, previous work often omits relevant modalities such as on-screen text and audio, which may contain subtle hateful content and thus provide essential cues, both individually and in combination with others. In this paper, we present MM-HSD, a multi-modal model for HSD in videos that integrates video frames, audio, and text derived from speech transcripts and from frames (i.e. on-screen text) together with features extracted by Cross-Modal Attention (CMA). We are the first to use CMA as an early feature extractor for HSD in videos, to systematically compare query/key configurations, and to evaluate the interactions between different modalities in the CMA block. Our approach leads to improved performance when on-screen text is used as a query and the rest of the modalities serve as a key. Experiments on the HateMM dataset show that MM-HSD outperforms state-of-the-art methods on M-F1 score (0.874), using concatenation of transcript, audio, video, on-screen text, and CMA for feature extraction on raw embeddings of the modalities. The code is available at https://github.com/idiap/mm-hsd.},
                         pdf = {https://publications.idiap.ch/attachments/papers/2025/Cespedes-Sarrias_ACMMM25_2025.pdf}
}