%Aigaion2 BibTeX export from Idiap Publications
%Saturday 18 January 2025 01:30:06 AM
@INPROCEEDINGS{Sarkar_ICASSP_2025,
         author = {Sarkar, Eklavya and Magimai-Doss, Mathew},
       keywords = {bioacoustics, fine-tuning, human speech, pre-training domain, self-supervised learning},
       projects = {EVOLANG},
          title = {Comparing Self-Supervised Learning Models Pre-Trained on Human Speech and Animal Vocalizations for Bioacoustics Processing},
      booktitle = {International Conference on Acoustics, Speech and Signal Processing},
           year = {2025},
       abstract = {Self-supervised learning (SSL) foundation models have emerged as powerful, domain-agnostic, general-purpose feature extractors applicable to a wide range of tasks. Such models pre-trained on human speech have demonstrated high transferability for bioacoustic processing. This paper investigates (i) whether SSL models pre-trained directly on animal vocalizations offer a significant advantage over those pre-trained on speech, and (ii) whether fine-tuning speech-pretrained models on automatic speech recognition (ASR) tasks can enhance bioacoustic classification. We conduct a comparative analysis using three diverse bioacoustic datasets and two different bioacoustic tasks. Results indicate that pre-training on bioacoustic data provides only marginal improvements over speech-pretrained models, with comparable performance in most scenarios. Fine-tuning on ASR tasks yields mixed outcomes, suggesting that the general-purpose representations learned during SSL pre-training are already well-suited for bioacoustic tasks. These findings highlight the robustness of speech-pretrained SSL models for bioacoustics and imply that extensive fine-tuning may not be necessary for optimal performance.},
            pdf = {https://publications.idiap.ch/attachments/papers/2025/Sarkar_ICASSP_2025.pdf}
}