%Aigaion2 BibTeX export from Idiap Publications
%Thursday 02 May 2024 07:05:26 AM

@INPROCEEDINGS{Le_INTERSPEECH2018_2018,
         author = {Le, Nam and Odobez, Jean-Marc},
       keywords = {deep neural networks, embedding learning, speaker verification, triplet loss},
       projects = {Idiap, EUMSSI, MUMMER},
          title = {Robust and Discriminative Speaker Embedding via Intra-Class Distance Variance Regularization},
      booktitle = {Proceedings of Interspeech},
           year = {2018},
           pages = {2257--2261},
        location = {Hyderabad, India},
           issn = {2308-457X},
           isbn = {978-1-5108-7221-9},
            doi = {10.21437/Interspeech.2018-1685},
        abstract = {Learning a good speaker embedding is critical for many speech processing tasks, including recognition, verification, and diarization. To this end, we propose a complementary optimization objective called intra-class loss to improve deep speaker embeddings learned with triplet loss. This loss function is formulated as a soft constraint on the average pairwise distance between samples from the same class. Its goal is to prevent the scattering of these samples within the embedding space and thereby increase intra-class compactness. When intra-class loss is jointly optimized with triplet loss, we observe two major improvements: the deep embedding network achieves a more robust and discriminative representation, and the training process is more stable with a faster convergence rate. We conduct experiments on two large public benchmark datasets for speaker verification, VoxCeleb and VoxForge. The results show that intra-class loss helps accelerate the convergence of deep network training and significantly improves the overall performance of the resulting embeddings.},
            pdf = {https://publications.idiap.ch/attachments/papers/2018/Le_INTERSPEECH2018_2018.pdf}
}
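% The intra-class loss described in the abstract above can be sketched as a hinge
% on the average pairwise distance between same-speaker embeddings. This is an
% illustrative formalization only; the exact constraint, the margin m, and the
% weight \lambda are assumptions, not taken from the paper:
%
%   L_{intra} = \max\Big(0,\; \frac{1}{|P|} \sum_{(i,j) \in P} \lVert f(x_i) - f(x_j) \rVert_2^2 - m \Big)
%   L_{total} = L_{triplet} + \lambda \, L_{intra}
%
% where P is the set of same-class (same-speaker) pairs in a mini-batch and
% f(\cdot) is the embedding network.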