%Aigaion2 BibTeX export from Idiap Publications
%Friday 26 April 2024 09:38:59 PM

@ARTICLE{Le_MTAP_2018,
         author = {Le, Nam and Odobez, Jean-Marc},
       keywords = {deep learning, Face, Metric learning, multimodal identification, speaker, Speaker Diarization, transfer learning},
       projects = {Idiap},
          month = jan,
          title = {Improving speech embedding using crossmodal transfer learning with audio-visual data},
        journal = {Multimedia Tools and Applications},
         volume = {78},
         number = {11},
           year = {2019},
          pages = {15681-15704},
           issn = {1380-7501},
            doi = {10.1007/s11042-018-6992-3},
       abstract = {Learning a discriminative voice embedding allows speaker turns to be compared directly and efficiently, which is crucial for tasks such as diarization and verification. This paper investigates several transfer learning approaches to improve a voice embedding using knowledge transferred from a face representation. The main idea of our crossmodal approaches is to constrain the target voice embedding space to share latent attributes with the source face embedding space. The shared latent attributes can be formalized as geometric properties or distribution characteristics between these embedding spaces. We propose four transfer learning approaches belonging to two categories: the first category relies on the structure of the source face embedding space to regularize the speaker turn embedding space at different granularities. The second category, a domain adaptation approach, improves the embedding space of speaker turns by applying a maximum mean discrepancy loss to minimize the disparity between the distributions of the embedded features. Experiments are conducted on TV news datasets, REPERE and ETAPE, to demonstrate our methods. Quantitative results in verification and clustering tasks show promising improvement, especially in cases where speaker turns are short or the training data size is limited. The analysis also gives insights into the embedding spaces and shows their potential applications.}
}