%Aigaion2 BibTeX export from Idiap Publications
%Friday 03 May 2024 02:27:53 AM

@inproceedings{lathoud03b,
  author        = {Lathoud, Guillaume and McCowan, Iain A. and Moore, Darren},
  title         = {Segmenting Multiple Concurrent Speakers Using Microphone Arrays},
  booktitle     = {Proceedings of {Eurospeech} 2003},
  year          = {2003},
  month         = sep,
  address       = {Geneva, Switzerland},
  note          = {IDIAP-RR 03-21},
  crossref      = {lathoud-rr-03-21},
  abstract      = {Speaker turn detection is an important task for many speech processing applications. However, accurate segmentation can be hard to achieve if there are multiple concurrent speakers (overlap), as is typically the case in multi-party conversations. In such cases, the location of the speaker, as measured using a microphone array, may provide greater discrimination than traditional spectral features. This was verified in previous work which obtained a global segmentation in terms of single speaker classes, as well as possible overlap combinations. However, such a global strategy suffers from an explosion of the number of overlap classes, as each possible combination of concurrent speakers must be modeled explicitly. In this paper, we propose two alternative schemes that produce an individual segmentation decision for each speaker, implicitly handling all overlapping speaker combinations. The proposed approaches also allow straightforward online implementations. Experiments are presented comparing the segmentation with that obtained using the previous system.},
  pdf           = {https://publications.idiap.ch/attachments/papers/2003/lathoud_eurospeech2003.pdf},
  postscript    = {ftp://ftp.idiap.ch/pub/papers/2003/lathoud_eurospeech2003.ps.gz},
  projects      = {Idiap},
  ipdinar       = {2003},
  ipdmembership = {speech},
}



crossreferenced publications: 
@techreport{lathoud-rr-03-21,
  author        = {Lathoud, Guillaume and McCowan, Iain A. and Moore, Darren},
  title         = {Segmenting Multiple Concurrent Speakers Using Microphone Arrays},
  type          = {Idiap-RR},
  number        = {Idiap-RR-21-2003},
  year          = {2003},
  institution   = {IDIAP},
  address       = {Martigny, Switzerland},
  note          = {Published in ``Proceedings of Eurospeech 2003''},
  abstract      = {Speaker turn detection is an important task for many speech processing applications. However, accurate segmentation can be hard to achieve if there are multiple concurrent speakers (overlap), as is typically the case in multi-party conversations. In such cases, the location of the speaker, as measured using a microphone array, may provide greater discrimination than traditional spectral features. This was verified in previous work which obtained a global segmentation in terms of single speaker classes, as well as possible overlap combinations. However, such a global strategy suffers from an explosion of the number of overlap classes, as each possible combination of concurrent speakers must be modeled explicitly. In this paper, we propose two alternative schemes that produce an individual segmentation decision for each speaker, implicitly handling all overlapping speaker combinations. The proposed approaches also allow straightforward online implementations. Experiments are presented comparing the segmentation with that obtained using the previous system.},
  pdf           = {https://publications.idiap.ch/attachments/reports/2003/rr-03-21.pdf},
  postscript    = {ftp://ftp.idiap.ch/pub/reports/2003/rr-03-21.ps.gz},
  projects      = {Idiap},
  ipdinar       = {2003},
  ipdmembership = {speech},
  language      = {English},
}