%Aigaion2 BibTeX export from Idiap Publications
%Thursday 21 November 2024 11:51:13 AM

@INPROCEEDINGS{monayACM:2004,
         author = {Monay, Florent and Gatica-Perez, Daniel},
       projects = {Idiap},
          title = {PLSA-based Image Auto-Annotation: Constraining the Latent Space},
      booktitle = {Proc. ACM Int. Conf. on Multimedia (ACM MM)},
           year = {2004},
           note = {IDIAP-RR 04-30},
       crossref = {monay02},
       abstract = {We address the problem of unsupervised image auto-annotation with probabilistic latent space models. Unlike most previous works, which build latent space representations assuming equal relevance for the text and visual modalities, we propose a new way of modeling multi-modal co-occurrences, constraining the definition of the latent space to ensure its consistency in semantic terms (words),
 while retaining the ability to jointly model visual information. The concept is implemented by a linked pair of Probabilistic Latent Semantic Analysis (PLSA) models. On a 16000-image collection, we show with extensive experiments and using various performance measures, that our approach significantly outperforms previous joint models.},
            pdf = {https://publications.idiap.ch/attachments/papers/2004/monay-acm-1568937089.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/papers/2004/monay-acm-1568937089.ps.gz},
  ipdmembership = {vision},
}
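
% The abstract sketches the method as a linked pair of PLSA models whose latent
% space is defined by the text modality. The Python snippet below is a minimal
% illustrative sketch of that idea only, assuming a plain EM implementation of
% PLSA and toy count data (none of it taken from the paper): PLSA is first fit
% on word counts to obtain P(z|d), then the visual conditionals P(v|z) are
% learned with P(z|d) held fixed.

import numpy as np

rng = np.random.default_rng(0)

def plsa_em(counts, n_topics, n_iter=100, p_z_given_d=None):
    """EM for PLSA on a (documents x features) count matrix.
    If p_z_given_d is given, it is kept fixed (the 'linked' second model)."""
    n_docs, n_feat = counts.shape
    fixed = p_z_given_d is not None
    p_zd = p_z_given_d if fixed else rng.dirichlet(np.ones(n_topics), size=n_docs)
    p_wz = rng.dirichlet(np.ones(n_feat), size=n_topics)        # P(feature | z)
    for _ in range(n_iter):
        # E-step: responsibilities P(z | d, w), shape (docs, topics, features)
        joint = p_zd[:, :, None] * p_wz[None, :, :]
        joint /= joint.sum(axis=1, keepdims=True) + 1e-12
        weighted = counts[:, None, :] * joint                   # n(d,w) * P(z|d,w)
        # M-step: re-estimate P(feature | z), and P(z | d) only if it is not fixed
        p_wz = weighted.sum(axis=0)
        p_wz /= p_wz.sum(axis=1, keepdims=True) + 1e-12
        if not fixed:
            p_zd = weighted.sum(axis=2)
            p_zd /= p_zd.sum(axis=1, keepdims=True) + 1e-12
    return p_zd, p_wz

# Toy co-occurrence data: 6 "images" with word counts and visual-feature counts.
word_counts = rng.poisson(2.0, size=(6, 12)).astype(float)
visual_counts = rng.poisson(2.0, size=(6, 20)).astype(float)

# 1) Text PLSA defines the latent space P(z|d).
p_zd, p_word_z = plsa_em(word_counts, n_topics=3)
# 2) Visual PLSA reuses that latent space, learning only P(visual|z).
_, p_vis_z = plsa_em(visual_counts, n_topics=3, p_z_given_d=p_zd)

print(p_word_z.shape, p_vis_z.shape)   # (3, 12) and (3, 20)
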



cross-referenced publications:
@TECHREPORT{monay02,
         author = {Monay, Florent and Gatica-Perez, Daniel},
       projects = {Idiap},
          title = {PLSA-based Image Auto-Annotation: Constraining the Latent Space},
           type = {Idiap-RR},
         number = {Idiap-RR-30-2004},
           year = {2004},
    institution = {IDIAP},
           note = {Published in ``Proc. ACM Multimedia 2004'', 2004},
       abstract = {We address the problem of unsupervised image auto-annotation with probabilistic latent space models. Unlike most previous works, which build latent space representations assuming equal relevance for the text and visual modalities, we propose a new way of modeling multi-modal co-occurrences, constraining the definition of the latent space to ensure its consistency in semantic terms (words),
 while retaining the ability to jointly model visual information. The concept is implemented by a linked pair of Probabilistic Latent Semantic Analysis (PLSA) models. On a 16000-image collection, we show with extensive experiments and using various performance measures, that our approach significantly outperforms previous joint models.},
            pdf = {https://publications.idiap.ch/attachments/reports/2004/rr04-30.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/reports/2004/rr04-30.ps.gz},
  ipdmembership = {vision},
}