%Aigaion2 BibTeX export from Idiap Publications
%Saturday 21 December 2024 06:18:23 PM

@ARTICLE{monay:pami:2007,
         author = {Monay, Florent and Gatica-Perez, Daniel},
       projects = {Idiap},
          title = {Modeling semantic aspects for cross-media image indexing},
        journal = {{IEEE} Transactions on Pattern Analysis and Machine Intelligence},
           year = {2007},
           note = {IDIAP-RR 05-56},
       crossref = {monay:rr05-56},
        abstract = {To go beyond the query-by-example paradigm in image retrieval, there is a need for semantic indexing of large image collections to enable intuitive text-based image search. Different models have been proposed to learn the dependencies between the visual content of an image set and the associated text captions, which then allows the automatic creation of semantic indices for unannotated images. The task, however, remains unsolved. In this paper, we present three alternatives for learning a Probabilistic Latent Semantic Analysis (PLSA) model for annotated images, and evaluate their respective performance for automatic image indexing. Under the PLSA assumptions, an image is modeled as a mixture of latent aspects that generates both image features and text captions, and we investigate three ways to learn this mixture of aspects. We also propose an image representation that is more discriminative than the traditional Blob histogram, concatenating quantized local color information and quantized local texture descriptors. The first learning procedure for a PLSA model of annotated images is a standard EM algorithm, which implicitly assumes that the visual and textual modalities can be treated equivalently. The other two models are based on asymmetric PLSA learning, which allows the definition of the latent space to be constrained to either the visual or the textual modality. We demonstrate that the textual modality is more appropriate for learning a semantically meaningful latent space, which translates into improved annotation performance. A comparison of our learning algorithms with recent methods on a standard dataset is presented, and a detailed evaluation of their performance shows the validity of our framework.},
            pdf = {https://publications.idiap.ch/attachments/papers/2007/monay-pami-2007.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/papers/2007/monay-pami-2007.ps.gz},
   ipdmembership = {vision},
}
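
% The abstract above describes three ways of learning a PLSA model for
% annotated images with EM. As a minimal illustrative sketch only (not the
% authors' code), the symmetric variant, which treats the visual and textual
% modalities equivalently, can be run on a document-by-token count matrix
% whose columns concatenate quantized visual descriptors and caption words;
% the function name and data layout here are hypothetical.

import numpy as np

def plsa_em(counts, n_aspects, n_iters=50, seed=0, eps=1e-12):
    """Symmetric PLSA fitted with EM on an (n_docs, n_tokens) count matrix.

    Returns P(z|d) with shape (n_docs, n_aspects) and P(w|z) with shape
    (n_aspects, n_tokens). Columns of `counts` may mix modalities, e.g.
    quantized color/texture indices followed by caption-word counts.
    """
    rng = np.random.default_rng(seed)
    n_docs, n_tokens = counts.shape
    # Random, row-normalized initialization of the two conditional tables.
    p_z_d = rng.random((n_docs, n_aspects))
    p_z_d /= p_z_d.sum(axis=1, keepdims=True)
    p_w_z = rng.random((n_aspects, n_tokens))
    p_w_z /= p_w_z.sum(axis=1, keepdims=True)
    for _ in range(n_iters):
        # E-step: responsibilities P(z|d,w) = P(z|d)P(w|z) / sum_z' P(z'|d)P(w|z').
        joint = p_z_d[:, :, None] * p_w_z[None, :, :]
        joint /= joint.sum(axis=1, keepdims=True) + eps
        # M-step: reweight the responsibilities by the observed counts.
        weighted = counts[:, None, :] * joint
        p_z_d = weighted.sum(axis=2)
        p_z_d /= p_z_d.sum(axis=1, keepdims=True) + eps
        p_w_z = weighted.sum(axis=0)
        p_w_z /= p_w_z.sum(axis=1, keepdims=True) + eps
    return p_z_d, p_w_z

% An unannotated image would then be "folded in" by re-running only the
% P(z|d) update with P(w|z) held fixed, and candidate caption words ranked
% by sum_z P(w|z)P(z|d); the asymmetric variants described in the abstract
% instead constrain the latent space to one modality before learning the other.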

% Cross-referenced publications:
@TECHREPORT{monay:rr05-56,
         author = {Monay, Florent and Gatica-Perez, Daniel},
       projects = {Idiap},
          title = {Modeling semantic aspects for cross-media image indexing},
           type = {Idiap-RR},
         number = {Idiap-RR-56-2005},
           year = {2005},
    institution = {IDIAP},
        abstract = {To go beyond the query-by-example paradigm in image retrieval, there is a need for semantic indexing of large image collections to enable intuitive text-based image search. Different models have been proposed to learn the dependencies between the visual content of an image set and the associated text captions, which then allows the automatic creation of semantic indices for unannotated images. The task, however, remains unsolved. In this paper, we present three alternatives for learning a Probabilistic Latent Semantic Analysis (PLSA) model for annotated images, and evaluate their respective performance for automatic image indexing. Under the PLSA assumptions, an image is modeled as a mixture of latent aspects that generates both image features and text captions, and we investigate three ways to learn this mixture of aspects. We also propose an image representation that is more discriminative than the traditional Blob histogram, concatenating quantized local color information and quantized local texture descriptors. The first learning procedure for a PLSA model of annotated images is a standard EM algorithm, which implicitly assumes that the visual and textual modalities can be treated equivalently. The other two models are based on asymmetric PLSA learning, which allows the definition of the latent space to be constrained to either the visual or the textual modality. We demonstrate that the textual modality is more appropriate for learning a semantically meaningful latent space, which translates into improved annotation performance. A comparison of our learning algorithms with recent methods on a standard dataset is presented, and a detailed evaluation of their performance shows the validity of our framework.},
            pdf = {https://publications.idiap.ch/attachments/reports/2005/monay-idiap-rr-05-56.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/reports/2005/monay-idiap-rr-05-56.ps.gz},
   ipdmembership = {vision},
}