%Aigaion2 BibTeX export from Idiap Publications
%Sunday 13 October 2024 09:31:32 PM

@INPROCEEDINGS{Lebret_ICML_2015,
         author = {Lebret, R{\'{e}}mi and Pinheiro, Pedro H. O. and Collobert, Ronan},
       projects = {Idiap},
          title = {Phrase-based Image Captioning},
      booktitle = {International Conference on Machine Learning (ICML)},
         volume = {37},
           year = {2015},
           pages = {2085--2094},
      publisher = {JMLR},
       location = {Lille, France},
            url = {http://jmlr.org/proceedings/papers/v37/lebret15.html},
       crossref = {Lebret_Idiap-RR-08-2015},
        abstract = {Generating a novel textual description of an image is an interesting problem that connects computer vision and natural language processing. In this paper, we present a simple model that is able to generate descriptive sentences given a sample image. This model has a strong focus on the syntax of the descriptions. We train a purely bilinear model that learns a metric between an image representation (generated from a previously trained Convolutional Neural Network) and the phrases that are used to describe it. The system is then able to infer phrases from a given image sample. Based on caption syntax statistics, we propose a simple language model that can produce relevant descriptions for a given test image using the phrases inferred. Our approach, which is considerably simpler than state-of-the-art models, achieves comparable results on two popular datasets for the task: Flickr30k and the recently proposed Microsoft COCO.},
            pdf = {https://publications.idiap.ch/attachments/papers/2015/Lebret_ICML_2015.pdf}
}
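
% Note on the crossref field above: with plain BibTeX, an entry inherits any
% fields it lacks from its crossref target, and the target entry appears in
% the bibliography on its own once it is cross-referenced often enough
% (bibtex's -min-crossrefs option, default 2). A minimal LaTeX usage sketch,
% assuming this export is saved under the hypothetical name idiap.bib:
%
%   \documentclass{article}
%   \begin{document}
%   Phrase-based captioning was proposed in~\cite{Lebret_ICML_2015}.
%   \bibliographystyle{plain}
%   \bibliography{idiap}
%   \end{document}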



% Cross-referenced publications:
@TECHREPORT{Lebret_Idiap-RR-08-2015,
         author = {Lebret, R{\'{e}}mi and Pinheiro, Pedro H. O. and Collobert, Ronan},
       projects = {Idiap},
           month = may,
          title = {Phrase-based Image Captioning},
           type = {Idiap-RR},
         number = {Idiap-RR-08-2015},
           year = {2015},
    institution = {Idiap},
            note = {Under review at the International Conference on Machine Learning (ICML).},
        abstract = {Generating a novel textual description of an image is an interesting problem that connects computer vision and natural language processing. In this paper, we present a simple model that is able to generate descriptive sentences given a sample image. This model has a strong focus on the syntax of the descriptions. We train a purely bilinear model that learns a metric between an image representation (generated from a previously trained Convolutional Neural Network) and the phrases that are used to describe it. The system is then able to infer phrases from a given image sample. Based on caption syntax statistics, we propose a simple language model that can produce relevant descriptions for a given test image using the phrases inferred. Our approach, which is considerably simpler than state-of-the-art models, achieves comparable results on two popular datasets for the task: Flickr30k and the recently proposed Microsoft COCO.},
            pdf = {https://publications.idiap.ch/attachments/reports/2015/Lebret_Idiap-RR-08-2015.pdf}
}