%Aigaion2 BibTeX export from Idiap Publications
%Sunday 28 April 2024 04:25:49 AM

@INCOLLECTION{Alvarez-Carmona_SPRINGERNATURESINGAPORE_2022,
         author = {{\'{A}}lvarez-Carmona, Miguel {\'{A}}. and Villatoro-Tello, Esa{\'{u}} and Villase{\~{n}}or-Pineda, Luis and Montes-y-G{\'{o}}mez, Manuel},
       keywords = {Age prediction, Author profiling, Gender prediction, Location prediction, Multimodal classification, Occupation prediction},
       projects = {Idiap},
          month = may,
          title = {Classifying the Social Media Author Profile Through a Multimodal Representation},
       booktitle = {Intelligent Technologies: Concepts, Applications, and Future Directions},
          series = {Studies in Computational Intelligence},
          volume = {1028},
           year = {2022},
      publisher = {Springer},
            url = {https://link.springer.com/chapter/10.1007/978-981-19-1021-0_3},
             doi = {10.1007/978-981-19-1021-0_3},
        abstract = {The author profiling task consists of extracting as much information as possible about an author from what they write, such as gender, age, nationality, location, and occupation. Although the task arose a few decades ago, the explosion of social networks has focused author profiling mainly on digital media. Previous work has typically used only the text written by social network users to determine specific demographic characteristics. However, due to the nature of social networks, other types of potentially useful information are also available. This work addresses author profiling in a social network domain using both text and image information, also known as multimodal information. The proposed solution extracts data from social media messages and from users' images, and merging these two information sources shows evidence of the complementarity of the modalities. The idea is to transform images into text so that both types of information share the same representation framework, which allows them to be merged. The work explores distinct approaches to extracting information from text and images. The results show that the text constructed from the images contains helpful information that increases the precision of the task.}
}
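
% The abstract above describes the chapter's core idea: transform users' images into
% text so that image-derived text and the users' own posts share one textual
% representation that can be merged for profile classification. The sketch below is a
% minimal illustration of that idea, not the authors' implementation: image_to_text()
% is a hypothetical placeholder for any captioning or labeling model, and the TF-IDF
% plus logistic regression pipeline (scikit-learn) is an assumed, not reported, choice.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline


def image_to_text(image_path):
    """Hypothetical placeholder: return a textual description (caption or labels) of an image."""
    raise NotImplementedError("plug in an image captioning or labeling model here")


def build_user_document(posts, image_paths):
    """Merge a user's posts with the text derived from the user's images."""
    image_texts = [image_to_text(path) for path in image_paths]
    return " ".join(posts + image_texts)


def train_profiler(documents, labels):
    """Train a classifier for one profile attribute (e.g. gender, age, or occupation)
    on the merged text-plus-image documents."""
    model = make_pipeline(TfidfVectorizer(ngram_range=(1, 2)),
                          LogisticRegression(max_iter=1000))
    model.fit(documents, labels)
    return model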