<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
	<record>
		<datafield tag="980" ind1=" " ind2=" ">
			<subfield code="a">CHAPTER</subfield>
		</datafield>
		<datafield tag="970" ind1=" " ind2=" ">
			<subfield code="a">Alvarez-Carmona_SPRINGERNATURESINGAPORE_2022/IDIAP</subfield>
		</datafield>
		<datafield tag="245" ind1=" " ind2=" ">
			<subfield code="a">Classifying the Social Media Author Profile Through a Multimodal Representation</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Álvarez-Carmona, Miguel Á.</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Villatoro-Tello, Esaú</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Pineda, Luis Villaseñor</subfield>
		</datafield>
		<datafield tag="700" ind1=" " ind2=" ">
			<subfield code="a">Montes-y-Gómez, Manuel</subfield>
		</datafield>
		<datafield tag="653" ind1="1" ind2=" ">
			<subfield code="a">Age prediction</subfield>
		</datafield>
		<datafield tag="653" ind1="1" ind2=" ">
			<subfield code="a">Author profiling</subfield>
		</datafield>
		<datafield tag="653" ind1="1" ind2=" ">
			<subfield code="a">Gender prediction</subfield>
		</datafield>
		<datafield tag="653" ind1="1" ind2=" ">
			<subfield code="a">Location prediction</subfield>
		</datafield>
		<datafield tag="653" ind1="1" ind2=" ">
			<subfield code="a">Multimodal classification</subfield>
		</datafield>
		<datafield tag="653" ind1="1" ind2=" ">
			<subfield code="a">Occupation prediction</subfield>
		</datafield>
		<datafield tag="711" ind1="2" ind2=" ">
			<subfield code="a">Intelligent Technologies: Concepts, Applications, and Future Directions. Studies in Computational Intelligence</subfield>
		</datafield>
		<datafield tag="440" ind1=" " ind2=" ">
			<subfield code="a">7092</subfield>
		</datafield>
		<datafield tag="773" ind1=" " ind2=" ">
			<subfield code="v">1028</subfield>
		</datafield>
		<datafield tag="260" ind1=" " ind2=" ">
			<subfield code="c">2022</subfield>
			<subfield code="b">Springer</subfield>
		</datafield>
		<datafield tag="856" ind1="4" ind2=" ">
			<subfield code="u">https://link.springer.com/chapter/10.1007/978-981-19-1021-0_3</subfield>
			<subfield code="z">URL</subfield>
		</datafield>
		<datafield tag="024" ind1="7" ind2=" ">
			<subfield code="a">https://doi.org/10.1007/978-981-19-1021-0_3</subfield>
			<subfield code="2">doi</subfield>
		</datafield>
		<datafield tag="520" ind1=" " ind2=" ">
			<subfield code="a">The author profiling task refers to extracting as much information as possible about an author through what they write, such as gender, age, nationality, location, among others. Although this task arose a few decades ago, the explosion in social networks has made the task of author profiling mainly focus on digital media. Typically, previous works have used only the text of the users of social networks to determine specific demographic characteristics. However, due to the nature of social networks, it is possible to have access to another type of information that could be useful. This work addresses author profiling in a social networks domain using text and image information. This kind of information is also called multimodal information. The solution proposed uses an approach to extract data from social media messages and users’ images. The proposal shows evidence of the complementarity of the modalities by merging these two information sources. The idea consists of transforming images into texts and having the same representation framework for both types of information, which allows them to be merged. The work explores distinct approaches to extracting information from text and images. The results show that the text constructed from the images contains helpful information, increasing this task’s precision.</subfield>
		</datafield>
	</record>
</collection>