%Aigaion2 BibTeX export from Idiap Publications
%Friday 27 December 2024 01:09:21 AM

@ARTICLE{Can_ACMJOCCH_2018,
         author = {Can, Gulcan and Odobez, Jean-Marc and Gatica-Perez, Daniel},
         keywords = {Convolutional Neural Networks, Crowdsourcing, language, Maya glyphs, shape recognition, transfer learning},
         projects = {Idiap},
         month = may,
         title = {How to Tell Ancient Signs Apart? Recognizing and Visualizing Maya Glyphs with CNNs},
         journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)},
         volume = {11},
         number = {4},
         year = {2018},
         pages = {20},
         doi = {10.1145/3230670},
         abstract = {Thanks to the digital preservation of cultural heritage materials, multimedia tools (e.g., based on automatic visual processing) considerably ease the work of scholars in the humanities and help them to perform quantitative analysis of their data. In this context, this article assesses three different Convolutional Neural Network (CNN) architectures along with three learning approaches to train them for hieroglyph classification, which is a very challenging task due to the limited availability of segmented ancient Maya glyphs. More precisely, the first approach, the baseline, relies on pretrained networks as feature extractors. The second one investigates a transfer learning method by fine-tuning a pretrained network for our glyph classification task. The third approach considers directly training networks from scratch with our glyph data. The merits of three different network architectures are compared: a generic sequential model (i.e., LeNet), a sketch-specific sequential network (i.e., Sketch-a-Net), and the recent Residual Networks. The sketch-specific model trained from scratch outperforms the other models and training strategies. Even for a challenging 150-class classification task, this model achieves 70.3\% average accuracy and proves promising in the case of a small amount of cultural heritage shape data. Furthermore, we visualize the discriminative parts of glyphs with the recent Grad-CAM method, and demonstrate that the discriminative parts learned by the model agree, in general, with the expert annotation of glyph specificity (diagnostic features). Finally, as a step toward systematic evaluation of these visualizations, we conduct a perceptual crowdsourcing study. Specifically, we analyze the interpretability of the representations from Sketch-a-Net and ResNet-50. Overall, our article takes two important steps toward providing tools to scholars in the digital humanities: increased performance for automation and improved interpretability of algorithms.},
         pdf = {https://publications.idiap.ch/attachments/papers/2018/Can_ACMJOCCH_2018.pdf}
}
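
%The abstract compares three training strategies, including transfer learning by
%fine-tuning a pretrained network. As a rough illustration only, the sketch below
%shows what such a fine-tuning setup could look like in PyTorch with a torchvision
%ResNet-50 and the abstract's 150-class task. All hyperparameters, the frozen-layer
%choice, and the dummy data are assumptions for illustration, not the authors' code.
import torch
import torch.nn as nn
from torchvision import models

NUM_CLASSES = 150  # the 150-class glyph task mentioned in the abstract

# Load an ImageNet-pretrained ResNet-50 and replace its classification head.
model = models.resnet50(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, NUM_CLASSES)

# Freeze the backbone so only the new head is trained; this mirrors the
# "pretrained network as feature extractor" baseline. Unfreezing deeper
# blocks as well would correspond to the fine-tuning approach.
for param in model.parameters():
    param.requires_grad = False
for param in model.fc.parameters():
    param.requires_grad = True

optimizer = torch.optim.SGD(
    filter(lambda p: p.requires_grad, model.parameters()),
    lr=1e-3, momentum=0.9,
)
criterion = nn.CrossEntropyLoss()

# One illustrative training step on a dummy batch of 224x224 RGB crops
# (stand-ins for segmented glyph images).
images = torch.randn(8, 3, 224, 224)
labels = torch.randint(0, NUM_CLASSES, (8,))
optimizer.zero_grad()
loss = criterion(model(images), labels)
loss.backward()
optimizer.step()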