%Aigaion2 BibTeX export from Idiap Publications
%Saturday 21 December 2024 05:08:45 PM

@INPROCEEDINGS{MiculicichWerlen_DISCOMTATEMNLP_2017,
         author = {Miculicich, Lesly and Popescu-Belis, Andrei},
       projects = {Idiap, SUMMA, MODERN},
          title = {Validation of an Automatic Metric for the Accuracy of Pronoun Translation (APT)},
      booktitle = {Proceedings of the Third Workshop on Discourse in Machine Translation (DiscoMT)},
           year = {2017},
      publisher = {Association for Computational Linguistics (ACL)},
       location = {Copenhagen, Denmark},
       crossref = {Werlen_Idiap-RR-29-2016},
            pdf = {https://publications.idiap.ch/attachments/papers/2017/MiculicichWerlen_DISCOMTATEMNLP_2017.pdf}
}

crossreferenced publications:

@TECHREPORT{Werlen_Idiap-RR-29-2016,
         author = {Miculicich, Lesly and Popescu-Belis, Andrei},
       projects = {Idiap, SUMMA},
          month = {11},
          title = {Validation of an Automatic Metric for the Accuracy of Pronoun Translation (APT)},
           type = {Idiap-RR},
         number = {Idiap-RR-29-2016},
           year = {2016},
    institution = {Idiap},
       abstract = {In this paper, we define and assess a reference-based metric to evaluate the accuracy of pronoun translation (APT). The metric automatically aligns a candidate and a reference translation using GIZA++ augmented with specific heuristics, and then counts the number of identical or different pronouns, with provision for legitimate variations and omitted pronouns. All counts are then combined into one score. The metric is applied to the results of seven systems (including the baseline) that participated in the DiscoMT 2015 shared task on pronoun translation from English to French. The APT metric reaches around 0.993-0.999 Pearson correlation with human judges (depending on the parameters of APT), while other automatic metrics such as BLEU, METEOR, or those specific to pronouns used at DiscoMT 2015 reach only 0.972-0.986 Pearson correlation.},
            pdf = {https://publications.idiap.ch/attachments/reports/2016/Werlen_Idiap-RR-29-2016.pdf}
}
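
%The abstract above describes the APT metric only at a high level: pronoun pairs are
%extracted from GIZA++ word alignments, each pair is classified (identical, equivalent,
%different, or omitted), and the counts are combined into one score. The Python sketch
%below is an illustrative, simplified scorer under those assumptions; the case labels,
%weights, and the function apt_score are hypothetical placeholders, not the exact
%parameters or implementation defined in the paper.

# Illustrative APT-style scorer (hypothetical; the real metric derives pronoun
# pairs from GIZA++ alignments and uses the weighting defined in the paper).
from collections import Counter

def apt_score(cases, weights=None):
    """Combine per-pronoun comparison cases into a single accuracy score.

    `cases` is a list of labels, one per source pronoun:
      "identical"  - candidate pronoun matches the reference exactly
      "equivalent" - a legitimate variation of the reference pronoun
      "different"  - a non-equivalent pronoun
      "absent"     - pronoun omitted in the candidate and/or reference
    `weights` maps each label to a credit in [0, 1]; the defaults below are
    placeholders, not the parameters reported in the paper.
    """
    if weights is None:
        weights = {"identical": 1.0, "equivalent": 1.0,
                   "different": 0.0, "absent": 0.0}
    counts = Counter(cases)
    total = sum(counts.values())
    if total == 0:
        return 0.0
    credited = sum(weights.get(case, 0.0) * n for case, n in counts.items())
    return credited / total

if __name__ == "__main__":
    # Toy example: 3 identical, 1 equivalent, 1 different pronoun pair -> 0.8
    print(apt_score(["identical", "identical", "identical",
                     "equivalent", "different"]))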