%Aigaion2 BibTeX export from Idiap Publications
%Sunday 22 December 2024 01:24:53 AM

@INPROCEEDINGS{Mohammadshahi_REP4NLPATACL2023_2023,
         author = {Mohammadshahi, Alireza and Henderson, James},
          month = jul,
          title = {Syntax-Aware Graph-to-Graph Transformer for Semantic Role Labelling},
      booktitle = {Proceedings of the 8th Workshop on Representation Learning for NLP},
           year = {2023},
            url = {https://arxiv.org/abs/2104.07704},
       crossref = {Mohammadshahi_ARXIV_2021},
       abstract = {Recent models have shown that incorporating syntactic knowledge into the semantic role labelling (SRL) task leads to a significant improvement. In this paper, we propose the Syntax-aware Graph-to-Graph Transformer (SynG2G-Tr) model, which encodes the syntactic structure using a novel way to input graph relations as embeddings directly into the self-attention mechanism of the Transformer. This approach adds a soft bias towards attention patterns that follow the syntactic structure, but also allows the model to use this information to learn alternative patterns. We evaluate our model on both span-based and dependency-based SRL datasets, and outperform previous alternative methods in both in-domain and out-of-domain settings on the CoNLL 2005 and CoNLL 2009 datasets.}
}



%Cross-referenced publications:
@INPROCEEDINGS{Mohammadshahi_ARXIV_2021,
         author = {Mohammadshahi, Alireza and Henderson, James},
       projects = {Idiap, Intrepid},
          month = apr,
          title = {Syntax-Aware Graph-to-Graph Transformer for Semantic Role Labelling},
      booktitle = {arXiv},
           year = {2021},
       abstract = {The goal of semantic role labelling (SRL) is to recognise the predicate-argument structure of a sentence. Recent models have shown that syntactic information can enhance SRL performance, although syntax-agnostic approaches have also achieved reasonable performance. The best way to encode syntactic information for the SRL task remains an open question. In this paper, we propose the Syntax-aware Graph-to-Graph Transformer (SynG2G-Tr) architecture, which encodes the syntactic structure with a novel way to input graph relations as embeddings directly into the self-attention mechanism of the Transformer. This approach adds a soft bias towards attention patterns that follow the syntactic structure, but also allows the model to use this information to learn alternative patterns. We evaluate our model on both dependency-based and span-based SRL datasets, and outperform all previous syntax-aware and syntax-agnostic models in both in-domain and out-of-domain settings, on the CoNLL 2005 and CoNLL 2009 datasets. Our architecture is general and can be applied to encode any graph information for a desired downstream task.},
            pdf = {https://publications.idiap.ch/attachments/papers/2021/Mohammadshahi_ARXIV_2021.pdf}
}