%Aigaion2 BibTeX export from Idiap Publications

@INPROCEEDINGS{Kew_EMNLP2023_2023,
         author = {Kew, Tannon and Chi, Alison and V{\'{a}}squez-Rodr{\'{\i}}guez, Laura and Agrawal, Sweta and Aumiller, Dennis and Alva-Manchego, Fernando and Shardlow, Matthew},
       keywords = {evaluation, LLM, NLP, Text Simplification},
       projects = {Idiap},
          month = dec,
          title = {BLESS: Benchmarking Large Language Models on Sentence Simplification},
      booktitle = {Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing},
      publisher = {Association for Computational Linguistics},
           year = {2023},
       location = {Singapore},
       abstract = {We present BLESS, a comprehensive performance benchmark of the most recent state-of-the-art large language models (LLMs) on the task of text simplification (TS). 
We examine how well off-the-shelf LLMs can solve this challenging task, assessing a total of 44 models, differing in size, architecture, pre-training methods, and accessibility, on three test sets from different domains (Wikipedia, news, and medical) under a few-shot setting. 
Our analysis considers a suite of automatic metrics as well as a large-scale quantitative investigation into the types of common edit operations performed by the different models. 
Furthermore, we perform a manual qualitative analysis on a subset of model outputs to better gauge the quality of the generated simplifications.
Our evaluation indicates that the best LLMs, despite not being trained on TS, perform comparably with state-of-the-art TS baselines.
Additionally, we find that certain LLMs demonstrate a greater range and diversity of edit operations.
Our performance benchmark will be available as a resource for the development of future TS methods and evaluation metrics.},
            pdf = {https://publications.idiap.ch/attachments/papers/2023/Kew_EMNLP2023_2023.pdf}
}