%Aigaion2 BibTeX export from Idiap Publications
%Monday 29 April 2024 07:13:43 AM

@TECHREPORT{Janbakhshi_Idiap-RR-32-2020,
         author = {Janbakhshi, Parvaneh and Kodrasi, Ina and Bourlard, Herv{\'{e}}},
       projects = {Idiap, MOSPEEDI},
          month = {12},
          title = {Automatic Dysarthric Speech Detection Exploiting Pairwise Distance-Based Convolutional Neural Networks},
           type = {Idiap-RR},
         number = {Idiap-RR-32-2020},
           year = {2020},
    institution = {Idiap},
           note = {Submitted},
       abstract = {Automatic dysarthric speech detection can provide reliable and cost-effective computer-aided tools to assist the clinical diagnosis and management of dysarthria. In this paper we propose a novel automatic dysarthric speech detection approach based on analyses of pairwise distance matrices using convolutional neural networks (CNNs). We represent utterances through articulatory posteriors and consider pairs of phonetically-balanced representations, with one representation from a healthy speaker (i.e., the reference representation) and the other from the test speaker (i.e., the test representation). Given such pairs of reference and test representations, features are first extracted using a feature extraction front-end, a frame-level distance matrix is computed, and the resulting distance matrix is treated as an image by a CNN-based binary classifier. The feature extraction, distance matrix computation, and CNN-based classifier are jointly optimized in an end-to-end framework. Experimental results on two databases of healthy and dysarthric speakers for different languages and pathologies show that the proposed approach yields a high dysarthric speech detection performance, outperforming other CNN-based baseline approaches.},
            pdf = {https://publications.idiap.ch/attachments/reports/2020/Janbakhshi_Idiap-RR-32-2020.pdf}
}
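
% The entry's abstract describes a pipeline of a frame-wise feature front-end, a
% frame-level pairwise distance matrix, and a CNN binary classifier trained
% end-to-end. The sketch below is a minimal, hypothetical PyTorch illustration of
% that pipeline, not the authors' code: the layer sizes, the Euclidean distance,
% and all class and variable names are assumptions for illustration only.

import torch
import torch.nn as nn

class DistanceMatrixCNN(nn.Module):
    def __init__(self, in_dim=64, feat_dim=32):
        super().__init__()
        # Feature extraction front-end applied frame-wise to both utterances.
        self.frontend = nn.Sequential(
            nn.Linear(in_dim, feat_dim), nn.ReLU(),
            nn.Linear(feat_dim, feat_dim), nn.ReLU(),
        )
        # CNN binary classifier operating on the distance matrix as an image.
        self.cnn = nn.Sequential(
            nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(), nn.AdaptiveAvgPool2d(8),
            nn.Flatten(), nn.Linear(16 * 8 * 8, 1),
        )

    def forward(self, ref, test):
        # ref, test: (batch, frames, in_dim) articulatory-posterior sequences
        # from the reference (healthy) and test speakers.
        f_ref = self.frontend(ref)             # (batch, T_ref, feat_dim)
        f_test = self.frontend(test)           # (batch, T_test, feat_dim)
        # Frame-level pairwise distance matrix (Euclidean here, as an assumption).
        dist = torch.cdist(f_ref, f_test)      # (batch, T_ref, T_test)
        logits = self.cnn(dist.unsqueeze(1))   # treat the matrix as a 1-channel image
        return logits.squeeze(-1)              # healthy-vs-dysarthric score

# Example: one reference/test pair, 100 frames each, 64-dim posteriors.
model = DistanceMatrixCNN()
score = model(torch.randn(1, 100, 64), torch.randn(1, 100, 64))

% Because the front-end, the distance computation, and the CNN are all
% differentiable, a single cross-entropy loss on the output trains the whole
% chain jointly, which is the end-to-end optimization the abstract refers to.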