%Aigaion2 BibTeX export from Idiap Publications
%Tuesday 03 December 2024 06:38:26 PM
@TECHREPORT{morris-COM-02-03,
         author = {Morris, Andrew},
       keywords = {contingency tables, likelihood ratio, mutual information, word error rate},
       projects = {Idiap},
          title = {An information theoretic measure of sequence recognition performance},
           type = {Idiap-Com},
         number = {Idiap-Com-03-2002},
           year = {2002},
    institution = {IDIAP},
       abstract = {Sequence recognition performance is often summarised first in terms of the number of hits (H), substitutions (S), deletions (D) and insertions (I), and then as a single statistic by the "word error rate" WER = 100(S + D + I)/(H + S + D). While in common use, WER has two disadvantages as a performance measure. One is that it has no upper bound, so it doesn't tell you how good a system is, only that one is better than another. The other is that it is not D/I symmetric, although deletions and insertions are equally disadvantageous. At low error rates these limitations can be ignored. However, at the high error rates which can occur during tests of speech recognition in noise, the WER measure starts to misbehave, giving far more weight to insertions than to deletions and regularly exceeding 100\%. Here we derive an alternative summary statistic for sequence recognition accuracy: WIP = H^2/((H + S + D)(H + S + I)). The WIP (word information preserved) measure results from an approximation to the proportion of the information about the true sequence which is preserved in the recognised sequence. It has comparable simplicity to WER but neither of its disadvantages.},
            pdf = {https://publications.idiap.ch/attachments/reports/2002/com02-03.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/reports/2002/com02-03.ps.gz},
  ipdmembership = {speech},
}
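
% Note: the Python sketch below is not part of the Aigaion export. It merely
% illustrates the WER and WIP formulas quoted in the abstract, using made-up
% H, S, D, I counts; the function and variable names are hypothetical.

# Illustration of the WER and WIP statistics described in the abstract.
# H, S, D, I are hit, substitution, deletion and insertion counts.

def wer(H, S, D, I):
    """Word error rate, in percent: 100 * (S + D + I) / (H + S + D)."""
    return 100.0 * (S + D + I) / (H + S + D)

def wip(H, S, D, I):
    """Word information preserved: H^2 / ((H + S + D) * (H + S + I))."""
    return H * H / ((H + S + D) * (H + S + I))

if __name__ == "__main__":
    # Made-up counts with many insertions: WER exceeds 100%,
    # while WIP remains bounded in [0, 1].
    H, S, D, I = 50, 30, 20, 120
    print(f"WER = {wer(H, S, D, I):.1f}%")   # 170.0%
    print(f"WIP = {wip(H, S, D, I):.3f}")    # 0.125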