%Aigaion2 BibTeX export from Idiap Publications
%Saturday 21 December 2024 06:02:44 PM

@INPROCEEDINGS{keller:nips:2005,
  author = {Keller, Mikaela and Bengio, Samy and Wong, Siew Yeung},
  projects = {Idiap},
  title = {Benchmarking Non-Parametric Statistical Tests},
  booktitle = {Advances in Neural Information Processing Systems, NIPS 18. MIT Press},
  year = {2005},
  note = {IDIAP-RR 05-38},
  crossref = {keller:rr05-38},
  abstract = {Statistical significance tests for non-standard measures (different from the classification error) are less often used in the literature, although non-parametric tests have already been proposed for that purpose. This paper is an attempt at empirically verifying how these tests compare with more classical tests under various conditions. More precisely, using a very large dataset to estimate the whole ``population'', we analyzed the behavior of several statistical tests, varying the class unbalance, the compared models, the performance measure, and the sample size. The main result is that, provided big enough evaluation sets, non-parametric tests are relatively reliable in all conditions.},
  pdf = {https://publications.idiap.ch/attachments/papers/2005/keller-nips-2005.pdf},
  postscript = {ftp://ftp.idiap.ch/pub/papers/2005/keller-nips-2005.ps.gz},
  ipdmembership = {learning},
}

crossreferenced publications:

@TECHREPORT{keller:rr05-38,
  author = {Keller, Mikaela and Bengio, Samy and Wong, Siew Yeung},
  projects = {Idiap},
  title = {Benchmarking Non-Parametric Statistical Tests},
  type = {Idiap-RR},
  number = {Idiap-RR-38-2005},
  year = {2005},
  institution = {IDIAP},
  note = {To appear in Advances in Neural Information Processing Systems, NIPS 18. MIT Press, 2005.},
  abstract = {Statistical significance tests for non-standard measures (different from the classification error) are less often used in the literature, although non-parametric tests have already been proposed for that purpose. This paper is an attempt at empirically verifying how these tests compare with more classical tests under various conditions. More precisely, using a very large dataset to estimate the whole ``population'', we analyzed the behavior of several statistical tests, varying the class unbalance, the compared models, the performance measure, and the sample size. The main result is that, provided big enough evaluation sets, non-parametric tests are relatively reliable in all conditions.},
  pdf = {https://publications.idiap.ch/attachments/reports/2005/keller-idiap-rr-05-38.pdf},
  postscript = {ftp://ftp.idiap.ch/pub/reports/2005/keller-idiap-rr-05-38.ps.gz},
  ipdmembership = {learning},
}