%Aigaion2 BibTeX export from Idiap Publications
%Thursday 21 November 2024 12:33:38 PM

@INPROCEEDINGS{Grandvalet_NIPS_2008,
         author = {Grandvalet, Yves and Rakotomamonjy, Alain and Keshet, Joseph and Canu, St{\'{e}}phane},
       projects = {DIRAC, Idiap},
          title = {Support Vector Machines with a Reject Option},
      booktitle = {Proceedings of the 22nd Annual Conference on Neural Information Processing Systems},
           year = {2008},
       crossref = {Grandvalet_Idiap-RR-01-2009},
       abstract = {We consider the problem of binary classification where the classifier may abstain 
instead of classifying each observation. The Bayes decision rule for this setup, 
known as Chow's rule, is defined by two thresholds on posterior probabilities. 
From simple desiderata, namely the consistency and the sparsity of the classifier, 
we derive the double hinge loss function that focuses on estimating conditional 
probabilities only in the vicinity of the threshold points of the optimal decision 
rule. We show that, for suitable kernel machines, our approach is universally 
consistent. We cast the problem of minimizing the double hinge loss as a quadratic 
program akin to the standard SVM optimization problem and propose an active set 
method to solve it efficiently. We finally provide preliminary experimental results 
illustrating the interest of our constructive approach to devising loss functions.},
            pdf = {https://publications.idiap.ch/attachments/papers/2009/Grandvalet_NIPS_2008.pdf}
}



Cross-referenced publications: 
@TECHREPORT{Grandvalet_Idiap-RR-01-2009,
         author = {Grandvalet, Yves and Keshet, Joseph and Rakotomamonjy, Alain and Canu, St{\'{e}}phane},
       projects = {Idiap, DIRAC},
          month = {1},
          title = {Support Vector Machines with a Reject Option},
           type = {Idiap-RR},
         number = {Idiap-RR-01-2009},
           year = {2009},
    institution = {Idiap},
        abstract = {We consider the problem of binary classification where the classifier may abstain 
instead of classifying each observation. The Bayes decision rule for this setup, 
known as Chow's rule, is defined by two thresholds on posterior probabilities. 
From simple desiderata, namely the consistency and the sparsity of the classifier, 
we derive the double hinge loss function that focuses on estimating conditional 
probabilities only in the vicinity of the threshold points of the optimal decision 
rule. We show that, for suitable kernel machines, our approach is universally 
consistent. We cast the problem of minimizing the double hinge loss as a quadratic 
program akin to the standard SVM optimization problem and propose an active set 
method to solve it efficiently. We finally provide preliminary experimental results 
illustrating the interest of our constructive approach to devising loss functions.},
            pdf = {https://publications.idiap.ch/attachments/reports/2008/Grandvalet_Idiap-RR-01-2009.pdf}
}