%Aigaion2 BibTeX export from Idiap Publications
%Sunday 22 December 2024 02:09:24 AM

@INPROCEEDINGS{Orabona_ICML_2011,
         author = {Orabona, Francesco and Luo, Jie},
       projects = {Idiap},
          month = jun,
          title = {Ultra-Fast Optimization Algorithm for Sparse Multi Kernel Learning},
      booktitle = {Proceedings of the 28th International Conference on Machine Learning},
           year = {2011},
       crossref = {Orabona_Idiap-RR-11-2011},
       abstract = {Many state-of-the-art approaches for Multi Kernel Learning (MKL) struggle at finding a compromise between performance, sparsity of the solution and speed of the optimization process. In this paper we look at the MKL problem at the same time from a learning and optimization point of view. So, instead of designing a regularizer and then struggling to find an efficient method to minimize it, we design the regularizer while keeping the optimization algorithm in mind. Hence, we introduce a novel MKL formulation, which mixes elements of p-norm and elastic-net kind of regularization. We also propose a fast stochastic gradient descent method that solves the novel MKL formulation. We show theoretically and empirically that our method has 1) state-of-the-art performance on many classification tasks; 2) exact sparse solutions with a tunable level of sparsity; 3) a convergence rate bound that depends only logarithmically on the number of kernels used, and is independent of the sparsity required; 4) independence on the particular convex loss function used.},
            pdf = {https://publications.idiap.ch/attachments/papers/2011/Orabona_ICML_2011.pdf}
}



%crossreferenced publications:
@TECHREPORT{Orabona_Idiap-RR-11-2011,
         author = {Orabona, Francesco and Luo, Jie},
       projects = {Idiap},
          month = may,
          title = {Ultra-Fast Optimization Algorithm for Sparse Multi Kernel Learning},
           type = {Idiap-RR},
         number = {Idiap-RR-11-2011},
           year = {2011},
    institution = {Idiap},
       abstract = {Many state-of-the-art approaches for Multi Kernel Learning (MKL) struggle at finding a compromise between performance, sparsity of the solution and speed of the optimization process. In this paper we look at the MKL problem at the same time from a learning and optimization point of view. So, instead of designing a regularizer and then struggling to find an efficient method to minimize it, we design the regularizer while keeping the optimization algorithm in mind. Hence, we introduce a novel MKL formulation, which mixes elements of p-norm and elastic-net kind of regularization. We also propose a fast stochastic gradient descent method that solves the novel MKL formulation. We show theoretically and empirically that our method has 1) state-of-the-art performance on many classification tasks; 2) exact sparse solutions with a tunable level of sparsity; 3) a convergence rate bound that depends only logarithmically on the number of kernels used, and is independent of the sparsity required; 4) independence on the particular convex loss function used.},
            pdf = {https://publications.idiap.ch/attachments/reports/2011/Orabona_Idiap-RR-11-2011.pdf}
}