%Aigaion2 BibTeX export from Idiap Publications
%Thursday 21 November 2024 04:51:06 PM

@INPROCEEDINGS{grandvalet:ICML-1:2007,
         author = {Rakotomamonjy, Alain and Bach, Francis and Canu, St{\'{e}}phane and Grandvalet, Yves},
       projects = {Idiap},
          title = {More Efficiency in Multiple Kernel Learning},
      booktitle = {International Conference on Machine Learning ({ICML})},
           year = {2007},
           note = {IDIAP-RR 07-18},
       crossref = {grandvalet:rr07-18},
       abstract = {An efficient and general multiple kernel learning (MKL) algorithm has recently been proposed by \singleemcite{sonnenburg_mkljmlr}. This approach has opened new perspectives, since it makes MKL tractable for large-scale problems by iteratively using existing support vector machine code. However, this iterative algorithm needs several iterations before converging towards a reasonable solution. In this paper, we address the MKL problem through an adaptive 2-norm regularization formulation. Weights on each kernel matrix are included in the standard SVM empirical risk minimization problem with an $\ell_1$ constraint to encourage sparsity. We propose an algorithm for solving this problem and provide a new insight on MKL algorithms based on block 1-norm regularization by showing that the two approaches are equivalent. Experimental results show that the resulting algorithm converges rapidly and that its efficiency compares favorably to other MKL algorithms.},
            pdf = {https://publications.idiap.ch/attachments/papers/2007/grandvalet-ICML-1-2007.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/papers/2007/grandvalet-ICML-1-2007.ps.gz},
  ipdmembership = {learning}
}

% cross-referenced publications:

@TECHREPORT{grandvalet:rr07-18,
         author = {Rakotomamonjy, Alain and Bach, Francis and Canu, St{\'{e}}phane and Grandvalet, Yves},
       projects = {Idiap},
          title = {More Efficiency in Multiple Kernel Learning},
           type = {Idiap-RR},
         number = {Idiap-RR-18-2007},
           year = {2007},
    institution = {IDIAP},
           note = {To appear in \textit{Proceedings of the $\mathit{24}^{th}$ International Conference on Machine Learning}, Corvallis, OR, 2007},
       abstract = {An efficient and general multiple kernel learning (MKL) algorithm has recently been proposed by \singleemcite{sonnenburg_mkljmlr}. This approach has opened new perspectives, since it makes MKL tractable for large-scale problems by iteratively using existing support vector machine code. However, this iterative algorithm needs several iterations before converging towards a reasonable solution. In this paper, we address the MKL problem through an adaptive 2-norm regularization formulation. Weights on each kernel matrix are included in the standard SVM empirical risk minimization problem with an $\ell_1$ constraint to encourage sparsity. We propose an algorithm for solving this problem and provide a new insight on MKL algorithms based on block 1-norm regularization by showing that the two approaches are equivalent. Experimental results show that the resulting algorithm converges rapidly and that its efficiency compares favorably to other MKL algorithms.},
            pdf = {https://publications.idiap.ch/attachments/reports/2007/grandvalet-idiap-rr-07-18.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/reports/2007/grandvalet-idiap-rr-07-18.ps.gz},
  ipdmembership = {learning}
}
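
% Editor's sketch: the abstracts above describe learning a combined kernel
% $K = \sum_m d_m K_m$ with weights $d_m$ under an $\ell_1$ constraint
% ($\sum_m d_m = 1$, $d_m \ge 0$), solved by alternating between a standard
% SVM solve and a weight update. The Python sketch below illustrates one such
% generic alternating scheme, assuming scikit-learn's SVC with a precomputed
% kernel; the function name and the specific weight update are illustrative
% assumptions, not the paper's exact algorithm.

import numpy as np
from sklearn.svm import SVC

def mkl_weights(kernels, y, C=1.0, n_iter=50, tol=1e-6):
    """Alternating scheme for l1-constrained MKL (illustrative sketch).

    kernels: list of (n, n) precomputed kernel matrices K_m
    y: (n,) labels in {-1, +1}
    Returns the learned kernel weights d (nonnegative, summing to 1).
    """
    M = len(kernels)
    d = np.full(M, 1.0 / M)  # uniform initialization on the simplex
    for _ in range(n_iter):
        # SVM step: fit a standard SVM on the current weighted kernel sum.
        K = sum(dm * Km for dm, Km in zip(d, kernels))
        svm = SVC(C=C, kernel="precomputed").fit(K, y)
        beta = np.zeros(len(y))
        beta[svm.support_] = svm.dual_coef_.ravel()  # signed duals y_i alpha_i
        # Weight step: per-kernel norm ||f_m|| = d_m * sqrt(beta' K_m beta),
        # then renormalize onto the l1 simplex (sparsity-encouraging update).
        norms = np.array([dm * np.sqrt(max(beta @ Km @ beta, 0.0))
                          for dm, Km in zip(d, kernels)])
        d_new = norms / norms.sum()
        if np.linalg.norm(d_new - d) < tol:
            return d_new
        d = d_new
    return d

% The alternation mirrors the structure the abstract attributes to iterative
% MKL solvers: each outer step reuses an off-the-shelf SVM, and sparsity in d
% comes from the l1-normalized weight update rather than from the SVM itself.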