%Aigaion2 BibTeX export from Idiap Publications
%Thursday 21 November 2024 04:56:53 PM

@INPROCEEDINGS{grandvalet:NIPS:2007,
         author = {Szafranski, Marie and Grandvalet, Yves and Morizet-Mahoudeaux, Pierre},
       projects = {Idiap},
          title = {Hierarchical Penalization},
      booktitle = {Advances in Neural Information {P}rocessing {S}ystems 21},
           year = {2007},
           note = {IDIAP-RR 07-76},
       crossref = {grandvalet:rr07-76},
       abstract = {Hierarchical penalization is a generic framework for incorporating prior information in the fitting of statistical models, when the explicative variables are organized in a hierarchical structure. The penalizer is a convex functional that performs soft selection at the group level, and shrinks variables within each group. This favors solutions with few leading terms in the final combination. The framework, originally derived for taking prior knowledge into account, is shown to be useful in linear regression, when several parameters are used to model the influence of one feature, or in kernel regression, for learning multiple kernels.},
            pdf = {https://publications.idiap.ch/attachments/papers/2007/grandvalet-NIPS-2007.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/papers/2007/grandvalet-NIPS-2007.ps.gz},
  ipdmembership = {learning},
}

crossreferenced publications:

@TECHREPORT{grandvalet:rr07-76,
         author = {Szafranski, Marie and Grandvalet, Yves and Morizet-Mahoudeaux, Pierre},
       projects = {Idiap},
          title = {Hierarchical Penalization},
           type = {Idiap-RR},
         number = {Idiap-RR-76-2007},
           year = {2007},
    institution = {IDIAP},
           note = {To appear in Advances in Neural Information Processing Systems 21 (NIPS 2007)},
       abstract = {Hierarchical penalization is a generic framework for incorporating prior information in the fitting of statistical models, when the explicative variables are organized in a hierarchical structure. The penalizer is a convex functional that performs soft selection at the group level, and shrinks variables within each group. This favors solutions with few leading terms in the final combination. The framework, originally derived for taking prior knowledge into account, is shown to be useful in linear regression, when several parameters are used to model the influence of one feature, or in kernel regression, for learning multiple kernels.},
            pdf = {https://publications.idiap.ch/attachments/reports/2007/grandvalet-idiap-rr-07-76.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/reports/2007/grandvalet-idiap-rr-07-76.ps.gz},
  ipdmembership = {learning},
}