%Aigaion2 BibTeX export from Idiap Publications
%Sunday 22 December 2024 02:09:42 AM

@INPROCEEDINGS{Kuzborskij_ICIAP_2015,
         author = {Kuzborskij, Ilja and Orabona, Francesco and Caputo, Barbara},
       projects = {Idiap},
          month = sep,
          title = {Transfer Learning through Greedy Subset Selection},
      booktitle = {Image Analysis and Processing -- {ICIAP} 2015},
         series = {Lecture Notes in Computer Science},
         volume = {9279},
           year = {2015},
          pages = {3--14},
      publisher = {Springer International Publishing},
       location = {Genoa, Italy},
           isbn = {978-3-319-23231-7},
            doi = {10.1007/978-3-319-23231-7_1},
       crossref = {Kuzborskij_Idiap-RR-26-2015},
            pdf = {https://publications.idiap.ch/attachments/papers/2015/Kuzborskij_ICIAP_2015.pdf}
}



crossreferenced publications: 
@TECHREPORT{Kuzborskij_Idiap-RR-26-2015,
         author = {Kuzborskij, Ilja and Orabona, Francesco and Caputo, Barbara},
       projects = {Idiap, NINAPRO},
          month = jul,
          title = {Transfer Learning through Greedy Subset Selection},
           type = {Idiap-RR},
         number = {Idiap-RR-26-2015},
           year = {2015},
    institution = {Idiap},
       abstract = {In this work we study the binary transfer learning problem involving $10^2$--$10^3$ sources.
We focus on how to select sources from the large pool and how to combine them to yield a good performance on a target task.
In particular, we consider the transfer learning setting where one does not have direct
access to the source data, but rather employs the source hypotheses trained from them.
Building on results on greedy algorithms, we propose an efficient algorithm that selects relevant source hypotheses
and feature dimensions simultaneously.
On three computer vision datasets we achieve state-of-the-art results,
substantially outperforming both popular feature selection and transfer learning baselines when transferring in a small-sample setting.
Our experiments involve up to 1000 classes, totalling 1.2 million examples, with only 11 to 20 training examples from the target domain.
We corroborate our findings showing theoretically that, under reasonable assumptions on the source hypotheses, our algorithm can
learn effectively from few examples.},
            pdf = {https://publications.idiap.ch/attachments/reports/2014/Kuzborskij_Idiap-RR-26-2015.pdf}
}