%Aigaion2 BibTeX export from Idiap Publications
%Friday 05 December 2025 03:53:47 AM
@ARTICLE{Fleuret_PRL_2008,
author = {Fleuret, Francois},
projects = {Idiap, IM2},
title = {Multi-layer Boosting for Pattern Recognition},
journal = {Pattern Recognition Letters},
volume = {30},
year = {2009},
crossref = {Fleuret_Idiap-RR-76-2008},
abstract = {We extend the standard boosting procedure to train a two-layer classifier dedicated to handwritten
character recognition. The scheme we propose relies on a hidden layer which extracts feature vectors on a
fixed number of points of interest, and an output layer which combines those feature vectors and the
point of interest locations into a final classification decision.
Our main contribution is to show that the classical AdaBoost procedure can be extended to train such a
multi-layered structure by propagating the error through the output layer. Such an extension allows for
the selection of optimal weak learners by minimizing a weighted error, in both the output layer and the
hidden layer. We provide experimental results on the MNIST database and compare to a classical
unsupervised EM-based feature extraction.}
}
% Cross-referenced publications:
@TECHREPORT{Fleuret_Idiap-RR-76-2008,
author = {Fleuret, Francois},
projects = {Idiap, IM2},
month = {12},
title = {Multi-layer Boosting for Pattern Recognition},
type = {Idiap-RR},
number = {Idiap-RR-76-2008},
year = {2008},
institution = {Idiap},
abstract = {We extend the standard boosting procedure to train a two-layer classifier dedicated to handwritten
character recognition. The scheme we propose relies on a hidden layer which extracts feature vectors on a
fixed number of points of interest, and an output layer which combines those feature vectors and the
point of interest locations into a final classification decision.
Our main contribution is to show that the classical AdaBoost procedure can be extended to train such a
multi-layered structure by propagating the error through the output layer. Such an extension allows for
the selection of optimal weak learners by minimizing a weighted error, in both the output layer and the
hidden layer. We provide experimental results on the MNIST database and compare to a classical
unsupervised EM-based feature extraction.},
pdf = {https://publications.idiap.ch/attachments/reports/2008/Fleuret_Idiap-RR-76-2008.pdf}
}