%Aigaion2 BibTeX export from Idiap Publications
%Saturday 27 July 2024 02:00:05 PM

@INPROCEEDINGS{Moerland-97.3,
         author = {Moerland, Perry},
          editor = {Gerstner, W. and Germond, A. and Hasler, M. and Nicoud, J.-D.},
       projects = {Idiap},
          title = {Mixtures of Experts Estimate A Posteriori Probabilities},
      booktitle = {Proceedings of the International Conference on Artificial Neural Networks (ICANN'97)},
         series = {Lecture Notes in Computer Science},
          volume = {1327},
           year = {1997},
      publisher = {Springer-Verlag},
        address = {Berlin},
           note = {(IDIAP-RR 97-07)},
        crossref = {Moerland-97.5},
       abstract = {The mixtures of experts (ME) model offers a modular structure suitable for a divide-and-conquer approach to pattern recognition. It has a probabilistic interpretation in terms of a mixture model, which forms the basis for the error function associated with MEs. In this paper, it is shown that for classification problems the minimization of this ME error function leads to ME outputs estimating the a posteriori probabilities of class membership of the input vector.},
            pdf = {https://publications.idiap.ch/attachments/papers/1997/moerland-me-aposteriori.pdf},
   ipdmembership = {learning},
}
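% A worked sketch of the claim in the abstract above, in standard
% mixtures-of-experts notation (not taken verbatim from the paper): the ME
% error function is the negative log-likelihood of the mixture model,
%
%   E = - \sum_n \log \sum_j g_j(x_n) \, P_j(t_n \mid x_n)
%
% where the g_j are the gating-network outputs and the P_j are the densities
% modelled by the individual experts. For classification with 1-of-c target
% coding, minimising E drives the combined output \sum_j g_j(x) y_j(x)
% towards an estimate of the posterior P(C_k \mid x), which is the result
% the paper establishes.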



% crossreferenced publications:
@TECHREPORT{Moerland-97.5,
         author = {Moerland, Perry},
       projects = {Idiap},
          title = {Mixtures of Experts Estimate A Posteriori Probabilities},
           type = {Idiap-RR},
         number = {Idiap-RR-07-1997},
           year = {1997},
    institution = {IDIAP},
           note = {Published in ``Proceedings of the International Conference on Artificial Neural Networks (ICANN'97)''},
       abstract = {The mixtures of experts (ME) model offers a modular structure suitable for a divide-and-conquer approach to pattern recognition. It has a probabilistic interpretation in terms of a mixture model, which forms the basis for the error function associated with MEs. In this paper, it is shown that for classification problems the minimization of this ME error function leads to ME outputs estimating the a posteriori probabilities of class membership of the input vector.},
            pdf = {https://publications.idiap.ch/attachments/reports/1997/rr97-07.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/reports/1997/rr97-07.ps.gz},
   ipdmembership = {learning},
}