%Aigaion2 BibTeX export from Idiap Publications
%Tuesday 30 April 2024 05:21:18 AM

@inproceedings{stephenson02a,
         author = {Stephenson, Todd Andrew and Magimai.-Doss, Mathew and Bourlard, Herv{\'{e}}},
       projects = {Idiap},
          month = aug,
          title = {Mixed {Bayesian} Networks with Auxiliary Variables for Automatic Speech Recognition},
      booktitle = {International Conference on Pattern Recognition (ICPR~2002)},
         volume = {4},
           year = {2002},
        address = {Quebec City, PQ, Canada},
       crossref = {stephenson01c},
       abstract = {Standard hidden Markov models (HMMs), as used in automatic speech recognition (ASR), calculate their emission probabilities by an artificial neural network (ANN) or a Gaussian distribution conditioned on the hidden state variable, considering the emissions independent of any other variable in the model. Recent work showed the benefit of conditioning the emission distributions on a discrete auxiliary variable, which is observed in training and hidden in recognition. Related work has shown the utility of conditioning the emission distributions on a continuous auxiliary variable. We apply mixed Bayesian networks (BNs) to extend these works by introducing a continuous auxiliary variable that is observed in training but is hidden in recognition. We find that an auxiliary pitch variable conditioned itself upon the hidden state can degrade performance unless the auxiliary variable is also hidden. The performance, furthermore, can be improved by making the auxiliary pitch variable independent of the hidden state.},
            pdf = {https://publications.idiap.ch/attachments/papers/2002/todd-icpr2002.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/papers/2002/todd-icpr2002.ps.gz},
  ipdmembership = {speech},
}



crossreferenced publications: 
@techreport{stephenson01c,
         author = {Stephenson, Todd Andrew and Magimai.-Doss, Mathew and Bourlard, Herv{\'{e}}},
       projects = {Idiap},
          title = {Mixed {Bayesian} Networks with Auxiliary Variables for Automatic Speech Recognition},
           type = {Idiap-RR},
         number = {Idiap-RR-45-2001},
           year = {2001},
    institution = {IDIAP},
           note = {In ``International Conference on Pattern Recognition (ICPR~2002)'', 2002},
       abstract = {Standard hidden Markov models (HMMs), as used in automatic speech recognition (ASR), calculate their emission probabilities by an artificial neural network (ANN) or a Gaussian distribution conditioned on the hidden state variable, considering the emissions independent of any other variable in the model. Recent work showed the benefit of conditioning the emission distributions on a discrete auxiliary variable, which is observed in training and hidden in recognition. Related work has shown the utility of conditioning the emission distributions on a continuous auxiliary variable. We apply mixed Bayesian networks (BNs) to extend these works by introducing a continuous auxiliary variable that is observed in training but is hidden in recognition. We find that an auxiliary pitch variable conditioned itself upon the hidden state can degrade performance unless the auxiliary variable is also hidden. The performance, furthermore, can be improved by making the auxiliary pitch variable independent of the hidden state.},
            pdf = {https://publications.idiap.ch/attachments/reports/2001/rr01-45.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/reports/2001/rr01-45.ps.gz},
  ipdmembership = {speech},
}