%Aigaion2 BibTeX export from Idiap Publications
%Sunday 28 April 2024 08:43:45 PM

@INPROCEEDINGS{Schnell_MLSLP-18_2018,
         author = {Schnell, Bastian and Garner, Philip N.},
       projects = {Idiap, MASS},
          title = {A Neural Model to Predict Parameters for a Generalized Command Response Model of Intonation},
      booktitle = {MLSLP-18 Proceedings},
           year = {2018},
       location = {Hyderabad},
           note = {Satellite workshop of Interspeech '18},
            url = {https://sites.google.com/view/mlslp/home},
       crossref = {Schnell_INTERSPEECH2018_2018},
       abstract = {This abstract summarizes our paper accepted in the main
conference with the same title.},
            pdf = {https://publications.idiap.ch/attachments/papers/2018/Schnell_MLSLP-18_2018.pdf}
}



crossreferenced publications: 
@INPROCEEDINGS{Schnell_INTERSPEECH2018_2018,
         author = {Schnell, Bastian and Garner, Philip N.},
       projects = {Idiap, MASS},
          month = sep,
          title = {A Neural Model to Predict Parameters for a Generalized Command Response Model of Intonation},
      booktitle = {Proc. Interspeech 2018},
           year = {2018},
          pages = {3147--3151},
            doi = {10.21437/Interspeech.2018-1904},
       abstract = {The Generalised Command Response (GCR) model is a time-local model of intonation that has been shown to lend itself to (cross-language) transfer of emphasis. In order to generalise the model to longer prosodic sequences, we show that it can be driven by a recurrent neural network emulating a spiking neural network. We show that a loss function for error backpropagation can be formulated analogously to that of the Spike Pattern Association Neuron (SPAN) method for spiking networks. The resulting system is able to generate prosody comparable to a state-of-the-art deep neural network implementation, but potentially retaining the transfer capabilities of the GCR model.},
            pdf = {https://publications.idiap.ch/attachments/papers/2018/Schnell_INTERSPEECH2018_2018.pdf}
}