%Aigaion2 BibTeX export from Idiap Publications
%Thursday 21 November 2024 12:32:09 PM

@INPROCEEDINGS{dimitrakakis:icann:2006,
         author = {Dimitrakakis, Christos},
       projects = {Idiap},
          title = {Nearly optimal exploration-exploitation decision thresholds},
      booktitle = {Int. Conf. on Artificial Neural Networks ({ICANN})},
           year = {2006},
           note = {IDIAP-RR 06-12},
       crossref = {dimitrakakis:rr06-12},
        abstract = {While in general trading off exploration and exploitation in reinforcement learning is hard, under some formulations relatively simple solutions exist. Optimal decision thresholds for the multi-armed bandit problem, one for the infinite horizon discounted reward case and one for the finite horizon undiscounted reward case, are derived, which make the link between the reward horizon, uncertainty, and the need for exploration explicit. From this result follow two practical approximate algorithms, which are illustrated experimentally.},
            pdf = {https://publications.idiap.ch/attachments/papers/2006/dimitrakakis-icann-2006.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/papers/2006/dimitrakakis-icann-2006.ps.gz},
  ipdmembership = {learning},
}



% crossreferenced publications:
@TECHREPORT{dimitrakakis:rr06-12,
         author = {Dimitrakakis, Christos},
       projects = {Idiap},
          title = {Nearly optimal exploration-exploitation decision thresholds},
           type = {Idiap-RR},
         number = {Idiap-RR-12-2006},
           year = {2006},
    institution = {IDIAP},
           note = {To appear in ICANN 2006},
       abstract = {While in general trading off exploration and exploitation in reinforcement learning is hard, under some formulations relatively simple solutions exist. Optimal decision thresholds for the multi-armed bandit problem, one for the infinite horizon discounted reward case and one for the finite horizon undiscounted reward case, are derived, which make the link between the reward horizon, uncertainty, and the need for exploration explicit. From this result follow two practical approximate algorithms, which are illustrated experimentally.},
            pdf = {https://publications.idiap.ch/attachments/reports/2006/dimitrakakis-idiap-rr-06-12.pdf},
     postscript = {ftp://ftp.idiap.ch/pub/reports/2006/dimitrakakis-idiap-rr-06-12.ps.gz},
  ipdmembership = {learning},
}