%Aigaion2 BibTeX export from Idiap Publications
%Friday 22 November 2024 03:38:22 PM

@INPROCEEDINGS{Vazquez-Canteli_BUILDSYS19_2019,
         author = {V{\'{a}}zquez-Canteli, Jos{\'{e}} and K{\"{a}}mpf, J{\'{e}}r{\^{o}}me and Henze, Gregor and Nagy, Zolt{\'{a}}n},
       projects = {Idiap},
          month = nov,
          title = {{CityLearn} v1.0: An {OpenAI Gym} Environment for Demand Response with Deep Reinforcement Learning},
      booktitle = {Proceedings of the 6th {ACM} International Conference on Systems for Energy-Efficient Buildings, Cities, and Transportation},
           year = {2019},
          pages = {356--357},
      publisher = {ACM},
        address = {New York, NY, USA},
            doi = {10.1145/3360322.3360998},
       abstract = {Demand response has the potential of reducing peaks of electricity demand by about 20\% in the US, where buildings represent roughly 70\% of the total electricity demand. Buildings are dynamic systems in constant change (i.e. occupants' behavior, refurbishment measures), which are costly to model and difficult to coordinate with other urban energy systems. Reinforcement learning is an adaptive control algorithm that can control these urban energy systems relying on historical and real-time data instead of models. Plenty of research has been conducted in the use of reinforcement learning for demand response applications in the last few years. However, most experiments are difficult to replicate, and the lack of standardization makes the performance of different algorithms difficult, if not impossible, to compare. In this demo, we introduce a new framework, CityLearn, based on the OpenAI Gym Environment, which will allow researchers to implement, share, replicate, and compare their implementations of reinforcement learning for demand response applications more easily. The framework is open source and modular, which allows researchers to modify and customize it, e.g., by adding additional storage, generation, or energy-consuming systems.}
}