@article{Marsetič_Šemrov_Žura_2014, title={Road Artery Traffic Light Optimization with Use of the Reinforcement Learning}, volume={26}, url={http://traffic.fpz.hr/index.php/PROMTT/article/view/1318}, DOI={10.7307/ptt.v26i2.1318}, abstractNote={The basic principle of optimal traffic control is the appropriate real-time response to dynamic traffic flow changes. Signal plan efficiency depends on a large number of input parameters. An actuated signal system can adjust very well to traffic conditions, but cannot fully adjust to stochastic traffic volume oscillation. Due to the complexity of the problem, analytical methods are not applicable in real time; the purpose of this paper is therefore to introduce a heuristic method suitable for traffic light optimization in real time. With the evolution of artificial intelligence, new possibilities for solving complex problems have been introduced. The goal of this paper is to demonstrate that the Q learning algorithm is suitable for traffic light optimization. The Q learning algorithm was verified on a road artery with three intersections. To estimate the effectiveness and efficiency of the proposed algorithm, a comparison with an actuated signal plan was carried out. The results (average delay per vehicle and the number of vehicles that left the road network) show that the Q learning algorithm outperforms the actuated signal controllers. The proposed algorithm converges to the minimal delay per vehicle regardless of the stochastic nature of traffic. In this research, the impact of the model parameters (learning rate, exploration rate, influence of communication between agents and reward type) on algorithm effectiveness was analysed as well.}, number={2}, journal={Promet - Traffic\&Transportation}, author={Marsetič, Rok and Šemrov, Darja and Žura, Marijan}, year={2014}, month={Apr.}, pages={101-108} }