<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Subramanian, Sriram Ganapathi</style></author><author><style face="normal" font="default" size="100%">Sambee, Jaspreet Singh</style></author><author><style face="normal" font="default" size="100%">Ghojogh, Benyamin</style></author><author><style face="normal" font="default" size="100%">Crowley, Mark</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Decision Assist For Self-Driving Cars</style></title><secondary-title><style face="normal" font="default" size="100%">31st Canadian Conference on Artificial Intelligence, Canadian AI 2018</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">autonomous cars</style></keyword><keyword><style  face="normal" font="default" size="100%">machine learning</style></keyword><keyword><style  face="normal" font="default" size="100%">obstacle avoidance</style></keyword><keyword><style  face="normal" font="default" size="100%">path planning</style></keyword><keyword><style  face="normal" font="default" size="100%">reinforcement learning</style></keyword><keyword><style  face="normal" font="default" size="100%">weather estimation</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2018</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007%2F978-3-319-89656-4_44</style></url></web-urls></urls><edition><style face="normal" font="default" size="100%">Lecture Notes in Artificial Intelligence</style></edition><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style 
face="normal" font="default" size="100%">Toronto, Ontario, Canada</style></pub-location><volume><style face="normal" font="default" size="100%">10832</style></volume><pages><style face="normal" font="default" size="100%">381-387</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Research into self-driving cars has grown enormously in the last decade primarily due to the advances in the fields of machine intelligence and image processing. An under-appreciated aspect of self-driving cars is actively avoiding high traffic zones, low visibility zones, and routes with rough weather conditions by learning different conditions and making decisions based on trained experiences.&amp;nbsp;&lt;br&gt;This paper addresses this challenge by introducing a novel hierarchical structure for dynamic path planning and experiential learning for vehicles.&amp;nbsp;&lt;br&gt;A multistage system is proposed for detecting and compensating for weather, lighting, and traffic conditions as well as a novel adaptive path planning algorithm named Checked State A3C. This algorithm improves upon the existing A3C Reinforcement Learning (RL) algorithm by adding state memory which provides the ability to learn an adaptive model of the best decisions to take from experience.</style></abstract></record></records></xml>