@inproceedings{14, keywords = {behavioural fidelity, decision tree, distillation, DQN, imitation learning, reinforcement learning, verification}, author = {Vahdat Abdelzad and Jaeyoung Lee and Sean Sedwards and Soheil Soltani and Krzysztof Czarnecki}, title = {Non-divergent Imitation for Verification of Complex Learned Controllers}, abstract = {
We consider the problem of verifying complex learned controllers using distillation. In contrast to previous work, we require that the distilled model maintains behavioural fidelity with an oracle, defining the notion of non-divergent path length (NPL) as a metric. We demonstrate that current distillation approaches with proven accuracy bounds do not have high expected NPL and can be outperformed by naive behavioural cloning. We thus propose a distillation algorithm that typically gives greater expected NPL, improved sample efficiency, and more compact models. We prove properties of NPL maximization and demonstrate the performance of our algorithm on deep Q-network controllers for three standard learning environments that have been used in this context: Pong, CartPole and MountainCar.
}, year = {2021}, booktitle = {2021 International Joint Conference on Neural Networks (IJCNN)}, publisher = {IEEE}, address = {Shenzhen, China (virtual)}, doi = {10.1109/IJCNN52387.2021.9533410}, }