@inproceedings{16, keywords = {Atari games, deep Q-learning, DQN, forgetting, prioritized experience replay, Reinforcement learning, stability}, author = {Sanghwa Lee and Jaeyoung Lee and Ichiro Hasuo}, title = {Predictive PER: Balancing Priority and Diversity Towards Stable Deep Reinforcement Learning}, abstract = {
Prioritized experience replay (PER) samples important transitions preferentially, rather than uniformly, to improve the data efficiency of a deep reinforcement learning agent. We claim that such prioritization must be balanced with sample diversity in order to stabilize the deep Q-network (DQN) and prevent severe forgetting. Our proposed improvement over PER, called Predictive PER (PPER), takes three countermeasures (TDInit, TDClip, TDPred) to (i) eliminate priority outliers and explosions and (ii) improve the diversity of samples and their priority-weighted distributions. Both contribute to stabilizing the learning process and thus to forgetting less. The most notable of the three is TDPred, a second DNN introduced to generalize in-distribution priorities. Ablation and experimental studies with Atari games show that each countermeasure in its own way, and PPER as a whole, successfully enhance stability and hence performance over PER.
}, booktitle = {2021 International Joint Conference on Neural Networks (IJCNN)}, year = {2021}, publisher = {IEEE}, address = {Shenzhen, China (virtual)}, doi = {10.1109/IJCNN52387.2021.9534243}, }
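The abstract describes balancing priority against sample diversity, in part by suppressing priority outliers through clipping of TD errors. Below is a minimal, generic Python sketch of proportional prioritized replay with a clipped priority, intended only to illustrate that idea; it is not the authors' PPER implementation, and the names `ClippedPrioritizedBuffer`, `td_clip`, `alpha`, and `beta` are assumed for illustration.

```python
# Generic sketch of proportional prioritized experience replay with TD-error
# clipping -- an illustration of the priority-vs-diversity trade-off discussed
# in the abstract, not the paper's PPER method. Hyperparameters are assumed.
import numpy as np

class ClippedPrioritizedBuffer:
    def __init__(self, capacity, alpha=0.6, td_clip=1.0):
        self.capacity = capacity
        self.alpha = alpha        # priority exponent (0 = uniform sampling)
        self.td_clip = td_clip    # cap on |TD error| to suppress priority outliers
        self.data, self.priorities = [], []

    def add(self, transition, td_error):
        # clipped, exponentiated TD error serves as the sampling priority
        p = min(abs(td_error), self.td_clip) ** self.alpha
        if len(self.data) >= self.capacity:
            self.data.pop(0)
            self.priorities.pop(0)
        self.data.append(transition)
        self.priorities.append(p)

    def sample(self, batch_size, beta=0.4):
        probs = np.asarray(self.priorities)
        probs = probs / probs.sum()
        idx = np.random.choice(len(self.data), size=batch_size, p=probs)
        # importance-sampling weights correct the bias of non-uniform sampling
        weights = (len(self.data) * probs[idx]) ** (-beta)
        weights = weights / weights.max()
        return [self.data[i] for i in idx], idx, weights

    def update(self, idx, td_errors):
        # refresh priorities after the learner recomputes TD errors
        for i, e in zip(idx, td_errors):
            self.priorities[i] = min(abs(e), self.td_clip) ** self.alpha
```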