@inproceedings{32,
  author    = {Sanghwa Lee and Jaeyoung Lee and Ichiro Hasuo},
  title     = {Predictive PER: Balancing Priority and Diversity towards Stable Deep Reinforcement Learning},
  abstract  = {
    Prioritized experience replay (PER) samples important transitions, rather than sampling uniformly, to improve the performance of a deep reinforcement learning agent. We claim that such prioritization has to be balanced with sample diversity to stabilize the DQN and prevent forgetting. Our proposed improvement over PER, called Predictive PER (PPER), takes three countermeasures (TDInit, TDClip, TDPred) to (i) eliminate priority outliers and explosions and (ii) improve the diversity of samples and their distributions, weighted by priorities, both of which stabilize the DQN. The most notable of the three is the introduction of a second DNN, called TDPred, to generalize in-distribution priorities. An ablation study and full experiments with Atari games show that each countermeasure, in its own way, and PPER as a whole successfully enhance stability and thus performance over PER.
  },
  booktitle = {NeurIPS 2020 Deep Reinforcement Learning Workshop},
  year      = {2020},
  url       = {https://sites.google.com/view/deep-rl-workshop-neurips2020/home},
}