@inproceedings{17,
  author = {Jaeyoung Lee and Sean Sedwards and Krzysztof Czarnecki},
  title = {Recursive Constraints to Prevent Instability in Constrained Reinforcement Learning},
  keywords = {constrained Markov decision process, constrained reinforcement learning, learning instability, uniform optimality},
  abstract = {

We consider the challenge of finding a deterministic policy for a Markov decision process that uniformly (i.e., in all states) maximizes one reward subject to a probabilistic constraint over a different reward. Existing solutions do not fully address our precise problem definition, which nevertheless arises naturally in the context of safety-critical robotic systems. This class of problem is known to be hard, and the combined requirements of determinism and uniform optimality can additionally create learning instability. In this work, after describing and motivating our problem with a simple example, we present a constrained reinforcement learning algorithm that prevents learning instability using recursive constraints. Our proposed approach also admits an approximate form that improves efficiency while remaining conservative with respect to the constraint.

  },
  booktitle = {Multi-Objective Decision Making Workshop (MoDeM 2021)},
  year = {2021},
  address = {Online at http://modem2021.cs.nuigalway.ie/},
  url = {https://arxiv.org/abs/2201.07958},
}
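
The problem statement in the abstract can be made concrete with a short formalization. The following is a minimal LaTeX sketch in standard constrained-MDP notation, not the authors' own formulation: the primary reward r, secondary reward c, discount factor \gamma, threshold d, and risk level \delta are illustrative assumptions, and the paper's exact constraint form may differ.

% Sketch (assumed notation): a deterministic policy that is optimal
% uniformly over initial states, among the policies satisfying a chance
% constraint on the return of a second reward c.
\begin{align*}
  &\text{find deterministic } \pi : S \to A \text{ such that, for every } s \in S, \\
  &\qquad \pi \in \operatorname*{arg\,max}_{\pi' \in \Pi(s)}
     \mathbb{E}_{\pi'}\!\Bigl[\,\sum_{t=0}^{\infty} \gamma^{t}\, r(s_t, a_t) \Bigm| s_0 = s \Bigr], \\
  &\text{where } \Pi(s) = \Bigl\{ \pi' \text{ deterministic} :
     \Pr\nolimits_{\pi'}\Bigl(\,\sum_{t=0}^{\infty} \gamma^{t}\, c(s_t, a_t) \ge d \Bigm| s_0 = s \Bigr) \ge 1 - \delta \Bigr\}.
\end{align*}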