@inproceedings{a6f92b9736c1462097fe7375820e1433,
  author    = {Shen, William and Trevizan, Felipe and Toyer, Sam and Thi{\'e}baux, Sylvie and Xie, Lexing},
  title     = {Guiding Search with Generalized Policies for Probabilistic Planning},
  booktitle = {Proceedings of the 12th International Symposium on Combinatorial Search, SoCS 2019},
  editor    = {Surynek, Pavel and Yeoh, William},
  publisher = {AAAI Press},
  pages     = {97--105},
  year      = {2019},
  language  = {English},
  abstract  = {We examine techniques for combining generalized policies with search algorithms to exploit the strengths and overcome the weaknesses of each when solving probabilistic planning problems. The Action Schema Network (ASNet) is a recent contribution to planning that uses deep learning and neural networks to learn generalized policies for probabilistic planning problems. ASNets are well suited to problems where local knowledge of the environment can be exploited to improve performance, but may fail to generalize to problems they were not trained on. Monte-Carlo Tree Search (MCTS) is a forward-chaining state space search algorithm for optimal decision making which performs simulations to incrementally build a search tree and estimate the values of each state. Although MCTS can achieve state-of-the-art results when paired with domain-specific knowledge, without this knowledge, MCTS requires a large number of simulations in order to obtain reliable state-value estimates. By combining ASNets with MCTS, we are able to improve the capability of an ASNet to generalize beyond the distribution of problems it was trained on, as well as enhance the navigation of the search space by MCTS.},
  note      = {Publisher Copyright: Copyright {\textcopyright} 2019, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved.; 12th International Symposium on Combinatorial Search, SoCS 2019 ; Conference date: 16-07-2019 Through 17-07-2019},
}