@inproceedings{discovery10083560,
  author    = {S{\ae}mundsson, S and Hofmann, K and Deisenroth, MP},
  title     = {Meta reinforcement learning with latent variable {G}aussian processes},
  booktitle = {Proceedings of the 34th Conference on Uncertainty in Artificial Intelligence (UAI 2018)},
  editor    = {G Elidan and K Kersting},
  series    = {Uncertainty in Artificial Intelligence (UAI)},
  volume    = {34},
  pages     = {642--652},
  publisher = {Association for Uncertainty in Artificial Intelligence (AUAI)},
  address   = {Monterey, CA, USA},
  month     = {August},
  year      = {2018},
  url       = {http://auai.org/uai2018/proceedings/papers/235.pdf},
  note      = {This version is the version of record. For information on re-use, please refer to the publisher's terms and conditions.},
  abstract  = {Learning from small data sets is critical in many practical applications where data collection is time-consuming or expensive, e.g., robotics, animal experiments or drug design. Meta learning is one way to increase the data efficiency of learning algorithms by generalizing learned concepts from a set of training tasks to unseen, but related, tasks. Often, this relationship between tasks is hard-coded or relies in some other way on human expertise. In this paper, we frame meta learning as a hierarchical latent variable model and infer the relationship between tasks automatically from data. We apply our framework in a model-based reinforcement learning setting and show that our meta-learning model effectively generalizes to novel tasks by identifying how new tasks relate to prior ones from minimal data. This results in up to a 60\% reduction in the average interaction time needed to solve tasks compared to strong baselines.}
}