@inproceedings{GaierAsterothMouret2020,
  author    = {Gaier, Adam and Asteroth, Alexander and Mouret, Jean-Baptiste},
  title     = {Discovering Representations for Black-box Optimization},
  booktitle = {{GECCO} '20: Proceedings of the 2020 Genetic and Evolutionary
               Computation Conference, July 8--12, 2020, Canc{\'u}n, Mexico},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-7128-5},
  doi       = {10.1145/3377930.3390221},
  pages     = {103--111},
  year      = {2020},
  abstract  = {The encoding of solutions in black-box optimization is a
               delicate, handcrafted balance between expressiveness and domain
               knowledge---between exploring a wide variety of solutions, and
               ensuring that those solutions are useful. Our main insight is
               that this process can be automated by generating a dataset of
               high-performing solutions with a quality diversity algorithm
               (here, MAP-Elites), then learning a representation with a
               generative model (here, a Variational Autoencoder) from that
               dataset. Our second insight is that this representation can be
               used to scale quality diversity optimization to higher
               dimensions---but only if we carefully mix solutions generated
               with the learned representation and those generated with
               traditional variation operators. We demonstrate these
               capabilities by learning a low-dimensional encoding for the
               inverse kinematics of a thousand joint planar arm. The results
               show that learned representations make it possible to solve
               high-dimensional problems with orders of magnitude fewer
               evaluations than the standard MAP-Elites, and that, once solved,
               the produced encoding can be used for rapid optimization of
               novel, but similar, tasks. The presented techniques not only
               scale up quality diversity algorithms to high dimensions, but
               show that black-box optimization encodings can be automatically
               learned, rather than hand designed.},
  language  = {en},
}