@article{discovery10194864,
  author    = {Lopes, Vasco and Carlucci, Fabio Maria and Esperanca, Pedro M and Singh, Marco and Yang, Antoine and Gabillon, Victor and Xu, Hang and Chen, Zewei and Wang, Jun},
  title     = {{MANAS}: Multi-Agent Neural Architecture Search},
  journal   = {Machine Learning},
  year      = {2024},
  month     = jan,
  volume    = {113},
  number    = {1},
  pages     = {73--96},
  publisher = {Springer Verlag},
  issn      = {0885-6125},
  doi       = {10.1007/s10994-023-06379-w},
  note      = {This version is the author accepted manuscript. For information on re-use, please refer to the publisher's terms and conditions.},
  abstract  = {The Neural Architecture Search (NAS) problem is typically formulated as a graph search problem where the goal is to learn the optimal operations over edges in order to maximize a graph-level global objective. Due to the large architecture parameter space, efficiency is a key bottleneck preventing NAS from its practical use. In this work, we address the issue by framing NAS as a multi-agent problem where agents control a subset of the network and coordinate to reach optimal architectures. We provide two distinct lightweight implementations, with reduced memory requirements (1/8th of state-of-the-art), and performances above those of much more computationally expensive methods. Theoretically, we demonstrate vanishing regrets of the form {$O(\sqrt{T})$}, with T being the total number of rounds. Finally, we perform experiments on CIFAR-10 and ImageNet, and aware that random search and random sampling are (often ignored) effective baselines, we conducted additional experiments on 3 alternative datasets, with complexity constraints, and 2 network configurations, and achieve competitive results in comparison with the baselines and other methods.},
  keywords  = {AutoML, Computer Science, Artificial Intelligence, Computer vision, Multi arm bandits, Neural architecture search, Object recognition, Science \& Technology, Technology},
}