@article{lupion2023accelerating,
  author            = {Lupión, Marcos and Cruz, N. C. and Sanjuan, Juan F. and Paechter, Ben and Ortigosa, Pilar M.},
  title             = {Accelerating Neural Network Architecture Search Using Multi-{GPU} High-Performance Computing},
  journal           = {Journal of Supercomputing},
  year              = {2023},
  volume            = {79},
  pages             = {7609--7625},
  doi               = {10.1007/s11227-022-04960-z},
  issn              = {0920-8542},
  publisher         = {Springer},
  url               = {http://researchrepository.napier.ac.uk/Output/2963070},
  publicationstatus = {Published},
  keywords          = {Centre for Artificial Intelligence and Robotic, AI and Technologies, Artificial neural networks, Neural network design, HPC, TLBO, Multi-GPU},
  abstract          = {Neural networks stand out from artificial intelligence because they can complete challenging tasks, such as image classification. However, designing a neural network for a particular problem requires experience and tedious trial and error. Automating this process defines a research field usually relying on population-based meta-heuristics. This kind of optimizer generally needs numerous function evaluations, which are computationally demanding in this context as they involve building, training, and evaluating different neural networks. Fortunately, these algorithms are also well suited for parallel computing. This work describes how the teaching–learning-based optimization algorithm has been adapted for designing neural networks exploiting a multi-GPU high-performance computing environment. The optimizer, not applied before for this purpose up to the authors’ knowledge, has been selected because it lacks specific parameters and is compatible with large-scale optimization. Thus, its configuration does not result in another problem and could design architectures with many layers. The parallelization scheme is decoupled from the optimizer. It can be seen as an external evaluation service managing multiple GPUs for promising neural network designs, even at different machines, and multiple CPU’s for low-performing solutions. This strategy has been tested in designing a neural network for image classification based on the CIFAR-10 dataset. The architectures found outperform human designs, and the sequential process is accelerated 4.2 times with 4 GPUs and 96 cores thanks to parallelization, being the ideal speed up 4.39 in this case.},
}