@article{discovery10188620,
  author    = {Aguirre, Fernando and Sebastian, Abu and Le Gallo, Manuel and Song, Wenhao and Wang, Tong and Yang, J Joshua and Lu, Wei and Chang, Meng-Fan and Ielmini, Daniele and Yang, Yuchao and Mehonic, Adnan and Kenyon, Anthony and Villena, Marco A and Rold{\'a}n, Juan B and Wu, Yuting and Hsu, Hung-Hsi and Raghavan, Nagarajan and Su{\~n}{\'e}, Jordi and Miranda, Enrique and Eltawil, Ahmed and Setti, Gianluca and Smagulova, Kamilya and Salama, Khaled N and Krestinskaya, Olga and Yan, Xiaobing and Ang, Kah-Wee and Jain, Samarth and Li, Sifan and Alharbi, Osamah and Pazos, Sebastian and Lanza, Mario},
  title     = {Hardware implementation of memristor-based artificial neural networks},
  journal   = {Nature Communications},
  volume    = {15},
  month     = {March},
  year      = {2024},
  publisher = {Springer Science and Business Media LLC},
  issn      = {2041-1723},
  url       = {https://doi.org/10.1038/s41467-024-45670-9},
  keywords  = {Electrical and electronic engineering, Electronic devices},
  note      = {This article is licensed under a Creative Commons Attribution 4.0 International License (http://creativecommons.org/licenses/by/4.0/).},
  abstract  = {Artificial Intelligence (AI) is currently experiencing a bloom driven by deep learning (DL) techniques, which rely on networks of connected simple computing units operating in parallel. The low communication bandwidth between memory and processing units in conventional von Neumann machines does not support the requirements of emerging applications that rely extensively on large sets of data. More recent computing paradigms, such as high parallelization and near-memory computing, help alleviate the data communication bottleneck to some extent, but paradigm-shifting concepts are required. Memristors, a novel beyond-complementary metal-oxide-semiconductor (CMOS) technology, are a promising choice for memory devices due to their unique intrinsic device-level properties, enabling both storing and computing with a small, massively-parallel footprint at low power. Theoretically, this directly translates to a major boost in energy efficiency and computational throughput, but various practical challenges remain. In this work we review the latest efforts for achieving hardware-based memristive artificial neural networks (ANNs), describing in detail the working principles of each block and the different design alternatives with their own advantages and disadvantages, as well as the tools required for accurate estimation of performance metrics. Ultimately, we aim to provide a comprehensive protocol of the materials and methods involved in memristive neural networks to those aiming to start working in this field and the experts looking for a holistic approach.}
}