@article{10481/97790,
  author    = {Bennetot, Adrien and Donadello, Ivan and El Qadi El Haouari, Ayoub and Dragoni, Mauro and Frossard, Thomas and Wagner, Benedikt and Saranti, Anna and Tulli, Silvia and Trocan, Maria and Holzinger, Andreas and d'Avila Garcez, Artur and Díaz Rodríguez, Natalia Ana},
  title     = {A Practical Tutorial on {Explainable AI} Techniques},
  journal   = {ACM Computing Surveys},
  publisher = {Association for Computing Machinery},
  year      = {2024},
  month     = nov,
  doi       = {10.1145/3670685},
  url       = {https://hdl.handle.net/10481/97790},
  abstract  = {The past years have been characterized by an upsurge in opaque automatic decision support systems, such as Deep Neural Networks (DNNs). Although DNNs have great generalization and prediction abilities, it is difficult to obtain detailed explanations for their behavior. As opaque Machine Learning models are increasingly being employed to make important predictions in critical domains, there is a danger of creating and using decisions that are not justifiable or legitimate. Therefore, there is a general agreement on the importance of endowing DNNs with explainability. EXplainable Artificial Intelligence (XAI) techniques can serve to verify and certify model outputs and enhance them with desirable notions such as trustworthiness, accountability, transparency, and fairness. This guide is intended to be the go-to handbook for anyone with a computer science background aiming to obtain an intuitive insight from Machine Learning models accompanied by explanations out-of-the-box. The article aims to rectify the lack of a practical XAI guide by applying XAI techniques, in particular, day-to-day models, datasets and use-cases. In each chapter, the reader will find a description of the proposed method as well as one or several examples of use with Python notebooks. These can be easily modified to be applied to specific applications. We also explain what the prerequisites are for using each technique, what the user will learn about them, and which tasks they are aimed at.},
  keywords  = {Computer systems organization, Redundancy, Robotics, Network reliability},
  funding   = {Juan de la Cierva Incorporación; Austrian Science Fund (FWF), Project: P-32554; Juan de la Cierva Incorporación grant IJC2019-039152-I funded by MCIN/AEI /10.13039/501100011033 by “ESF Investing in your future”; MSCA Postdoctoral Fellowship (Grant agreement ID 101059332); Google Research Scholar Program; 2022 Leonardo Grant for Researchers and Cultural Creators from BBVA Foundation; European Union’s Horizon 2020 research and innovation programme under grant agreement No 765955 (ANIMATAS Innovative Training Network); European Union’s Horizon 2020 research and innovation programme under grant agreement No. 826078 (Feature Cloud); PNRR project INEST - Interconnected North-East Innovation Ecosystem (ECS00000043), under the NRRP MUR program funded by the NextGenerationEU; PNRR project FAIR - Future AI Research (PE00000013), under the NRRP MUR program funded by the NextGenerationEU},
}