@misc{10481/92007,
  year         = {2024},
  month        = {2},
  url          = {https://hdl.handle.net/10481/92007},
  abstract     = {Interpretable deep learning models are increasingly important in domains where transparent decision-making is required. In this field, the interaction of the user with the model can contribute to its interpretability. In this research work, we present an innovative approach that combines soft decision trees, neural symbolic learning, and concept learning to create an image classification model that enhances interpretability and user interaction, control, and intervention. The key novelty of our method relies on the fusion of an interpretable architecture with neural symbolic learning, allowing the incorporation of expert knowledge and user interaction. Furthermore, our solution facilitates the inspection of the model through queries in the form of first-order logic predicates. Our main contribution is a human-in-the-loop model resulting from the fusion of neural symbolic learning and an interpretable architecture. We validate the effectiveness of our approach through comprehensive experimental results, demonstrating competitive performance on challenging datasets when compared to state-of-the-art solutions.},
  organization = {HAT.tec GmbH},
  note         = {Funding for open access publishing: Universidad de Granada/CBUA.},
  publisher    = {Springer Nature},
  keywords     = {Soft decision trees, Concepts, XAI},
  title        = {Concept logic trees: enabling user interaction for transparent image classification and human-in-the-loop learning},
  doi          = {10.1007/s10489-024-05321-4},
  author       = {Morales Rodríguez, David and Pegalajar Cuéllar, Manuel and Morales Santos, Diego Pedro},
}