@misc{10481/102765,
  year = {2025},
  month = {2},
  url = {https://hdl.handle.net/10481/102765},
  abstract = {Fairness in artificial intelligence has emerged as a critical ethical concern, with most research focusing on classification tasks despite the prevalence of regression problems in real-world applications. We address this gap by presenting a general procedure for measuring fairness in regression problems, focusing on statistical parity as a fairness metric. Through extensive experimental analysis, we evaluate how different methodological choices, such as discretization methods, algorithm selection, and parameter optimization, impact fairness outcomes in regression tasks. Our primary contribution is a systematic framework that helps practitioners assess and compare fairness across various approaches to solving regression problems, providing clear guidelines for selecting appropriate strategies based on specific problem requirements. The results demonstrate the importance of carefully considering procedural decisions when evaluating fairness in regression contexts, as these choices influence both model performance and fairness outcomes.},
  organization = {Universidad de Granada / CBUA; Grant no. PI20/01435, funded by the National Institute of Health Carlos III (ISCIII) of Spain and co-funded by the European Union; Grant no. C-ING-206-UGR23, Applied Research Projects of the University of Granada Research and Transfer Plan 2023, funded by the Andalusia ERDF Operational Program 2021-2027},
  publisher = {Springer},
  keywords = {Fair AI, Regression, Statistical parity},
  title = {General procedure to measure fairness in regression problems},
  doi = {10.1007/s41060-025-00721-2},
  author = {Suárez Ferreira, Juliett and Slavkovik, Marija and Casillas Barranquero, Jorge}
}