@article{10481/99358,
  author    = {Rodríguez Barroso, Nuria and Martínez Cámara, Eugenio and Luzón García, María Victoria and Herrera Triguero, Francisco},
  title     = {Backdoor Attacks-Resilient Aggregation Based on Robust Filtering of Outliers in Federated Learning for Image Classification},
  journal   = {Knowledge-Based Systems},
  volume    = {245},
  pages     = {108588},
  year      = {2022},
  publisher = {Elsevier},
  doi       = {10.1016/j.knosys.2022.108588},
  url       = {https://hdl.handle.net/10481/99358},
  keywords  = {Federated Learning, Backdoor attacks, Resilient aggregation, Robust filtering of outliers},
  abstract  = {Federated Learning is a distributed machine learning paradigm vulnerable to different kind of adversarial attacks, since its distributed nature and the inaccessibility of the data by the central server. In this work, we focus on model-poisoning backdoor attacks, because they are characterized by their stealth and effectiveness. We claim that the model updates of the clients of a federated learning setting follow a Gaussian distribution, and those ones with an outlier behavior in that distribution are likely to be adversarial clients. We propose a new federated aggregation operator called Robust Filtering of one-dimensional Outliers (RFOut-1d), which works as a resilient defensive mechanism to model-poisoning backdoor attacks. RFOut-1d is based on an univariate outlier detection method that filters out the model updates of the adversarial clients. The results on three federated image classification dataset show that RFOut-1d dissipates the impact of the backdoor attacks to almost nullifying them throughout all the learning rounds, as well as it keeps the performance of the federated learning model and it outperforms that state-of-the-art defenses against backdoor attacks.},
}