@misc{10481/88558,
  author       = {Gutiérrez Fandiño, Asier and Pérez Fernández, David and Armengol-Estapé, Jordi and Griol Barres, David and Kharitonova, Ksenia and Callejas Carrión, Zoraida},
  title        = {esCorpius-m: A massive multilingual crawling corpus with a focus on Spanish},
  year         = {2023},
  publisher    = {MDPI},
  doi          = {10.3390/app132212155},
  url          = {https://hdl.handle.net/10481/88558},
  keywords     = {Corpus, Dataset, Massive},
  abstract     = {In recent years, transformer-based models have played a significant role in advancing language modeling for natural language processing. However, they require substantial amounts of data and there is a shortage of high-quality non-English corpora. Some recent initiatives have introduced multilingual datasets obtained through web crawling. However, there are notable limitations in the results for some languages, including Spanish. These datasets are either smaller compared to other languages or suffer from lower quality due to insufficient cleaning and deduplication. In this paper, we present esCorpius-m, a multilingual corpus extracted from around 1 petabyte of Common Crawl data. It is the most extensive corpus for some languages with such a level of high-quality content extraction, cleanliness, and deduplication. Our data curation process involves an efficient cleaning pipeline and various deduplication methods that maintain the integrity of document and paragraph boundaries. We also ensure compliance with EU regulations by retaining both the source web page URL and the WARC shared origin URL.},
  organization = {This publication is part of the project “CONVERSA: Effective and efficient resources and models for transformative conversational AI in Spanish and co-official languages” (TED2021-132470B-I00), funded by MCIN/AEI/10.13039/501100011033 and by the European Union “NextGenerationEU/PRTR”.},
}