"""EusCrawl dataset."""


import json

import datasets


_DESCRIPTION = """\
EusCrawl (http://www.ixa.eus/euscrawl/) is a high-quality corpus for
Basque comprising 12.5 million documents and 423 million tokens,
totalling 2.1 GiB of uncompressed text. EusCrawl was built using
ad-hoc scrapers to extract text from 33 Basque websites with
high-quality content, resulting in cleaner text compared to
general-purpose approaches.

We do not claim ownership of any document in the corpus. All documents
we collected were published under a Creative Commons license on their
original website, and the specific variant can be found in the
"license" field of each document. Should you consider that our data
contains material that is owned by you and that you would not like to
be reproduced here, please contact Aitor Soroa at a.soroa@ehu.eus.

For more details about the corpus, refer to our paper "Artetxe M.,
Aldabe I., Agerri R., Perez-de-Viñaspre O., Soroa A. (2022). Does
Corpus Quality Really Matter for Low-Resource Languages?"
https://arxiv.org/abs/2203.08111

If you use our corpus or models for academic research, please cite the
paper in question:

@misc{artetxe2022euscrawl,
    title={Does corpus quality really matter for low-resource languages?},
    author={Mikel Artetxe and Itziar Aldabe and Rodrigo Agerri and Olatz Perez-de-Viñaspre and Aitor Soroa},
    year={2022},
    eprint={2203.08111},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}

For questions, please contact Aitor Soroa at a.soroa@ehu.eus.
"""

_HOMEPAGE_URL = "https://ixa.ehu.eus/euscrawl/"

_CITATION = """\
@misc{artetxe2022euscrawl,
    title={Does corpus quality really matter for low-resource languages?},
    author={Mikel Artetxe and Itziar Aldabe and Rodrigo Agerri and Olatz Perez-de-Viñaspre and Aitor Soroa},
    year={2022},
    eprint={2203.08111},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_URL = "http://ixa.ehu.eus/euscrawl/files/euscrawl-v1-free-jsonl.tar.bz2"
# Path of the JSONL file inside the downloaded archive.
_FILEPATH = "euscrawl-v1-free-jsonl/euscrawl-v1.free.jsonl"


class EusCrawl(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "source": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "url": datasets.Value("string"),
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the .tar.bz2 archive; iter_archive streams its members
        # lazily instead of extracting them to disk.
        path = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": dl_manager.iter_archive(path)},
            )
        ]

    def _generate_examples(self, filepaths):
        # iter_archive yields (member_path, file_object) pairs; only the
        # JSONL member of the archive is relevant here.
        for filepath, file in filepaths:
            if filepath == _FILEPATH:
                for id, line in enumerate(file):
                    data = json.loads(line)
                    yield id, {
                        "id": id,
                        "title": data.get("title", ""),
                        "text": data.get("text", ""),
                        "source": data.get("source", ""),
                        "license": data.get("license", ""),
                        "url": data.get("url", ""),
                    }
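

# Example usage (a minimal sketch): assuming this script is saved locally as
# "euscrawl.py" and the EusCrawl download URL above is reachable, the corpus
# can be loaded through the standard `datasets` API:
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("euscrawl.py", split="train")
#     print(dataset[0]["title"], dataset[0]["license"])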
|