# splittedspanish3bwc/splittedspanish3bwc.py

import json
import os

import datasets

from paths import _URLS
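
# `paths._URLS` is not shown in this file. Judging by how it is used below, it
# is assumed to be a dict mapping config names to zip download URLs, along the
# lines of (hypothetical values):
#
#   _URLS = {
#       "dgt_10": "https://example.org/corpora/dgt_10.zip",
#   }
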
_CITATION = """\
@dataset{jose_canete_2019_3247731,
author = {José Cañete},
title = {Compilation of Large Spanish Unannotated Corpora},
month = may,
year = 2019,
publisher = {Zenodo},
doi = {10.5281/zenodo.3247731},
url = {https://doi.org/10.5281/zenodo.3247731}
}
"""

_DESCRIPTION = "Unannotated Spanish 3 Billion Words Corpora. This repository gathers a compilation of corpora in the Spanish language."
_HOMEPAGE = "https://github.com/josecannete/spanish-corpora"
_LICENSE = "MIT License (https://github.com/josecannete/spanish-corpora/blob/master/LICENSE)"


class NewDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig that carries the download URL and the name of the JSON file inside the zip."""

    def __init__(self, name_into_zip, features, data_url, **kwargs):
        super(NewDatasetConfig, self).__init__(description="", version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.data_url = data_url
        self.name_into_zip = name_into_zip


class NewDataset(datasets.GeneratorBasedBuilder):
    # Use the first config declared in `_URLS` as the default.
    DEFAULT_CONFIG_NAME = list(_URLS.keys())[0]

    # One config per entry in `_URLS`. Config names are expected to have the
    # form "<subset>_<variant>", from which the name of the JSON file inside
    # each zip is derived.
    BUILDER_CONFIGS = [
        NewDatasetConfig(
            name=name,
            name_into_zip="preprocessed_{}_lower_{}.json".format(name.split("_")[0], name.split("_")[1]),
            features=["idx", "text", "subset"],
            data_url=url,
        )
        for name, url in _URLS.items()
    ]
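
    # For illustration only: a hypothetical `_URLS` entry
    # `"dgt_10": "https://example.org/corpora/dgt_10.zip"` would yield a config
    # named "dgt_10" whose `name_into_zip` is "preprocessed_dgt_lower_10.json".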

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION + self.config.description,
            features=datasets.Features(
                {
                    "idx": datasets.Value(dtype="int32"),
                    "text": datasets.Value(dtype="string"),
                    "subset": datasets.Value(dtype="string"),
                }
            ),
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the zip for this config and extract it locally.
        data_dir = dl_manager.download_and_extract(self.config.data_url)
        # The corpus is not pre-split, so everything goes into a single "all" split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.ALL,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.name_into_zip),
                    "subset": self.config.name.split("_")[0],
                },
            )
        ]

    def _generate_examples(self, filepath, subset):
        # The extracted JSON is assumed to hold a top-level "text" field mapping
        # record keys to document strings, e.g. {"text": {"0": "...", "1": "..."}}.
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        id_ = 0
        for _, text in data["text"].items():
            yield id_, {
                "idx": id_,
                "text": text.strip(),
                "subset": subset,
            }
            id_ += 1
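

# A minimal usage sketch, assuming a version of the `datasets` library that
# accepts a path to a local loading script and that `paths.py` sits next to
# this file. The printed record shape follows the schema declared in `_info`.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, name=NewDataset.DEFAULT_CONFIG_NAME, split="all")
    print(ds[0])  # e.g. {"idx": 0, "text": "...", "subset": "..."}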