"""Perplexity Sampled mC4 dataset based on Common Crawl."""
import gzip
import json

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """
50 million documents in Spanish extracted from mC4 by applying perplexity sampling via mc4-sampling: "https://huggingface.co/datasets/bertin-project/mc4-sampling". Please refer to the BERTIN Project for details. The original dataset is the Multilingual Colossal, Cleaned version of Common Crawl's web crawl corpus (mC4), based on the Common Crawl dataset: "https://commoncrawl.org", and processed by AllenAI.
"""

_CITATION = """
@article{2019t5,
    author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
    title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
    journal = {arXiv e-prints},
    year = {2019},
    archivePrefix = {arXiv},
    eprint = {1910.10683},
}
"""

_URL = "https://github.com/allenai/allennlp/discussions/5056"
_DATA_URL_VALIDATION = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/multilingual/c4-es-validation.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
_DATA_URL_TRAIN = "https://huggingface.co/datasets/bertin-project/mc4-es-sampled/resolve/main/mc4-es-train-50M-{config}-shard-{index:04d}-of-{n_shards:04d}.json.gz"
_CONFIGS = [
    "random",
    "stepwise",
    "gaussian",
]
_N_SHARDS_PER_SPLIT = {
    "random": {"train": 1024, "validation": 16},
    "stepwise": {"train": 1024, "validation": 16},
    "gaussian": {"train": 1024, "validation": 16},
}
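
# With the "gaussian" config, for example, the first training shard URL above
# resolves to ".../mc4-es-train-50M-gaussian-shard-0001-of-1024.json.gz";
# validation shards are fetched from the upstream allenai/c4 repository.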


class Mc4EsSampledConfig(datasets.BuilderConfig):
    """BuilderConfig for mC4."""

    def __init__(self, *args, configs, **kwargs):
        """BuilderConfig for mC4.

        Args:
            configs (:obj:`List[str]`): list of configs to load
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join(configs),
            **kwargs,
        )
        self.configs = configs
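
# Since config names are joined with "+", several sampling strategies can, in
# principle, be requested as a single config (a sketch; `configs` is forwarded
# to this builder config by `load_dataset` as a config keyword argument):
#
#     combined = datasets.load_dataset(
#         "bertin-project/mc4-es-sampled",
#         configs=["stepwise", "gaussian"],
#         split="train",
#         streaming=True,
#     )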


class Mc4EsSampled(datasets.GeneratorBasedBuilder):
    """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""

    BUILDER_CONFIGS = [Mc4EsSampledConfig(configs=[config]) for config in _CONFIGS]
    BUILDER_CONFIG_CLASS = Mc4EsSampledConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_urls = {}
        data_urls["train"] = [
            _DATA_URL_TRAIN.format(
                config=config,
                index=index + 1,  # train shards are 1-indexed (0001..1024)
                n_shards=_N_SHARDS_PER_SPLIT[config]["train"],
            )
            for config in self.config.configs
            for index in range(_N_SHARDS_PER_SPLIT[config]["train"])
        ]
        data_urls["validation"] = [
            _DATA_URL_VALIDATION.format(
                index=index,  # validation shards are 0-indexed (00000..00015)
                n_shards=_N_SHARDS_PER_SPLIT[config]["validation"],
            )
            for config in self.config.configs
            # Skip the last validation shard, which is broken in the original allenai/c4 repo.
            for index in range(_N_SHARDS_PER_SPLIT[config]["validation"] - 1)
        ]
        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
            ),
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("Generating examples from %s", filepath)
            # `open` is patched by `datasets` when streaming, so pass a binary file
            # object to `gzip.open` instead of the path itself.
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
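
# Each yielded example is a (key, dict) pair whose dict matches the features
# declared in `_info`, e.g. (values elided):
#
#     (0, {"text": "...", "timestamp": "...", "url": "..."})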