mc4_validation.py
"""mC4 dataset based on Common Crawl."""
import gzip
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
A colossal, cleaned version of Common Crawl's web crawl corpus.
Based on the Common Crawl dataset: https://commoncrawl.org.
This is the processed version of Google's mC4 dataset, prepared by AllenAI.
"""
_CITATION = """
@article{2019t5,
    author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
    title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
    journal = {arXiv e-prints},
    year = {2019},
    archivePrefix = {arXiv},
    eprint = {1910.10683},
}
"""
_URL = "https://github.com/allenai/allennlp/discussions/5056"
_DATA_URL = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/multilingual/c4-{language}{split_suffix}.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
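# Illustrative example (not part of the original script): for language="en" the validation split
# uses split_suffix="-validation" and 128 shards, so index=0 resolves the template above to
# ".../multilingual/c4-en-validation.tfrecord-00000-of-00128.json.gz".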
_LANGUAGES = [
"af",
"am",
"ar",
"az",
"be",
"bg",
"bg-Latn",
"bn",
"ca",
"ceb",
"co",
"cs",
"cy",
"da",
"de",
"el",
"el-Latn",
"en",
"eo",
"es",
"et",
"eu",
"fa",
"fi",
"fil",
"fr",
"fy",
"ga",
"gd",
"gl",
"gu",
"ha",
"haw",
"hi",
"hi-Latn",
"hmn",
"ht",
"hu",
"hy",
"id",
"ig",
"is",
"it",
"iw",
"ja",
"ja-Latn",
"jv",
"ka",
"kk",
"km",
"kn",
"ko",
"ku",
"ky",
"la",
"lb",
"lo",
"lt",
"lv",
"mg",
"mi",
"mk",
"ml",
"mn",
"mr",
"ms",
"mt",
"my",
"ne",
"nl",
"no",
"ny",
"pa",
"pl",
"ps",
"pt",
"ro",
"ru",
"ru-Latn",
"sd",
"si",
"sk",
"sl",
"sm",
"sn",
"so",
"sq",
"sr",
"st",
"su",
"sv",
"sw",
"ta",
"te",
"tg",
"th",
"tr",
"uk",
"und",
"ur",
"uz",
"vi",
"xh",
"yi",
"yo",
"zh",
"zh-Latn",
"zu",
]
_N_SHARDS_PER_SPLIT = {
'af': {'validation': 1},
'am': {'validation': 1},
'ar': {'validation': 4},
'az': {'validation': 1},
'be': {'validation': 1},
'bg': {'validation': 1},
'bg-Latn': {'validation': 1},
'bn': {'validation': 1},
'ca': {'validation': 1},
'ceb': {'validation': 1},
'co': {'validation': 1},
'cs': {'validation': 2},
'cy': {'validation': 1},
'da': {'validation': 1},
'de': {'validation': 16},
'el': {'validation': 2},
'el-Latn': {'validation': 1},
'en': {'validation': 128},
'eo': {'validation': 1},
'es': {'validation': 16},
'et': {'validation': 1},
'eu': {'validation': 1},
'fa': {'validation': 2},
'fi': {'validation': 1},
'fil': {'validation': 1},
'fr': {'validation': 16},
'fy': {'validation': 1},
'ga': {'validation': 1},
'gd': {'validation': 1},
'gl': {'validation': 1},
'gu': {'validation': 1},
'ha': {'validation': 1},
'haw': {'validation': 1},
'hi': {'validation': 2},
'hi-Latn': {'validation': 1},
'hmn': {'validation': 1},
'ht': {'validation': 1},
'hu': {'validation': 2},
'hy': {'validation': 1},
'id': {'validation': 4},
'ig': {'validation': 1},
'is': {'validation': 1},
'it': {'validation': 8},
'iw': {'validation': 1},
'ja': {'validation': 8},
'ja-Latn': {'validation': 1},
'jv': {'validation': 1},
'ka': {'validation': 1},
'kk': {'validation': 1},
'km': {'validation': 1},
'kn': {'validation': 1},
'ko': {'validation': 1},
'ku': {'validation': 1},
'ky': {'validation': 1},
'la': {'validation': 1},
'lb': {'validation': 1},
'lo': {'validation': 1},
'lt': {'validation': 1},
'lv': {'validation': 1},
'mg': {'validation': 1},
'mi': {'validation': 1},
'mk': {'validation': 1},
'ml': {'validation': 1},
'mn': {'validation': 1},
'mr': {'validation': 1},
'ms': {'validation': 1},
'mt': {'validation': 1},
'my': {'validation': 1},
'ne': {'validation': 1},
'nl': {'validation': 4},
'no': {'validation': 1},
'ny': {'validation': 1},
'pa': {'validation': 1},
'pl': {'validation': 4},
'ps': {'validation': 1},
'pt': {'validation': 4},
'ro': {'validation': 2},
'ru': {'validation': 32},
'ru-Latn': {'validation': 1},
'sd': {'validation': 1},
'si': {'validation': 1},
'sk': {'validation': 1},
'sl': {'validation': 1},
'sm': {'validation': 1},
'sn': {'validation': 1},
'so': {'validation': 1},
'sq': {'validation': 1},
'sr': {'validation': 1},
'st': {'validation': 1},
'su': {'validation': 1},
'sv': {'validation': 2},
'sw': {'validation': 1},
'ta': {'validation': 1},
'te': {'validation': 1},
'tg': {'validation': 1},
'th': {'validation': 1},
'tr': {'validation': 4},
'uk': {'validation': 2},
'und': {'validation': 32},
'ur': {'validation': 1},
'uz': {'validation': 1},
'vi': {'validation': 4},
'xh': {'validation': 1},
'yi': {'validation': 1},
'yo': {'validation': 1},
'zh': {'validation': 2},
'zh-Latn': {'validation': 1},
'zu': {'validation': 1}
}
class Mc4ValidationConfig(datasets.BuilderConfig):
"""BuilderConfig for mC4."""
def __init__(self, *args, languages, **kwargs):
"""BuilderConfig for mC4.
Args:
languages (:obj:`List[str]`): list of languages to load
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(
*args,
name="+".join(languages),
**kwargs,
)
self.languages = languages
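# Illustrative example: Mc4ValidationConfig(languages=["en", "ja"]) produces a config named
# "en+ja", and _split_generators below downloads the validation shards for both languages.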
class Mc4Validation(datasets.GeneratorBasedBuilder):
"""mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""
BUILDER_CONFIGS = [Mc4ValidationConfig(languages=[lang]) for lang in _LANGUAGES]
BUILDER_CONFIG_CLASS = Mc4ValidationConfig
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"text": datasets.Value("string"),
"timestamp": datasets.Value("string"),
"url": datasets.Value("string"),
}
),
supervised_keys=None,
homepage=_URL,
citation=_CITATION,
)
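    # A single record yielded by this builder looks like the following (values are illustrative):
    #   {"text": "…", "timestamp": "2020-02-22T22:24:31Z", "url": "https://example.com/page"}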
def _split_generators(self, dl_manager):
data_urls = {}
for split in ["validation"]:
data_urls[split] = [
_DATA_URL.format(
language=lang,
split_suffix="-validation" if split == "validation" else "",
index=index,
n_shards=_N_SHARDS_PER_SPLIT[lang][split],
)
for lang in self.config.languages
for index in range(_N_SHARDS_PER_SPLIT[lang][split])
]
        # This builder only exposes the validation split, so only those shards are downloaded.
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
            ),
        ]
def _generate_examples(self, filepaths):
"""This function returns the examples in the raw (text) form by iterating on all the files."""
id_ = 0
for filepath in filepaths:
logger.info("generating examples from = %s", filepath)
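            # Each shard is a gzip-compressed JSON Lines file: one JSON document per line.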
with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
for line in f:
if line:
example = json.loads(line)
yield id_, example
id_ += 1
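

if __name__ == "__main__":
    # Minimal usage sketch (an assumption, not part of the original script): load the validation
    # split for one language through the legacy script-based loader of the `datasets` library.
    # The local script path and the chosen language are illustrative; newer `datasets` releases
    # may require trust_remote_code=True or may no longer support script-based datasets.
    dataset = datasets.load_dataset("mc4_validation.py", languages=["en"], split="validation")
    print(len(dataset), dataset[0]["url"])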