Datasets: yhavinga/mc4_nl_cleaned
Tasks: Text Generation
Sub-tasks: language-modeling
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: extended
License: Open Data Commons Attribution License (ODC-By) v1.0
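Quick start (a minimal sketch, assuming the dataset is published on the Hugging Face Hub as yhavinga/mc4_nl_cleaned and loaded with one of the config names defined in the script below, e.g. "micro"):

import datasets

# Downloads the shards for the chosen config and returns a DatasetDict
# with "train" and "validation" splits.
ds = datasets.load_dataset("yhavinga/mc4_nl_cleaned", "micro")
print(ds["train"][0]["text"])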
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cleaned Dutch split of the mC4 corpus."""
import gzip
import json
import textwrap
from itertools import zip_longest

import datasets

logger = datasets.logging.get_logger(__name__)
_CITATION = """
@article{JMLR:v21:20-074,
    author  = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
    title   = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
    journal = {Journal of Machine Learning Research},
    year    = {2020},
    volume  = {21},
    number  = {140},
    pages   = {1-67},
    url     = {http://jmlr.org/papers/v21/20-074.html}
}
"""
_DESCRIPTION = """\
A thoroughly cleaned version of the Dutch portion of the multilingual
colossal, cleaned version of Common Crawl's web crawl corpus (mC4) by AllenAI.
Based on the Common Crawl dataset: https://commoncrawl.org.
This is the processed version of Google's mC4 dataset by AllenAI, with further cleaning
detailed in the repository README file.
"""
_HOMEPAGE = "https://github.com/allenai/allennlp/discussions/5056"
_LICENSE = "Open Data Commons Attribution License (ODC-By) v1.0"
_DATA_URL_NL = "https://huggingface.co/datasets/yhavinga/mc4_nl_cleaned/resolve/main/mc4_nl_cleaned/{split}/c4-nl{validation}-cleaned.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
_DATA_URL_EN = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/{name}/c4-{split}.{index:05d}-of-{n_shards:05d}.json.gz"
_C4_EN_VARIANT = "en"
_CONFIG_NAMES = ["micro", "tiny", "small", "medium", "large", "full"]
_CONFIG_EN_NL_SUFFIX = "_en_nl"
# Number of compressed shards to download per split, plus the estimated
# compressed size of the Dutch-only download.
_CONFIGS = dict(
    micro={"train": 2, "validation": 1, "estimate": "1GB"},
    tiny={"train": 100, "validation": 1, "estimate": "10GB"},
    small={"train": 250, "validation": 1, "estimate": "25GB"},
    medium={"train": 500, "validation": 2, "estimate": "50GB"},
    large={"train": 750, "validation": 3, "estimate": "75GB"},
    full={"train": 1024, "validation": 4, "estimate": "103GB"},
)
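# Illustration (not part of the loading logic): the "tiny" config downloads
# NL train shards 0-99 and NL validation shard 0; "tiny_en_nl" additionally
# downloads one English C4 shard per Dutch shard, roughly doubling the
# number of files fetched.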
class Mc4NlCleanedConfig(datasets.BuilderConfig):
    """BuilderConfig for mC4 NL Cleaned."""

    def __init__(self, **kwargs):
        """BuilderConfig for mC4 NL Cleaned.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
class Mc4(datasets.GeneratorBasedBuilder):
    """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""

    BUILDER_CONFIGS = [
        Mc4NlCleanedConfig(
            name=name,
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                f"""\
                A {name} cleaned version of the Dutch portion of the multilingual C4 corpus.
                Estimated size of compressed files: {_CONFIGS[name]['estimate']}
                """
            ),
        )
        for name in _CONFIG_NAMES
    ]
    BUILDER_CONFIGS += [
        Mc4NlCleanedConfig(
            name=f"{name}{_CONFIG_EN_NL_SUFFIX}",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                f"""\
                A {name} cleaned version of the Dutch and English portions of the multilingual C4 corpus.
                """
            ),
        )
        for name in _CONFIG_NAMES
    ]
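    # The configs above yield twelve names in total: "micro", "tiny", "small",
    # "medium", "large" and "full" for Dutch only, plus the "_en_nl" variants
    # ("micro_en_nl", ..., "full_en_nl") that interleave English C4 shards.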
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
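    # Each example carries exactly these three string fields, e.g. (values are
    # illustrative, not taken from the corpus):
    #   {"text": "Dit is een voorbeeldzin.",
    #    "timestamp": "2020-02-22T15:21:01Z",
    #    "url": "https://example.com/artikel"}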
    def _split_generators(self, dl_manager):
        data_urls = {}
        config = _CONFIGS[self.config.name.replace(_CONFIG_EN_NL_SUFFIX, "")]
        for split in ["train", "validation"]:
            start_file = config.get("start", 0) if split == "train" else 0
            num_files = config.get(split)
            data_urls[split] = []
            for index in range(start_file, start_file + num_files):
                data_urls[split].append(
                    _DATA_URL_NL.format(
                        split=split,
                        index=index,
                        validation="-validation" if split == "validation" else "",
                        n_shards=4 if split == "validation" else 1024,
                    )
                )
                if self.config.name.endswith(_CONFIG_EN_NL_SUFFIX):
                    # For the *_en_nl configs, follow every Dutch shard with an
                    # English C4 shard so that _generate_examples can read the
                    # two languages side by side. (The EN URL template has no
                    # {validation} placeholder, so no such argument is passed.)
                    data_urls[split].append(
                        _DATA_URL_EN.format(
                            name=_C4_EN_VARIANT,
                            split=split,
                            index=index,
                            n_shards=8 if split == "validation" else 1024,
                        )
                    )
        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_downloaded_files},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepaths": validation_downloaded_files},
            ),
        ]
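    # For example, with the template above the first NL train shard resolves to
    #   https://huggingface.co/datasets/yhavinga/mc4_nl_cleaned/resolve/main/mc4_nl_cleaned/train/c4-nl-cleaned.tfrecord-00000-of-01024.json.gz
    # and the first NL validation shard to
    #   .../mc4_nl_cleaned/validation/c4-nl-validation-cleaned.tfrecord-00000-of-00004.json.gz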
    @staticmethod
    def grouper(iterable, n, fillvalue=None):
        """Collect data into fixed-length chunks or blocks."""
        # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
        # For an *_en_nl config the filepaths list interleaves shards as
        # [nl_0, en_0, nl_1, en_1, ...], so grouper(filepaths, 2) yields the
        # (nl_i, en_i) pairs that _generate_examples reads side by side.
        args = [iter(iterable)] * n
        return zip_longest(*args, fillvalue=fillvalue)

    @staticmethod
    def gzip_open(filepath):
        # Returns None for a missing (fill) entry; callers filter those out.
        if filepath:
            return gzip.open(open(filepath, "rb"), "rt", encoding="utf-8")
    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for files in self.grouper(filepaths, 2, None):
            logger.info(f"Generating examples from {files}")
            gzip_iters = [self.gzip_open(file) for file in files if file is not None]
            # zip_longest reads until the longest file in the pair is exhausted;
            # plain zip would silently drop the tail of the longer shard.
            for lines in zip_longest(*gzip_iters):
                for line in lines:
                    if line is None:
                        continue
                    example = json.loads(line)
                    yield id_, example
                    id_ += 1
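As a closing example, the snippet below streams the first record of a config instead of downloading all shards up front. This is a sketch, not part of the script: it assumes a datasets version that still runs script-based loaders (recent releases require trust_remote_code=True for those).

import datasets

# Stream examples lazily; shards are fetched and decompressed on the fly.
ds = datasets.load_dataset(
    "yhavinga/mc4_nl_cleaned", "micro", streaming=True, trust_remote_code=True
)
print(next(iter(ds["train"])))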