Datasets: yhavinga/mc4_nl_cleaned

Multilinguality: monolingual, en-nl
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: extended
License: Open Data Commons Attribution License (ODC-By) v1.0
Yeb Havinga committed
Commit
4afe684
1 Parent(s): ef5147c

Add dataset script

Files changed (1)
  1. mc4_nl_cleaned.py +172 -0
mc4_nl_cleaned.py ADDED
@@ -0,0 +1,172 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cleaned Dutch split of the mC4 corpus."""


import json
import gzip
import textwrap

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """
@article{JMLR:v21:20-074,
  author  = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
  title   = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
  journal = {Journal of Machine Learning Research},
  year    = {2020},
  volume  = {21},
  number  = {140},
  pages   = {1-67},
  url     = {http://jmlr.org/papers/v21/20-074.html}
}
"""

_DESCRIPTION = """\
A thoroughly cleaned version of the Dutch portion of the multilingual
colossal, cleaned version of Common Crawl's web crawl corpus (mC4) by AllenAI.

Based on the Common Crawl dataset: "https://commoncrawl.org".

This is the processed version of Google's mC4 dataset by AllenAI, with further cleaning
detailed in the repository README file.
"""

_HOMEPAGE = "https://github.com/allenai/allennlp/discussions/5056"

_LICENSE = "Open Data Commons Attribution License (ODC-By) v1.0"

_BASE_URL = "https://huggingface.co/datasets/yhavinga/mc4_nl_cleaned/resolve/main/mc4_nl_cleaned/{split}/cleaned_c4_nl.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
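# For example, the first train shard resolves to:
# https://huggingface.co/datasets/yhavinga/mc4_nl_cleaned/resolve/main/mc4_nl_cleaned/train/cleaned_c4_nl.tfrecord-00000-of-01024.json.gz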

_CONFIGS = dict(
    tiny={"train": 100, "validation": 1},
    small={"train": 250, "validation": 2},
    medium={"train": 500, "validation": 4},
    large={"train": 750, "validation": 6},
    full={"train": 1024, "validation": 8},
)
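# Each config selects how many shard files are downloaded per split: e.g. "tiny"
# uses the first 100 of 1024 train shards and 1 of 8 validation shards, while
# "full" uses every shard.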


class Mc4NlCleanedConfig(datasets.BuilderConfig):
    """BuilderConfig for mC4 NL Cleaned."""

    def __init__(self, **kwargs):
        """BuilderConfig for mC4 NL Cleaned.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class Mc4(datasets.GeneratorBasedBuilder):
    """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""

    BUILDER_CONFIGS = [
        Mc4NlCleanedConfig(
            name="tiny",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                f"""\
                A tiny cleaned version of the Dutch portion of the multilingual C4 corpus.
                Estimated size of compressed files: 10GB
                """
            ),
        ),
        Mc4NlCleanedConfig(
            name="small",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                f"""\
                A small cleaned version of the Dutch portion of the multilingual C4 corpus.
                Estimated size of compressed files: 25GB
                """
            ),
        ),
        Mc4NlCleanedConfig(
            name="medium",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                f"""\
                A medium cleaned version of the Dutch portion of the multilingual C4 corpus.
                Estimated size of compressed files: 50GB
                """
            ),
        ),
        Mc4NlCleanedConfig(
            name="large",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                f"""\
                A large cleaned version of the Dutch portion of the multilingual C4 corpus.
                Estimated size of compressed files: 75GB
                """
            ),
        ),
        Mc4NlCleanedConfig(
            name="full",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                f"""\
                The full cleaned version of the Dutch portion of the multilingual C4 corpus.
                Estimated size of compressed files: 103GB
                """
            ),
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_urls = {}
        for split in ["train", "validation"]:
            data_urls[split] = [
                _BASE_URL.format(
                    split=split,
                    index=index,
                    n_shards=8 if split == "validation" else 1024,
                )
                for index in range(_CONFIGS[self.config.name][split])
            ]
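        # download() fetches the selected shards (caching them locally) and
        # returns the corresponding local file paths.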
        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
            ),
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info(f"Generating examples from {filepath}")
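            # Each shard is a gzip-compressed file with one JSON object per line,
            # holding the "text", "timestamp" and "url" fields declared in _info().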
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
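
For reference, a minimal usage sketch of this script once it is published on the Hub. The repository id yhavinga/mc4_nl_cleaned and the config names are taken from the code above; streaming=True is optional and only avoids downloading every shard before iterating.

    from datasets import load_dataset

    # Stream the smallest config; each example is a dict with "text",
    # "timestamp" and "url" keys, as declared in _info().
    mc4_nl = load_dataset("yhavinga/mc4_nl_cleaned", "tiny", split="train", streaming=True)

    for i, example in enumerate(mc4_nl):
        print(example["url"], example["timestamp"], example["text"][:80])
        if i == 2:
            break

Depending on the installed version of the datasets library, loading a script-based dataset like this may additionally require trust_remote_code=True, or may not be supported at all in the newest releases.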