keshan committed
Commit
8e77308
1 Parent(s): 6eef4fd

adding dataset config

Files changed (2)
  1. .gitattributes +1 -0
  2. clean-si-mc4.py +336 -0
.gitattributes CHANGED
@@ -14,3 +14,4 @@
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
+ *.json filter=lfs diff=lfs merge=lfs -text
clean-si-mc4.py ADDED
@@ -0,0 +1,336 @@
+ """mC4 dataset based on Common Crawl."""
+
+
+ import gzip
+ import json
+ import re
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _DESCRIPTION = """\
+ A colossal, cleaned version of Common Crawl's web crawl corpus.
+ Based on Common Crawl dataset: "https://commoncrawl.org".
+ This is the processed version of Google's mC4 dataset by AllenAI.
+ """
+
+ _CITATION = """
+ @article{2019t5,
+     author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
+     title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
+     journal = {arXiv e-prints},
+     year = {2019},
+     archivePrefix = {arXiv},
+     eprint = {1910.10683},
+ }
+ """
+
+ _URL = "https://github.com/allenai/allennlp/discussions/5056"
+
+ _DATA_URL = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/multilingual/c4-{language}{split_suffix}.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
+
+ _LANGUAGES = [
+     "af",
+     "am",
+     "ar",
+     "az",
+     "be",
+     "bg",
+     "bg-Latn",
+     "bn",
+     "ca",
+     "ceb",
+     "co",
+     "cs",
+     "cy",
+     "da",
+     "de",
+     "el",
+     "el-Latn",
+     "en",
+     "eo",
+     "es",
+     "et",
+     "eu",
+     "fa",
+     "fi",
+     "fil",
+     "fr",
+     "fy",
+     "ga",
+     "gd",
+     "gl",
+     "gu",
+     "ha",
+     "haw",
+     "hi",
+     "hi-Latn",
+     "hmn",
+     "ht",
+     "hu",
+     "hy",
+     "id",
+     "ig",
+     "is",
+     "it",
+     "iw",
+     "ja",
+     "ja-Latn",
+     "jv",
+     "ka",
+     "kk",
+     "km",
+     "kn",
+     "ko",
+     "ku",
+     "ky",
+     "la",
+     "lb",
+     "lo",
+     "lt",
+     "lv",
+     "mg",
+     "mi",
+     "mk",
+     "ml",
+     "mn",
+     "mr",
+     "ms",
+     "mt",
+     "my",
+     "ne",
+     "nl",
+     "no",
+     "ny",
+     "pa",
+     "pl",
+     "ps",
+     "pt",
+     "ro",
+     "ru",
+     "ru-Latn",
+     "sd",
+     "si",
+     "sk",
+     "sl",
+     "sm",
+     "sn",
+     "so",
+     "sq",
+     "sr",
+     "st",
+     "su",
+     "sv",
+     "sw",
+     "ta",
+     "te",
+     "tg",
+     "th",
+     "tr",
+     "uk",
+     "und",
+     "ur",
+     "uz",
+     "vi",
+     "xh",
+     "yi",
+     "yo",
+     "zh",
+     "zh-Latn",
+     "zu",
+ ]
+
+ _N_SHARDS_PER_SPLIT = {
+     "af": {"train": 64, "validation": 1},
+     "am": {"train": 16, "validation": 1},
+     "ar": {"train": 1024, "validation": 4},
+     "az": {"train": 256, "validation": 1},
+     "be": {"train": 128, "validation": 1},
+     "bg": {"train": 1024, "validation": 1},
+     "bg-Latn": {"train": 4, "validation": 1},
+     "bn": {"train": 512, "validation": 1},
+     "ca": {"train": 512, "validation": 1},
+     "ceb": {"train": 8, "validation": 1},
+     "co": {"train": 8, "validation": 1},
+     "cs": {"train": 1024, "validation": 2},
+     "cy": {"train": 256, "validation": 1},
+     "da": {"train": 1024, "validation": 1},
+     "de": {"train": 2048, "validation": 16},
+     "el": {"train": 1024, "validation": 2},
+     "el-Latn": {"train": 16, "validation": 1},
+     "en": {"train": 11264, "validation": 128},
+     "eo": {"train": 32, "validation": 1},
+     "es": {"train": 2048, "validation": 16},
+     "et": {"train": 256, "validation": 1},
+     "eu": {"train": 64, "validation": 1},
+     "fa": {"train": 1024, "validation": 2},
+     "fi": {"train": 1024, "validation": 1},
+     "fil": {"train": 64, "validation": 1},
+     "fr": {"train": 2048, "validation": 16},
+     "fy": {"train": 16, "validation": 1},
+     "ga": {"train": 16, "validation": 1},
+     "gd": {"train": 16, "validation": 1},
+     "gl": {"train": 128, "validation": 1},
+     "gu": {"train": 64, "validation": 1},
+     "ha": {"train": 8, "validation": 1},
+     "haw": {"train": 2, "validation": 1},
+     "hi": {"train": 1024, "validation": 2},
+     "hi-Latn": {"train": 16, "validation": 1},
+     "hmn": {"train": 8, "validation": 1},
+     "ht": {"train": 8, "validation": 1},
+     "hu": {"train": 1024, "validation": 2},
+     "hy": {"train": 128, "validation": 1},
+     "id": {"train": 1024, "validation": 4},
+     "ig": {"train": 4, "validation": 1},
+     "is": {"train": 128, "validation": 1},
+     "it": {"train": 1024, "validation": 8},
+     "iw": {"train": 1024, "validation": 1},
+     "ja": {"train": 1024, "validation": 8},
+     "ja-Latn": {"train": 8, "validation": 1},
+     "jv": {"train": 8, "validation": 1},
+     "ka": {"train": 256, "validation": 1},
+     "kk": {"train": 256, "validation": 1},
+     "km": {"train": 64, "validation": 1},
+     "kn": {"train": 64, "validation": 1},
+     "ko": {"train": 1024, "validation": 1},
+     "ku": {"train": 16, "validation": 1},
+     "ky": {"train": 64, "validation": 1},
+     "la": {"train": 64, "validation": 1},
+     "lb": {"train": 32, "validation": 1},
+     "lo": {"train": 8, "validation": 1},
+     "lt": {"train": 512, "validation": 1},
+     "lv": {"train": 256, "validation": 1},
+     "mg": {"train": 8, "validation": 1},
+     "mi": {"train": 4, "validation": 1},
+     "mk": {"train": 128, "validation": 1},
+     "ml": {"train": 128, "validation": 1},
+     "mn": {"train": 128, "validation": 1},
+     "mr": {"train": 1024, "validation": 1},
+     "ms": {"train": 512, "validation": 1},
+     "mt": {"train": 128, "validation": 1},
+     "my": {"train": 64, "validation": 1},
+     "ne": {"train": 256, "validation": 1},
+     "nl": {"train": 1024, "validation": 4},
+     "no": {"train": 1024, "validation": 1},
+     "ny": {"train": 4, "validation": 1},
+     "pa": {"train": 32, "validation": 1},
+     "pl": {"train": 1024, "validation": 4},
+     "ps": {"train": 16, "validation": 1},
+     "pt": {"train": 1024, "validation": 4},
+     "ro": {"train": 1024, "validation": 2},
+     "ru": {"train": 4096, "validation": 32},
+     "ru-Latn": {"train": 32, "validation": 1},
+     "sd": {"train": 64, "validation": 1},
+     "si": {"train": 64, "validation": 1},
+     "sk": {"train": 512, "validation": 1},
+     "sl": {"train": 256, "validation": 1},
+     "sm": {"train": 4, "validation": 1},
+     "sn": {"train": 8, "validation": 1},
+     "so": {"train": 64, "validation": 1},
+     "sq": {"train": 128, "validation": 1},
+     "sr": {"train": 256, "validation": 1},
+     "st": {"train": 2, "validation": 1},
+     "su": {"train": 4, "validation": 1},
+     "sv": {"train": 1024, "validation": 2},
+     "sw": {"train": 32, "validation": 1},
+     "ta": {"train": 256, "validation": 1},
+     "te": {"train": 128, "validation": 1},
+     "tg": {"train": 64, "validation": 1},
+     "th": {"train": 1024, "validation": 1},
+     "tr": {"train": 1024, "validation": 4},
+     "uk": {"train": 1024, "validation": 2},
+     "und": {"train": 3072, "validation": 32},
+     "ur": {"train": 128, "validation": 1},
+     "uz": {"train": 32, "validation": 1},
+     "vi": {"train": 1024, "validation": 4},
+     "xh": {"train": 2, "validation": 1},
+     "yi": {"train": 16, "validation": 1},
+     "yo": {"train": 2, "validation": 1},
+     "zh": {"train": 1024, "validation": 2},
+     "zh-Latn": {"train": 8, "validation": 1},
+     "zu": {"train": 8, "validation": 1},
+ }
+
+
+ class Mc4Config(datasets.BuilderConfig):
+     """BuilderConfig for mC4."""
+
+     def __init__(self, *args, languages, **kwargs):
+         """BuilderConfig for mC4.
+         Args:
+             languages (:obj:`List[str]`): list of languages to load
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(
+             *args,
+             name="+".join(languages),
+             **kwargs,
+         )
+         self.languages = languages
+
+
+ class Mc4(datasets.GeneratorBasedBuilder):
+     """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""
+
+     BUILDER_CONFIGS = [Mc4Config(languages=[lang]) for lang in _LANGUAGES]
+     BUILDER_CONFIG_CLASS = Mc4Config
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "timestamp": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_urls = {}
+         for split in ["train", "validation"]:
+             data_urls[split] = [
+                 _DATA_URL.format(
+                     language=lang,
+                     split_suffix="-validation" if split == "validation" else "",
+                     index=index,
+                     n_shards=_N_SHARDS_PER_SPLIT[lang][split],
+                 )
+                 for lang in self.config.languages
+                 for index in range(_N_SHARDS_PER_SPLIT[lang][split])
+             ]
+         train_downloaded_files = dl_manager.download(data_urls["train"])
+         validation_downloaded_files = dl_manager.download(data_urls["validation"])
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
+             ),
+         ]
+
+     def _clean_prompts(self, example):
+         example["text"] = re.sub(r'[<>»|🕔◂▸෴]', '', example["text"])  # strip stray symbols and glyphs
+         example["text"] = re.sub(r'[a-zA-Z]+', '', example["text"])  # drop Latin-script tokens
+         example["text"] = re.sub(r'[\t\n]', ' ', example["text"])  # turn tabs/newlines into spaces
+         example["text"] = re.sub(r'[,/?.!\-;:"“%‘”�_]{2,}', ' ', example["text"])  # collapse runs of punctuation
+         example["text"] = re.sub(r'[\s]{2,}', ' ', example["text"])  # collapse repeated whitespace
+         return example
+
+     def _generate_examples(self, filepaths):
+         """This function returns the examples in the raw (text) form by iterating on all the files."""
+         id_ = 0
+         for filepath in filepaths:
+             logger.info("generating examples from = %s", filepath)
+             with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
+                 for line in f:
+                     if line:
+                         example = self._clean_prompts(json.loads(line))
+                         yield id_, example
+                         id_ += 1
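
Once committed, the script can be exercised with the `datasets` library. The lines below are only a minimal sketch of such a check, assuming it is run from the repository root where clean-si-mc4.py lives; the "si" config name comes from the single-language configs the script defines, and the validation split is used because it is the smallest (a single shard).

from datasets import load_dataset

# Sinhala ("si") config defined by clean-si-mc4.py; validation is one shard.
si_mc4 = load_dataset("./clean-si-mc4.py", "si", split="validation")

# Each example carries the cleaned "text" plus the original "timestamp" and "url".
print(si_mc4[0]["text"])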