holylovenia committed on
Commit 971f398 · 1 parent: 78eded3

Upload oscar_2201.py with huggingface_hub
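
The commit message refers to an upload done with the huggingface_hub client. As a rough illustration only (the target repo id and local path below are placeholders, not the actual values used for this commit), a single-file upload of a dataset script typically looks like this:

from huggingface_hub import HfApi

api = HfApi()
# Push one loader script into an existing dataset repo.
# "SEACrowd/oscar_2201" and the local path are illustrative placeholders.
api.upload_file(
    path_or_fileobj="oscar_2201.py",
    path_in_repo="oscar_2201.py",
    repo_id="SEACrowd/oscar_2201",
    repo_type="dataset",
)
# When no commit_message is given, huggingface_hub generates the default
# "Upload oscar_2201.py with huggingface_hub", which matches this commit's title.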

Files changed (1)
  1. oscar_2201.py +340 -0
oscar_2201.py ADDED
@@ -0,0 +1,340 @@
+import re
+import gzip
+import json
+from pathlib import Path
+from typing import Dict, List, Tuple
+from urllib.parse import urljoin
+
+import datasets
+
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Tasks, Licenses
+
+_CITATION = """\
+@inproceedings{abadji2022cleaner,
+  author = {Julien Abadji and
+            Pedro Javier Ortiz Su{\'{a}}rez and
+            Laurent Romary and
+            Beno{\^{\i}}t Sagot},
+  title = {Towards a Cleaner Document-Oriented Multilingual Crawled Corpus},
+  booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference,
+               {LREC} 2022, Marseille, France, 20-25 June 2022},
+  pages = {4344--4355},
+  publisher = {European Language Resources Association},
+  year = {2022},
+  url = {https://aclanthology.org/2022.lrec-1.463},
+}
+
+@inproceedings{abadji2021ungoliant,
+  author = {Julien Abadji and
+            Pedro Javier Ortiz Su{\'a}rez and
+            Laurent Romary and
+            Beno{\^i}t Sagot},
+  title = {Ungoliant: An optimized pipeline for the generation of a very large-scale multilingual web corpus},
+  series = {Proceedings of the Workshop on Challenges in the Management of Large Corpora
+            (CMLC-9) 2021. Limerick, 12 July 2021 (Online-Event)},
+  editor = {Harald L{\"u}ngen and
+            Marc Kupietz and
+            Piotr Bański and
+            Adrien Barbaresi and
+            Simon Clematide and
+            Ines Pisetta},
+  publisher = {Leibniz-Institut f{\"u}r Deutsche Sprache},
+  address = {Mannheim},
+  doi = {10.14618/ids-pub-10468},
+  url = {https://nbn-resolving.org/urn:nbn:de:bsz:mh39-104688},
+  pages = {1 -- 9},
+  year = {2021},
+  abstract = {Since the introduction of large language models in Natural Language
+              Processing, large raw corpora have played a crucial role in Computational Linguistics.
+              However, most of these large raw corpora are either available only for English or not
+              available to the general public due to copyright issues. Nevertheless, there are some
+              examples of freely available multilingual corpora for training Deep Learning NLP
+              models, such as the OSCAR and Paracrawl corpora. However, they have quality issues,
+              especially for low-resource languages. Moreover, recreating or updating these corpora
+              is very complex. In this work, we try to reproduce and improve the goclassy pipeline
+              used to create the OSCAR corpus. We propose a new pipeline that is faster, modular,
+              parameterizable, and well documented. We use it to create a corpus similar to OSCAR
+              but larger and based on recent data. Also, unlike OSCAR, the metadata information is
+              at the document level. We release our pipeline under an open source license and
+              publish the corpus under a research-only license.},
+  language = {en}
+}
+
+@article{kreutzer2022quality,
+  title = {Quality at a Glance: An Audit of Web-Crawled Multilingual Datasets},
+  author = {Kreutzer, Julia and
+            Caswell, Isaac and
+            Wang, Lisa and
+            Wahab, Ahsan and
+            van Esch, Daan and
+            Ulzii-Orshikh, Nasanbayar and
+            Tapo, Allahsera and
+            Subramani, Nishant and
+            Sokolov, Artem and
+            Sikasote, Claytone and
+            Setyawan, Monang and
+            Sarin, Supheakmungkol and
+            Samb, Sokhar and
+            Sagot, Beno{\^\i}t and
+            Rivera, Clara and
+            Rios, Annette and
+            Papadimitriou, Isabel and
+            Osei, Salomey and
+            Suarez, Pedro Ortiz and
+            Orife, Iroro and
+            Ogueji, Kelechi and
+            Rubungo, Andre Niyongabo and
+            Nguyen, Toan Q. and
+            M{\"u}ller, Mathias and
+            M{\"u}ller, Andr{\'e} and
+            Muhammad, Shamsuddeen Hassan and
+            Muhammad, Nanda and
+            Mnyakeni, Ayanda and
+            Mirzakhalov, Jamshidbek and
+            Matangira, Tapiwanashe and
+            Leong, Colin and
+            Lawson, Nze and
+            Kudugunta, Sneha and
+            Jernite, Yacine and
+            Jenny, Mathias and
+            Firat, Orhan and
+            Dossou, Bonaventure F. P. and
+            Dlamini, Sakhile and
+            de Silva, Nisansa and
+            {\c{C}}abuk Ball{\i}, Sakine and
+            Biderman, Stella and
+            Battisti, Alessia and
+            Baruwa, Ahmed and
+            Bapna, Ankur and
+            Baljekar, Pallavi and
+            Azime, Israel Abebe and
+            Awokoya, Ayodele and
+            Ataman, Duygu and
+            Ahia, Orevaoghene and
+            Ahia, Oghenefego and
+            Agrawal, Sweta and
+            Adeyemi, Mofetoluwa},
+  editor = {Roark, Brian and
+            Nenkova, Ani},
+  journal = {Transactions of the Association for Computational Linguistics},
+  volume = {10},
+  year = {2022},
+  address = {Cambridge, MA},
+  publisher = {MIT Press},
+  url = {https://aclanthology.org/2022.tacl-1.4},
+  doi = {10.1162/tacl_a_00447},
+  pages = {50--72},
+  abstract = {With the success of large-scale pre-training and multilingual modeling in
+              Natural Language Processing (NLP), recent years have seen a proliferation of large,
+              Web-mined text datasets covering hundreds of languages. We manually audit the quality
+              of 205 language-specific corpora released with five major public datasets (CCAligned,
+              ParaCrawl, WikiMatrix, OSCAR, mC4). Lower-resource corpora have systematic issues: At
+              least 15 corpora have no usable text, and a significant fraction contains less than
+              50{\%} sentences of acceptable quality. In addition, many are mislabeled or use
+              nonstandard/ambiguous language codes. We demonstrate that these issues are easy to
+              detect even for non-proficient speakers, and supplement the human audit with automatic
+              analyses. Finally, we recommend techniques to evaluate and improve multilingual
+              corpora and discuss potential risks that come with low-quality data releases.},
+}
+
+@inproceedings{ortizsuarez2020monolingual,
+  title = {A Monolingual Approach to Contextualized Word Embeddings for Mid-Resource Languages},
+  author = {Ortiz Su{\'a}rez, Pedro Javier and
+            Romary, Laurent and
+            Sagot, Benoit},
+  booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
+  month = {jul},
+  year = {2020},
+  address = {Online},
+  publisher = {Association for Computational Linguistics},
+  url = {https://www.aclweb.org/anthology/2020.acl-main.156},
+  pages = {1703--1714},
+  abstract = {We use the multilingual OSCAR corpus, extracted from Common Crawl via
+              language classification, filtering and cleaning, to train monolingual contextualized
+              word embeddings (ELMo) for five mid-resource languages. We then compare the
+              performance of OSCAR-based and Wikipedia-based ELMo embeddings for these languages on
+              the part-of-speech tagging and parsing tasks. We show that, despite the noise in the
+              Common-Crawl-based OSCAR data, embeddings trained on OSCAR perform much better than
+              monolingual embeddings trained on Wikipedia. They actually equal or improve the
+              current state of the art in tagging and parsing for all five languages. In particular,
+              they also improve over multilingual Wikipedia-based contextual embeddings
+              (multilingual BERT), which almost always constitutes the previous state of the art,
+              thereby showing that the benefit of a larger, more diverse corpus surpasses the
+              cross-lingual benefit of multilingual embedding architectures.},
+}
+
+@inproceedings{ortizsuarez2019asynchronous,
+  author = {Pedro Javier {Ortiz Su{\'a}rez} and
+            Benoit Sagot and
+            Laurent Romary},
+  title = {Asynchronous pipelines for processing huge corpora on medium to low resource infrastructures},
+  series = {Proceedings of the Workshop on Challenges in the Management of Large Corpora
+            (CMLC-7) 2019. Cardiff, 22nd July 2019},
+  editor = {Piotr Bański and
+            Adrien Barbaresi and
+            Hanno Biber and
+            Evelyn Breiteneder and
+            Simon Clematide and
+            Marc Kupietz and
+            Harald L{\"u}ngen and
+            Caroline Iliadi},
+  publisher = {Leibniz-Institut f{\"u}r Deutsche Sprache},
+  address = {Mannheim},
+  doi = {10.14618/ids-pub-9021},
+  url = {http://nbn-resolving.de/urn:nbn:de:bsz:mh39-90215},
+  pages = {9 -- 16},
+  year = {2019},
+  abstract = {Common Crawl is a considerably large, heterogeneous multilingual corpus
+              comprised of crawled documents from the internet, surpassing 20TB of data and
+              distributed as a set of more than 50 thousand plain text files where each contains
+              many documents written in a wide variety of languages. Even though each document has a
+              metadata block associated to it, this data lacks any information about the language in
+              which each document is written, making it extremely difficult to use Common Crawl for
+              monolingual applications. We propose a general, highly parallel, multithreaded
+              pipeline to clean and classify Common Crawl by language; we specifically design it so
+              that it runs efficiently on medium to low resource infrastructures where I/O speeds
+              are the main constraint. We develop the pipeline so that it can be easily reapplied to
+              any kind of heterogeneous corpus and so that it can be parameterised to a wide range
+              of infrastructures. We also distribute a 6.3TB version of Common Crawl, filtered,
+              classified by language, shuffled at line level in order to avoid copyright issues, and
+              ready to be used for NLP applications.},
+  language = {en}
+}
+"""
+
+_DATASETNAME = "oscar_2201"
+_DESCRIPTION = """\
+OSCAR or Open Super-large Crawled Aggregated coRpus is a huge multilingual corpus
+obtained by language classification and filtering of the Common Crawl corpus using
+the ungoliant architecture. Data is distributed by language in both original and
+deduplicated form.
+"""
+
+_HOMEPAGE = "https://huggingface.co/datasets/oscar-corpus/OSCAR-2201"
+_LICENSE = Licenses.CC0_1_0.value
+_BASE_URL = "https://huggingface.co/datasets/oscar-corpus/OSCAR-2201/resolve/main/compressed/{lang}_meta/"
+
+_LOCAL = False
+_LANGUAGES = ["war", "ceb", "min", "vie", "ilo", "tgl", "lao", "khm", "mya", "jav", "ind", "tha", "sun", "zlm"]
+
+_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
+_SOURCE_VERSION = "2022.1.0"
+_SEACROWD_VERSION = "2024.06.20"
+
+
+class Oscar2201Dataset(datasets.GeneratorBasedBuilder):
+    """OSCAR subset for SEA languages, version 2201."""
+
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+    SEACROWD_SCHEMA_NAME = "ssp"
+    SUBSETS = ["war", "ceb", "min", "vi", "ta", "ilo", "tl", "lo", "km", "my", "jv", "id", "th", "su", "ms"]
+
+    BUILDER_CONFIGS = [
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_{subset}_source",
+            version=datasets.Version(_SOURCE_VERSION),
+            description=f"{_DATASETNAME} {subset} source schema",
+            schema="source",
+            subset_id=subset,
+        )
+        for subset in SUBSETS
+    ] + [
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_{subset}_seacrowd_ssp",
+            version=datasets.Version(_SEACROWD_VERSION),
+            description=f"{_DATASETNAME} {subset} SEACrowd schema",
+            schema="seacrowd_ssp",
+            subset_id=subset,
+        )
+        for subset in SUBSETS
+    ]
+
+    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_jv_source"
+
+    def _info(self) -> datasets.DatasetInfo:
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("int64"),
+                    "text": datasets.Value("string"),
+                    "meta": {
+                        "warc_headers": {
+                            "warc-record-id": datasets.Value("string"),
+                            "warc-date": datasets.Value("string"),
+                            "content-type": datasets.Value("string"),
+                            "content-length": datasets.Value("int32"),
+                            "warc-type": datasets.Value("string"),
+                            "warc-identified-content-language": datasets.Value("string"),
+                            "warc-refers-to": datasets.Value("string"),
+                            "warc-target-uri": datasets.Value("string"),
+                            "warc-block-digest": datasets.Value("string"),
+                        },
+                        "identification": {
+                            "label": datasets.Value("string"),
+                            "prob": datasets.Value("float"),
+                        },
+                        "annotations": datasets.Sequence(datasets.Value("string")),
+                        "line_identifications": [
+                            {
+                                "label": datasets.Value("string"),
+                                "prob": datasets.Value("float"),
+                            }
+                        ],
+                    },
+                }
+            )
+        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+            features = schemas.ssp_features
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        """Returns SplitGenerators."""
+        # Config names follow the pattern "oscar_2201_<lang>_<schema>", so the third token is the language code.
+        base_path = _BASE_URL.format(lang=self.config.name.split("_")[2])
+
+        # The checksum file lists one "<sha256> <filename>" pair per shard of this language's subset.
+        checksum_url = urljoin(base_path, "checksum.sha256")
+        checksum_path = Path(dl_manager.download(checksum_url))
+        with open(checksum_path, encoding="utf-8") as f:
+            filenames = [line.split()[1] for line in f if line.strip()]
+        filenames = sorted(filenames, key=lambda x: int(re.search(r"\d+", x).group()) if re.search(r"\d+", x) else x)
+        data_urls = [urljoin(base_path, filename) for filename in filenames]
+
+        data_paths = list(map(Path, dl_manager.download([url for url in data_urls if url.endswith(".jsonl.gz")])))
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepaths": data_paths,
+                    "split": "train",
+                },
+            )
+        ]
+
+    def _generate_examples(self, filepaths: List[Path], split: str) -> Tuple[int, Dict]:
+        """Yields examples as (key, example) tuples."""
+        key = 0
+        for filepath in filepaths:
+            with gzip.open(filepath, "rt", encoding="utf-8") as f:
+                for line in f:
+                    doc = json.loads(line)
+                    if self.config.schema == "source":
+                        meta = dict()
+                        meta["warc_headers"] = doc["warc_headers"]
+                        meta["warc_headers"]["warc-identified-content-language"] = doc["warc_headers"].get("warc-identified-content-language")
+                        meta["identification"] = doc["metadata"]["identification"]
+                        meta["annotations"] = doc["metadata"]["annotation"]
+                        meta["line_identifications"] = doc["metadata"]["sentence_identifications"]
+                        yield key, {"id": key, "text": doc["content"], "meta": meta}
+                    elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+                        yield key, {"id": str(key), "text": doc["content"]}
+                    key += 1
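
Since the class above is a standard datasets.GeneratorBasedBuilder, one way to smoke-test it is to point datasets.load_dataset at a local copy of the script and pick one of the config names defined in BUILDER_CONFIGS. This is a minimal sketch, not part of the commit: the script path is a placeholder, and whether script-based loading works (or needs trust_remote_code=True) depends on the installed version of the datasets library.

import datasets

# "oscar_2201.py" is a placeholder path to a local copy of the uploaded script.
# "oscar_2201_jv_source" is the default (Javanese, source-schema) config defined above.
dset = datasets.load_dataset(
    "oscar_2201.py",
    name="oscar_2201_jv_source",
    split="train",
)
print(dset[0]["text"][:200])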