holylovenia committed on
Commit
9f37da5
1 Parent(s): 6aec44d

Upload qed.py with huggingface_hub

Files changed (1)
  1. qed.py +324 -0
qed.py ADDED
@@ -0,0 +1,324 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ QED - The QCRI Educational Domain Corpus (formerly the QCRI AMARA Corpus) is an open multilingual collection of subtitles for educational videos and lectures, collaboratively transcribed and translated over the AMARA web-based platform.
+ It is developed by the Qatar Computing Research Institute, Arabic Language Technologies Group. Along with English, it covers multiple SEA languages, such as vie (Vietnamese), mya (Burmese), jav (Javanese), id (Indonesian), tha (Thai),
+ tl (Tagalog), and ms (Malay).
+ """
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{abdelali-etal-2014-amara,
+     title = "The {AMARA} Corpus: Building Parallel Language Resources for the Educational Domain",
+     author = "Abdelali, Ahmed and
+       Guzman, Francisco and
+       Sajjad, Hassan and
+       Vogel, Stephan",
+     editor = "Calzolari, Nicoletta and
+       Choukri, Khalid and
+       Declerck, Thierry and
+       Loftsson, Hrafn and
+       Maegaard, Bente and
+       Mariani, Joseph and
+       Moreno, Asuncion and
+       Odijk, Jan and
+       Piperidis, Stelios",
+     booktitle = "Proceedings of the Ninth International Conference on Language Resources and Evaluation ({LREC}'14)",
+     month = may,
+     year = "2014",
+     address = "Reykjavik, Iceland",
+     publisher = "European Language Resources Association (ELRA)",
+     url = "http://www.lrec-conf.org/proceedings/lrec2014/pdf/877_Paper.pdf",
+     pages = "1856--1862",
+     abstract = "This paper presents the AMARA corpus of on-line educational content: a new parallel corpus of educational video subtitles, multilingually aligned for 20 languages, i.e. 20 monolingual corpora and 190 parallel corpora.
+       This corpus includes both resource-rich languages such as English and Arabic, and resource-poor languages such as Hindi and Thai. In this paper, we describe the gathering, validation, and preprocessing of a large collection of parallel,
+       community-generated subtitles. Furthermore, we describe the methodology used to prepare the data for Machine Translation tasks. Additionally, we provide a document-level, jointly aligned development and test sets for 14 language pairs,
+       designed for tuning and testing Machine Translation systems. We provide baseline results for these tasks, and highlight some of the challenges we face when building machine translation systems for educational content.",
+ }
+ """
+
+ _DATASETNAME = "qed"
+
+ _DESCRIPTION = """\
+ QED - The QCRI Educational Domain Corpus (formerly the QCRI AMARA Corpus) is an open multilingual collection of subtitles for educational videos and lectures, collaboratively transcribed and translated over the AMARA web-based platform.
+ It is developed by the Qatar Computing Research Institute, Arabic Language Technologies Group. Along with English, it covers multiple SEA languages, such as vie (Vietnamese), mya (Burmese), jav (Javanese), id (Indonesian), tha (Thai), tl (Tagalog),
+ and ms (Malay).
+ """
+
+ _HOMEPAGE = "https://opus.nlpl.eu/QED/corpus/version/QED"
+
+ _LANGUAGES = ["eng", "vie", "tha", "mya", "jav", "ind", "tgl", "zlm", "ceb", "fil", "khm", "lao", "mad", "pam"]
+
+ _LICENSE = Licenses.OTHERS.value
+
+ _LOCAL = False
+
+ _FILE = "QED.{}.{}"  # E.g. QED.en-id.id
+
+ _PAIR_URL = "https://object.pouta.csc.fi/OPUS-QED/v2.0a/moses/{}.txt.zip"
+ _MONO_URL = "https://object.pouta.csc.fi/OPUS-QED/v2.0a/mono/{}.txt.gz"
+
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION, Tasks.SELF_SUPERVISED_PRETRAINING]
+
+ _SOURCE_VERSION = "2.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
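+ # Maps the ISO 639-3 codes used in config names to the (mostly ISO 639-1) codes used in OPUS QED file names and URLs.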
+ _LANG_MAPPER = {
+     "eng": "en",
+     "vie": "vi",
+     "tha": "th",
+     "mya": "my",
+     "jav": "jv",
+     "ind": "id",
+     "tgl": "tl",
+     "zlm": "ms",
+     "ceb": "ceb",
+     "fil": "fil",
+     "khm": "km",
+     "lao": "lo",
+     "mad": "mad",
+     "pam": "pam",
+ }
+
+
+ class QEDDataset(datasets.GeneratorBasedBuilder):
+     """QED - The QCRI Educational Domain Corpus (formerly the QCRI AMARA Corpus) is an open multilingual collection of subtitles for educational videos and lectures, collaboratively transcribed and translated over the AMARA web-based platform.
+     It is developed by the Qatar Computing Research Institute, Arabic Language Technologies Group. Along with English, it covers multiple SEA languages, such as vie (Vietnamese), mya (Burmese), jav (Javanese), id (Indonesian), tha (Thai), tl (Tagalog),
+     and ms (Malay)."""
+
+     SEACROWD_SCHEMA = TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()
+
+     LANG_PAIRS = [
+         ("eng", "vie"),
+         ("eng", "tha"),
+         ("eng", "mya"),
+         ("eng", "jav"),
+         ("eng", "ind"),
+         ("eng", "tgl"),
+         ("eng", "zlm"),
+         ("eng", "fil"),
+         ("eng", "khm"),
+         ("eng", "lao"),
+         ("eng", "mad"),
+         ("eng", "pam"),
+         ("fil", "vie"),
+         ("khm", "vie"),
+         ("lao", "vie"),
+         ("pam", "vie"),
+         ("fil", "tha"),
+         ("khm", "tha"),
+         ("lao", "tha"),
+         ("pam", "tha"),
+         ("fil", "mya"),
+         ("khm", "mya"),
+         ("lao", "mya"),
+         ("fil", "jav"),
+         ("jav", "lao"),
+         ("fil", "ind"),
+         ("ind", "khm"),
+         ("ind", "lao"),
+         ("fil", "tgl"),
+         ("khm", "tgl"),
+         ("lao", "tgl"),
+         ("fil", "zlm"),
+         ("khm", "zlm"),
+         ("lao", "zlm"),
+         ("tha", "vie"),
+         ("tha", "mya"),
+         ("tha", "jav"),
+         ("tha", "tgl"),
+         ("mya", "tgl"),
+         ("mya", "vie"),
+         ("jav", "vie"),
+         ("jav", "mya"),
+         ("jav", "tgl"),
+         ("jav", "zlm"),
+         ("ind", "jav"),
+         ("ind", "tha"),
+         ("ind", "vie"),
+         ("ind", "mya"),
+         ("ind", "tgl"),
+         ("ind", "zlm"),
+         ("tgl", "vie"),
+         ("zlm", "tgl"),
+         ("zlm", "tha"),
+         ("zlm", "vie"),
+         ("zlm", "mya"),
+         ("ceb", "eng"),
+         ("ceb", "vie"),
+         ("ceb", "tha"),
+         ("ceb", "mya"),
+         ("ceb", "jav"),
+         ("ceb", "ind"),
+         ("ceb", "tgl"),
+         ("ceb", "zlm"),
+         ("ceb", "fil"),
+         ("ceb", "khm"),
+         ("ceb", "lao"),
+         ("ceb", "pam"),
+         ("fil", "khm"),
+         ("fil", "lao"),
+         ("khm", "lao"),
+     ]
+
+     MONO_LANGS = ["eng", "vie", "tha", "mya", "jav", "ind", "tgl", "zlm", "ceb", "fil", "khm", "lao", "mad", "pam"]
+
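+     # Config names follow "{dataset}_{subset}_{schema}", e.g. "qed_eng-ind_source",
+     # "qed_eng-ind_seacrowd_t2t", "qed_jav_source", and "qed_jav_seacrowd_ssp".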
+     BUILDER_CONFIGS = (
+         [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{lang1}-{lang2}_source",
+                 version=datasets.Version(_SOURCE_VERSION),
+                 description=f"{_DATASETNAME} source schema for translation from {lang1} to {lang2}",
+                 schema="source",
+                 subset_id=f"{_DATASETNAME}_{lang1}-{lang2}",
+             )
+             for lang1, lang2 in LANG_PAIRS
+         ]
+         + [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{lang}_source",
+                 version=datasets.Version(_SOURCE_VERSION),
+                 description=f"{_DATASETNAME} source {lang} schema for Self-supervised Pretraining task",
+                 schema="source",
+                 subset_id=f"{_DATASETNAME}_{lang}",
+             )
+             for lang in MONO_LANGS
+         ]
+         + [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{lang1}-{lang2}_seacrowd_t2t",
+                 version=datasets.Version(_SEACROWD_VERSION),
+                 description=f"{_DATASETNAME} SEACrowd schema for translation from {lang1} to {lang2} for Machine Translation task",
+                 schema="seacrowd_t2t",
+                 subset_id=f"{_DATASETNAME}_{lang1}-{lang2}",
+             )
+             for lang1, lang2 in LANG_PAIRS
+         ]
+         + [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{lang}_seacrowd_ssp",
+                 version=datasets.Version(_SEACROWD_VERSION),
+                 description=f"{_DATASETNAME} SEACrowd {lang} schema for Self-supervised Pretraining task",
+                 schema="seacrowd_ssp",
+                 subset_id=f"{_DATASETNAME}_{lang}",
+             )
+             for lang in MONO_LANGS
+         ]
+     )
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_eng-ind_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             if len(self.config.subset_id.split("_")[-1].split("-")) == 2:  # MT task
+                 lang1, lang2 = self.config.subset_id.split("_")[-1].split("-")
+                 features = datasets.Features(
+                     {
+                         "id": datasets.Value("int32"),
+                         "translation": datasets.Translation(languages=(lang1, lang2)),
+                     }
+                 )
+             elif len(self.config.subset_id.split("_")[-1].split("-")) == 1:  # SSP task
+                 features = datasets.Features(
+                     {
+                         "id": datasets.Value("int32"),
+                         "text": datasets.Value("string"),
+                     }
+                 )
+
+         elif self.config.schema == "seacrowd_t2t":
+             features = schemas.text2text_features
+
+         elif self.config.schema == "seacrowd_ssp":
+             features = schemas.ssp_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
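+     # Illustrative URLs built below: the eng-ind pair resolves to
+     # https://object.pouta.csc.fi/OPUS-QED/v2.0a/moses/en-id.txt.zip, and monolingual jav to
+     # https://object.pouta.csc.fi/OPUS-QED/v2.0a/mono/jv.txt.gz.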
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         lang_pair = self.config.subset_id.split("_")[-1]
+         lang_info = "-".join([_LANG_MAPPER[lang] for lang in lang_pair.split("-")])
+
+         if len(self.config.subset_id.split("_")[-1].split("-")) == 1:  # SSP task
+             url = _MONO_URL.format(lang_info)
+         elif len(self.config.subset_id.split("_")[-1].split("-")) == 2:  # MT task
+             url = _PAIR_URL.format(lang_info)
+
+         data_dir = dl_manager.download_and_extract(url)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir,
+                 },
+             )
+         ]
+
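+     # For MT subsets, download_and_extract returns a directory holding two parallel files named
+     # like QED.en-id.en / QED.en-id.id (see _FILE); for SSP subsets it returns the path of a
+     # single extracted plain-text file.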
+     def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         if len(self.config.subset_id.split("_")[-1].split("-")) == 2:  # MT task
+             lang_pair = self.config.subset_id.split("_")[-1]
+             lang1, lang2 = lang_pair.split("-")
+
+             l1_path = os.path.join(filepath, _FILE.format("-".join([_LANG_MAPPER[lang1], _LANG_MAPPER[lang2]]), _LANG_MAPPER[lang1]))
+             l2_path = os.path.join(filepath, _FILE.format("-".join([_LANG_MAPPER[lang1], _LANG_MAPPER[lang2]]), _LANG_MAPPER[lang2]))
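+             # The two files are expected to be line-aligned, so zip() pairs sentence i of lang1 with sentence i of lang2.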
+
+             if self.config.schema == "source":
+                 with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
+                     for i, (x, y) in enumerate(zip(f1, f2)):
+                         yield i, {
+                             "id": i,
+                             "translation": {
+                                 lang1: x.strip(),
+                                 lang2: y.strip(),
+                             },
+                         }
+
+             elif self.config.schema == "seacrowd_t2t":
+                 with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
+                     for i, (x, y) in enumerate(zip(f1, f2)):
+                         yield i, {
+                             "id": str(i),
+                             "text_1": x.strip(),
+                             "text_2": y.strip(),
+                             "text_1_name": lang1,
+                             "text_2_name": lang2,
+                         }
+
+         elif len(self.config.subset_id.split("_")[-1].split("-")) == 1:  # SSP task
+             with open(filepath, "r", encoding="utf-8") as f:
+                 for i, x in enumerate(f):
+                     yield i, {
+                         "id": str(i),
+                         "text": x.strip(),
+                     }
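
For reference, a minimal usage sketch (assuming the `seacrowd` utilities are installed and this script is in the working directory; depending on the installed `datasets` version, `trust_remote_code=True` may be required):

from datasets import load_dataset

# Parallel English-Indonesian subset in the source schema (translation dicts).
mt = load_dataset("qed.py", name="qed_eng-ind_source", split="train", trust_remote_code=True)
print(mt[0]["translation"]["eng"], "->", mt[0]["translation"]["ind"])

# Monolingual Javanese subset in the SEACrowd SSP schema (plain text).
ssp = load_dataset("qed.py", name="qed_jav_seacrowd_ssp", split="train", trust_remote_code=True)
print(ssp[0]["text"])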