albertvillanova HF staff committed on
Commit
1764907
1 Parent(s): 2a43003

Delete loading script

Browse files
Files changed (1) hide show
  1. alt.py +0 -406
alt.py DELETED
@@ -1,406 +0,0 @@
1
- #!/usr/bin/env python
2
- # -*- coding: utf-8 -*-
3
- """Asian Language Treebank (ALT) Project"""
4
-
5
-
6
- import os
7
-
8
- import datasets
9
-
10
-
11
- _CITATION = """\
12
- @inproceedings{riza2016introduction,
13
- title={Introduction of the asian language treebank},
14
- author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},
15
- booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},
16
- pages={1--6},
17
- year={2016},
18
- organization={IEEE}
19
- }
20
- """
21
-
22
- _HOMEPAGE = "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/"
23
-
24
- _DESCRIPTION = """\
25
- The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under ASEAN IVO as described in this Web page. The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages. ALT now has 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, Chinese (Simplified Chinese).
26
- """
27
-
28
- _URLs = {
29
- "alt": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/ALT-Parallel-Corpus-20191206.zip",
30
- "alt-en": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/English-ALT-20210218.zip",
31
- "alt-jp": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/Japanese-ALT-20210218.zip",
32
- "alt-my": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/my-alt-190530.zip",
33
- "alt-my-transliteration": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/my-en-transliteration.zip",
34
- "alt-my-west-transliteration": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/western-myanmar-transliteration.zip",
35
- "alt-km": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/km-nova-181101.zip",
36
- }
37
-
38
- _SPLIT = {
39
- "train": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-train.txt",
40
- "dev": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-dev.txt",
41
- "test": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-test.txt",
42
- }
43
-
44
- _WIKI_URL = "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/ALT-Parallel-Corpus-20191206/URL.txt"
45
-
46
-
47
- class AltParallelConfig(datasets.BuilderConfig):
48
- """BuilderConfig for ALT Parallel."""
49
-
50
- def __init__(self, languages, **kwargs):
51
- """BuilderConfig for ALT Parallel.
52
-
53
- Args:
54
- languages: languages that will be used for translation.
55
- **kwargs: keyword arguments forwarded to super.
56
- """
57
-
58
- name = "alt-parallel"
59
-
60
- description = "ALT Parallel Corpus"
61
- super().__init__(
62
- name=name,
63
- description=description,
64
- version=datasets.Version("1.0.0", ""),
65
- **kwargs,
66
- )
67
-
68
- available_langs = {"bg", "en", "en_tok", "fil", "hi", "id", "ja", "khm", "lo", "ms", "my", "th", "vi", "zh"}
69
- for language in languages:
70
- assert language in available_langs
71
-
72
- self.languages = languages
73
-
74
-
75
- class Alt(datasets.GeneratorBasedBuilder):
76
- """Asian Language Treebank (ALT) Project"""
77
-
78
- BUILDER_CONFIGS = [
79
- AltParallelConfig(
80
- languages=["bg", "en", "en_tok", "fil", "hi", "id", "ja", "khm", "lo", "ms", "my", "th", "vi", "zh"]
81
- ),
82
- datasets.BuilderConfig(name="alt-en", version=datasets.Version("2.0.0"), description="English ALT 2021 version"),
83
- datasets.BuilderConfig(name="alt-jp", version=datasets.Version("2.0.0"), description="Japanese ALT 2021 version"),
84
- datasets.BuilderConfig(name="alt-my", version=datasets.Version("1.0.0"), description="Myanmar ALT"),
85
- datasets.BuilderConfig(name="alt-km", version=datasets.Version("1.0.0"), description="Khmer ALT"),
86
- datasets.BuilderConfig(
87
- name="alt-my-transliteration",
88
- version=datasets.Version("1.0.0"),
89
- description="Myanmar-English Transliteration Dataset",
90
- ),
91
- datasets.BuilderConfig(
92
- name="alt-my-west-transliteration",
93
- version=datasets.Version("1.0.0"),
94
- description="Latin-Myanmar Transliteration Dataset",
95
- ),
96
- ]
97
-
98
- DEFAULT_CONFIG_NAME = "alt-parallel"
99
-
100
- def _info(self):
101
- if self.config.name.startswith("alt-parallel"):
102
- features = datasets.Features(
103
- {
104
- "SNT.URLID": datasets.Value("string"),
105
- "SNT.URLID.SNTID": datasets.Value("string"),
106
- "url": datasets.Value("string"),
107
- "translation": datasets.features.Translation(languages=self.config.languages),
108
- }
109
- )
110
- elif self.config.name == "alt-en":
111
- features = datasets.Features(
112
- {
113
- "SNT.URLID": datasets.Value("string"),
114
- "SNT.URLID.SNTID": datasets.Value("string"),
115
- "url": datasets.Value("string"),
116
- "status": datasets.Value("string"),
117
- "value": datasets.Value("string"),
118
- }
119
- )
120
- elif self.config.name == "alt-jp":
121
- features = datasets.Features(
122
- {
123
- "SNT.URLID": datasets.Value("string"),
124
- "SNT.URLID.SNTID": datasets.Value("string"),
125
- "url": datasets.Value("string"),
126
- "status": datasets.Value("string"),
127
- "value": datasets.Value("string"),
128
- "word_alignment": datasets.Value("string"),
129
- "jp_tokenized": datasets.Value("string"),
130
- "en_tokenized": datasets.Value("string"),
131
- }
132
- )
133
- elif self.config.name == "alt-my":
134
- features = datasets.Features(
135
- {
136
- "SNT.URLID": datasets.Value("string"),
137
- "SNT.URLID.SNTID": datasets.Value("string"),
138
- "url": datasets.Value("string"),
139
- "value": datasets.Value("string"),
140
- }
141
- )
142
- elif self.config.name == "alt-my-transliteration":
143
- features = datasets.Features(
144
- {
145
- "en": datasets.Value("string"),
146
- "my": datasets.Sequence(datasets.Value("string")),
147
- }
148
- )
149
- elif self.config.name == "alt-my-west-transliteration":
150
- features = datasets.Features(
151
- {
152
- "en": datasets.Value("string"),
153
- "my": datasets.Sequence(datasets.Value("string")),
154
- }
155
- )
156
- elif self.config.name == "alt-km":
157
- features = datasets.Features(
158
- {
159
- "SNT.URLID": datasets.Value("string"),
160
- "SNT.URLID.SNTID": datasets.Value("string"),
161
- "url": datasets.Value("string"),
162
- "km_pos_tag": datasets.Value("string"),
163
- "km_tokenized": datasets.Value("string"),
164
- }
165
- )
166
- else:
167
- raise
168
-
169
- return datasets.DatasetInfo(
170
- description=_DESCRIPTION,
171
- features=features,
172
- supervised_keys=None,
173
- homepage=_HOMEPAGE,
174
- citation=_CITATION,
175
- )
176
-
177
- def _split_generators(self, dl_manager):
178
- if self.config.name.startswith("alt-parallel"):
179
- data_path = dl_manager.download_and_extract(_URLs["alt"])
180
- else:
181
- data_path = dl_manager.download_and_extract(_URLs[self.config.name])
182
-
183
- if self.config.name in {"alt-my-transliteration", "alt-my-west-transliteration"}:
184
- return [
185
- datasets.SplitGenerator(
186
- name=datasets.Split.TRAIN,
187
- gen_kwargs={"basepath": data_path, "split": None},
188
- )
189
- ]
190
- else:
191
- data_split = dl_manager.download(_SPLIT)
192
-
193
- return [
194
- datasets.SplitGenerator(
195
- name=datasets.Split.TRAIN,
196
- gen_kwargs={"basepath": data_path, "split": data_split["train"]},
197
- ),
198
- datasets.SplitGenerator(
199
- name=datasets.Split.VALIDATION,
200
- gen_kwargs={"basepath": data_path, "split": data_split["dev"]},
201
- ),
202
- datasets.SplitGenerator(
203
- name=datasets.Split.TEST,
204
- gen_kwargs={"basepath": data_path, "split": data_split["test"]},
205
- ),
206
- ]
207
-
208
- def _generate_examples(self, basepath, split=None):
209
- allow_urls = {}
210
- if split is not None:
211
- with open(split, encoding="utf-8") as fin:
212
- for line in fin:
213
- sp = line.strip().split("\t")
214
- urlid = sp[0].replace("URL.", "")
215
- allow_urls[urlid] = {"SNT.URLID": urlid, "url": sp[1]}
216
-
217
- if self.config.name.startswith("alt-parallel"):
218
- data = {}
219
- for lang in self.config.languages:
220
- file_path = os.path.join(basepath, "ALT-Parallel-Corpus-20191206", f"data_{lang}.txt")
221
- with open(file_path, encoding="utf-8") as fin:
222
- for line in fin:
223
- line = line.strip()
224
- sp = line.split("\t")
225
-
226
- _, urlid, sntid = sp[0].split(".")
227
- # Some lines have a trailing blank space: "SNT.102053.5598 " in data_fil.txt
228
- sntid = sntid.strip()
229
- if urlid not in allow_urls:
230
- continue
231
-
232
- if sntid not in data:
233
- data[sntid] = {
234
- "SNT.URLID": urlid,
235
- "SNT.URLID.SNTID": sntid,
236
- "url": allow_urls[urlid]["url"],
237
- "translation": {},
238
- }
239
-
240
- # Note that Japanese and Myanmar texts have empty sentence fields in this release.
241
- if len(sp) >= 2:
242
- data[sntid]["translation"][lang] = sp[1]
243
-
244
- for _id, item in enumerate(data.values()):
245
- yield _id, item
246
-
247
- elif self.config.name == "alt-en":
248
- data = {}
249
- for fname in ["English-ALT-Draft.txt", "English-ALT-Reviewed.txt"]:
250
- file_path = os.path.join(basepath, "English-ALT-20210218", fname)
251
- with open(file_path, encoding="utf-8") as fin:
252
- for line in fin:
253
- line = line.strip()
254
- sp = line.split("\t")
255
-
256
- _, urlid, sntid = sp[0].split(".")
257
- if urlid not in allow_urls:
258
- continue
259
-
260
- d = {
261
- "SNT.URLID": urlid,
262
- "SNT.URLID.SNTID": sntid,
263
- "url": allow_urls[urlid]["url"],
264
- "status": "draft" if fname == "English-ALT-Draft.txt" else "reviewed",
265
- "value": sp[1],
266
- }
267
-
268
- data[sntid] = d
269
-
270
- for _id, item in enumerate(data.values()):
271
- yield _id, item
272
-
273
- elif self.config.name == "alt-jp":
274
- data = {}
275
- for fname in ["Japanese-ALT-Draft.txt", "Japanese-ALT-Reviewed.txt"]:
276
- file_path = os.path.join(basepath, "Japanese-ALT-20210218", fname)
277
- with open(file_path, encoding="utf-8") as fin:
278
- for line in fin:
279
- line = line.strip()
280
- sp = line.split("\t")
281
- _, urlid, sntid = sp[0].split(".")
282
- if urlid not in allow_urls:
283
- continue
284
-
285
- d = {
286
- "SNT.URLID": urlid,
287
- "SNT.URLID.SNTID": sntid,
288
- "url": allow_urls[urlid]["url"],
289
- "value": sp[1],
290
- "status": "draft" if fname == "Japanese-ALT-Draft.txt" else "reviewed",
291
- "word_alignment": None,
292
- "en_tokenized": None,
293
- "jp_tokenized": None,
294
- }
295
-
296
- data[sntid] = d
297
-
298
- keys = {
299
- "word_alignment": "word-alignment/data_ja.en-ja",
300
- "en_tokenized": "word-alignment/data_ja.en-tok",
301
- "jp_tokenized": "word-alignment/data_ja.ja-tok",
302
- }
303
- for k in keys:
304
- file_path = os.path.join(basepath, "Japanese-ALT-20210218", keys[k])
305
- with open(file_path, encoding="utf-8") as fin:
306
- for line in fin:
307
- line = line.strip()
308
- sp = line.split("\t")
309
-
310
- # Note that Japanese and Myanmar texts have empty sentence fields in this release.
311
- if len(sp) < 2:
312
- continue
313
-
314
- _, urlid, sntid = sp[0].split(".")
315
- if urlid not in allow_urls:
316
- continue
317
-
318
- if sntid in data:
319
- data[sntid][k] = sp[1]
320
-
321
- for _id, item in enumerate(data.values()):
322
- yield _id, item
323
-
324
- elif self.config.name == "alt-my":
325
- _id = 0
326
- for fname in ["data"]:
327
- file_path = os.path.join(basepath, "my-alt-190530", fname)
328
- with open(file_path, encoding="utf-8") as fin:
329
- for line in fin:
330
- line = line.strip()
331
- sp = line.split("\t")
332
- _, urlid, sntid = sp[0].split(".")
333
- if urlid not in allow_urls:
334
- continue
335
-
336
- yield _id, {
337
- "SNT.URLID": urlid,
338
- "SNT.URLID.SNTID": sntid,
339
- "url": allow_urls[urlid]["url"],
340
- "value": sp[1],
341
- }
342
- _id += 1
343
-
344
- elif self.config.name == "alt-km":
345
- data = {}
346
- for fname in ["data_km.km-tag.nova", "data_km.km-tok.nova"]:
347
- file_path = os.path.join(basepath, "km-nova-181101", fname)
348
- with open(file_path, encoding="utf-8") as fin:
349
- for line in fin:
350
- line = line.strip()
351
- sp = line.split("\t")
352
- _, urlid, sntid = sp[0].split(".")
353
- if urlid not in allow_urls:
354
- continue
355
-
356
- k = "km_pos_tag" if fname == "data_km.km-tag.nova" else "km_tokenized"
357
- if sntid not in data:
358
- data[sntid] = {
359
- "SNT.URLID": urlid,
360
- "SNT.URLID.SNTID": sntid,
361
- "url": allow_urls[urlid]["url"],
362
- "km_pos_tag": None,
363
- "km_tokenized": None,
364
- }
365
- data[sntid][k] = sp[1]
366
-
367
- for _id, item in enumerate(data.values()):
368
- yield _id, item
369
-
370
- elif self.config.name == "alt-my-transliteration":
371
- file_path = os.path.join(basepath, "my-en-transliteration", "data.txt")
372
- # Need to set errors='ignore' because of the unknown error
373
- # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
374
- # It might due to some issues related to Myanmar alphabets
375
- with open(file_path, encoding="utf-8", errors="ignore") as fin:
376
- for _id, line in enumerate(fin):
377
- line = line.strip()
378
-
379
- # I don't know why there are \x00 between |||. They don't show in the editor.
380
- line = line.replace("\x00", "")
381
- sp = line.split("|||")
382
-
383
- # When I read data, it seems to have empty sentence betweem the actual sentence. Don't know why?
384
- if len(sp) < 2:
385
- continue
386
-
387
- yield _id, {
388
- "en": sp[0].strip(),
389
- "my": [sp[1].strip()],
390
- }
391
-
392
- elif self.config.name == "alt-my-west-transliteration":
393
- file_path = os.path.join(basepath, "western-myanmar-transliteration", "321.txt")
394
- # Need to set errors='ignore' because of the unknown error
395
- # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
396
- # It might due to some issues related to Myanmar alphabets
397
- with open(file_path, encoding="utf-8", errors="ignore") as fin:
398
- for _id, line in enumerate(fin):
399
- line = line.strip()
400
- line = line.replace("\x00", "")
401
- sp = line.split("|||")
402
-
403
- yield _id, {
404
- "en": sp[0].strip(),
405
- "my": [k.strip() for k in sp[1].split("|")],
406
- }