albertvillanova committed
Commit f6569d8
1 Parent(s): 03d8b46

Refactor with context managers for reading files (#9)


- Refactor dict literals, docstring and download splits (a2a086c3cc01c82a8e42d23442ca06eec344d0f0)
- Use context managers for reading files (8a5bfcbec0586eaf4c23950c6e06becf2d539e02); see the sketch after this list
- Refactor (52edc1241ee7657ed7521c13ab1d4d1c138e9669)
- Refactor yield per config (8c13accca8a7f73dc3129d093f1a245ef86ce23f)
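
The core change replaces manual open()/close() pairs with `with` blocks, which close the file even when parsing raises mid-loop. A minimal before/after sketch of the pattern (the file name and the tab-splitting are placeholder assumptions, not the exact alt.py logic):

    # Before: the handle stays open if an exception interrupts the loop.
    fin = open("data_en.txt", encoding="utf-8")
    for line in fin:
        fields = line.strip().split("\t")
    fin.close()

    # After: the context manager closes the file on normal exit and on error.
    with open("data_en.txt", encoding="utf-8") as fin:
        for line in fin:
            fields = line.strip().split("\t")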

Files changed (1)
  1. alt.py +144 -162
alt.py CHANGED
@@ -45,31 +45,27 @@ _WIKI_URL = "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/ALT-Parallel
 
 
 class AltParallelConfig(datasets.BuilderConfig):
-    """BuilderConfig for ALT."""
+    """BuilderConfig for ALT Parallel."""
 
     def __init__(self, languages, **kwargs):
-        """BuilderConfig for ALT.
+        """BuilderConfig for ALT Parallel.
 
         Args:
-        for the `datasets.features.text.TextEncoder` used for the features feature.
-
-          languages: languages that will be used for translation. it should be one of the
+            languages: languages that will be used for translation.
             **kwargs: keyword arguments forwarded to super.
         """
 
         name = "alt-parallel"
 
         description = "ALT Parallel Corpus"
-        super(AltParallelConfig, self).__init__(
+        super().__init__(
            name=name,
            description=description,
            version=datasets.Version("1.0.0", ""),
            **kwargs,
        )
 
-        available_langs = set(
-            ["bg", "en", "en_tok", "fil", "hi", "id", "ja", "khm", "lo", "ms", "my", "th", "vi", "zh"]
-        )
+        available_langs = {"bg", "en", "en_tok", "fil", "hi", "id", "ja", "khm", "lo", "ms", "my", "th", "vi", "zh"}
         for language in languages:
             assert language in available_langs
 
@@ -184,7 +180,7 @@ class Alt(datasets.GeneratorBasedBuilder):
         else:
             data_path = dl_manager.download_and_extract(_URLs[self.config.name])
 
-        if self.config.name == "alt-my-transliteration" or self.config.name == "alt-my-west-transliteration":
+        if self.config.name in {"alt-my-transliteration", "alt-my-west-transliteration"}:
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
@@ -192,9 +188,7 @@ class Alt(datasets.GeneratorBasedBuilder):
                 )
             ]
         else:
-            data_split = {}
-            for k in _SPLIT:
-                data_split[k] = dl_manager.download_and_extract(_SPLIT[k])
+            data_split = dl_manager.download(_SPLIT)
 
             return [
                 datasets.SplitGenerator(
@@ -220,95 +214,86 @@ class Alt(datasets.GeneratorBasedBuilder):
                 urlid = sp[0].replace("URL.", "")
                 allow_urls[urlid] = {"SNT.URLID": urlid, "url": sp[1]}
 
-        data = {}
         if self.config.name.startswith("alt-parallel"):
-            files = self.config.languages
-
             data = {}
-            for lang in files:
+            for lang in self.config.languages:
                 file_path = os.path.join(basepath, "ALT-Parallel-Corpus-20191206", f"data_{lang}.txt")
-                fin = open(file_path, encoding="utf-8")
-                for line in fin:
-                    line = line.strip()
-                    sp = line.split("\t")
-
-                    _, urlid, sntid = sp[0].split(".")
-                    sntid = sntid.strip()  # Some lines have a trailing blank space: "SNT.102053.5598 " in data_fil.txt
-                    if urlid not in allow_urls:
-                        continue
-
-                    if sntid not in data:
-                        data[sntid] = {}
-                        data[sntid]["SNT.URLID"] = urlid
-                        data[sntid]["SNT.URLID.SNTID"] = sntid
-                        data[sntid]["url"] = allow_urls[urlid]["url"]
-                        data[sntid]["translation"] = {}
-
-                    # Note that Japanese and Myanmar texts have empty sentence fields in this release.
-                    if len(sp) >= 2:
-                        data[sntid]["translation"][lang] = sp[1]
-
-                fin.close()
+                with open(file_path, encoding="utf-8") as fin:
+                    for line in fin:
+                        line = line.strip()
+                        sp = line.split("\t")
+
+                        _, urlid, sntid = sp[0].split(".")
+                        # Some lines have a trailing blank space: "SNT.102053.5598 " in data_fil.txt
+                        sntid = sntid.strip()
+                        if urlid not in allow_urls:
+                            continue
+
+                        if sntid not in data:
+                            data[sntid] = {
+                                "SNT.URLID": urlid,
+                                "SNT.URLID.SNTID": sntid,
+                                "url": allow_urls[urlid]["url"],
+                                "translation": {},
+                            }
+
+                        # Note that Japanese and Myanmar texts have empty sentence fields in this release.
+                        if len(sp) >= 2:
+                            data[sntid]["translation"][lang] = sp[1]
+
+            for _id, item in enumerate(data.values()):
+                yield _id, item
 
         elif self.config.name == "alt-en":
             data = {}
             for fname in ["English-ALT-Draft.txt", "English-ALT-Reviewed.txt"]:
                 file_path = os.path.join(basepath, "English-ALT-20210218", fname)
-                fin = open(file_path, encoding="utf-8")
-                for line in fin:
-                    line = line.strip()
-                    sp = line.split("\t")
+                with open(file_path, encoding="utf-8") as fin:
+                    for line in fin:
+                        line = line.strip()
+                        sp = line.split("\t")
 
-                    _, urlid, sntid = sp[0].split(".")
-                    if urlid not in allow_urls:
-                        continue
+                        _, urlid, sntid = sp[0].split(".")
+                        if urlid not in allow_urls:
+                            continue
 
-                    d = {
-                        "SNT.URLID": urlid,
-                        "SNT.URLID.SNTID": sntid,
-                        "url": allow_urls[urlid]["url"],
-                        "status": None,
-                        "value": None,
-                    }
-
-                    d["value"] = sp[1]
-                    if fname == "English-ALT-Draft.txt":
-                        d["status"] = "draft"
-                    else:
-                        d["status"] = "reviewed"
-
-                    data[sntid] = d
-                fin.close()
+                        d = {
+                            "SNT.URLID": urlid,
+                            "SNT.URLID.SNTID": sntid,
+                            "url": allow_urls[urlid]["url"],
+                            "status": "draft" if fname == "English-ALT-Draft.txt" else "reviewed",
+                            "value": sp[1],
+                        }
+
+                        data[sntid] = d
+
+            for _id, item in enumerate(data.values()):
+                yield _id, item
 
         elif self.config.name == "alt-jp":
             data = {}
             for fname in ["Japanese-ALT-Draft.txt", "Japanese-ALT-Reviewed.txt"]:
                 file_path = os.path.join(basepath, "Japanese-ALT-20210218", fname)
-                fin = open(file_path, encoding="utf-8")
-                for line in fin:
-                    line = line.strip()
-                    sp = line.split("\t")
-                    _, urlid, sntid = sp[0].split(".")
-                    if urlid not in allow_urls:
-                        continue
-
-                    d = {
-                        "SNT.URLID": urlid,
-                        "SNT.URLID.SNTID": sntid,
-                        "url": allow_urls[urlid]["url"],
-                        "value": None,
-                        "status": None,
-                        "word_alignment": None,
-                        "en_tokenized": None,
-                        "jp_tokenized": None,
-                    }
-
-                    d["value"] = sp[1]
-                    if fname == "Japanese-ALT-Draft.txt":
-                        d["status"] = "draft"
-                    else:
-                        d["status"] = "reviewed"
-                    data[sntid] = d
-                fin.close()
+                with open(file_path, encoding="utf-8") as fin:
+                    for line in fin:
+                        line = line.strip()
+                        sp = line.split("\t")
+                        _, urlid, sntid = sp[0].split(".")
+                        if urlid not in allow_urls:
+                            continue
+
+                        d = {
+                            "SNT.URLID": urlid,
+                            "SNT.URLID.SNTID": sntid,
+                            "url": allow_urls[urlid]["url"],
+                            "value": sp[1],
+                            "status": "draft" if fname == "Japanese-ALT-Draft.txt" else "reviewed",
+                            "word_alignment": None,
+                            "en_tokenized": None,
+                            "jp_tokenized": None,
+                        }
+
+                        data[sntid] = d
 
             keys = {
                 "word_alignment": "word-alignment/data_ja.en-ja",
@@ -317,108 +302,105 @@ class Alt(datasets.GeneratorBasedBuilder):
             }
             for k in keys:
                 file_path = os.path.join(basepath, "Japanese-ALT-20210218", keys[k])
-                fin = open(file_path, encoding="utf-8")
-                for line in fin:
-                    line = line.strip()
-                    sp = line.split("\t")
+                with open(file_path, encoding="utf-8") as fin:
+                    for line in fin:
+                        line = line.strip()
+                        sp = line.split("\t")
 
-                    # Note that Japanese and Myanmar texts have empty sentence fields in this release.
-                    if len(sp) < 2:
-                        continue
+                        # Note that Japanese and Myanmar texts have empty sentence fields in this release.
+                        if len(sp) < 2:
+                            continue
 
-                    _, urlid, sntid = sp[0].split(".")
-                    if urlid not in allow_urls:
-                        continue
+                        _, urlid, sntid = sp[0].split(".")
+                        if urlid not in allow_urls:
+                            continue
 
-                    if sntid in data:
-                        data[sntid][k] = sp[1]
-                fin.close()
+                        if sntid in data:
+                            data[sntid][k] = sp[1]
+
+            for _id, item in enumerate(data.values()):
+                yield _id, item
 
         elif self.config.name == "alt-my":
-            data = {}
+            _id = 0
             for fname in ["data"]:
                 file_path = os.path.join(basepath, "my-alt-190530", fname)
-                fin = open(file_path, encoding="utf-8")
-                for line in fin:
-                    line = line.strip()
-                    sp = line.split("\t")
-                    _, urlid, sntid = sp[0].split(".")
-                    if urlid not in allow_urls:
-                        continue
-
-                    data[sntid] = {
-                        "SNT.URLID": urlid,
-                        "SNT.URLID.SNTID": sntid,
-                        "url": allow_urls[urlid]["url"],
-                        "value": sp[1],
-                    }
-                fin.close()
+                with open(file_path, encoding="utf-8") as fin:
+                    for line in fin:
+                        line = line.strip()
+                        sp = line.split("\t")
+                        _, urlid, sntid = sp[0].split(".")
+                        if urlid not in allow_urls:
+                            continue
+
+                        yield _id, {
+                            "SNT.URLID": urlid,
+                            "SNT.URLID.SNTID": sntid,
+                            "url": allow_urls[urlid]["url"],
+                            "value": sp[1],
+                        }
+                        _id += 1
 
         elif self.config.name == "alt-km":
             data = {}
             for fname in ["data_km.km-tag.nova", "data_km.km-tok.nova"]:
                 file_path = os.path.join(basepath, "km-nova-181101", fname)
-                fin = open(file_path, encoding="utf-8")
-                for line in fin:
-                    line = line.strip()
-                    sp = line.split("\t")
-                    _, urlid, sntid = sp[0].split(".")
-                    if urlid not in allow_urls:
-                        continue
-
-                    k = "km_pos_tag" if fname == "data_km.km-tag.nova" else "km_tokenized"
-                    if sntid in data:
+                with open(file_path, encoding="utf-8") as fin:
+                    for line in fin:
+                        line = line.strip()
+                        sp = line.split("\t")
+                        _, urlid, sntid = sp[0].split(".")
+                        if urlid not in allow_urls:
+                            continue
+
+                        k = "km_pos_tag" if fname == "data_km.km-tag.nova" else "km_tokenized"
+                        if sntid not in data:
+                            data[sntid] = {
+                                "SNT.URLID": urlid,
+                                "SNT.URLID.SNTID": sntid,
+                                "url": allow_urls[urlid]["url"],
+                                "km_pos_tag": None,
+                                "km_tokenized": None,
+                            }
                         data[sntid][k] = sp[1]
-                    else:
-                        data[sntid] = {
-                            "SNT.URLID": urlid,
-                            "SNT.URLID.SNTID": sntid,
-                            "url": allow_urls[urlid]["url"],
-                            "km_pos_tag": None,
-                            "km_tokenized": None,
-                        }
-                        data[sntid][k] = sp[1]
-                fin.close()
+
+            for _id, item in enumerate(data.values()):
+                yield _id, item
 
         elif self.config.name == "alt-my-transliteration":
             file_path = os.path.join(basepath, "my-en-transliteration", "data.txt")
             # Need to set errors='ignore' because of the unknown error
             # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
             # It might due to some issues related to Myanmar alphabets
-            fin = open(file_path, encoding="utf-8", errors="ignore")
-            _id = 0
-            for line in fin:
-                line = line.strip()
+            with open(file_path, encoding="utf-8", errors="ignore") as fin:
+                for _id, line in enumerate(fin):
+                    line = line.strip()
 
-                # I don't know why there are \x00 between |||. They don't show in the editor.
-                line = line.replace("\x00", "")
-                sp = line.split("|||")
+                    # I don't know why there are \x00 between |||. They don't show in the editor.
+                    line = line.replace("\x00", "")
+                    sp = line.split("|||")
 
-                # When I read data, it seems to have empty sentence betweem the actual sentence. Don't know why?
-                if len(sp) < 2:
-                    continue
-
-                data[_id] = {"en": sp[0].strip(), "my": [sp[1].strip()]}
-                _id += 1
-            fin.close()
+                    # When I read data, it seems to have empty sentence betweem the actual sentence. Don't know why?
+                    if len(sp) < 2:
+                        continue
+
+                    yield _id, {
+                        "en": sp[0].strip(),
+                        "my": [sp[1].strip()],
+                    }
 
         elif self.config.name == "alt-my-west-transliteration":
             file_path = os.path.join(basepath, "western-myanmar-transliteration", "321.txt")
             # Need to set errors='ignore' because of the unknown error
             # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
             # It might due to some issues related to Myanmar alphabets
-            fin = open(file_path, encoding="utf-8", errors="ignore")
-            _id = 0
-            for line in fin:
-                line = line.strip()
-                line = line.replace("\x00", "")
-                sp = line.split("|||")
-
-                data[_id] = {"en": sp[0].strip(), "my": [k.strip() for k in sp[1].split("|")]}
-                _id += 1
-            fin.close()
-
-        _id = 1
-        for k in data:
-            yield _id, data[k]
-            _id += 1
+            with open(file_path, encoding="utf-8", errors="ignore") as fin:
+                for _id, line in enumerate(fin):
+                    line = line.strip()
+                    line = line.replace("\x00", "")
+                    sp = line.split("|||")
+
+                    yield _id, {
+                        "en": sp[0].strip(),
+                        "my": [k.strip() for k in sp[1].split("|")],
+                    }
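
Two of the refactors above are worth spelling out. "Refactor yield per config" moves the final numbering loop into each config branch, replacing the manual `_id` counter with `enumerate` over the collected examples. A self-contained sketch of the pattern (the `rows` input and `generate_examples` helper are hypothetical, not part of alt.py):

    # Each branch builds its own `data` dict and yields numbered examples.
    def generate_examples(rows):
        data = {}
        for sntid, value in rows:
            data[sntid] = {"SNT.URLID.SNTID": sntid, "value": value}
        # enumerate(data.values()) replaces the old manual _id counter
        for _id, item in enumerate(data.values()):
            yield _id, item

    for key, example in generate_examples([("5598", "hello"), ("5599", "world")]):
        print(key, example)

The download-splits change relies on `DownloadManager.download` accepting a dict of URLs and returning a dict of local paths with the same keys, which is what lets the old per-split loop collapse into the single call `data_split = dl_manager.download(_SPLIT)`.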