albertvillanova HF staff committed on
Commit
8a5bfcb
1 Parent(s): a2a086c

Use context managers for reading files

Browse files
Files changed (1) hide show
  1. alt.py +135 -145
alt.py CHANGED
@@ -221,88 +221,83 @@ class Alt(datasets.GeneratorBasedBuilder):
221
  data = {}
222
  for lang in files:
223
  file_path = os.path.join(basepath, "ALT-Parallel-Corpus-20191206", f"data_{lang}.txt")
224
- fin = open(file_path, encoding="utf-8")
225
- for line in fin:
226
- line = line.strip()
227
- sp = line.split("\t")
228
-
229
- _, urlid, sntid = sp[0].split(".")
230
- sntid = sntid.strip() # Some lines have a trailing blank space: "SNT.102053.5598 " in data_fil.txt
231
- if urlid not in allow_urls:
232
- continue
233
-
234
- if sntid not in data:
235
- data[sntid] = {}
236
- data[sntid]["SNT.URLID"] = urlid
237
- data[sntid]["SNT.URLID.SNTID"] = sntid
238
- data[sntid]["url"] = allow_urls[urlid]["url"]
239
- data[sntid]["translation"] = {}
240
-
241
- # Note that Japanese and Myanmar texts have empty sentence fields in this release.
242
- if len(sp) >= 2:
243
- data[sntid]["translation"][lang] = sp[1]
244
-
245
- fin.close()
246
 
247
  elif self.config.name == "alt-en":
248
  data = {}
249
  for fname in ["English-ALT-Draft.txt", "English-ALT-Reviewed.txt"]:
250
  file_path = os.path.join(basepath, "English-ALT-20210218", fname)
251
- fin = open(file_path, encoding="utf-8")
252
- for line in fin:
253
- line = line.strip()
254
- sp = line.split("\t")
255
 
256
- _, urlid, sntid = sp[0].split(".")
257
- if urlid not in allow_urls:
258
- continue
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
259
 
260
- d = {
261
- "SNT.URLID": urlid,
262
- "SNT.URLID.SNTID": sntid,
263
- "url": allow_urls[urlid]["url"],
264
- "status": None,
265
- "value": None,
266
- }
267
-
268
- d["value"] = sp[1]
269
- if fname == "English-ALT-Draft.txt":
270
- d["status"] = "draft"
271
- else:
272
- d["status"] = "reviewed"
273
-
274
- data[sntid] = d
275
- fin.close()
276
  elif self.config.name == "alt-jp":
277
  data = {}
278
  for fname in ["Japanese-ALT-Draft.txt", "Japanese-ALT-Reviewed.txt"]:
279
  file_path = os.path.join(basepath, "Japanese-ALT-20210218", fname)
280
- fin = open(file_path, encoding="utf-8")
281
- for line in fin:
282
- line = line.strip()
283
- sp = line.split("\t")
284
- _, urlid, sntid = sp[0].split(".")
285
- if urlid not in allow_urls:
286
- continue
 
 
 
 
 
 
 
 
 
 
 
287
 
288
- d = {
289
- "SNT.URLID": urlid,
290
- "SNT.URLID.SNTID": sntid,
291
- "url": allow_urls[urlid]["url"],
292
- "value": None,
293
- "status": None,
294
- "word_alignment": None,
295
- "en_tokenized": None,
296
- "jp_tokenized": None,
297
- }
298
-
299
- d["value"] = sp[1]
300
- if fname == "Japanese-ALT-Draft.txt":
301
- d["status"] = "draft"
302
- else:
303
- d["status"] = "reviewed"
304
- data[sntid] = d
305
- fin.close()
306
 
307
  keys = {
308
  "word_alignment": "word-alignment/data_ja.en-ja",
@@ -311,106 +306,101 @@ class Alt(datasets.GeneratorBasedBuilder):
311
  }
312
  for k in keys:
313
  file_path = os.path.join(basepath, "Japanese-ALT-20210218", keys[k])
314
- fin = open(file_path, encoding="utf-8")
315
- for line in fin:
316
- line = line.strip()
317
- sp = line.split("\t")
318
-
319
- # Note that Japanese and Myanmar texts have empty sentence fields in this release.
320
- if len(sp) < 2:
321
- continue
322
 
323
- _, urlid, sntid = sp[0].split(".")
324
- if urlid not in allow_urls:
325
- continue
326
 
327
- if sntid in data:
 
 
328
 
329
- data[sntid][k] = sp[1]
330
- fin.close()
331
 
332
  elif self.config.name == "alt-my":
333
  data = {}
334
  for fname in ["data"]:
335
  file_path = os.path.join(basepath, "my-alt-190530", fname)
336
- fin = open(file_path, encoding="utf-8")
337
- for line in fin:
338
- line = line.strip()
339
- sp = line.split("\t")
340
- _, urlid, sntid = sp[0].split(".")
341
- if urlid not in allow_urls:
342
- continue
343
 
344
- data[sntid] = {
345
- "SNT.URLID": urlid,
346
- "SNT.URLID.SNTID": sntid,
347
- "url": allow_urls[urlid]["url"],
348
- "value": sp[1],
349
- }
350
- fin.close()
351
-
352
- elif self.config.name == "alt-km":
353
- data = {}
354
- for fname in ["data_km.km-tag.nova", "data_km.km-tok.nova"]:
355
- file_path = os.path.join(basepath, "km-nova-181101", fname)
356
- fin = open(file_path, encoding="utf-8")
357
- for line in fin:
358
- line = line.strip()
359
- sp = line.split("\t")
360
- _, urlid, sntid = sp[0].split(".")
361
- if urlid not in allow_urls:
362
- continue
363
-
364
- k = "km_pos_tag" if fname == "data_km.km-tag.nova" else "km_tokenized"
365
- if sntid in data:
366
- data[sntid][k] = sp[1]
367
- else:
368
  data[sntid] = {
369
  "SNT.URLID": urlid,
370
  "SNT.URLID.SNTID": sntid,
371
  "url": allow_urls[urlid]["url"],
372
- "km_pos_tag": None,
373
- "km_tokenized": None,
374
  }
375
- data[sntid][k] = sp[1]
376
- fin.close()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
377
 
378
  elif self.config.name == "alt-my-transliteration":
379
  file_path = os.path.join(basepath, "my-en-transliteration", "data.txt")
380
  # Need to set errors='ignore' because of the unknown error
381
  # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
382
  # It might be due to some issues related to Myanmar alphabets
383
- fin = open(file_path, encoding="utf-8", errors="ignore")
384
- _id = 0
385
- for line in fin:
386
- line = line.strip()
387
-
388
- # I don't know why there are \x00 between |||. They don't show in the editor.
389
- line = line.replace("\x00", "")
390
- sp = line.split("|||")
391
-
392
- # When I read data, it seems to have empty sentence betweem the actual sentence. Don't know why?
393
- if len(sp) < 2:
394
- continue
395
-
396
- data[_id] = {"en": sp[0].strip(), "my": [sp[1].strip()]}
397
- _id += 1
398
- fin.close()
399
  elif self.config.name == "alt-my-west-transliteration":
400
  file_path = os.path.join(basepath, "western-myanmar-transliteration", "321.txt")
401
  # Need to set errors='ignore' because of the unknown error
402
  # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
403
  # It might be due to some issues related to Myanmar alphabets
404
- fin = open(file_path, encoding="utf-8", errors="ignore")
405
- _id = 0
406
- for line in fin:
407
- line = line.strip()
408
- line = line.replace("\x00", "")
409
- sp = line.split("|||")
410
-
411
- data[_id] = {"en": sp[0].strip(), "my": [k.strip() for k in sp[1].split("|")]}
412
- _id += 1
413
- fin.close()
414
 
415
  _id = 1
416
  for k in data:
 
221
  data = {}
222
  for lang in files:
223
  file_path = os.path.join(basepath, "ALT-Parallel-Corpus-20191206", f"data_{lang}.txt")
224
+ with open(file_path, encoding="utf-8") as fin:
225
+ for line in fin:
226
+ line = line.strip()
227
+ sp = line.split("\t")
228
+
229
+ _, urlid, sntid = sp[0].split(".")
230
+ sntid = sntid.strip() # Some lines have a trailing blank space: "SNT.102053.5598 " in data_fil.txt
231
+ if urlid not in allow_urls:
232
+ continue
233
+
234
+ if sntid not in data:
235
+ data[sntid] = {}
236
+ data[sntid]["SNT.URLID"] = urlid
237
+ data[sntid]["SNT.URLID.SNTID"] = sntid
238
+ data[sntid]["url"] = allow_urls[urlid]["url"]
239
+ data[sntid]["translation"] = {}
240
+
241
+ # Note that Japanese and Myanmar texts have empty sentence fields in this release.
242
+ if len(sp) >= 2:
243
+ data[sntid]["translation"][lang] = sp[1]
 
 
244
 
245
  elif self.config.name == "alt-en":
246
  data = {}
247
  for fname in ["English-ALT-Draft.txt", "English-ALT-Reviewed.txt"]:
248
  file_path = os.path.join(basepath, "English-ALT-20210218", fname)
249
+ with open(file_path, encoding="utf-8") as fin:
250
+ for line in fin:
251
+ line = line.strip()
252
+ sp = line.split("\t")
253
 
254
+ _, urlid, sntid = sp[0].split(".")
255
+ if urlid not in allow_urls:
256
+ continue
257
+
258
+ d = {
259
+ "SNT.URLID": urlid,
260
+ "SNT.URLID.SNTID": sntid,
261
+ "url": allow_urls[urlid]["url"],
262
+ "status": None,
263
+ "value": sp[1],
264
+ }
265
+
266
+ if fname == "English-ALT-Draft.txt":
267
+ d["status"] = "draft"
268
+ else:
269
+ d["status"] = "reviewed"
270
+
271
+ data[sntid] = d
272
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
273
  elif self.config.name == "alt-jp":
274
  data = {}
275
  for fname in ["Japanese-ALT-Draft.txt", "Japanese-ALT-Reviewed.txt"]:
276
  file_path = os.path.join(basepath, "Japanese-ALT-20210218", fname)
277
+ with open(file_path, encoding="utf-8") as fin:
278
+ for line in fin:
279
+ line = line.strip()
280
+ sp = line.split("\t")
281
+ _, urlid, sntid = sp[0].split(".")
282
+ if urlid not in allow_urls:
283
+ continue
284
+
285
+ d = {
286
+ "SNT.URLID": urlid,
287
+ "SNT.URLID.SNTID": sntid,
288
+ "url": allow_urls[urlid]["url"],
289
+ "value": sp[1],
290
+ "status": None,
291
+ "word_alignment": None,
292
+ "en_tokenized": None,
293
+ "jp_tokenized": None,
294
+ }
295
 
296
+ if fname == "Japanese-ALT-Draft.txt":
297
+ d["status"] = "draft"
298
+ else:
299
+ d["status"] = "reviewed"
300
+ data[sntid] = d
 
 
 
 
 
 
 
 
 
 
 
 
 
301
 
302
  keys = {
303
  "word_alignment": "word-alignment/data_ja.en-ja",
 
306
  }
307
  for k in keys:
308
  file_path = os.path.join(basepath, "Japanese-ALT-20210218", keys[k])
309
+ with open(file_path, encoding="utf-8") as fin:
310
+ for line in fin:
311
+ line = line.strip()
312
+ sp = line.split("\t")
 
 
 
 
313
 
314
+ # Note that Japanese and Myanmar texts have empty sentence fields in this release.
315
+ if len(sp) < 2:
316
+ continue
317
 
318
+ _, urlid, sntid = sp[0].split(".")
319
+ if urlid not in allow_urls:
320
+ continue
321
 
322
+ if sntid in data:
323
+ data[sntid][k] = sp[1]
324
 
325
  elif self.config.name == "alt-my":
326
  data = {}
327
  for fname in ["data"]:
328
  file_path = os.path.join(basepath, "my-alt-190530", fname)
329
+ with open(file_path, encoding="utf-8") as fin:
330
+ for line in fin:
331
+ line = line.strip()
332
+ sp = line.split("\t")
333
+ _, urlid, sntid = sp[0].split(".")
334
+ if urlid not in allow_urls:
335
+ continue
336
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
337
  data[sntid] = {
338
  "SNT.URLID": urlid,
339
  "SNT.URLID.SNTID": sntid,
340
  "url": allow_urls[urlid]["url"],
341
+ "value": sp[1],
 
342
  }
343
+
344
+ elif self.config.name == "alt-km":
345
+ data = {}
346
+ for fname in ["data_km.km-tag.nova", "data_km.km-tok.nova"]:
347
+ file_path = os.path.join(basepath, "km-nova-181101", fname)
348
+ with open(file_path, encoding="utf-8") as fin:
349
+ for line in fin:
350
+ line = line.strip()
351
+ sp = line.split("\t")
352
+ _, urlid, sntid = sp[0].split(".")
353
+ if urlid not in allow_urls:
354
+ continue
355
+
356
+ k = "km_pos_tag" if fname == "data_km.km-tag.nova" else "km_tokenized"
357
+ if sntid in data:
358
+ data[sntid][k] = sp[1]
359
+ else:
360
+ data[sntid] = {
361
+ "SNT.URLID": urlid,
362
+ "SNT.URLID.SNTID": sntid,
363
+ "url": allow_urls[urlid]["url"],
364
+ "km_pos_tag": None,
365
+ "km_tokenized": None,
366
+ }
367
+ data[sntid][k] = sp[1]
368
 
369
  elif self.config.name == "alt-my-transliteration":
370
  file_path = os.path.join(basepath, "my-en-transliteration", "data.txt")
371
  # Need to set errors='ignore' because of the unknown error
372
  # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
373
  # It might be due to some issues related to Myanmar alphabets
374
+ with open(file_path, encoding="utf-8", errors="ignore") as fin:
375
+ _id = 0
376
+ for line in fin:
377
+ line = line.strip()
378
+
379
+ # I don't know why there are \x00 between |||. They don't show in the editor.
380
+ line = line.replace("\x00", "")
381
+ sp = line.split("|||")
382
+
383
+ # When I read data, it seems to have empty sentence between the actual sentence. Don't know why?
384
+ if len(sp) < 2:
385
+ continue
386
+
387
+ data[_id] = {"en": sp[0].strip(), "my": [sp[1].strip()]}
388
+ _id += 1
389
+
390
  elif self.config.name == "alt-my-west-transliteration":
391
  file_path = os.path.join(basepath, "western-myanmar-transliteration", "321.txt")
392
  # Need to set errors='ignore' because of the unknown error
393
  # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
394
  # It might be due to some issues related to Myanmar alphabets
395
+ with open(file_path, encoding="utf-8", errors="ignore") as fin:
396
+ _id = 0
397
+ for line in fin:
398
+ line = line.strip()
399
+ line = line.replace("\x00", "")
400
+ sp = line.split("|||")
401
+
402
+ data[_id] = {"en": sp[0].strip(), "my": [k.strip() for k in sp[1].split("|")]}
403
+ _id += 1
 
404
 
405
  _id = 1
406
  for k in data: