albertvillanova committed
Commit
21ffe29
1 Parent(s): f949956

Refactor loading script (#6)


- Refactor loading script (8741701b871316fd66a147cea3af4e381cf0f361)

Files changed (1)
  1. samanantar.py +9 -17
samanantar.py CHANGED
@@ -14,9 +14,7 @@
 # limitations under the License.
 """Samanantar dataset."""
 
-import re
-
-import pandas as pd
+from pathlib import Path
 
 import datasets
 
@@ -78,25 +76,19 @@ class Samanantar(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         urls = _URLS[str(self.config.version)]
-        archive = dl_manager.download_and_extract(urls)
+        data_dir = dl_manager.download_and_extract(urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "paths": dl_manager.iter_files([archive]),
+                    "data_dir": (Path(data_dir) / "v2" / f"en-{self.config.language}"),
                 },
             ),
         ]
 
-    def _generate_examples(self, paths):
-        id_ = 0
-        for path in paths:
-            if f"/en-{self.config.language}/train.{self.config.language}" in path:
-                with open(path, encoding="utf-8") as config_language, \
-                    open(path.replace(f"train.{self.config.language}", "train.en"), encoding="utf-8") as en:
-
-                    for line_src, line_tgt in zip(en, config_language):
-                        yield id_, {"idx": id_ ,"src": line_src.strip(), "tgt": line_tgt.strip()}
-                        id_ += 1
-
-            break # to prevent it from getting repeated examples (it seems the dataset gets repeated in en-te folder)
+    def _generate_examples(self, data_dir):
+        src_path = data_dir / "train.en"
+        tgt_path = data_dir / f"train.{self.config.language}"
+        with src_path.open(encoding="utf-8") as src_file, tgt_path.open(encoding="utf-8") as tgt_file:
+            for idx, (src_line, tgt_line) in enumerate(zip(src_file, tgt_file)):
+                yield idx, {"idx": idx, "src": src_line.strip(), "tgt": tgt_line.strip()}
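
A minimal usage sketch of the refactored loader. The Hub repo id ("ai4bharat/samanantar") and the config name ("bn") below are illustrative assumptions, not part of this commit; the refactored _split_generators expects the extracted archive to contain v2/en-<language>/train.en alongside train.<language>, as constructed above.

    from datasets import load_dataset

    # Hypothetical repo id and language config; substitute the actual ones.
    ds = load_dataset("ai4bharat/samanantar", "bn", split="train")

    # Each example yielded by the refactored _generate_examples has three fields:
    # "idx" (running index), "src" (English line), and "tgt" (target-language line).
    print(ds[0])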