system HF staff committed on
Commit 7ec181d
1 Parent(s): e609eaa

Update files from the datasets library (from 1.16.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2)
  1. README.md +1 -0
  2. ted_hrlr.py +20 -15
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: TEDHrlr
 paperswithcode_id: null
 ---
 
ted_hrlr.py CHANGED
@@ -17,8 +17,6 @@
 """TED talk high/low-resource paired language data set from Qi, et al. 2018."""
 
 
-import os
-
 import datasets
 
 
@@ -116,41 +114,48 @@ class TedHrlr(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        dl_dir = dl_manager.download_and_extract(_DATA_URL)
+        archive = dl_manager.download(_DATA_URL)
         source, target = self.config.language_pair
 
-        data_dir = os.path.join(dl_dir, "datasets", "%s_to_%s" % (source, target))
+        data_dir = "datasets/%s_to_%s" % (source, target)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "source_file": os.path.join(data_dir, "{}.train".format(source.replace("_", "-"))),
-                    "target_file": os.path.join(data_dir, "{}.train".format(target)),
+                    "source_file": data_dir + "/" + f"{source.replace('_', '-')}.train",
+                    "target_file": data_dir + "/" + f"{target}.train",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "source_file": os.path.join(data_dir, "{}.dev".format(source.split("_")[0])),
-                    "target_file": os.path.join(data_dir, "{}.dev".format(target)),
+                    "source_file": data_dir + "/" + f"{source.split('_')[0]}.dev",
+                    "target_file": data_dir + "/" + f"{target}.dev",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "source_file": os.path.join(data_dir, "{}.test".format(source.split("_")[0])),
-                    "target_file": os.path.join(data_dir, "{}.test".format(target)),
+                    "source_file": data_dir + "/" + f"{source.split('_')[0]}.test",
+                    "target_file": data_dir + "/" + f"{target}.test",
+                    "files": dl_manager.iter_archive(archive),
                 },
            ),
         ]
 
-    def _generate_examples(self, source_file, target_file):
+    def _generate_examples(self, source_file, target_file, files):
         """This function returns the examples in the raw (text) form."""
-        with open(source_file, encoding="utf-8") as f:
-            source_sentences = f.read().split("\n")
-        with open(target_file, encoding="utf-8") as f:
-            target_sentences = f.read().split("\n")
+        source_sentences, target_sentences = None, None
+        for path, f in files:
+            if path == source_file:
+                source_sentences = f.read().decode("utf-8").split("\n")
+            elif path == target_file:
+                target_sentences = f.read().decode("utf-8").split("\n")
+            if source_sentences is not None and target_sentences is not None:
+                break
 
         assert len(target_sentences) == len(source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
             len(source_sentences),
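
The change above is what enables streaming for this dataset: instead of extracting the archive to disk (download_and_extract plus os.path.join), the script now downloads the tar file once and reads (path, file) pairs out of it lazily via dl_manager.iter_archive(). A minimal usage sketch, not part of the commit, assuming the az_to_en language-pair config this script defines:

    from datasets import load_dataset

    # Illustrative only: stream examples straight from the downloaded archive,
    # with no full extraction step. "az_to_en" is one of this script's configs.
    ds = load_dataset("ted_hrlr", "az_to_en", streaming=True)

    # Iterating yields translation pairs read directly out of the tar file.
    print(next(iter(ds["train"])))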