system HF staff committed on
Commit
fd84f42
1 Parent(s): 6aac196

Update files from the datasets library (from 1.16.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2) hide show
  1. README.md +1 -0
  2. setimes.py +23 -18
README.md CHANGED
@@ -1,4 +1,5 @@
1
  ---
 
2
  annotations_creators:
3
  - found
4
  language_creators:
1
  ---
2
+ pretty_name: SETimes – A Parallel Corpus of English and South-East European Languages
3
  annotations_creators:
4
  - found
5
  language_creators:
setimes.py CHANGED
@@ -14,7 +14,6 @@
14
  # limitations under the License.
15
 
16
  # Lint as: python3
17
- import os
18
 
19
  import datasets
20
 
@@ -125,29 +124,35 @@ class Setimes(datasets.GeneratorBasedBuilder):
125
  return _BASE_URL.format(lang1, lang2)
126
 
127
  download_url = _base_url(self.config.lang1, self.config.lang2)
128
- path = dl_manager.download_and_extract(download_url)
129
  return [
130
  datasets.SplitGenerator(
131
  name=datasets.Split.TRAIN,
132
- gen_kwargs={"datapath": path},
 
 
 
133
  )
134
  ]
135
 
136
- def _generate_examples(self, datapath):
137
  l1, l2 = self.config.lang1, self.config.lang2
138
  l1_file = _BASE_NAME.format(l1, l2, l1)
139
  l2_file = _BASE_NAME.format(l1, l2, l2)
140
- l1_path = os.path.join(datapath, l1_file)
141
- l2_path = os.path.join(datapath, l2_file)
142
- with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
143
- for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
144
- x = x.strip()
145
- y = y.strip()
146
- result = (
147
- sentence_counter,
148
- {
149
- "id": str(sentence_counter),
150
- "translation": {l1: x, l2: y},
151
- },
152
- )
153
- yield result
 
 
 
14
  # limitations under the License.
15
 
16
  # Lint as: python3
 
17
 
18
  import datasets
19
 
124
  return _BASE_URL.format(lang1, lang2)
125
 
126
  download_url = _base_url(self.config.lang1, self.config.lang2)
127
+ archive = dl_manager.download(download_url)
128
  return [
129
  datasets.SplitGenerator(
130
  name=datasets.Split.TRAIN,
131
+ gen_kwargs={
132
+ "l1_files": dl_manager.iter_archive(archive),
133
+ "l2_files": dl_manager.iter_archive(archive),
134
+ },
135
  )
136
  ]
137
 
138
+ def _generate_examples(self, l1_files, l2_files):
139
  l1, l2 = self.config.lang1, self.config.lang2
140
  l1_file = _BASE_NAME.format(l1, l2, l1)
141
  l2_file = _BASE_NAME.format(l1, l2, l2)
142
+ for path1, f1 in l1_files:
143
+ if path1 == l1_file:
144
+ for path2, f2 in l2_files:
145
+ if path2 == l2_file:
146
+ for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
147
+ x = x.decode("utf-8").strip()
148
+ y = y.decode("utf-8").strip()
149
+ result = (
150
+ sentence_counter,
151
+ {
152
+ "id": str(sentence_counter),
153
+ "translation": {l1: x, l2: y},
154
+ },
155
+ )
156
+ yield result
157
+ break
158
+ break