system HF staff committed on
Commit
c5a2439
1 Parent(s): 76d5b90

Update files from the datasets library (from 1.16.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (1) hide show
  1. scielo.py +17 -17
scielo.py CHANGED
@@ -15,8 +15,6 @@
15
  """Parallel corpus of full-text articles in Portuguese, English and Spanish from SciELO"""
16
 
17
 
18
- import os
19
-
20
  import datasets
21
 
22
 
@@ -74,7 +72,7 @@ class Scielo(datasets.GeneratorBasedBuilder):
74
 
75
  def _split_generators(self, dl_manager):
76
  """Returns SplitGenerators."""
77
- data_dir = dl_manager.download_and_extract(_URLS[self.config.name])
78
  lang_pair = self.config.name.split("-")
79
  fname = self.config.name.replace("-", "_")
80
 
@@ -83,9 +81,10 @@ class Scielo(datasets.GeneratorBasedBuilder):
83
  datasets.SplitGenerator(
84
  name=datasets.Split.TRAIN,
85
  gen_kwargs={
86
- "source_file": os.path.join(data_dir, f"{fname}.en"),
87
- "target_file": os.path.join(data_dir, f"{fname}.pt"),
88
- "target_file_2": os.path.join(data_dir, f"{fname}.es"),
 
89
  },
90
  ),
91
  ]
@@ -94,24 +93,25 @@ class Scielo(datasets.GeneratorBasedBuilder):
94
  datasets.SplitGenerator(
95
  name=datasets.Split.TRAIN,
96
  gen_kwargs={
97
- "source_file": os.path.join(data_dir, f"{fname}.{lang_pair[0]}"),
98
- "target_file": os.path.join(data_dir, f"{fname}.{lang_pair[1]}"),
 
99
  },
100
  ),
101
  ]
102
 
103
- def _generate_examples(self, source_file, target_file, target_file_2=None):
104
- with open(source_file, encoding="utf-8") as f:
105
- source_sentences = f.read().split("\n")
106
- with open(target_file, encoding="utf-8") as f:
107
- target_sentences = f.read().split("\n")
 
 
 
108
 
109
  if self.config.name == "en-pt-es":
110
- with open(target_file_2, encoding="utf-8") as f:
111
- target_2_sentences = f.read().split("\n")
112
-
113
  source, target, target_2 = tuple(self.config.name.split("-"))
114
- for idx, (l1, l2, l3) in enumerate(zip(source_sentences, target_sentences, target_2_sentences)):
115
  result = {"translation": {source: l1, target: l2, target_2: l3}}
116
  yield idx, result
117
  else:
15
  """Parallel corpus of full-text articles in Portuguese, English and Spanish from SciELO"""
16
 
17
 
 
 
18
  import datasets
19
 
20
 
72
 
73
  def _split_generators(self, dl_manager):
74
  """Returns SplitGenerators."""
75
+ archive = dl_manager.download(_URLS[self.config.name])
76
  lang_pair = self.config.name.split("-")
77
  fname = self.config.name.replace("-", "_")
78
 
81
  datasets.SplitGenerator(
82
  name=datasets.Split.TRAIN,
83
  gen_kwargs={
84
+ "source_file": f"{fname}.en",
85
+ "target_file": f"{fname}.pt",
86
+ "target_file_2": f"{fname}.es",
87
+ "files": dl_manager.iter_archive(archive),
88
  },
89
  ),
90
  ]
93
  datasets.SplitGenerator(
94
  name=datasets.Split.TRAIN,
95
  gen_kwargs={
96
+ "source_file": f"{fname}.{lang_pair[0]}",
97
+ "target_file": f"{fname}.{lang_pair[1]}",
98
+ "files": dl_manager.iter_archive(archive),
99
  },
100
  ),
101
  ]
102
 
103
+ def _generate_examples(self, source_file, target_file, files, target_file_2=None):
104
+ for path, f in files:
105
+ if path == source_file:
106
+ source_sentences = f.read().decode("utf-8").split("\n")
107
+ elif path == target_file:
108
+ target_sentences = f.read().decode("utf-8").split("\n")
109
+ elif self.config.name == "en-pt-es" and path == target_file_2:
110
+ target_sentences_2 = f.read().decode("utf-8").split("\n")
111
 
112
  if self.config.name == "en-pt-es":
 
 
 
113
  source, target, target_2 = tuple(self.config.name.split("-"))
114
+ for idx, (l1, l2, l3) in enumerate(zip(source_sentences, target_sentences, target_sentences_2)):
115
  result = {"translation": {source: l1, target: l2, target_2: l3}}
116
  yield idx, result
117
  else: