mathiascreutz committed on
Commit: 3e9ef65
Parent(s): e81e681

Data loader handles new training set files

Files changed (1): opusparcus.py (+16 -11)
opusparcus.py CHANGED
@@ -41,15 +41,14 @@ _HOMEPAGE = ""
 
 _LICENSE = ""
 
-# The HuggingFace dataset library don't host the datasets but only point to the original files
+# The HuggingFace dataset library doesn't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 
 _URLs = {
-
-    "train": None, # actual value set in the `_split_generators` method
     "validation": "validation.jsonl",
     "test": "test.jsonl"
 
+    # NB: the "train" split file is defined dynamically inside the `_split_generators` method
 }
 
 _VERSION = datasets.Version("1.0.0", "")
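For readers of this hunk: after `_split_generators` has run, `_URLs` gains a `"train"` entry whose value depends on the requested config. A minimal sketch of the resulting dict, assuming a hypothetical config with lang="en" and quality=90 (the concrete filenames follow the rules in the next hunk):

# Sketch only: the "train" entry is filled in dynamically by _split_generators.
# For quality=90 (between 70 and 95), the smaller per-language file is chosen:
_URLs = {
    "validation": "validation.jsonl",
    "test": "test.jsonl",
    "train": "train_en.70.jsonl.bz2",  # "train_en.60.jsonl.bz2" for quality < 70
}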
@@ -136,12 +135,18 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
 
-        if self.config.quality > 95:
-            # No training data matches this quality criterion
-            del _URLs["train"]
-        else:
-            _URLs["train"] = "train_{0}.jsonl.bz2".format(self.config.lang)
-
+        if self.config.quality < 70:
+            # We need to retrieve the largest training set file
+            # containing the full training set for the desired language
+            _URLs["train"] = "train_{0}.60.jsonl.bz2".format(self.config.lang)
+        elif self.config.quality <= 95:
+            # We can do with a smaller version of the training set
+            # for the desired language
+            _URLs["train"] = "train_{0}.70.jsonl.bz2".format(self.config.lang)
+
+        # Otherwise, if the desired quality is above 95, we do not
+        # download any training data, because there is no matching data
+
         data_dir = dl_manager.download_and_extract(_URLs)
 
         splits = [
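The new selection logic maps the requested quality threshold to one of two training files per language, or to no file at all. A standalone sketch of that mapping, using a hypothetical helper name `train_url_for`:

def train_url_for(lang, quality):
    # Quality below 70 needs the largest file, holding the full training set.
    if quality < 70:
        return "train_{0}.60.jsonl.bz2".format(lang)
    # Quality between 70 and 95 is served by a smaller per-language file.
    elif quality <= 95:
        return "train_{0}.70.jsonl.bz2".format(lang)
    # Above 95 there is no matching training data, so nothing is downloaded.
    return None

assert train_url_for("en", 60) == "train_en.60.jsonl.bz2"
assert train_url_for("fi", 90) == "train_fi.70.jsonl.bz2"
assert train_url_for("de", 100) is None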
@@ -176,7 +181,7 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "lang": self.config.lang,
                     "quality": self.config.quality,
-                    "filepath": [data_dir["train"], data_dir["train"], data_dir["train"]],
+                    "filepath": data_dir["train"],
                     "split": "train",
                 },
             )
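For orientation: the gen_kwargs above are one argument of a `datasets.SplitGenerator` entry in the `splits` list. The enclosing call is not part of this diff, but in a `GeneratorBasedBuilder` it plausibly looks like this sketch:

datasets.SplitGenerator(
    name=datasets.Split.TRAIN,
    # Forwarded verbatim to _generate_examples:
    gen_kwargs={
        "lang": self.config.lang,
        "quality": self.config.quality,
        "filepath": data_dir["train"],  # now a single path, not a list of three copies
        "split": "train",
    },
),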
@@ -192,7 +197,7 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
         if split == datasets.Split.TRAIN:
-            with bz2.open(filepath[0], "rt", encoding="utf-8") as f:
+            with bz2.open(filepath, "rt", encoding="utf-8") as f:
                 # We know that this file only contains the desired language,
                 # because for the training sets the languages are in separate
                 # files, and only the desired language has been downloaded
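Since the training files are bz2-compressed JSON Lines, the train branch of `_generate_examples` amounts to streaming the archive line by line. A minimal self-contained sketch, assuming each line holds one JSON object (the function name is hypothetical and the record fields are not shown in this diff):

import bz2
import json

def read_train_examples(filepath):
    # Stream the bz2-compressed JSON Lines file without decompressing to disk.
    with bz2.open(filepath, "rt", encoding="utf-8") as f:
        for key, line in enumerate(f):
            record = json.loads(line)
            # Yield (key, example) tuples, as _generate_examples does.
            yield key, record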
 