mathiascreutz committed
Commit
e389063
1 Parent(s): 57d631f

Data loader produces full test and validation split as well

Files changed (1):
  opusparcus.py: +3 -5
opusparcus.py CHANGED
@@ -47,8 +47,6 @@ _LICENSE = ""
 _URLs = {
     "validation": "validation.jsonl",
     "test": "test.jsonl",
-    "full-validation": "validation.jsonl",
-    "full-test": "test.jsonl"
     # NB: the "train" split file is defined dynamically inside the `_split_generators` method
 }
 
@@ -172,7 +170,7 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
                 },
             ),
             datasets.SplitGenerator(
-                name="full-test",
+                name="test.full",
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "lang": self.config.lang,
@@ -182,7 +180,7 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
                 },
             ),
             datasets.SplitGenerator(
-                name="full-validation",
+                name="validation.full",
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "lang": self.config.lang,
@@ -236,7 +234,7 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
                         "quality": data["quality"],
                     }
         else:
-            keep_all = (split == "full-validation" or split == "full-test")
+            keep_all = (split == "validation.full" or split == "test.full")
             with open(filepath, encoding="utf-8") as f:
                 for id_, row in enumerate(f):
                     data = json.loads(row)
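For context, a minimal usage sketch (not part of this commit) of how the renamed splits could be requested through the datasets library. The repository id "GEM/opusparcus" and the language code "en" are assumptions for illustration; only the `lang` config attribute and the split names "validation.full" and "test.full" are confirmed by the diff itself.

# Hedged sketch, not part of the commit: assumes this loader is published as
# "GEM/opusparcus" and that its BuilderConfig exposes a `lang` parameter
# (the diff passes self.config.lang into gen_kwargs).
from datasets import load_dataset

# Full validation/test splits renamed in this commit ("validation.full", "test.full").
# _generate_examples sets keep_all for these splits, presumably yielding every
# annotated pair rather than only the filtered ones.
full_val = load_dataset("GEM/opusparcus", lang="en", split="validation.full")
full_test = load_dataset("GEM/opusparcus", lang="en", split="test.full")
print(len(full_val), len(full_test))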