Datasets:
GEM
/

Tasks:
Other
Multilinguality:
unknown
Size Categories:
unknown
Language Creators:
unknown
Annotations Creators:
expert-created
Source Datasets:
original
License:
mathiascreutz committed on
Commit
4840694
1 Parent(s): 6bb0ab7

Testing configs

Browse files
Files changed (1) hide show
  1. opusparcus.py +25 -25
opusparcus.py CHANGED
@@ -45,7 +45,7 @@ _LICENSE = ""
45
 
46
  _URLs = {
47
 
48
- #"train": "train",
49
  "validation": "validation.jsonl",
50
  "test": "test.jsonl"
51
 
@@ -136,20 +136,22 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
136
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
137
  data_dir = dl_manager.download_and_extract(_URLs)
138
  return [
139
- # datasets.SplitGenerator(
140
- # name=datasets.Split.TRAIN,
141
- # # These kwargs will be passed to _generate_examples
142
- # gen_kwargs={
143
- # "filepath": data_dir["train"],
144
- # "split": "train",
145
- # },
146
- # ),
147
  datasets.SplitGenerator(
148
- name=datasets.Split.TEST,
149
  # These kwargs will be passed to _generate_examples
150
  gen_kwargs={
151
  "lang": self.config.lang,
152
  "quality": self.config.quality,
 
 
 
 
 
 
 
 
 
 
153
  "filepath": data_dir["test"],
154
  "split": "test"
155
  },
@@ -159,7 +161,7 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
159
  # These kwargs will be passed to _generate_examples
160
  gen_kwargs={
161
  "lang": self.config.lang,
162
- "quality": self.config.quality,
163
  "filepath": data_dir["validation"],
164
  "split": "validation",
165
  },
@@ -170,21 +172,19 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
170
  self, lang, quality, filepath, split # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
171
  ):
172
 
173
- # lang, qualitystr = configname.split("\.")
174
- # quality = int(qualitystr)
175
-
176
  """ Yields examples as (key, example) tuples. """
177
  # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
178
  # The `key` is here for legacy reason (tfds) and is not important in itself.
179
- with open(filepath, encoding="utf-8") as f:
180
- for id_, row in enumerate(f):
181
- data = json.loads(row)
182
- if data["lang"] == lang:
183
- yield id_, {
184
- "lang": data["lang"] + "-quality-" + str(quality),
185
- "sent1": data["sent1"],
186
- "sent2": data["sent2"],
187
- "annot_score": data["annot_score"],
188
- "gem_id": data["gem_id"],
189
- }
 
190
 
 
45
 
46
  _URLs = {
47
 
48
+ "train": "train_",
49
  "validation": "validation.jsonl",
50
  "test": "test.jsonl"
51
 
 
136
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
137
  data_dir = dl_manager.download_and_extract(_URLs)
138
  return [
 
 
 
 
 
 
 
 
139
  datasets.SplitGenerator(
140
+ name=datasets.Split.TRAIN,
141
  # These kwargs will be passed to _generate_examples
142
  gen_kwargs={
143
  "lang": self.config.lang,
144
  "quality": self.config.quality,
145
+ "filepath": data_dir["train"],
146
+ "split": "train",
147
+ },
148
+ ),
149
+ datasets.SplitGenerator(
150
+ name=datasets.Split.TEST,
151
+ # These kwargs will be passed to _generate_examples
152
+ gen_kwargs={
153
+ "lang": self.config.lang,
154
+ "quality": 100,
155
  "filepath": data_dir["test"],
156
  "split": "test"
157
  },
 
161
  # These kwargs will be passed to _generate_examples
162
  gen_kwargs={
163
  "lang": self.config.lang,
164
+ "quality": 100,
165
  "filepath": data_dir["validation"],
166
  "split": "validation",
167
  },
 
172
  self, lang, quality, filepath, split # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
173
  ):
174
 
 
 
 
175
  """ Yields examples as (key, example) tuples. """
176
  # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
177
  # The `key` is here for legacy reason (tfds) and is not important in itself.
178
+ if split == datasets.Split.TEST:
179
+ with open(filepath, encoding="utf-8") as f:
180
+ for id_, row in enumerate(f):
181
+ data = json.loads(row)
182
+ if data["lang"] == lang:
183
+ yield id_, {
184
+ "lang": data["lang"] + "-quality-" + str(quality),
185
+ "sent1": data["sent1"],
186
+ "sent2": data["sent2"],
187
+ "annot_score": data["annot_score"],
188
+ "gem_id": data["gem_id"],
189
+ }
190