mathiascreutz committed
Commit ee6c327
1 Parent(s): daced4d

More comments

Files changed (1):
  opusparcus.py  +31 -14
opusparcus.py CHANGED
@@ -41,15 +41,16 @@ _HOMEPAGE = ""
 
 _LICENSE = ""
 
-# The HuggingFace dataset library doesn't host the datasets but only point to the original files
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-
+# The HuggingFace dataset library doesn't host the datasets but only
+# points to the original files. This can be an arbitrary nested
+# dict/list of URLs (see below in `_split_generators` method):
 _URLs = {
     "validation": "validation.jsonl",
     "test": "test.jsonl",
     "validation.full": "validation.jsonl",
     "test.full": "test.jsonl",
-    # NB: the "train" split file is defined dynamically inside the `_split_generators` method
+    # NB: the "train" split file is defined dynamically inside the
+    # `_split_generators` method
 }
 
 _VERSION = datasets.Version("1.0.0", "")
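
The train split is absent from `_URLs` above because its file depends on the chosen language and quality. As a rough sketch of the idea (the helper name and filename pattern below are hypothetical, not taken from this commit):

# Hypothetical sketch only: derive a train file name from the selected
# language and quality; the real logic lives in `_split_generators`.
def train_filename(lang, quality):
    # e.g. train_filename("en", 95) -> "train_95/en.jsonl.bz2"
    return "train_{:d}/{:s}.jsonl.bz2".format(quality, lang)
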
@@ -73,8 +74,22 @@ class OpusparcusConfig(datasets.BuilderConfig):
         self.lang = lang
         self.quality = quality
 
+
+# Languages in Opusparcus: German (de), English (en), Finnish (fi),
+# French (fr), Russian (ru), Swedish (sv):
 LANGS = [ "de", "en", "fi", "fr", "ru", "sv" ]
 
+# The training sets (train splits) come in eight sizes (95 .. 60),
+# where the number indicates the estimated proportion [%] of true
+# paraphrases in the set. The higher the number the smaller (but
+# ideally cleaner) the set. The lower the number, the larger (but
+# noisier) the set is. The smaller sets are included as subsets of
+# larger sets. The special value 100 matches no training data at all,
+# so if you are only interested in validation and test sets, you can
+# use the value 100 in order to save time and space. (The quality
+# value is irrelevant for the validation and test sets, which have
+# been annotated manually, and each example has an annotation score
+# attached to it.)
 QUALITIES = [ 100, 95, 90, 85, 80, 75, 70, 65, 60 ]
 
 class Opusparcus(datasets.GeneratorBasedBuilder):
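
As a usage sketch of the two config parameters documented above (the dataset path "GEM/opusparcus" is an assumption about where this script is hosted, not something stated in the diff):

from datasets import load_dataset

# lang and quality are forwarded to OpusparcusConfig; quality=100
# skips the train split and builds only validation and test data.
data = load_dataset("GEM/opusparcus", lang="en", quality=95)
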
@@ -95,7 +110,7 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
     ]
 
     # There is no default configuration. User always needs to specify one:
-    #DEFAULT_CONFIG_NAME = None
+    # DEFAULT_CONFIG_NAME = None
 
     def _info(self):
         # This method specifies the datasets.DatasetInfo object which
@@ -139,7 +154,8 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
         # self.config.lang and self.config.quality.
 
         if self.config.lang is None:
-            # This is an error, nothing to do here
+            # This is an error: nothing to do here if no language
+            # has been defined:
             return []
 
         # Select which file of the training data contains the matching data:
@@ -212,7 +228,7 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
 
         # If the desired quality value is 100, no subset of the
         # training set is good enough, and we only produce validation
-        # and test sets, in order to save space and time.
+        # and test sets, in order to save space and time:
 
         if self.config.quality <= 95:
             # In this case there is matching training data, so we produce
@@ -243,7 +259,7 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
         # The `key` is here for legacy reason (tfds) and is not important in itself.
 
         if split == datasets.Split.TRAIN:
-            # Training sets are in compressed bz2 files.
+            # Training sets are in jsonl files that have been compressed using bzip2.
             # They contain a field "quality" missing from the validation and test sets.
             # We also know that this file only contains the desired language,
             # because for the training sets the languages are in separate
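
Reading bzip2-compressed jsonl of this kind is straightforward with Python's bz2 module; a minimal sketch (the file name here is illustrative, not taken from the diff):

import bz2
import json

with bz2.open("en-train.jsonl.bz2", "rt", encoding="utf-8") as f:
    for line in f:
        data = json.loads(line)
        # training entries carry a "quality" field that the
        # validation and test sets lack
        assert "quality" in data
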
@@ -264,10 +280,11 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
             }
         else:
             # The validation and test sets are in jsonl files.
-            # They contain the fields "lang" and "annot_score" that we filter on.
-            # If we ask for the full sets, we will keep all data entries, also
-            # the sentence pairs that were not considered paraphrases by the
-            # annotators:
+            # They contain the fields "lang" and "annot_score" that we
+            # filter on. If we ask for the full sets, we will keep
+            # all data entries for the desired language, also the
+            # sentence pairs that were not considered paraphrases by
+            # the annotators:
             keep_all = (split == "validation.full" or split == "test.full")
             with open(filepath, encoding="utf-8") as f:
                 for id_, row in enumerate(f):
@@ -276,8 +293,8 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
                 if keep_all or data["annot_score"] >= 3.0:
                     # for full sets keep all;
                     # for standard test and validation sets, keep only
-                    # the paraphrases (annot_score >= 3.0 means "good
-                    # or mostly good example of paraphrases")
+                    # the actual paraphrases (annot_score >= 3.0 means
+                    # "good or mostly good example of paraphrases")
                     yield id_, {
                         "lang": data["lang"],
                         "sent1": data["sent1"],
 