mathiascreutz committed
Commit 48f737e • Parent(s): 201a4b4
Added comments

Files changed: opusparcus.py (+49 −19)
opusparcus.py CHANGED
@@ -106,7 +106,6 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
                 "sent2": datasets.Value("string"),
                 "annot_score": datasets.Value("float"),
                 "gem_id": datasets.Value("string"),
-                #"quality": datasets.Value("uint8")
             }
         )
 
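For reference, a record conforming to this feature schema would look like the sketch below; all field values are invented for illustration, and only the field names and types come from the diff above:

    example = {
        "lang": "en",                     # datasets.Value("string")
        "sent1": "how are you doing ?",   # datasets.Value("string")
        "sent2": "how are you ?",         # datasets.Value("string")
        "annot_score": 4.0,               # datasets.Value("float")
        "gem_id": "gem-opusparcus-0001",  # datasets.Value("string"); id format invented
    }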
@@ -130,24 +129,37 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        #
-        #
-        #
-        #
-        #
+        # This method is tasked with downloading/extracting the data
+        # and defining the splits depending on the configuration.
+        # Several configurations are possible (listed in
+        # BUILDER_CONFIGS), and the configuration selected by the user
+        # is in self.config.name, which consists of two fields
+        # separated by a period, containing the values of
+        # self.config.lang and self.config.quality.
 
+        # Select which file of the training data contains the matching data:
         if self.config.quality < 70:
             # We need to retrieve the largest training set file
             # containing the full training set for the desired language
             _URLs["train"] = "train_{0}.60.jsonl.bz2".format(self.config.lang)
+
         elif self.config.quality <= 95:
             # We can do with a smaller version of the training set
             # for the desired language
             _URLs["train"] = "train_{0}.70.jsonl.bz2".format(self.config.lang)
 
         # Otherwise, if the desired quality is above 95, we do not
-        # download any training data, because there is no matching data
+        # download any training data, because there is no matching data.
+        # The validation and test sets are so small that we do not perform
+        # any filtering or optimization at this stage.
 
+        # dl_manager is a datasets.download.DownloadManager, which
+        # downloads and extracts the URLs
+        # (It can accept any type or nested list/dict and will give
+        # back the same structure with the url replaced with path to
+        # local files. By default the archives will be extracted and
+        # a path to a cached folder where they are extracted is
+        # returned instead of the archive.)
         data_dir = dl_manager.download_and_extract(_URLs)
 
         splits = [
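The comments added above describe the config naming scheme ("<lang>.<quality>") and how the quality value picks the training file. A minimal usage sketch, assuming the dataset is published under the GEM/opusparcus id:

    from datasets import load_dataset

    # quality 95 falls in the 70..95 branch, so per the logic above this
    # downloads the smaller training file, train_de.70.jsonl.bz2
    ds = load_dataset("GEM/opusparcus", "de.95")

    # quality 60 falls in the quality < 70 branch and would fetch the
    # largest file, train_de.60.jsonl.bz2
    ds_low = load_dataset("GEM/opusparcus", "de.60")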
@@ -193,8 +205,13 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
             ),
         ]
 
+        # If the desired quality value is 100, no subset of the
+        # training set is good enough, and we only produce validation
+        # and test sets, in order to save space and time.
+
         if self.config.quality <= 95:
-            #
+            # In this case there is matching training data, so we produce
+            # a train split.
             splits.append(
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
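As the added comments note, a quality value of 100 yields no train split at all. A sketch of checking which splits a given config produces, under the same dataset-id assumption as above:

    from datasets import load_dataset

    strict = load_dataset("GEM/opusparcus", "en.100")
    print(sorted(strict.keys()))   # expected: validation/test splits, no "train"

    relaxed = load_dataset("GEM/opusparcus", "en.95")
    print(sorted(relaxed.keys()))  # expected to also contain "train"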
@@ -211,43 +228,56 @@ class Opusparcus(datasets.GeneratorBasedBuilder):
         return splits
 
     def _generate_examples(
-        self, lang, quality, filepath, split
+        self, lang, quality, filepath, split
+        # method parameters are unpacked from `gen_kwargs` as given in
+        # `_split_generators`
     ):
-
         """ Yields examples as (key, example) tuples. """
-        # This method handles input defined in _split_generators to
+        # This method handles input defined in _split_generators to
+        # yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
+
         if split == datasets.Split.TRAIN:
+            # Training sets are in compressed bz2 files.
+            # They contain a field "quality" missing from the validation and test sets.
+            # We also know that this file only contains the desired language,
+            # because for the training sets the languages are in separate
+            # files, and only the desired language has been downloaded.
             with bz2.open(filepath, "rt", encoding="utf-8") as f:
-                # We know that this file only contains the desired language,
-                # because for the training sets the languages are in separate
-                # files, and only the desired language has been downloaded
                 for id_, row in enumerate(f):
                     data = json.loads(row)
                     if data["quality"] < quality:
-                        # The rest of this file contains too low quality data
+                        # The rest of this file contains too low quality data,
+                        # because the data is sorted best first
                         break
                     yield id_, {
                         "lang": data["lang"],
                         "sent1": data["sent1"],
                         "sent2": data["sent2"],
-                        "annot_score": 0.0,
+                        "annot_score": 0.0,  # means there is no annotation
                         "gem_id": data["gem_id"],
-                        #"quality": data["quality"],
                     }
         else:
+            # The validation and test sets are in jsonl files.
+            # They contain the fields "lang" and "annot_score" that we filter on.
+            # If we ask for the full sets, we will keep all data entries, also
+            # the sentence pairs that were not considered paraphrases by the
+            # annotators:
             keep_all = (split == "validation.full" or split == "test.full")
             with open(filepath, encoding="utf-8") as f:
                 for id_, row in enumerate(f):
                     data = json.loads(row)
-                    if data["lang"] == lang:
+                    if data["lang"] == lang:  # only keep desired language
                         if keep_all or data["annot_score"] >= 3.0:
+                            # for full sets keep all;
+                            # for standard test and validation sets, keep only
+                            # the paraphrases (annot_score >= 3.0 means "good
+                            # or mostly good example of paraphrases")
                             yield id_, {
                                 "lang": data["lang"],
                                 "sent1": data["sent1"],
                                 "sent2": data["sent2"],
                                 "annot_score": data["annot_score"],
                                 "gem_id": data["gem_id"],
-                                #"quality": 100,
                             }
 
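The early `break` in the training loop is only valid because, as the new comment states, the rows are sorted best quality first. A self-contained illustration with made-up rows:

    # Made-up rows, sorted by descending quality as in the training files
    rows = [{"quality": 90}, {"quality": 80}, {"quality": 65}]

    def take_matching(rows, quality):
        """Keep the leading run of rows meeting the quality threshold."""
        kept = []
        for row in rows:
            if row["quality"] < quality:
                break  # everything after this row is lower quality
            kept.append(row)
        return kept

    assert take_matching(rows, 70) == [{"quality": 90}, {"quality": 80}]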