system (HF staff) committed on
Commit 0ca27d3
1 Parent(s): 747ccce

Update files from the datasets library (from 1.16.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
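As part of this release, the loading script switches from extracting the downloaded archives and walking them with glob to reading archive members directly through dl_manager.iter_archive, which is what makes streaming access possible. A minimal usage sketch (assuming the Hub id wmt20_mlqe_task3 and its default configuration; this snippet is illustrative and not part of the commit):

from datasets import load_dataset

# Regular mode: archives are downloaded and examples are generated locally.
ds = load_dataset("wmt20_mlqe_task3", split="train")

# Streaming mode: enabled by the download + iter_archive pattern in the diff below;
# examples are read straight out of the archives without extracting them to disk.
streamed = load_dataset("wmt20_mlqe_task3", split="train", streaming=True)
print(next(iter(streamed))["document_id"])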

Files changed (2)
  1. README.md +1 -0
  2. wmt20_mlqe_task3.py +90 -57
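In wmt20_mlqe_task3.py, the download_and_extract + glob walk over extracted folders is replaced by dl_manager.download plus dl_manager.iter_archive: archive members arrive as (path, file object) pairs, are grouped by their per-document folder, and one example is yielded per folder. A self-contained sketch of that grouping pattern on toy data (the function name and sample paths are illustrative, not part of the script):

def group_by_folder(members, main_dir="task3/train"):
    """Yield (folder, {file name: payload}) once per document folder under main_dir."""
    prev_folder, doc = None, {}
    for path, payload in members:
        if not path.startswith(main_dir):
            continue
        # e.g. "task3/train/0/source.segments" -> "task3/train/0"
        folder = "/".join(path.split("/")[: main_dir.count("/") + 2])
        if prev_folder is not None and folder != prev_folder:
            yield prev_folder, doc  # folder changed: emit the previous document
            doc = {}
        prev_folder = folder
        doc[path.rsplit("/", 1)[-1]] = payload
    if prev_folder is not None:
        yield prev_folder, doc  # emit the last document after the loop

toy_members = [
    ("task3/train/0/source.segments", "..."),
    ("task3/train/0/mt.segments", "..."),
    ("task3/train/1/source.segments", "..."),
]
for folder, doc in group_by_folder(toy_members):
    print(folder, sorted(doc))
# task3/train/0 ['mt.segments', 'source.segments']
# task3/train/1 ['source.segments']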
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: WMT20 - MultiLingual Quality Estimation (MLQE) Task3
 annotations_creators:
 - expert-generated
 - machine-generated
wmt20_mlqe_task3.py CHANGED
@@ -16,7 +16,6 @@


 import csv
-import glob
 import os

 import datasets
@@ -155,87 +154,121 @@ class Wmt20MlqeTask3(datasets.GeneratorBasedBuilder):

     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_URLs)
+        downloaded_files = dl_manager.download(_URLs)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir["train+dev"], "task3", "train"),
+                    "main_dir": "task3/train",
                     "split": "train",
+                    "files": dl_manager.iter_archive(downloaded_files["train+dev"]),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir["test"], "test-blind"),
+                    "main_dir": "test-blind",
                     "split": "test",
+                    "files": dl_manager.iter_archive(downloaded_files["test"]),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir["train+dev"], "task3", "dev"),
+                    "main_dir": "task3/dev",
                     "split": "dev",
+                    "files": dl_manager.iter_archive(downloaded_files["train+dev"]),
                 },
             ),
         ]

-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, main_dir, split, files):
         """Yields examples."""

-        def open_and_read(fp):
-            with open(fp, encoding="utf-8") as f:
-                return f.read().splitlines()
-
-        for id_, folder in enumerate(sorted(glob.glob(os.path.join(filepath, "*")))):
-            source_segments = open_and_read(os.path.join(folder, "source.segments"))
-            source_tokenized = open_and_read(os.path.join(folder, "source.tokenized"))
-            mt_segments = open_and_read(os.path.join(folder, "mt.segments"))
-            mt_tokenized = open_and_read(os.path.join(folder, "mt.tokenized"))
-
-            if split in ["train", "dev"] and not os.path.exists(os.path.join(folder, "token_index")):
-                token_index = []
-            else:
-                token_index = [
-                    [idx.split(" ") for idx in line.split("\t")]
-                    for line in open_and_read(os.path.join(folder, "token_index"))
-                    if line != ""
-                ]
-            total_words = open_and_read(os.path.join(folder, "total_words"))[0]
-
-            if split in ["train", "dev"]:
-                with open(os.path.join(folder, "annotations.tsv"), encoding="utf-8") as f:
-                    reader = csv.DictReader(f, delimiter="\t")
-                    annotations = [
-                        {
-                            "segment_id": row["segment_id"].split(" "),
-                            "annotation_start": row["annotation_start"].split(" "),
-                            "annotation_length": row["annotation_length"].split(" "),
-                            "severity": row["severity"],
-                            "severity_weight": row["severity_weight"],
-                            "category": row["category"],
-                        }
-                        for row in reader
-                    ]
-                with open(os.path.join(folder, "token_annotations.tsv"), encoding="utf-8") as f:
-                    reader = csv.DictReader(f, delimiter="\t")
-                    token_annotations = [
-                        {
-                            "segment_id": row["segment_id"].split(" "),
-                            "first_token": row["first_token"].replace("-", "-1").split(" "),
-                            "last_token": row["last_token"].replace("-", "-1").split(" "),
-                            "token_after_gap": row["token_after_gap"].replace("-", "-1").split(" "),
-                            "severity": row["severity"],
-                            "category": row["category"],
-                        }
-                        for row in reader
+        prev_folder = None
+        source_segments, source_tokenized, mt_segments, mt_tokenized = [None] * 4
+        token_index, total_words, annotations, token_annotations = [], [], [], []
+        for path, f in files:
+            if path.startswith(main_dir):
+                dir_name = path.split("/")[main_dir.count("/") + 1]
+                folder = main_dir + "/" + dir_name
+
+                if prev_folder is not None and prev_folder != folder:
+                    yield prev_folder, {
+                        "document_id": os.path.basename(prev_folder),
+                        "source_segments": source_segments,
+                        "source_tokenized": source_tokenized,
+                        "mt_segments": mt_segments,
+                        "mt_tokenized": mt_tokenized,
+                        "annotations": annotations,
+                        "token_annotations": token_annotations,
+                        "token_index": token_index,
+                        "total_words": total_words,
+                    }
+                    source_segments, source_tokenized, mt_segments, mt_tokenized = [None] * 4
+                    token_index, total_words, annotations, token_annotations = [], [], [], []
+
+                prev_folder = folder
+
+                source_segments_path = "/".join([folder, "source.segments"])
+                source_tokenized_path = "/".join([folder, "source.tokenized"])
+                mt_segments_path = "/".join([folder, "mt.segments"])
+                mt_tokenized_path = "/".join([folder, "mt.tokenized"])
+                total_words_path = "/".join([folder, "total_words"])
+                token_index_path = "/".join([folder, "token_index"])
+
+                if path == source_segments_path:
+                    source_segments = f.read().decode("utf-8").splitlines()
+                elif path == source_tokenized_path:
+                    source_tokenized = f.read().decode("utf-8").splitlines()
+                elif path == mt_segments_path:
+                    mt_segments = f.read().decode("utf-8").splitlines()
+                elif path == mt_tokenized_path:
+                    mt_tokenized = f.read().decode("utf-8").splitlines()
+                elif path == total_words_path:
+                    total_words = f.read().decode("utf-8").splitlines()[0]
+                elif path == token_index_path:
+                    token_index = [
+                        [idx.split(" ") for idx in line.split("\t")]
+                        for line in f.read().decode("utf-8").splitlines()
+                        if line != ""
                     ]
-            else:
-                annotations = []
-                token_annotations = []

-            yield id_, {
-                "document_id": os.path.basename(folder),
+                if split in ["train", "dev"]:
+                    annotations_path = "/".join([folder, "annotations.tsv"])
+                    token_annotations_path = "/".join([folder, "token_annotations.tsv"])
+
+                    if path == annotations_path:
+                        lines = (line.decode("utf-8") for line in f)
+                        reader = csv.DictReader(lines, delimiter="\t")
+                        annotations = [
+                            {
+                                "segment_id": row["segment_id"].split(" "),
+                                "annotation_start": row["annotation_start"].split(" "),
+                                "annotation_length": row["annotation_length"].split(" "),
+                                "severity": row["severity"],
+                                "severity_weight": row["severity_weight"],
+                                "category": row["category"],
+                            }
+                            for row in reader
+                        ]
+                    elif path == token_annotations_path:
+                        lines = (line.decode("utf-8") for line in f)
+                        reader = csv.DictReader(lines, delimiter="\t")
+                        token_annotations = [
+                            {
+                                "segment_id": row["segment_id"].split(" "),
+                                "first_token": row["first_token"].replace("-", "-1").split(" "),
+                                "last_token": row["last_token"].replace("-", "-1").split(" "),
+                                "token_after_gap": row["token_after_gap"].replace("-", "-1").split(" "),
+                                "severity": row["severity"],
+                                "category": row["category"],
+                            }
+                            for row in reader
+                        ]
+        if prev_folder is not None:
+            yield prev_folder, {
+                "document_id": os.path.basename(prev_folder),
                 "source_segments": source_segments,
                 "source_tokenized": source_tokenized,
                 "mt_segments": mt_segments,