albertvillanova (HF staff) committed
Commit ff94e4b · 1 parent: a382331

Convert dataset to Parquet (#5)

- Convert dataset to Parquet (afe431e7ee8b199f1f103b2bd5f164e94e9c4ce7)
- Delete loading script (a601a6e9d6d4309d023319e11a9757f0c3ce2204)
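
In practical terms, the conversion means consumers read the splits straight from the Parquet shards added below instead of executing the loading script. A minimal sketch of loading the converted dataset (the repository id is assumed from the deleted script's name; adjust to the actual Hub path):

```python
from datasets import load_dataset

# Assumed repository id, taken from the deleted script's name.
ds = load_dataset("wmt20_mlqe_task3", "plain_text")
print(ds)  # DatasetDict with train/test/validation splits backed by the Parquet shards below
```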

README.md CHANGED
@@ -20,6 +20,7 @@ task_categories:
 task_ids: []
 pretty_name: WMT20 - MultiLingual Quality Estimation (MLQE) Task3
 dataset_info:
+  config_name: plain_text
   features:
   - name: document_id
     dtype: string
@@ -169,19 +170,28 @@ dataset_info:
         sequence: int32
   - name: total_words
     dtype: int32
-  config_name: plain_text
   splits:
   - name: train
-    num_bytes: 10762355
+    num_bytes: 10762231
     num_examples: 1448
   - name: test
-    num_bytes: 745260
+    num_bytes: 743088
     num_examples: 180
   - name: validation
-    num_bytes: 1646596
+    num_bytes: 1646472
     num_examples: 200
-  download_size: 3534634
-  dataset_size: 13154211
+  download_size: 4660293
+  dataset_size: 13151791
+configs:
+- config_name: plain_text
+  data_files:
+  - split: train
+    path: plain_text/train-*
+  - split: test
+    path: plain_text/test-*
+  - split: validation
+    path: plain_text/validation-*
+  default: true
 ---
 
 # Dataset Card for WMT20 - MultiLingual Quality Estimation (MLQE) Task3
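
The `configs`/`data_files` mapping added above is what lets the `datasets` library resolve each split to its Parquet shard without any Python code. A hedged sketch of the equivalent manual load (the paths mirror the glob patterns in the YAML and assume the shards are available locally):

```python
from datasets import load_dataset

# Glob patterns copied from the data_files entries above.
data_files = {
    "train": "plain_text/train-*.parquet",
    "test": "plain_text/test-*.parquet",
    "validation": "plain_text/validation-*.parquet",
}
ds = load_dataset("parquet", data_files=data_files)
print({split: ds[split].num_rows for split in ds})  # expected 1448 / 180 / 200 per the split metadata above
```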
plain_text/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ceb58cb67321fd97e53e4ac2307e39c55af907dbb5ff045d0b3302cf1aef8d1f
+size 361405
plain_text/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:824896b2696f4c77727ab164cb8c2a0819d4fa87f9bfd656514c840856dabc92
+size 3729337
plain_text/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee30fbe88c3450ba904686bc6faadf821c8a7d5e621d027f19659e70a721804e
+size 569551
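
The three files above are Git LFS pointers rather than the Parquet data itself: each records only the SHA-256 and byte size of the real object. A small verification sketch (the local path is hypothetical, e.g. after `git lfs pull` or a Hub download):

```python
import hashlib
from pathlib import Path

# Hypothetical local copy of the test shard.
shard = Path("plain_text/test-00000-of-00001.parquet")

digest = hashlib.sha256(shard.read_bytes()).hexdigest()
print(shard.stat().st_size)  # should equal the pointer's "size" line (361405)
print(digest)                # should equal the pointer's "oid sha256:..." line
```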
wmt20_mlqe_task3.py DELETED
@@ -1,280 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""WMT MLQE Shared task 3."""
-
-
-import csv
-import os
-
-import datasets
-
-
-_CITATION = """
-Not available.
-"""
-
-_DESCRIPTION = """\
-This shared task (part of WMT20) will build on its previous editions
-to further examine automatic methods for estimating the quality
-of neural machine translation output at run-time, without relying
-on reference translations. As in previous years, we cover estimation
-at various levels. Important elements introduced this year include: a new
-task where sentences are annotated with Direct Assessment (DA)
-scores instead of labels based on post-editing; a new multilingual
-sentence-level dataset mainly from Wikipedia articles, where the
-source articles can be retrieved for document-wide context; the
-availability of NMT models to explore system-internal information for the task.
-
-The goal of this task 3 is to predict document-level quality scores as well as fine-grained annotations.
-"""
-
-_HOMEPAGE = "http://www.statmt.org/wmt20/quality-estimation-task.html"
-
-_LICENSE = "Unknown"
-
-_URLs = {
-    "train+dev": "https://github.com/deep-spin/deep-spin.github.io/raw/master/docs/data/wmt2020_qe/qe-task3-enfr-traindev.tar.gz",
-    "test": "https://github.com/deep-spin/deep-spin.github.io/raw/master/docs/data/wmt2020_qe/qe-enfr-blindtest.tar.gz",
-}
-
-
-_ANNOTATION_CATEGORIES = [
-    "Addition",
-    "Agreement",
-    "Ambiguous Translation",
-    "Capitalization",
-    "Character Encoding",
-    "Company Terminology",
-    "Date/Time",
-    "Diacritics",
-    "Duplication",
-    "False Friend",
-    "Grammatical Register",
-    "Hyphenation",
-    "Inconsistency",
-    "Lexical Register",
-    "Lexical Selection",
-    "Named Entity",
-    "Number",
-    "Omitted Auxiliary Verb",
-    "Omitted Conjunction",
-    "Omitted Determiner",
-    "Omitted Preposition",
-    "Omitted Pronoun",
-    "Orthography",
-    "Other POS Omitted",
-    "Over-translation",
-    "Overly Literal",
-    "POS",
-    "Punctuation",
-    "Shouldn't Have Been Translated",
-    "Shouldn't have been translated",
-    "Spelling",
-    "Tense/Mood/Aspect",
-    "Under-translation",
-    "Unidiomatic",
-    "Unintelligible",
-    "Unit Conversion",
-    "Untranslated",
-    "Whitespace",
-    "Word Order",
-    "Wrong Auxiliary Verb",
-    "Wrong Conjunction",
-    "Wrong Determiner",
-    "Wrong Language Variety",
-    "Wrong Preposition",
-    "Wrong Pronoun",
-]
-
-
-class Wmt20MlqeTask3(datasets.GeneratorBasedBuilder):
-    """WMT MLQE Shared task 3."""
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="plain_text",
-            version=datasets.Version("1.1.0"),
-            description="Plain text",
-        )
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "document_id": datasets.Value("string"),
-                "source_segments": datasets.Sequence(datasets.Value("string")),
-                "source_tokenized": datasets.Sequence(datasets.Value("string")),
-                "mt_segments": datasets.Sequence(datasets.Value("string")),
-                "mt_tokenized": datasets.Sequence(datasets.Value("string")),
-                "annotations": datasets.Sequence(
-                    {
-                        "segment_id": datasets.Sequence(datasets.Value("int32")),
-                        "annotation_start": datasets.Sequence(datasets.Value("int32")),
-                        "annotation_length": datasets.Sequence(datasets.Value("int32")),
-                        "severity": datasets.ClassLabel(names=["minor", "major", "critical"]),
-                        "severity_weight": datasets.Value("float32"),
-                        "category": datasets.ClassLabel(names=_ANNOTATION_CATEGORIES),
-                    }
-                ),
-                "token_annotations": datasets.Sequence(
-                    {
-                        "segment_id": datasets.Sequence(datasets.Value("int32")),
-                        "first_token": datasets.Sequence(datasets.Value("int32")),
-                        "last_token": datasets.Sequence(datasets.Value("int32")),
-                        "token_after_gap": datasets.Sequence(datasets.Value("int32")),
-                        "severity": datasets.ClassLabel(names=["minor", "major", "critical"]),
-                        "category": datasets.ClassLabel(names=_ANNOTATION_CATEGORIES),
-                    }
-                ),
-                "token_index": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("int32")))),
-                "total_words": datasets.Value("int32"),
-            }
-        )
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        downloaded_files = dl_manager.download(_URLs)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "main_dir": "task3/train",
-                    "split": "train",
-                    "files": dl_manager.iter_archive(downloaded_files["train+dev"]),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "main_dir": "test-blind",
-                    "split": "test",
-                    "files": dl_manager.iter_archive(downloaded_files["test"]),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "main_dir": "task3/dev",
-                    "split": "dev",
-                    "files": dl_manager.iter_archive(downloaded_files["train+dev"]),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, main_dir, split, files):
-        """Yields examples."""
-
-        prev_folder = None
-        source_segments, source_tokenized, mt_segments, mt_tokenized = [None] * 4
-        token_index, total_words, annotations, token_annotations = [], [], [], []
-        for path, f in files:
-            if path.startswith(main_dir):
-                dir_name = path.split("/")[main_dir.count("/") + 1]
-                folder = main_dir + "/" + dir_name
-
-                if prev_folder is not None and prev_folder != folder:
-                    yield prev_folder, {
-                        "document_id": os.path.basename(prev_folder),
-                        "source_segments": source_segments,
-                        "source_tokenized": source_tokenized,
-                        "mt_segments": mt_segments,
-                        "mt_tokenized": mt_tokenized,
-                        "annotations": annotations,
-                        "token_annotations": token_annotations,
-                        "token_index": token_index,
-                        "total_words": total_words,
-                    }
-                    source_segments, source_tokenized, mt_segments, mt_tokenized = [None] * 4
-                    token_index, total_words, annotations, token_annotations = [], [], [], []
-
-                prev_folder = folder
-
-                source_segments_path = "/".join([folder, "source.segments"])
-                source_tokenized_path = "/".join([folder, "source.tokenized"])
-                mt_segments_path = "/".join([folder, "mt.segments"])
-                mt_tokenized_path = "/".join([folder, "mt.tokenized"])
-                total_words_path = "/".join([folder, "total_words"])
-                token_index_path = "/".join([folder, "token_index"])
-
-                if path == source_segments_path:
-                    source_segments = f.read().decode("utf-8").splitlines()
-                elif path == source_tokenized_path:
-                    source_tokenized = f.read().decode("utf-8").splitlines()
-                elif path == mt_segments_path:
-                    mt_segments = f.read().decode("utf-8").splitlines()
-                elif path == mt_tokenized_path:
-                    mt_tokenized = f.read().decode("utf-8").splitlines()
-                elif path == total_words_path:
-                    total_words = f.read().decode("utf-8").splitlines()[0]
-                elif path == token_index_path:
-                    token_index = [
-                        [idx.split(" ") for idx in line.split("\t")]
-                        for line in f.read().decode("utf-8").splitlines()
-                        if line != ""
-                    ]
-
-                if split in ["train", "dev"]:
-                    annotations_path = "/".join([folder, "annotations.tsv"])
-                    token_annotations_path = "/".join([folder, "token_annotations.tsv"])
-
-                    if path == annotations_path:
-                        lines = (line.decode("utf-8") for line in f)
-                        reader = csv.DictReader(lines, delimiter="\t")
-                        annotations = [
-                            {
-                                "segment_id": row["segment_id"].split(" "),
-                                "annotation_start": row["annotation_start"].split(" "),
-                                "annotation_length": row["annotation_length"].split(" "),
-                                "severity": row["severity"],
-                                "severity_weight": row["severity_weight"],
-                                "category": row["category"],
-                            }
-                            for row in reader
-                        ]
-                    elif path == token_annotations_path:
-                        lines = (line.decode("utf-8") for line in f)
-                        reader = csv.DictReader(lines, delimiter="\t")
-                        token_annotations = [
-                            {
-                                "segment_id": row["segment_id"].split(" "),
-                                "first_token": row["first_token"].replace("-", "-1").split(" "),
-                                "last_token": row["last_token"].replace("-", "-1").split(" "),
-                                "token_after_gap": row["token_after_gap"].replace("-", "-1").split(" "),
-                                "severity": row["severity"],
-                                "category": row["category"],
-                            }
-                            for row in reader
-                        ]
-        if prev_folder is not None:
-            yield prev_folder, {
-                "document_id": os.path.basename(prev_folder),
-                "source_segments": source_segments,
-                "source_tokenized": source_tokenized,
-                "mt_segments": mt_segments,
-                "mt_tokenized": mt_tokenized,
-                "annotations": annotations,
-                "token_annotations": token_annotations,
-                "token_index": token_index,
-                "total_words": total_words,
-            }
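
Deleting the script does not change the schema: the Parquet shards were presumably generated from this same builder, so the features it declared (nested annotation records, `token_index` as a triple-nested int32 sequence) should survive the conversion. A hedged sanity check, assuming the same repository id as in the sketch above:

```python
from datasets import load_dataset

ds = load_dataset("wmt20_mlqe_task3", "plain_text")  # assumed repo id
assert {s: ds[s].num_rows for s in ds} == {"train": 1448, "test": 180, "validation": 200}
print(ds["train"].features["token_index"])  # expected: Sequence(Sequence(Sequence(Value("int32"))))
```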