Datasets:

Languages:
Vietnamese
ArXiv:
License:
holylovenia committed on
Commit
c625ad3
1 Parent(s): 49fa529

Upload mlqa.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. mlqa.py +247 -0
mlqa.py ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from pathlib import Path
4
+ from typing import Dict, List, Tuple
5
+
6
+ import datasets
7
+
8
+ from seacrowd.utils import schemas
9
+ from seacrowd.utils.configs import SEACrowdConfig
10
+ from seacrowd.utils.constants import Licenses, Tasks
11
+
12
+ _CITATION = r"""\
13
+ @article{lewis2019mlqa,
14
+ author={Lewis, Patrick and O\{g}uz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},
15
+ title={MLQA: Evaluating Cross-lingual Extractive Question Answering},
16
+ journal={arXiv preprint arXiv:1910.07475},
17
+ year={2019}
18
+ }
19
+ """
20
+
21
+ _DATASETNAME = "mlqa"
22
+
23
+ _DESCRIPTION = """\
24
+ MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.
25
+ MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,
26
+ German, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between
27
+ 4 different languages on average.
28
+ """
29
+
30
+ _HOMEPAGE = "https://github.com/facebookresearch/MLQA"
31
+ _LICENSE = Licenses.CC_BY_SA_3_0.value
32
+ _LANGUAGES = ["vie"]
33
+ _URL = "https://dl.fbaipublicfiles.com/MLQA/"
34
+ _DEV_TEST_URL = "MLQA_V1.zip"
35
+ _TRANSLATE_TEST_URL = "mlqa-translate-test.tar.gz"
36
+ _TRANSLATE_TRAIN_URL = "mlqa-translate-train.tar.gz"
37
+ _SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]
38
+
39
+ _SOURCE_VERSION = "1.0.0"
40
+ _SEACROWD_VERSION = "2024.06.20"
41
+
42
+ _LOCAL = False
43
+
44
+
45
+ class MLQADataset(datasets.GeneratorBasedBuilder):
46
+ """
47
+ MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.
48
+ MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,
49
+ German, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between
50
+ 4 different languages on average.
51
+ """
52
+
53
+ SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
54
+ SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
55
+
56
+ subsets = [
57
+ "mlqa-translate-test.vi",
58
+ "mlqa-translate-train.vi",
59
+ "mlqa.vi.ar",
60
+ "mlqa.vi.de",
61
+ "mlqa.vi.zh",
62
+ "mlqa.vi.en",
63
+ "mlqa.vi.es",
64
+ "mlqa.vi.hi",
65
+ "mlqa.vi.vi",
66
+ "mlqa.ar.vi",
67
+ "mlqa.de.vi",
68
+ "mlqa.zh.vi",
69
+ "mlqa.en.vi",
70
+ "mlqa.es.vi",
71
+ "mlqa.hi.vi",
72
+ ]
73
+
74
+ BUILDER_CONFIGS = [
75
+ SEACrowdConfig(
76
+ name="{sub}_source".format(sub=subset),
77
+ version=datasets.Version(_SOURCE_VERSION),
78
+ description="{sub} source schema".format(sub=subset),
79
+ schema="source",
80
+ subset_id="{sub}".format(sub=subset),
81
+ )
82
+ for subset in subsets
83
+ ] + [
84
+ SEACrowdConfig(
85
+ name="{sub}_seacrowd_qa".format(sub=subset),
86
+ version=datasets.Version(_SEACROWD_VERSION),
87
+ description="{sub} SEACrowd schema".format(sub=subset),
88
+ schema="seacrowd_qa",
89
+ subset_id="{sub}".format(sub=subset),
90
+ )
91
+ for subset in subsets
92
+ ]
93
+
94
+ DEFAULT_CONFIG_NAME = "mlqa.vi.vi_source"
95
+
96
+ def _info(self) -> datasets.DatasetInfo:
97
+ if self.config.schema == "source":
98
+ features = datasets.Features(
99
+ {"context": datasets.Value("string"), "question": datasets.Value("string"), "answers": datasets.Features({"answer_start": [datasets.Value("int64")], "text": [datasets.Value("string")]}), "id": datasets.Value("string")}
100
+ )
101
+ elif self.config.schema == "seacrowd_qa":
102
+ features = schemas.qa_features
103
+
104
+ return datasets.DatasetInfo(
105
+ description=_DESCRIPTION,
106
+ features=features,
107
+ homepage=_HOMEPAGE,
108
+ license=_LICENSE,
109
+ citation=_CITATION,
110
+ )
111
+
112
+ def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
113
+ name_split = self.config.name.split("_")
114
+ url = ""
115
+ data_path = ""
116
+
117
+ if name_split[0].startswith("mlqa-translate-train"):
118
+ config_name, lang = name_split[0].split(".")
119
+ url = _URL + _TRANSLATE_TRAIN_URL
120
+ data_path = dl_manager.download(url)
121
+ return [
122
+ datasets.SplitGenerator(
123
+ name=datasets.Split.TRAIN,
124
+ # Whatever you put in gen_kwargs will be passed to _generate_examples
125
+ gen_kwargs={
126
+ "filepath": f"{config_name}/{lang}_squad-translate-train-train-v1.1.json",
127
+ "files": dl_manager.iter_archive(data_path),
128
+ "split": "train",
129
+ },
130
+ ),
131
+ datasets.SplitGenerator(
132
+ name=datasets.Split.TEST,
133
+ gen_kwargs={
134
+ "filepath": f"{config_name}/{lang}_squad-translate-train-dev-v1.1.json",
135
+ "files": dl_manager.iter_archive(data_path),
136
+ "split": "test",
137
+ },
138
+ ),
139
+ ]
140
+
141
+ elif name_split[0].startswith("mlqa-translate-test"):
142
+ config_name, lang = name_split[0].split(".")
143
+ url = _URL + _TRANSLATE_TEST_URL
144
+ data_path = dl_manager.download(url)
145
+ return [
146
+ datasets.SplitGenerator(
147
+ name=datasets.Split.TEST,
148
+ gen_kwargs={
149
+ "filepath": f"{config_name}/translate-test-context-{lang}-question-{lang}.json",
150
+ "files": dl_manager.iter_archive(data_path),
151
+ "split": "test",
152
+ },
153
+ ),
154
+ ]
155
+
156
+ elif name_split[0].startswith("mlqa."):
157
+ url = _URL + _DEV_TEST_URL
158
+ data_path = dl_manager.download_and_extract(url)
159
+ ctx_lang, qst_lang = name_split[0].split(".")[1:]
160
+ return [
161
+ datasets.SplitGenerator(
162
+ name=datasets.Split.VALIDATION,
163
+ gen_kwargs={
164
+ "filepath": os.path.join(
165
+ os.path.join(data_path, "MLQA_V1/dev"),
166
+ f"dev-context-{ctx_lang}-question-{qst_lang}.json",
167
+ ),
168
+ "split": "dev",
169
+ },
170
+ ),
171
+ datasets.SplitGenerator(
172
+ name=datasets.Split.TEST,
173
+ gen_kwargs={
174
+ "filepath": os.path.join(
175
+ os.path.join(data_path, "MLQA_V1/test"),
176
+ f"test-context-{ctx_lang}-question-{qst_lang}.json",
177
+ ),
178
+ "split": "test",
179
+ },
180
+ ),
181
+ ]
182
+ elif name_split[0] == "mlqa":
183
+ url = _URL + _DEV_TEST_URL
184
+ data_path = dl_manager.download_and_extract(url)
185
+ ctx_lang = qst_lang = "vi"
186
+ return [
187
+ datasets.SplitGenerator(
188
+ name=datasets.Split.VALIDATION,
189
+ gen_kwargs={
190
+ "filepath": os.path.join(
191
+ os.path.join(data_path, "MLQA_V1/dev"),
192
+ f"dev-context-{ctx_lang}-question-{qst_lang}.json",
193
+ ),
194
+ "split": "dev",
195
+ },
196
+ ),
197
+ datasets.SplitGenerator(
198
+ name=datasets.Split.TEST,
199
+ gen_kwargs={
200
+ "filepath": os.path.join(
201
+ os.path.join(data_path, "MLQA_V1/test"),
202
+ f"test-context-{ctx_lang}-question-{qst_lang}.json",
203
+ ),
204
+ "split": "test",
205
+ },
206
+ ),
207
+ ]
208
+
209
+ def _generate_examples(self, filepath: Path, split: str, files=None) -> Tuple[int, Dict]:
210
+ is_config_ok = True
211
+ if self.config.name.startswith("mlqa-translate"):
212
+ for path, f in files:
213
+ if path == filepath:
214
+ data = json.loads(f.read().decode("utf-8"))
215
+ break
216
+
217
+ elif self.config.schema == "source" or self.config.schema == "seacrowd_qa":
218
+ with open(filepath, encoding="utf-8") as f:
219
+ data = json.load(f)
220
+ else:
221
+ is_config_ok = False
222
+ raise ValueError(f"Invalid config: {self.config.name}")
223
+
224
+ if is_config_ok:
225
+ count = 0
226
+ for examples in data["data"]:
227
+ for example in examples["paragraphs"]:
228
+ context = example["context"]
229
+ for qa in example["qas"]:
230
+ question = qa["question"]
231
+ id_ = qa["id"]
232
+ answers = qa["answers"]
233
+ answers_start = [answer["answer_start"] for answer in answers]
234
+ answers_text = [answer["text"] for answer in answers]
235
+
236
+ if self.config.schema == "source":
237
+ yield count, {
238
+ "context": context,
239
+ "question": question,
240
+ "answers": {"answer_start": answers_start, "text": answers_text},
241
+ "id": id_,
242
+ }
243
+ count += 1
244
+
245
+ elif self.config.schema == "seacrowd_qa":
246
+ yield count, {"question_id": id_, "context": context, "question": question, "answer": {"answer_start": answers_start[0], "text": answers_text[0]}, "id": id_, "choices": [], "type": "extractive", "document_id": count, "meta":{}}
247
+ count += 1