holylovenia committed
Commit 73271e7 · Parent: 4d2084f

Upload idk_mrc.py with huggingface_hub

Files changed (1)
  1. idk_mrc.py +231 -0
idk_mrc.py ADDED
@@ -0,0 +1,231 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+ import re
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @misc{putri2022idk,
+     doi = {10.48550/ARXIV.2210.13778},
+     url = {https://arxiv.org/abs/2210.13778},
+     author = {Putri, Rifki Afina and Oh, Alice},
+     title = {IDK-MRC: Unanswerable Questions for Indonesian Machine Reading Comprehension},
+     publisher = {arXiv},
+     year = {2022}
+ }
+ """
+
+ _LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
+ _LOCAL = False
+
+ _ALL_DATASETS = ["idk_mrc", "trans_squad", "tydiqa", "model_gen", "human_filt"]
+ _DATASETNAME = _ALL_DATASETS[0]  # idk_mrc
+ _BASELINES = _ALL_DATASETS[1:]  # trans_squad, tydiqa, model_gen, human_filt
+
+ _DESCRIPTION = """\
+ I(n)dontKnow-MRC (IDK-MRC) is an Indonesian Machine Reading Comprehension dataset that covers
+ answerable and unanswerable questions. Building on the existing answerable questions in TyDiQA,
+ the new unanswerable questions in IDK-MRC are generated using a question generation model and
+ human-written questions. Each paragraph in the dataset has a set of answerable and unanswerable
+ questions with the corresponding answers.
+
+ Besides the IDK-MRC (idk_mrc) dataset, several baseline datasets are also provided:
+ 1. Trans SQuAD (trans_squad): machine-translated SQuAD 2.0 (Muis and Purwarianti, 2020)
+ 2. TyDiQA (tydiqa): the Indonesian answerable question set from TyDiQA-GoldP (Clark et al., 2020)
+ 3. Model Gen (model_gen): TyDiQA plus the unanswerable questions produced by the question generation model
+ 4. Human Filt (human_filt): the Model Gen dataset after filtering by human annotators
+ """
+
+ _HOMEPAGE = "https://github.com/rifkiaputri/IDK-MRC"
+
+ _LICENSE = "CC-BY-SA 4.0"
+
+ _URLS = {
+     _DATASETNAME: {
+         "test": "https://raw.githubusercontent.com/rifkiaputri/IDK-MRC/master/dataset/idk_mrc/test.json",
+         "train": "https://raw.githubusercontent.com/rifkiaputri/IDK-MRC/master/dataset/idk_mrc/train.json",
+         "validation": "https://raw.githubusercontent.com/rifkiaputri/IDK-MRC/master/dataset/idk_mrc/valid.json",
+     },
+     "baseline": {
+         "test": "https://raw.githubusercontent.com/rifkiaputri/IDK-MRC/master/dataset/baseline/{name}/test.json",
+         "train": "https://raw.githubusercontent.com/rifkiaputri/IDK-MRC/master/dataset/baseline/{name}/train.json",
+         "validation": "https://raw.githubusercontent.com/rifkiaputri/IDK-MRC/master/dataset/baseline/{name}/valid.json",
+     },
+ }
+
+ _SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ def nusantara_config_constructor(name, schema, version):
+     """
+     Construct a NusantaraConfig named idk_mrc_{schema} for the main dataset &
+     idk_mrc_baseline_{name}_{schema} for the baseline datasets.
+     Supported dataset names: see _ALL_DATASETS.
+     """
+     if schema != "source" and schema != "nusantara_qa":
+         raise ValueError(f"Invalid schema: {schema}")
+
+     if name not in _ALL_DATASETS:
+         raise ValueError(f"Invalid dataset name: {name}")
+
+     if name == "idk_mrc":
+         return NusantaraConfig(
+             name="idk_mrc_{schema}".format(schema=schema),
+             version=datasets.Version(version),
+             description="IDK-MRC with {schema} schema".format(schema=schema),
+             schema=schema,
+             subset_id="idk_mrc",
+         )
+     else:
+         return NusantaraConfig(
+             name="idk_mrc_baseline_{name}_{schema}".format(name=name, schema=schema),
+             version=datasets.Version(version),
+             description="IDK-MRC baseline ({name}) with {schema} schema".format(name=name, schema=schema),
+             schema=schema,
+             subset_id="idk_mrc",
+         )
+
+
+ class IdkMrc(datasets.GeneratorBasedBuilder):
+     """IDK-MRC is an Indonesian MRC dataset that covers answerable and unanswerable questions."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [
+         nusantara_config_constructor(name, schema, version)
+         for name in _ALL_DATASETS for schema, version in zip(["source", "nusantara_qa"], [_SOURCE_VERSION, _NUSANTARA_VERSION])
+     ]
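+     # The comprehension above yields ten configs in total: "idk_mrc_source" and
+     # "idk_mrc_nusantara_qa" for the main dataset, plus a "source" and a "nusantara_qa"
+     # variant of "idk_mrc_baseline_{name}" for each of the four baselines
+     # (e.g., "idk_mrc_baseline_trans_squad_source").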
+
+     DEFAULT_CONFIG_NAME = "idk_mrc_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "context": datasets.Value("string"),
+                     "qas": [
+                         {
+                             "id": datasets.Value("string"),
+                             "is_impossible": datasets.Value("bool"),
+                             "question": datasets.Value("string"),
+                             "answers": [
+                                 {
+                                     "text": datasets.Value("string"),
+                                     "answer_start": datasets.Value("int64"),
+                                 }
+                             ],
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "nusantara_qa":
+             features = schemas.qa_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         if self.config.name == "idk_mrc_source" or self.config.name == "idk_mrc_nusantara_qa":
+             data_name = "idk_mrc"
+             train_data_path = dl_manager.download_and_extract(_URLS[_DATASETNAME]["train"])
+             validation_data_path = dl_manager.download_and_extract(_URLS[_DATASETNAME]["validation"])
+             test_data_path = dl_manager.download_and_extract(_URLS[_DATASETNAME]["test"])
+         else:
+             try:
+                 data_name = re.search("baseline_(.+?)_(source|nusantara_qa)", self.config.name).group(1)
+             except AttributeError:
+                 raise ValueError(f"Invalid config name: {self.config.name}")
+
+             if data_name not in _BASELINES:
+                 raise ValueError(f"Invalid baseline dataset name: {data_name}")
+
+             train_data_path = dl_manager.download_and_extract(_URLS["baseline"]["train"].format(name=data_name))
+             validation_data_path = dl_manager.download_and_extract(_URLS["baseline"]["validation"].format(name=data_name))
+             test_data_path = dl_manager.download_and_extract(_URLS["baseline"]["test"].format(name=data_name)) if data_name != "trans_squad" else ""
+
+         data_split = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": train_data_path,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": validation_data_path,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": test_data_path,
+                 },
+             ),
+         ]
+
+         if data_name == "trans_squad":
+             # trans_squad doesn't have a test split
+             return data_split[:2]
+
+         return data_split
+
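+     # Each split file is a JSON list of paragraph records matching the "source"
+     # features above: a "context" string with its "qas". Following the SQuAD 2.0
+     # convention, unanswerable questions have is_impossible=True and an empty
+     # "answers" list.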
+     def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         with open(filepath) as json_file:
+             examples = json.load(json_file)
+
+         if self.config.schema == "source":
+             # The dataset doesn't have predefined context IDs, so use Python enumeration.
+             # The examples in the source schema are grouped by paragraph context;
+             # each context can have multiple questions.
+             for key, example in enumerate(examples):
+                 yield key, example
+
+         elif self.config.schema == "nusantara_qa":
+             for key, example in enumerate(examples):
+                 for qa in example["qas"]:
+                     # Use the question ID as the key
+                     yield str(qa["id"]), {
+                         "id": qa["id"],
+                         "question_id": qa["id"],
+                         "document_id": str(key),
+                         "question": qa["question"],
+                         "type": "extractive",
+                         "choices": [],
+                         "context": example["context"],
+                         "answer": [ans["text"] for ans in qa["answers"]],
+                     }
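
For reference, a minimal sketch of how this loader can be exercised once the script and the nusacrowd utilities it imports are available locally; the script path is illustrative, and it assumes a datasets version that still supports community loading scripts:

    import datasets

    # Load the main IDK-MRC subset with the source schema.
    idk_mrc = datasets.load_dataset("idk_mrc.py", name="idk_mrc_source")

    # One source-schema record is a paragraph with its grouped questions.
    record = idk_mrc["train"][0]
    print(record["context"][:100])
    for qa in record["qas"]:
        answer = "unanswerable" if qa["is_impossible"] else qa["answers"][0]["text"]
        print("-", qa["question"], "->", answer)

The nusantara_qa configs (e.g., "idk_mrc_nusantara_qa") flatten the same data into one row per question.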