holylovenia committed
Commit 3fd25e8
1 Parent(s): b0d141f

Upload ac_iquad.py with huggingface_hub

Files changed (1):
  1. ac_iquad.py +236 -0
ac_iquad.py ADDED
@@ -0,0 +1,236 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ This is an automatically-produced question answering dataset \
+ generated from Indonesian Wikipedia articles. Each entry \
+ in the dataset consists of a context paragraph, the \
+ question and answer, and the question's equivalent SPARQL \
+ query. Questions are separated into two subsets: simple \
+ (question consists of a single SPARQL triple pattern) and \
+ complex (question consists of two triples plus an optional \
+ typing triple).
+ """
+ import json
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @article{afa5bf8149d6406786539c1ea827087d,
+ title = "AC-IQuAD: Automatically Constructed Indonesian Question Answering Dataset by Leveraging Wikidata",
+ abstract = "Constructing a question-answering dataset can be prohibitively expensive, making it difficult for researchers
+ to make one for an under-resourced language, such as Indonesian. We create a novel Indonesian Question Answering dataset
+ that is produced automatically end-to-end. The process uses Context Free Grammar, the Wikipedia Indonesian Corpus, and
+ the concept of the proxy model. The dataset consists of 134 thousand simple questions and 60 thousand complex questions.
+ It achieved competitive grammatical and model accuracy compared to the translated dataset but suffers from some issues
+ due to resource constraints.",
+ keywords = "Automatic dataset construction, Question answering dataset, Under-resourced Language",
+ author = "Kerenza Doxolodeo and Krisnadhi, {Adila Alfa}",
+ note = "Publisher Copyright: {\textcopyright} 2024, The Author(s).",
+ year = "2024",
+ doi = "10.1007/s10579-023-09702-y",
+ language = "English",
+ journal = "Language Resources and Evaluation",
+ issn = "1574-020X",
+ publisher = "Springer Netherlands",
+ }
+ """
+
+ _DATASETNAME = "ac_iquad"
+
+ _DESCRIPTION = """
+ This is an automatically-produced question answering dataset \
+ generated from Indonesian Wikipedia articles. Each entry \
+ in the dataset consists of a context paragraph, the \
+ question and answer, and the question's equivalent SPARQL \
+ query. Questions are separated into two subsets: simple \
+ (question consists of a single SPARQL triple pattern) and \
+ complex (question consists of two triples plus an optional \
+ typing triple).
+ """
+
+ _HOMEPAGE = "https://www.kaggle.com/datasets/realdeo/indonesian-qa-generated-by-kg"
+
+ _LANGUAGES = ["ind"]
+
+ _LICENSE = Licenses.CC_BY_4_0.value
+
+ _LOCAL = False
+
+ _URLS = {
+     _DATASETNAME: "https://github.com/muhammadravi251001/ac-iquad/raw/main/data/ac_iquad.zip",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class ACIQuADDataset(datasets.GeneratorBasedBuilder):
+     """
+     This is an automatically-produced question answering dataset \
+     generated from Indonesian Wikipedia articles. Each entry \
+     in the dataset consists of a context paragraph, the \
+     question and answer, and the question's equivalent SPARQL \
+     query. Questions are separated into two subsets: simple \
+     (question consists of a single SPARQL triple pattern) and \
+     complex (question consists of two triples plus an optional \
+     typing triple).
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+     SEACROWD_SCHEMA_NAME = "qa"
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_simple_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}_simple",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_simple_seacrowd_{SEACROWD_SCHEMA_NAME}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+             subset_id=f"{_DATASETNAME}_simple",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_complex_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}_complex",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_complex_seacrowd_{SEACROWD_SCHEMA_NAME}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+             subset_id=f"{_DATASETNAME}_complex",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_simple_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features_dict = {
+                 "question": datasets.Value("string"),
+                 "sparql": datasets.Value("string"),
+                 "answer": datasets.Value("string"),
+                 "context": datasets.Value("string"),
+                 "answerline": datasets.Value("string"),
+             }
+
+             if self.config.subset_id.split("_")[2] == "complex":
+                 features_dict["type"] = datasets.Value("string")
+
+             features = datasets.Features(features_dict)
+
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             features = schemas.qa_features
+
+             if self.config.subset_id.split("_")[2] == "complex":
+                 features["meta"] = {"sparql": datasets.Value("string"), "answer_meta": datasets.Value("string"), "type": datasets.Value("string")}
+
+             else:
+                 features["meta"] = {"sparql": datasets.Value("string"), "answer_meta": datasets.Value("string")}
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         subset = self.config.name.split("_")[2]
+         data_dir = dl_manager.download_and_extract(_URLS[_DATASETNAME])
+
+         if subset == "simple":
+             subset = "single"
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, f"{subset}_train.json"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, f"{subset}_test.json"),
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         with open(filepath, "r", encoding="utf-8") as file:
+             data_json = json.load(file)
+
+         df = pd.json_normalize(data_json)
+
+         for index, row in df.iterrows():
+
+             if self.config.schema == "source":
+                 example = row.to_dict()
+
+                 if self.config.subset_id.split("_")[2] == "complex":
+                     example["type"] = example.pop("tipe", None)
+
+             elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+
+                 subset = self.config.name.split("_")[2]
+                 if subset == "simple":
+                     row["answerline"] = f"[{row['answerline']}]"
+
+                 example = {
+                     "id": str(index),
+                     "question_id": "question_id",
+                     "document_id": "document_id",
+                     "question": row["question"],
+                     "type": "extractive",
+                     "choices": [],
+                     "context": row["context"],
+                     "answer": eval(row["answerline"]),
+                     "meta": {"sparql": row["sparql"], "answer_meta": row["answer"]},
+                 }
+
+                 if self.config.subset_id.split("_")[2] == "complex":
+                     example["meta"]["type"] = row["tipe"]
+
+             yield index, example
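
For quick verification of the uploaded script, a minimal loading sketch follows. It assumes the seacrowd package is installed (the script imports seacrowd.utils), that this ac_iquad.py file is available locally at the path shown (a hypothetical path, adjust as needed), and that your datasets version still supports script-based builders; the config name comes from BUILDER_CONFIGS above.

import datasets

# "ac_iquad_simple_source" is one of the config names defined in BUILDER_CONFIGS;
# the complex subset and the seacrowd_qa schema are selected the same way
# (e.g. "ac_iquad_complex_seacrowd_qa").
ds = datasets.load_dataset(
    "ac_iquad.py",                # hypothetical local path to this script
    name="ac_iquad_simple_source",
    split="train",
    trust_remote_code=True,       # may be required depending on the datasets version
)

# Source-schema fields, as declared in _info(): question, sparql, answer, context, answerline.
print(ds[0]["question"], ds[0]["answerline"])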