Languages: Vietnamese
holylovenia committed · verified · Commit 373fa7a · 1 Parent(s): 55510c8

Upload vimqa.py with huggingface_hub

Files changed (1): vimqa.py +191 -0
vimqa.py ADDED
import json
import os
from pathlib import Path

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks
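# NOTE: `schemas`, `SEACrowdConfig`, `Licenses`, and `Tasks` are utilities from the
# SEACrowd data hub (https://github.com/SEACrowd/seacrowd-datahub); this script is a
# loader written against that framework.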

_CITATION = """
@inproceedings{le-etal-2022-vimqa,
    title = "{VIMQA}: A {V}ietnamese Dataset for Advanced Reasoning and Explainable Multi-hop Question Answering",
    author = "Le, Khang and
      Nguyen, Hien and
      Le Thanh, Tung and
      Nguyen, Minh",
    editor = "Calzolari, Nicoletta and
      B{\'e}chet, Fr{\'e}d{\'e}ric and
      Blache, Philippe and
      Choukri, Khalid and
      Cieri, Christopher and
      Declerck, Thierry and
      Goggi, Sara and
      Isahara, Hitoshi and
      Maegaard, Bente and
      Mariani, Joseph and
      Mazo, H{\'e}l{\'e}ne and
      Odijk, Jan and
      Piperidis, Stelios",
    booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference",
    month = jun,
    year = "2022",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://aclanthology.org/2022.lrec-1.700",
    pages = "6521--6529",
}
"""

_DATASETNAME = "vimqa"

_DESCRIPTION = """
VIMQA is a new Vietnamese dataset with over 10,000 Wikipedia-based multi-hop question-answer pairs. The dataset is human-generated and has four main features:
1. The questions require advanced reasoning over multiple paragraphs.
2. Sentence-level supporting facts are provided, enabling the QA model to reason and explain the answer.
3. The dataset offers various types of reasoning to test the model's ability to reason and extract relevant proof.
4. The dataset is in Vietnamese, a low-resource language.
"""

_HOMEPAGE = "https://github.com/vimqa/vimqa"

_LANGUAGES = ["vie"]

_LICENSE = f"""{Licenses.OTHERS.value} | \
The license terms for VIMQA follow the EULA in the dataset's repository.
Please refer to the EULA to review the permissions and request access:
VIMQA EULA -- https://github.com/vimqa/vimqa/blob/main/VIMQA_EULA.pdf
"""

_LOCAL = True

_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class VimqaDataset(datasets.GeneratorBasedBuilder):
    """VIMQA, a new Vietnamese dataset with over 10,000 Wikipedia-based multi-hop question-answer pairs."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_qa",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_qa",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
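
    # Two views of the data are exposed: `vimqa_source` keeps the original
    # HotpotQA-style fields, while `vimqa_seacrowd_qa` maps them onto SEACrowd's
    # shared QA schema, carrying the supporting facts and context in `meta`.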

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "supporting_facts": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "sent_id": datasets.Value("int32"),
                        }
                    ),
                    "context": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "sentences": datasets.features.Sequence(datasets.Value("string")),
                        }
                    ),
                }
            )
        else:
            features = schemas.qa_features
            features["meta"] = {
                "supporting_facts": datasets.features.Sequence(
                    {
                        "title": datasets.Value("string"),
                        "sent_id": datasets.Value("int32"),
                    }
                ),
                "context": datasets.features.Sequence(
                    {
                        "title": datasets.Value("string"),
                        "sentences": datasets.features.Sequence(datasets.Value("string")),
                    }
                ),
            }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> list[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        if self.config.data_dir is None:
            raise ValueError("This is a local dataset. Please pass the data_dir kwarg to load_dataset.")
        else:
            data_dir = self.config.data_dir

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(data_dir, "vimqa_train.json")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": os.path.join(data_dir, "vimqa_dev.json")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(data_dir, "vimqa_test.json")},
            ),
        ]
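
    # NOTE: each JSON file is expected to hold a list of records in the
    # HotpotQA-style shape that the parser below reads (an illustrative sketch,
    # not verbatim data):
    #   {
    #       "_id": "...",
    #       "question": "...",
    #       "answer": "...",
    #       "type": "...",
    #       "supporting_facts": [[title, sent_id], ...],
    #       "context": [[title, [sentence, ...]], ...],
    #   }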

    def _generate_examples(self, filepath: Path) -> tuple[int, dict]:
        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)
        for i, item in enumerate(data):
            if self.config.schema == "source":
                yield i, {
                    "id": item["_id"],
                    "question": item["question"],
                    "answer": item["answer"],
                    "type": item["type"],
                    "supporting_facts": [{"title": f[0], "sent_id": f[1]} for f in item["supporting_facts"]],
                    "context": [{"title": f[0], "sentences": f[1]} for f in item["context"]],
                }
            else:
                yield i, {
                    "id": str(i),
                    "question_id": item["_id"],
                    "document_id": "",
                    "question": item["question"],
                    "type": item["type"],
                    "choices": [],
                    "context": "",
                    "answer": [item["answer"]],
                    "meta": {
                        "supporting_facts": [{"title": f[0], "sent_id": f[1]} for f in item["supporting_facts"]],
                        "context": [{"title": f[0], "sentences": f[1]} for f in item["context"]],
                    },
                }
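
For reference, a minimal usage sketch of this loader. The data_dir path is a placeholder: the VIMQA JSON files must be obtained separately under the EULA linked above, and the exact load_dataset flags may vary with your datasets version.

    import datasets

    # Load the source schema; assumes the EULA-gated files vimqa_train.json,
    # vimqa_dev.json, and vimqa_test.json sit in /path/to/vimqa_data (placeholder).
    vimqa = datasets.load_dataset(
        "vimqa.py",
        name="vimqa_source",
        data_dir="/path/to/vimqa_data",
        trust_remote_code=True,
    )
    print(vimqa["train"][0]["question"])

    # The SEACrowd QA view is selected with the other config from BUILDER_CONFIGS:
    vimqa_qa = datasets.load_dataset(
        "vimqa.py",
        name="vimqa_seacrowd_qa",
        data_dir="/path/to/vimqa_data",
        trust_remote_code=True,
    )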