holylovenia commited on
Commit
c2027a4
1 Parent(s): 2ca93d0

Upload m3exam.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. m3exam.py +318 -0
m3exam.py ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ import os
18
+ import re
19
+ import zipfile
20
+ from pathlib import Path
21
+ from typing import Dict, List, Tuple
22
+
23
+ import datasets
24
+
25
+ from seacrowd.utils import schemas
26
+ from seacrowd.utils.configs import SEACrowdConfig
27
+ from seacrowd.utils.constants import Licenses, Tasks
28
+
29
# BibTeX entry reported to users via `datasets.DatasetInfo.citation`.
_CITATION = """\
@article{zhang2023m3exam,
title={M3Exam: A Multilingual, Multimodal, Multilevel Benchmark for Examining Large Language Models},
author={Wenxuan Zhang and Sharifah Mahani Aljunied and Chang Gao and Yew Ken Chia and Lidong Bing},
year={2023},
eprint={2306.05179},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""

_DATASETNAME = "m3exam"

_DESCRIPTION = """\
M3Exam is a novel benchmark sourced from real and official human exam questions for evaluating LLMs\
in a multilingual, multimodal, and multilevel context. In total, M3Exam contains 12,317 questions in 9\
diverse languages with three educational levels, where about 23% of the questions require processing images\
for successful solving. M3Exam dataset covers 3 languages spoken in Southeast Asia.
"""

_HOMEPAGE = "https://github.com/DAMO-NLP-SG/M3Exam"

# ISO 639-3 codes of the covered Southeast Asian languages, and the mapping
# to the language names used in the dataset archive's file/directory paths
# (e.g. "thai-questions-test.json", "images-thai/").
_LANGUAGES = ["jav", "tha", "vie"]
_LANG_MAPPER = {"jav": "javanese", "tha": "thai", "vie": "vietnamese"}
_LICENSE = Licenses.CC_BY_NC_SA_4_0.value

# Data is downloaded from a public URL, not supplied by the user.
_LOCAL = False
_PASSWORD = "12317".encode("utf-8")  # password to unzip dataset after downloading
_URLS = {
    _DATASETNAME: "https://drive.usercontent.google.com/download?id=1eREETRklmXJLXrNPTyHxQ3RFdPhq_Nes&authuser=0&confirm=t",
}

_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING, Tasks.VISUAL_QUESTION_ANSWERING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"
66
+
67
+
68
class M3ExamDataset(datasets.GeneratorBasedBuilder):
    """
    M3Exam is a novel benchmark sourced from real and official human exam questions for evaluating LLMs
    in a multilingual, multimodal, and multilevel context. In total, M3Exam contains 12,317 questions in 9
    diverse languages with three educational levels, where about 23% of the questions require processing images
    for successful solving. M3Exam dataset covers 3 languages spoken in Southeast Asia.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # Thai exams label options with either Arabic digits ("1") or Thai digits
    # ("๑"); mapping both spellings to a canonical form lets the answer key be
    # matched against option labels regardless of which digit style is used.
    # Hoisted to a class constant so it is not rebuilt on every generate call.
    _THAI_ANSWER_MAPPER = {"1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "๑": "1", "๒": "2", "๓": "3", "๔": "4", "๕": "5"}

    # Pattern for inline image placeholders such as "[image-12.png]" embedded
    # in question/option/background text; compiled once for all examples.
    _IMAGE_PATTERN = re.compile(r"\[image-(\d+)\.(jpg|png)\]")

    BUILDER_CONFIGS = (
        [SEACrowdConfig(name=f"{_DATASETNAME}_{lang}_source", version=datasets.Version(_SOURCE_VERSION), description=f"{_DATASETNAME} source schema", schema="source", subset_id=f"{_DATASETNAME}") for lang in _LANGUAGES]
        + [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{lang}_seacrowd_qa",
                version=datasets.Version(_SEACROWD_VERSION),
                description=f"{_DATASETNAME} SEACrowd schema",
                schema="seacrowd_qa",
                subset_id=f"{_DATASETNAME}",
            )
            for lang in _LANGUAGES
        ]
        + [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{lang}_seacrowd_imqa",
                version=datasets.Version(_SEACROWD_VERSION),
                description=f"{_DATASETNAME} SEACrowd schema",
                schema="seacrowd_imqa",
                subset_id=f"{_DATASETNAME}",
            )
            for lang in _LANGUAGES
        ]
    )

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_jav_source"

    @staticmethod
    def _meta_features() -> dict:
        """Feature spec for the exam metadata block shared by both SEACrowd schemas."""
        return {
            "background_description": datasets.Sequence(datasets.Value("string")),
            "level": datasets.Value("string"),
            "subject": datasets.Value("string"),
            "subject_category": datasets.Value("string"),
            "year": datasets.Value("string"),
        }

    def _info(self) -> datasets.DatasetInfo:
        """Return the DatasetInfo (features, license, citation) for the selected schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "question_text": datasets.Value("string"),
                    "background_description": datasets.Sequence(datasets.Value("string")),
                    "answer_text": datasets.Value("string"),
                    "options": datasets.Sequence(datasets.Value("string")),
                    "language": datasets.Value("string"),
                    "level": datasets.Value("string"),
                    "subject": datasets.Value("string"),
                    "subject_category": datasets.Value("string"),
                    "year": datasets.Value("string"),
                    "need_image": datasets.Value("string"),
                    "image_paths": datasets.Sequence(datasets.Value("string")),
                }
            )
        elif self.config.schema == "seacrowd_qa":
            features = schemas.qa_features
            features["meta"] = self._meta_features()
        elif self.config.schema == "seacrowd_imqa":
            features = schemas.imqa_features
            features["meta"] = self._meta_features()

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        lang = self.config.name.split("_")[1]

        data_dir = dl_manager.download(urls)

        # The archive is password-protected, so dl_manager cannot extract it
        # itself. Temporarily rename the cached download to *.zip, extract
        # with the password, then restore the original name so the download
        # cache entry stays valid for later runs.
        if not os.path.exists(data_dir + "_extracted"):
            if not os.path.exists(data_dir + ".zip"):
                os.rename(data_dir, data_dir + ".zip")
            with zipfile.ZipFile(data_dir + ".zip", "r") as zip_ref:
                zip_ref.extractall(data_dir + "_extracted", pwd=_PASSWORD)  # unzipping with password
            if not os.path.exists(data_dir):
                os.rename(data_dir + ".zip", data_dir)

        # Multimodal questions only ship as a single file per language,
        # exposed here as the TRAIN split (a directory path: the generator
        # needs access to the sibling images-<language>/ folder).
        image_generator = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir + "_extracted", "data/multimodal-question"),
                    "split": "train",
                },
            ),
        ]

        # Text-only questions come pre-split into test and dev JSON files.
        text_generator = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir + "_extracted", f"data/text-question/{_LANG_MAPPER[lang]}-questions-test.json"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir + "_extracted", f"data/text-question/{_LANG_MAPPER[lang]}-questions-dev.json"),
                    "split": "dev",
                },
            ),
        ]
        if "imqa" in self.config.name:
            return image_generator
        if "source" in self.config.name:
            # The source schema exposes both the multimodal and text questions.
            image_generator.extend(text_generator)
            return image_generator
        return text_generator

    @staticmethod
    def _read_json(path) -> list:
        """Load one question file; the archive ships UTF-8 JSON, so decode
        explicitly instead of relying on the platform's locale default."""
        with open(path, "r", encoding="utf-8") as file:
            return json.load(file)

    @staticmethod
    def _option_text(option: str) -> str:
        """Strip the leading 'label.' prefix from an option string, e.g. '2. foo' -> 'foo'."""
        return ".".join(option.split(".")[1:]).strip()

    @staticmethod
    def _build_meta(json_obj: dict) -> dict:
        """Metadata sub-record for the SEACrowd schemas; absent keys become None."""
        return {
            "background_description": json_obj.get("background_description"),
            "level": json_obj.get("level"),
            "subject": json_obj.get("subject"),
            "subject_category": json_obj.get("subject_category"),
            "year": json_obj.get("year"),
        }

    def _match_answer(self, json_obj: dict) -> list:
        """Return the option text(s) whose leading label equals answer_text.

        For Thai configs, if no option label matches directly, retry after
        normalizing Thai-digit labels via _THAI_ANSWER_MAPPER. NOTE(review):
        a label absent from the mapper raises KeyError here, matching the
        original behavior (assumes labels are always 1-5 in either script).
        """
        answer_text = json_obj["answer_text"]
        options = json_obj["options"]
        answer = [self._option_text(opt) for opt in options if answer_text == opt.split(".")[0]]
        if "_tha_" in self.config.name and not answer:
            mapper = self._THAI_ANSWER_MAPPER
            answer = [self._option_text(opt) for opt in options if mapper[answer_text] == mapper[opt.split(".")[0]]]
        return answer

    def _collect_image_paths(self, filepath, lang: str, json_obj: dict) -> list:
        """Resolve every '[image-N.ext]' placeholder found in the question,
        options, or background text to a path under images-<language>/."""
        image_paths = []
        for text in [json_obj["question_text"]] + json_obj["options"] + json_obj["background_description"]:
            for number, ext in self._IMAGE_PATTERN.findall(text):
                image_paths.append(os.path.join(filepath, f"images-{_LANG_MAPPER[lang]}/image-{number}.{ext}"))
        return image_paths

    @staticmethod
    def _source_example(json_obj: dict) -> dict:
        """Common fields of a source-schema example (image fields added by caller)."""
        return {
            "question_text": json_obj["question_text"],
            "background_description": json_obj.get("background_description"),
            "answer_text": json_obj["answer_text"],
            "options": json_obj["options"],
            "language": json_obj.get("language"),
            "level": json_obj.get("level"),
            "subject": json_obj.get("subject"),
            "subject_category": json_obj.get("subject_category"),
            "year": json_obj.get("year"),
        }

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        lang = self.config.name.split("_")[1]

        if self.config.schema == "source":
            if split == "train":
                # Multimodal subset: filepath is the multimodal-question
                # directory; the JSON file sits next to the image folders.
                data = self._read_json(os.path.join(filepath, f"{_LANG_MAPPER[lang]}-questions-image.json"))
                for idx, json_obj in enumerate(data):
                    example = self._source_example(json_obj)
                    example["need_image"] = "yes"
                    example["image_paths"] = self._collect_image_paths(filepath, lang, json_obj)
                    yield idx, example
            else:
                # Text-only subsets: filepath is the JSON file itself.
                data = self._read_json(filepath)
                for idx, json_obj in enumerate(data):
                    example = self._source_example(json_obj)
                    example["need_image"] = "no"
                    example["image_paths"] = None
                    yield idx, example

        elif self.config.schema == "seacrowd_qa":
            data = self._read_json(filepath)
            for idx, json_obj in enumerate(data):
                yield idx, {
                    "id": idx,
                    "question_id": idx,
                    "document_id": idx,
                    "question": json_obj["question_text"],
                    "type": "multiple_choice",
                    "choices": [self._option_text(opt) for opt in json_obj["options"]],
                    "context": "",
                    "answer": self._match_answer(json_obj),
                    "meta": self._build_meta(json_obj),
                }

        elif self.config.schema == "seacrowd_imqa":
            data = self._read_json(os.path.join(filepath, f"{_LANG_MAPPER[lang]}-questions-image.json"))
            for idx, json_obj in enumerate(data):
                yield idx, {
                    "id": idx,
                    "question_id": idx,
                    "document_id": idx,
                    "questions": [json_obj["question_text"]],
                    "type": "multiple_choice",
                    "choices": [self._option_text(opt) for opt in json_obj["options"]],
                    "context": "",
                    "answer": self._match_answer(json_obj),
                    "image_paths": self._collect_image_paths(filepath, lang, json_obj),
                    "meta": self._build_meta(json_obj),
                }