albertvillanova committed
Commit: d5e1adb
Parent: b074c81

Delete loading script

Files changed (1)
  1. exams.py +0 -266
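
This commit removes the script-based loader from the repository: the dataset is now expected to load directly from data files hosted on the Hub (typically an auto-converted Parquet export), so load_dataset no longer needs to execute repository code. A minimal sketch of loading after this commit, assuming the dataset id "exams" and that the configuration names defined in the deleted script below are preserved:

from datasets import load_dataset

# "multilingual_with_para" was the DEFAULT_CONFIG_NAME in the deleted script
ds = load_dataset("exams", "multilingual_with_para")
print(ds["train"][0]["question"]["stem"])  # stem of the first question
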
exams.py DELETED
@@ -1,266 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """EXAMS: a benchmark dataset for multilingual and cross-lingual question answering"""
-
-
- import json
-
- import datasets
-
-
- _CITATION = """\
- @article{hardalov2020exams,
-   title={EXAMS: A Multi-subject High School Examinations Dataset for Cross-lingual and Multilingual Question Answering},
-   author={Hardalov, Momchil and Mihaylov, Todor and Zlatkova, Dimitrina and Dinkov, Yoan and Koychev, Ivan and Nakov, Preslav},
-   journal={arXiv preprint arXiv:2011.03080},
-   year={2020}
- }
- """
-
- _DESCRIPTION = """\
- EXAMS is a benchmark dataset for multilingual and cross-lingual question answering from high school examinations.
- It consists of more than 24,000 high-quality high school exam questions in 16 languages,
- covering 8 language families and 24 school subjects from Natural Sciences and Social Sciences, among others.
- """
-
- _HOMEPAGE = "https://github.com/mhardalov/exams-qa"
-
- _LICENSE = "CC-BY-SA-4.0"
-
- _URLS_LIST = [
-     ("alignments", "https://github.com/mhardalov/exams-qa/raw/main/data/exams/parallel_questions.jsonl"),
- ]
- _URLS_LIST += [
-     (
-         "multilingual_train",
-         "https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/train.jsonl.tar.gz",
-     ),
-     ("multilingual_dev", "https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/dev.jsonl.tar.gz"),
-     ("multilingual_test", "https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/test.jsonl.tar.gz"),
-     (
-         "multilingual_with_para_train",
-         "https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/with_paragraphs/train_with_para.jsonl.tar.gz",
-     ),
-     (
-         "multilingual_with_para_dev",
-         "https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/with_paragraphs/dev_with_para.jsonl.tar.gz",
-     ),
-     (
-         "multilingual_with_para_test",
-         "https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/with_paragraphs/test_with_para.jsonl.tar.gz",
-     ),
- ]
-
- _CROSS_LANGUAGES = ["bg", "hr", "hu", "it", "mk", "pl", "pt", "sq", "sr", "tr", "vi"]
- _URLS_LIST += [
-     ("crosslingual_test", "https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/test.jsonl.tar.gz"),
-     (
-         "crosslingual_with_para_test",
-         "https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/test_with_para.jsonl.tar.gz",
-     ),
- ]
- for ln in _CROSS_LANGUAGES:
-     _URLS_LIST += [
-         (
-             f"crosslingual_{ln}_train",
-             f"https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/train_{ln}.jsonl.tar.gz",
-         ),
-         (
-             f"crosslingual_with_para_{ln}_train",
-             f"https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/train_{ln}_with_para.jsonl.tar.gz",
-         ),
-         (
-             f"crosslingual_{ln}_dev",
-             f"https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/dev_{ln}.jsonl.tar.gz",
-         ),
-         (
-             f"crosslingual_with_para_{ln}_dev",
-             f"https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/dev_{ln}_with_para.jsonl.tar.gz",
-         ),
-     ]
- _URLs = dict(_URLS_LIST)
-
-
- class ExamsConfig(datasets.BuilderConfig):
-     def __init__(self, lang, with_para, **kwargs):
-         super(ExamsConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-         self.lang = lang
-         self.with_para = "_with_para" if with_para else ""
-
-
- class Exams(datasets.GeneratorBasedBuilder):
-     """Exams dataset"""
-
-     VERSION = datasets.Version("1.0.0")
-     BUILDER_CONFIG_CLASS = ExamsConfig
-     BUILDER_CONFIGS = [
-         ExamsConfig(
-             lang="",
-             with_para=False,
-             name="alignments",
-             description="loads the alignment between question IDs across languages",
-         ),
-         ExamsConfig(
-             lang="all",
-             with_para=False,
-             name="multilingual",
-             description="Loads the unified multilingual train/dev/test split",
-         ),
-         ExamsConfig(
-             lang="all",
-             with_para=True,
-             name="multilingual_with_para",
-             description="Loads the unified multilingual train/dev/test split with Wikipedia support paragraphs",
-         ),
-         ExamsConfig(
-             lang="all", with_para=False, name="crosslingual_test", description="Loads crosslingual test set only"
-         ),
-         ExamsConfig(
-             lang="all",
-             with_para=True,
-             name="crosslingual_with_para_test",
-             description="Loads crosslingual test set only with Wikipedia support paragraphs",
-         ),
-     ]
-     for ln in _CROSS_LANGUAGES:
-         BUILDER_CONFIGS += [
-             ExamsConfig(
-                 lang=ln,
-                 with_para=False,
-                 name=f"crosslingual_{ln}",
-                 description=f"Loads crosslingual train and dev set for {ln}",
-             ),
-             ExamsConfig(
-                 lang=ln,
-                 with_para=True,
-                 name=f"crosslingual_with_para_{ln}",
-                 description=f"Loads crosslingual train and dev set for {ln} with Wikipedia support paragraphs",
-             ),
-         ]
-
-     DEFAULT_CONFIG_NAME = (
-         "multilingual_with_para"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
-     )
-
-     def _info(self):
-         if self.config.name == "alignments":  # the "alignments" configuration has its own schema
-             features = datasets.Features(
-                 {
-                     "source_id": datasets.Value("string"),
-                     "target_id_list": datasets.Sequence(datasets.Value("string")),
-                 }
-             )
-         else:  # all question-answering configurations share the schema below
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "question": {
-                         "stem": datasets.Value("string"),
-                         "choices": datasets.Sequence(
-                             {
-                                 "text": datasets.Value("string"),
-                                 "label": datasets.Value("string"),
-                                 "para": datasets.Value("string"),
-                             }
-                         ),
-                     },
-                     "answerKey": datasets.Value("string"),
-                     "info": {
-                         "grade": datasets.Value("int32"),
-                         "subject": datasets.Value("string"),
-                         "language": datasets.Value("string"),
-                     },
-                 }
-             )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,  # Here we define them above because they are different between the two configurations
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         archives = dl_manager.download(_URLs)
-         if self.config.name == "alignments":
-             return [
-                 datasets.SplitGenerator(
-                     name="full",
-                     gen_kwargs={"filepath": archives["alignments"]},
-                 ),
-             ]
-         elif self.config.name in ["multilingual", "multilingual_with_para"]:
-             return [
-                 datasets.SplitGenerator(
-                     name=spl_enum,
-                     gen_kwargs={
-                         "filepath": f"{spl}{self.config.with_para}.jsonl",
-                         "files": dl_manager.iter_archive(archives[f"{self.config.name}_{spl}"]),
-                     },
-                 )
-                 for spl, spl_enum in [
-                     ("train", datasets.Split.TRAIN),
-                     ("dev", datasets.Split.VALIDATION),
-                     ("test", datasets.Split.TEST),
-                 ]
-             ]
-         elif self.config.name in ["crosslingual_test", "crosslingual_with_para_test"]:
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "filepath": f"test{self.config.with_para}.jsonl",
-                         "files": dl_manager.iter_archive(archives[self.config.name]),
-                     },
-                 ),
-             ]
-         else:
-             return [
-                 datasets.SplitGenerator(
-                     name=spl_enum,
-                     gen_kwargs={
-                         "filepath": f"{spl}_{self.config.lang}{self.config.with_para}.jsonl",
-                         "files": dl_manager.iter_archive(archives[f"{self.config.name}_{spl}"]),
-                     },
-                 )
-                 for spl, spl_enum in [
-                     ("train", datasets.Split.TRAIN),
-                     ("dev", datasets.Split.VALIDATION),
-                 ]
-             ]
-
-     def _generate_examples(self, filepath, files=None):
-         if self.config.name == "alignments":
-             with open(filepath, encoding="utf-8") as f:
-                 for id_, line in enumerate(f):
-                     line_dict = json.loads(line.strip())
-                     in_id, out_list = list(line_dict.items())[0]
-                     yield id_, {"source_id": in_id, "target_id_list": out_list}
-         else:
-             for path, f in files:
-                 if path == filepath:
-                     for id_, line in enumerate(f):
-                         line_dict = json.loads(line.strip())
-                         for choice in line_dict["question"]["choices"]:
-                             choice["para"] = choice.get("para", "")
-                         yield id_, {
-                             "id": line_dict["id"],
-                             "question": line_dict["question"],
-                             "answerKey": line_dict["answerKey"],
-                             "info": line_dict["info"],
-                         }
-                     break
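
For reference, a sketch that checks whether the converted dataset still exposes every configuration the deleted script defined. get_dataset_config_names is part of the datasets library; the dataset id "exams" and the preservation of these names are assumptions:

from datasets import get_dataset_config_names

# Config names reconstructed from BUILDER_CONFIGS in the deleted script above
cross_languages = ["bg", "hr", "hu", "it", "mk", "pl", "pt", "sq", "sr", "tr", "vi"]
expected = (
    ["alignments", "multilingual", "multilingual_with_para",
     "crosslingual_test", "crosslingual_with_para_test"]
    + [f"crosslingual_{ln}" for ln in cross_languages]
    + [f"crosslingual_with_para_{ln}" for ln in cross_languages]
)
configs = get_dataset_config_names("exams")
print(sorted(configs) == sorted(expected))  # True if the conversion kept them all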