system HF staff committed on
Commit
449984e
0 Parent(s):

Update files from the datasets library (from 1.2.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

Files changed (31) hide show
  1. .gitattributes +27 -0
  2. README.md +411 -0
  3. dataset_infos.json +0 -0
  4. dummy/alignments/1.0.0/dummy_data.zip +3 -0
  5. dummy/crosslingual_bg/1.0.0/dummy_data.zip +3 -0
  6. dummy/crosslingual_hr/1.0.0/dummy_data.zip +3 -0
  7. dummy/crosslingual_hu/1.0.0/dummy_data.zip +3 -0
  8. dummy/crosslingual_it/1.0.0/dummy_data.zip +3 -0
  9. dummy/crosslingual_mk/1.0.0/dummy_data.zip +3 -0
  10. dummy/crosslingual_pl/1.0.0/dummy_data.zip +3 -0
  11. dummy/crosslingual_pt/1.0.0/dummy_data.zip +3 -0
  12. dummy/crosslingual_sq/1.0.0/dummy_data.zip +3 -0
  13. dummy/crosslingual_sr/1.0.0/dummy_data.zip +3 -0
  14. dummy/crosslingual_test/1.0.0/dummy_data.zip +3 -0
  15. dummy/crosslingual_tr/1.0.0/dummy_data.zip +3 -0
  16. dummy/crosslingual_vi/1.0.0/dummy_data.zip +3 -0
  17. dummy/crosslingual_with_para_bg/1.0.0/dummy_data.zip +3 -0
  18. dummy/crosslingual_with_para_hr/1.0.0/dummy_data.zip +3 -0
  19. dummy/crosslingual_with_para_hu/1.0.0/dummy_data.zip +3 -0
  20. dummy/crosslingual_with_para_it/1.0.0/dummy_data.zip +3 -0
  21. dummy/crosslingual_with_para_mk/1.0.0/dummy_data.zip +3 -0
  22. dummy/crosslingual_with_para_pl/1.0.0/dummy_data.zip +3 -0
  23. dummy/crosslingual_with_para_pt/1.0.0/dummy_data.zip +3 -0
  24. dummy/crosslingual_with_para_sq/1.0.0/dummy_data.zip +3 -0
  25. dummy/crosslingual_with_para_sr/1.0.0/dummy_data.zip +3 -0
  26. dummy/crosslingual_with_para_test/1.0.0/dummy_data.zip +3 -0
  27. dummy/crosslingual_with_para_tr/1.0.0/dummy_data.zip +3 -0
  28. dummy/crosslingual_with_para_vi/1.0.0/dummy_data.zip +3 -0
  29. dummy/multilingual/1.0.0/dummy_data.zip +3 -0
  30. dummy/multilingual_with_para/1.0.0/dummy_data.zip +3 -0
  31. exams.py +266 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - found
4
+ language_creators:
5
+ - found
6
+ languages:
7
+ alignments:
8
+ - ar
9
+ - bg
10
+ - de
11
+ - es
12
+ - fr
13
+ - hr
14
+ - hu
15
+ - it
16
+ - lt
17
+ - mk
18
+ - pl
19
+ - pt
20
+ - sq
21
+ - sr
22
+ - tr
23
+ - vi
24
+ crosslingual_bg:
25
+ - bg
26
+ crosslingual_hr:
27
+ - hr
28
+ crosslingual_hu:
29
+ - hu
30
+ crosslingual_it:
31
+ - it
32
+ crosslingual_mk:
33
+ - mk
34
+ crosslingual_pl:
35
+ - pl
36
+ crosslingual_pt:
37
+ - pt
38
+ crosslingual_sq:
39
+ - sq
40
+ crosslingual_sr:
41
+ - sr
42
+ crosslingual_test:
43
+ - ar
44
+ - bg
45
+ - de
46
+ - es
47
+ - fr
48
+ - hr
49
+ - hu
50
+ - it
51
+ - lt
52
+ - mk
53
+ - pl
54
+ - pt
55
+ - sq
56
+ - sr
57
+ - tr
58
+ - vi
59
+ crosslingual_tr:
60
+ - tr
61
+ crosslingual_vi:
62
+ - vi
63
+ crosslingual_with_para_bg:
64
+ - bg
65
+ crosslingual_with_para_hr:
66
+ - hr
67
+ crosslingual_with_para_hu:
68
+ - hu
69
+ crosslingual_with_para_it:
70
+ - it
71
+ crosslingual_with_para_mk:
72
+ - mk
73
+ crosslingual_with_para_pl:
74
+ - pl
75
+ crosslingual_with_para_pt:
76
+ - pt
77
+ crosslingual_with_para_sq:
78
+ - sq
79
+ crosslingual_with_para_sr:
80
+ - sr
81
+ crosslingual_with_para_test:
82
+ - ar
83
+ - bg
84
+ - de
85
+ - es
86
+ - fr
87
+ - hr
88
+ - hu
89
+ - it
90
+ - lt
91
+ - mk
92
+ - pl
93
+ - pt
94
+ - sq
95
+ - sr
96
+ - tr
97
+ - vi
98
+ crosslingual_with_para_tr:
99
+ - tr
100
+ crosslingual_with_para_vi:
101
+ - vi
102
+ multilingual:
103
+ - ar
104
+ - bg
105
+ - de
106
+ - es
107
+ - fr
108
+ - hr
109
+ - hu
110
+ - it
111
+ - lt
112
+ - mk
113
+ - pl
114
+ - pt
115
+ - sq
116
+ - sr
117
+ - tr
118
+ - vi
119
+ multilingual_with_para:
120
+ - ar
121
+ - bg
122
+ - de
123
+ - es
124
+ - fr
125
+ - hr
126
+ - hu
127
+ - it
128
+ - lt
129
+ - mk
130
+ - pl
131
+ - pt
132
+ - sq
133
+ - sr
134
+ - tr
135
+ - vi
136
+ licenses:
137
+ - cc-by-sa-4-0
138
+ multilinguality:
139
+ alignments:
140
+ - multilingual
141
+ crosslingual_bg:
142
+ - monolingual
143
+ crosslingual_hr:
144
+ - monolingual
145
+ crosslingual_hu:
146
+ - monolingual
147
+ crosslingual_it:
148
+ - monolingual
149
+ crosslingual_mk:
150
+ - monolingual
151
+ crosslingual_pl:
152
+ - monolingual
153
+ crosslingual_pt:
154
+ - monolingual
155
+ crosslingual_sq:
156
+ - monolingual
157
+ crosslingual_sr:
158
+ - monolingual
159
+ crosslingual_test:
160
+ - multilingual
161
+ crosslingual_tr:
162
+ - monolingual
163
+ crosslingual_vi:
164
+ - monolingual
165
+ crosslingual_with_para_bg:
166
+ - monolingual
167
+ crosslingual_with_para_hr:
168
+ - monolingual
169
+ crosslingual_with_para_hu:
170
+ - monolingual
171
+ crosslingual_with_para_it:
172
+ - monolingual
173
+ crosslingual_with_para_mk:
174
+ - monolingual
175
+ crosslingual_with_para_pl:
176
+ - monolingual
177
+ crosslingual_with_para_pt:
178
+ - monolingual
179
+ crosslingual_with_para_sq:
180
+ - monolingual
181
+ crosslingual_with_para_sr:
182
+ - monolingual
183
+ crosslingual_with_para_test:
184
+ - multilingual
185
+ crosslingual_with_para_tr:
186
+ - monolingual
187
+ crosslingual_with_para_vi:
188
+ - monolingual
189
+ multilingual:
190
+ - multilingual
191
+ multilingual_with_para:
192
+ - multilingual
193
+ size_categories:
194
+ alignments:
195
+ - 10K<n<100K
196
+ crosslingual_bg:
197
+ - 1K<n<10K
198
+ crosslingual_hr:
199
+ - 1K<n<10K
200
+ crosslingual_hu:
201
+ - 1K<n<10K
202
+ crosslingual_it:
203
+ - 1K<n<10K
204
+ crosslingual_mk:
205
+ - 1K<n<10K
206
+ crosslingual_pl:
207
+ - 1K<n<10K
208
+ crosslingual_pt:
209
+ - n<1K
210
+ crosslingual_sq:
211
+ - 1K<n<10K
212
+ crosslingual_sr:
213
+ - 1K<n<10K
214
+ crosslingual_test:
215
+ - 10K<n<100K
216
+ crosslingual_tr:
217
+ - 1K<n<10K
218
+ crosslingual_vi:
219
+ - 1K<n<10K
220
+ crosslingual_with_para_bg:
221
+ - 1K<n<10K
222
+ crosslingual_with_para_hr:
223
+ - 1K<n<10K
224
+ crosslingual_with_para_hu:
225
+ - 1K<n<10K
226
+ crosslingual_with_para_it:
227
+ - 1K<n<10K
228
+ crosslingual_with_para_mk:
229
+ - 1K<n<10K
230
+ crosslingual_with_para_pl:
231
+ - 1K<n<10K
232
+ crosslingual_with_para_pt:
233
+ - n<1K
234
+ crosslingual_with_para_sq:
235
+ - 1K<n<10K
236
+ crosslingual_with_para_sr:
237
+ - 1K<n<10K
238
+ crosslingual_with_para_test:
239
+ - 10K<n<100K
240
+ crosslingual_with_para_tr:
241
+ - 1K<n<10K
242
+ crosslingual_with_para_vi:
243
+ - 1K<n<10K
244
+ multilingual:
245
+ - 10K<n<100K
246
+ multilingual_with_para:
247
+ - 10K<n<100K
248
+ source_datasets:
249
+ - original
250
+ task_categories:
251
+ - question-answering
252
+ task_ids:
253
+ - multiple-choice-qa
254
+ ---
255
+
256
+ # Dataset Card for [Dataset Name]
257
+
258
+ ## Table of Contents
259
+ - [Dataset Description](#dataset-description)
260
+ - [Dataset Summary](#dataset-summary)
261
+ - [Supported Tasks](#supported-tasks-and-leaderboards)
262
+ - [Languages](#languages)
263
+ - [Dataset Structure](#dataset-structure)
264
+ - [Data Instances](#data-instances)
265
+ - [Data Fields](#data-fields)
266
+ - [Data Splits](#data-splits)
267
+ - [Dataset Creation](#dataset-creation)
268
+ - [Curation Rationale](#curation-rationale)
269
+ - [Source Data](#source-data)
270
+ - [Annotations](#annotations)
271
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
272
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
273
+ - [Social Impact of Dataset](#social-impact-of-dataset)
274
+ - [Discussion of Biases](#discussion-of-biases)
275
+ - [Other Known Limitations](#other-known-limitations)
276
+ - [Additional Information](#additional-information)
277
+ - [Dataset Curators](#dataset-curators)
278
+ - [Licensing Information](#licensing-information)
279
+ - [Citation Information](#citation-information)
280
+
281
+ ## Dataset Description
282
+
283
+ - **Repository:** [EXAMS github repository](https://github.com/mhardalov/exams-qa)
284
+ - **Paper:** [EXAMS: A Multi-Subject High School Examinations Dataset for Cross-Lingual and Multilingual Question Answering](https://arxiv.org/abs/2011.03080)
285
+ - **Point of Contact:** [hardalov@fmi.uni-sofia.bg](mailto:hardalov@fmi.uni-sofia.bg)
286
+
287
+ ### Dataset Summary
288
+
289
+ Eχαµs is a benchmark dataset for multilingual and cross-lingual question answering from high school examinations. It consists of more than 24,000 high-quality high school exam questions in 16 languages, covering 8 language families and 24 school subjects from Natural Sciences and Social Sciences, among others.
290
+
291
+ ### Supported Tasks and Leaderboards
292
+
293
+ [More Information Needed]
294
+
295
+ ### Languages
296
+
297
+ [More Information Needed]
298
+
299
+ ## Dataset Structure
300
+
301
+ ### Data Instances
302
+
303
+ An example of a data instance (with support paragraphs, in Bulgarian) is:
304
+ ```
305
+ {'answerKey': 'C',
306
+ 'id': '35dd6b52-7e71-11ea-9eb1-54bef70b159e',
307
+ 'info': {'grade': 12, 'language': 'Bulgarian', 'subject': 'Biology'},
308
+ 'question': {'choices': {'label': ['A', 'B', 'C', 'D'],
309
+ 'para': ['Това води до наследствени изменения между организмите. Мирновременните вождове са наследствени. Черният, сивият и кафявият цвят на оцветяване на тялото се определя от пигмента меланин и възниква в резултат на наследствени изменения. Тези различия, според Монтескьо, не са наследствени. Те са и важни наследствени вещи в клана. Те са били наследствени архонти и управляват демократично. Реликвите са исторически, религиозни, семейни (наследствени) и технически. Общо са направени 800 изменения. Не всички наследствени аномалии на хемоглобина са вредни, т.е. Моногенните наследствени болести, които водят до мигрена, са редки. Няма наследствени владетели. Повечето от тях са наследствени и се предават на потомството. Всичките синове са ерцхерцози на всичките наследствени земи и претенденти. През 1509 г. Фраунбергите са издигнати на наследствени имперски графове. Фамилията Валдбург заради постиженията са номинирани на „наследствени имперски трушсеси“. Фамилията Валдбург заради постиженията са номинирани на „наследствени имперски трушсеси“. Описани са единични наследствени случаи, но по-често липсва фамилна обремененост. Позициите им са наследствени и се предават в рамките на клана. Внесени са изменения в конструкцията на веригите. и са направени изменения в ходовата част. На храма са правени лоши архитектурни изменения. Изменения са предприети и вътре в двореца. Имало двама наследствени вождове. Имало двама наследствени вождове. Годишният календар, „компасът“ и биологичния часовник са наследствени и при много бозайници.',
310
+ 'Постепенно задълбочаващите се функционални изменения довеждат и до структурни изменения. Те се дължат както на растягането на кожата, така и на въздействието на хормоналните изменения върху кожната тъкан. тези изменения се долавят по-ясно. Впоследствие, той претърпява изменения. Ширината остава без изменения. След тяхното издаване се налагат изменения в първоначалния Кодекс, защото не е съобразен с направените в Дигестите изменения. Еволюционният преход се характеризира със следните изменения: Наблюдават се и сезонни изменения в теглото. Приемат се изменения и допълнения към Устава. Тук се размножават и предизвикват възпалителни изменения. Общо са направени 800 изменения. Бронирането не претърпява съществени изменения. При животните се откриват изменения при злокачествената форма. Срещат се и дегенеративни изменения в семенните каналчета. ТАВКР „Баку“ се строи по изменения проект 1143.4. Трансът се съпровожда с определени изменения на мозъчната дейност. На изменения е подложен и Светия Синод. Внесени са изменения в конструкцията на веригите. На храма са правени лоши архитектурни изменения. Оттогава стиховете претърпяват изменения няколко пъти. Настъпват съществени изменения в музикалната култура. По-късно той претърпява леки изменения. Настъпват съществени изменения в музикалната култура. Претърпява сериозни изменения само носовата надстройка. Хоризонталното брониране е оставено без изменения.',
311
+ 'Модификациите са обратими. Тези реакции са обратими. В началните стадии тези натрупвания са обратими. Всички такива ефекти са временни и обратими. Много от реакциите са обратими и идентични с тези при гликолизата. Ако в обращение има книжни пари, те са обратими в злато при поискване . Общо са направени 800 изменения. Непоследователността е представена от принципа на "симетрия", при който взаимоотношенията са разглеждани като симетрични или обратими. Откакто формулите в клетките на електронната таблица не са обратими, тази техника е с ограничена стойност. Ефектът на Пелтие-Зеебек и ефектът Томсън са обратими (ефектът на Пелтие е обратен на ефекта на Зеебек). Плазмолизата протича в три етапа, в зависимост от силата и продължителността на въздействието:\n\nПървите два етапа са обратими. Внесени са изменения в конструкцията на веригите. и са направени изменения в ходовата част. На храма са правени лоши архитектурни изменения. Изменения са предприети и вътре в двореца. Оттогава насетне екипите не са претърпявали съществени изменения. Изменения са направени и в колесника на машината. Тези изменения са обявени през октомври 1878 година. Последните изменения са внесени през януари 2009 година. В процеса на последващото проектиране са внесени някои изменения. Сериозните изменения са в края на Втората световна война. Внесени са изменения в конструкцията на погребите и подемниците. Внесени са изменения в конструкцията на погребите и подемниците. Внесени са изменения в конструкцията на погребите и подемниците. Постепенно задълбочаващите се функционални изменения довеждат и до структурни изменения.',
312
+ 'Ерозионни процеси от масов характер липсват. Обновлението в редиците на партията приема масов характер. Тя обаче няма масов характер поради спецификата на формата. Движението против десятъка придобива масов характер и в Балчишка околия. Понякога екзекутирането на „обсебените от Сатана“ взимало невероятно масов характер. Укриването на дължими като наряд продукти в селата придобива масов характер. Периодичните миграции са в повечето случаи с масов характер и са свързани със сезонните изменения в природата, а непериодичните са премествания на животни, които настъпват след пожари, замърсяване на средата, висока численост и др. Имат необратим характер. Именно по време на двувековните походи на западните рицари използването на гербовете придобива масов характер. След присъединяването на Южен Кавказ към Русия, изселването на азербайджанци от Грузия придобива масов характер. Те имат нормативен характер. Те имат установителен характер. Освобождаването на работна сила обикновено има масов характер, защото обхваща големи контингенти от носителите на труд. Валежите имат подчертано континентален характер. Имат най-често издънков характер. Приливите имат предимно полуденонощен характер. Някои от тях имат мистериален характер. Тези сведения имат случаен, епизодичен характер. Те имат сезонен или годишен характер. Временните обезпечителни мерки имат временен характер. Други имат пожелателен характер (Здравко, Слава). Ловът и събирачеството имат спомагателен характер. Фактически успяват само малко да усилят бронирането на артилерийските погреби, другите изменения носят само частен характер. Някои карикатури имат само развлекателен характер, докато други имат политически нюанси. Поемите на Хезиод имат по-приложен характер.'],
313
+ 'text': ['дължат се на фенотипни изменения',
314
+ 'имат масов характер',
315
+ 'са наследствени',
316
+ 'са обратими']},
317
+ 'stem': 'Мутационите изменения:'}}
318
+ ```
319
+
320
+ ### Data Fields
321
+
322
+ A data instance contains the following fields:
323
+ - `id`: A question ID, unique across the dataset
324
+ - `question`: the question contains the following:
325
+ - `stem`: the textual content of the question stem
326
+ - `choices`: a set of 3 to 5 candidate answers, which each have:
327
+ - `text`: the text of the answers
328
+ - `label`: a label in `['A', 'B', 'C', 'D', 'E']` used to match to the `answerKey`
329
+ - `para`: (optional) a supported paragraph from Wikipedia in the same language as the question and answer
330
+ - `answerKey`: the key corresponding to the right answer's `label`
331
+ - `info`: some additional information on the question including:
332
+ - `grade`: the school grade for the exam this question was taken from
333
+ - `subject`: a free text description of the academic subject
334
+ - `language`: the English name of the language for this question
335
+
336
+ ### Data Splits
337
+
338
+ [More Information Needed]
339
+
340
+ ## Dataset Creation
341
+
342
+ ### Curation Rationale
343
+
344
+ [More Information Needed]
345
+
346
+ ### Source Data
347
+
348
+ #### Initial Data Collection and Normalization
349
+
350
+ Eχαµs was collected from official state exams prepared by the ministries of education of various countries. These exams are taken by students graduating from high school, and often require knowledge learned through the entire course.
351
+
352
+ The questions cover a large variety of subjects and material based on the country’s education system. They cover major school subjects such as Biology, Chemistry, Geography, History, and Physics, but also highly specialized ones such as Agriculture, Geology, Informatics, as well as some applied and profiled studies.
353
+
354
+ Some countries allow students to take official examinations in several languages. This dataset provides 9,857 parallel question pairs spread across seven languages coming from Croatia (Croatian, Serbian, Italian, Hungarian), Hungary (Hungarian, German, French, Spanish, Croatian, Serbian, Italian), and North Macedonia (Macedonian, Albanian, Turkish).
355
+
356
+ For all languages in the dataset, the first step in the process of data collection was to download the PDF files per year, per subject, and per language (when parallel languages were available in the same source), convert the PDF files to text, and select those that were well formatted and followed the document structure.
357
+
358
+ Then, Regular Expressions (RegEx) were used to parse the questions, their corresponding choices and the correct answer choice. In order to ensure that all our questions are answerable using textual input only, questions that contained visual information were removed, as selected by using curated list of words such as map, table, picture, graph, etc., in the corresponding language.
359
+
360
+ #### Who are the source language producers?
361
+
362
+ [More Information Needed]
363
+
364
+ ### Annotations
365
+
366
+ #### Annotation process
367
+
368
+ [More Information Needed]
369
+
370
+ #### Who are the annotators?
371
+
372
+ [More Information Needed]
373
+
374
+ ### Personal and Sensitive Information
375
+
376
+ [More Information Needed]
377
+
378
+ ## Considerations for Using the Data
379
+
380
+ ### Social Impact of Dataset
381
+
382
+ [More Information Needed]
383
+
384
+ ### Discussion of Biases
385
+
386
+ [More Information Needed]
387
+
388
+ ### Other Known Limitations
389
+
390
+ [More Information Needed]
391
+
392
+ ## Additional Information
393
+
394
+ ### Dataset Curators
395
+
396
+ [More Information Needed]
397
+
398
+ ### Licensing Information
399
+
400
+ The dataset, which contains paragraphs from Wikipedia, is licensed under CC-BY-SA 4.0. The code in this repository is licensed according the [LICENSE file](https://raw.githubusercontent.com/mhardalov/exams-qa/main/LICENSE).
401
+
402
+ ### Citation Information
403
+
404
+ ```
405
+ @article{hardalov2020exams,
406
+ title={EXAMS: A Multi-subject High School Examinations Dataset for Cross-lingual and Multilingual Question Answering},
407
+ author={Hardalov, Momchil and Mihaylov, Todor and Dimitrina Zlatkova and Yoan Dinkov and Ivan Koychev and Preslav Nakov},
408
+ journal={arXiv preprint arXiv:2011.03080},
409
+ year={2020}
410
+ }
411
+ ```
dataset_infos.json ADDED
The diff for this file is too large to render. See raw diff
 
dummy/alignments/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f4a732a5c3029a64b96d8ff4d088df6f012cf3fa3fcd4856e6fefbe3e5f6117
3
+ size 481
dummy/crosslingual_bg/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:661d15eecfbdb71d78ddce2064ffc5393b7b88dd8220e69953d5b8defd80ba77
3
+ size 2942
dummy/crosslingual_hr/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8832ba790b04b07f3efb6e72f2fe7d79d6f65c668e474eed474d2e97f6e1be07
3
+ size 2620
dummy/crosslingual_hu/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03a3ebb479637c60ba188f93c1eabb7ae7396a075cf918e03804059ced8cea8c
3
+ size 2817
dummy/crosslingual_it/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f21c5cd9be5f99a28f9fd73bec6ed70b4a87d66c5beeae09e6acaa50e345247b
3
+ size 2989
dummy/crosslingual_mk/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97fc94d2a6de4371a29a862d45874725d5d81a2a5d18561bbd78834deafedfac
3
+ size 2769
dummy/crosslingual_pl/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c403702c5582b5ad90775c7f5370f2f406d9f8c412a689cf32a71a544a935050
3
+ size 2680
dummy/crosslingual_pt/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:303807206c42f1987e6bc7a4a2eb6d2007c0a7432f750efa9bcef59a7d455e1c
3
+ size 2922
dummy/crosslingual_sq/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56be4eaf890dcaaa3d6c63327ebf86e6218971370bb4a69e7f103a04f9ef74af
3
+ size 2439
dummy/crosslingual_sr/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c19d21a24c71372c04e07fd0b59a8d416b6efbacafb7d236d3654379110187d1
3
+ size 3107
dummy/crosslingual_test/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9e472e014cee8f6376709f624fe99a7f47e31d2e1e2554f27677ee170479e68
3
+ size 1413
dummy/crosslingual_tr/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f8164bfd09b354fd8c0c90fe9fbf5ceae40e8973602ffb70830b6f49ab4248e
3
+ size 2940
dummy/crosslingual_vi/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fab1c94a2fa7c2dbbb53b98e3edac62cc10cf4a38597e04ae5437410bf5beb2a
3
+ size 3811
dummy/crosslingual_with_para_bg/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63b905b968619a79436edea803bc60b9961c8897615962a60eea8241db48a758
3
+ size 29551
dummy/crosslingual_with_para_hr/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8bffe9a0414ff121f112b6e790fb3c8f58acfd9dea5ef5bfb148cf97540b27af
3
+ size 23948
dummy/crosslingual_with_para_hu/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:788fe315fdf96fafc308406b9733751cbfbc51f66503d5e5a90546918d0b85cf
3
+ size 20635
dummy/crosslingual_with_para_it/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:339d48e7cc117c3622a47c489d2c5dbd2c3255e2a45f39901e913145c6f61b75
3
+ size 27435
dummy/crosslingual_with_para_mk/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5651e645711b16a13b841987129e5a26d78c2f2af971573748254b76f5389dfb
3
+ size 35626
dummy/crosslingual_with_para_pl/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d00a9c0c2e1b95d464020481da925225b4076d791a22cabd2078a5763b4f84cd
3
+ size 20216
dummy/crosslingual_with_para_pt/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:782ac7e700f4676e84f39940add0729df057e4bc6957264abf249d3052784955
3
+ size 34735
dummy/crosslingual_with_para_sq/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cc34f9f57f57ee27e00b891f894f3dad89e35143fb3bdaac2fdcf52289bc623
3
+ size 26169
dummy/crosslingual_with_para_sr/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e62991f40ca22b310e194c1ec24476cc783317ec743ae9bafed873a0570f601f
3
+ size 29392
dummy/crosslingual_with_para_test/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79dc8934d8b0b16ae427f43a1c4cee8ce9c852063df1fec91e9aea6d2243d4a3
3
+ size 24421
dummy/crosslingual_with_para_tr/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16164108e5a67f2f045f351f1667bf0b82c44dfb79f5d91bba98f7a058811b83
3
+ size 23250
dummy/crosslingual_with_para_vi/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f71fecd1f713e06c03581d5b67ee97f7e933fdadb7f2d2fa48a72cb78c914c63
3
+ size 33041
dummy/multilingual/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fd791f57b46e9233a330e68e6f553e0bd291af03dc6b9b75d5460d709f3a48b
3
+ size 4121
dummy/multilingual_with_para/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2bfb866556e45f7caae67198e1f962dee56f1d52741d9830492aea27168ef11b
3
+ size 53992
exams.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """EXAMS: a benchmark dataset for multilingual and cross-lingual question answering"""
16
+
17
+ from __future__ import absolute_import, division, print_function
18
+
19
+ import json
20
+ import os
21
+
22
+ import datasets
23
+
24
+
25
# BibTeX entry for citing the EXAMS paper (arXiv:2011.03080).
# NOTE(review): "Nvakov" looks like a typo for "Nakov", but the string is kept
# byte-identical to the published script; fix upstream if confirmed.
_CITATION = """\
@article{hardalov2020exams,
title={EXAMS: A Multi-subject High School Examinations Dataset for Cross-lingual and Multilingual Question Answering},
author={Hardalov, Momchil and Mihaylov, Todor and Dimitrina Zlatkova and Yoan Dinkov and Ivan Koychev and Preslav Nvakov},
journal={arXiv preprint arXiv:2011.03080},
year={2020}
}
"""

# Human-readable summary displayed with the dataset.
_DESCRIPTION = """\
EXAMS is a benchmark dataset for multilingual and cross-lingual question answering from high school examinations.
It consists of more than 24,000 high-quality high school exam questions in 16 languages,
covering 8 language families and 24 school subjects from Natural Sciences and Social Sciences, among others.
"""

# Upstream project repository hosting the raw data files downloaded below.
_HOMEPAGE = "https://github.com/mhardalov/exams-qa"

# License identifier for the dataset content.
_LICENSE = "CC-BY-SA-4.0"
43
+
44
# (name, URL) pairs for every downloadable resource in the upstream repository.
# The names mirror the builder-config names, suffixed with the split they feed;
# the list is folded into the `_URLs` dict at the bottom (insertion order kept).
_URLS_LIST = [
    ("alignments", "https://github.com/mhardalov/exams-qa/raw/main/data/exams/parallel_questions.jsonl"),
    # Unified multilingual splits, plain and with Wikipedia support paragraphs.
    (
        "multilingual_train",
        "https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/train.jsonl.tar.gz",
    ),
    ("multilingual_dev", "https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/dev.jsonl.tar.gz"),
    ("multilingual_test", "https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/test.jsonl.tar.gz"),
    (
        "multilingual_with_para_train",
        "https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/with_paragraphs/train_with_para.jsonl.tar.gz",
    ),
    (
        "multilingual_with_para_dev",
        "https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/with_paragraphs/dev_with_para.jsonl.tar.gz",
    ),
    (
        "multilingual_with_para_test",
        "https://github.com/mhardalov/exams-qa/raw/main/data/exams/multilingual/with_paragraphs/test_with_para.jsonl.tar.gz",
    ),
]

# Languages that have dedicated cross-lingual train/dev files upstream.
_CROSS_LANGUAGES = ["bg", "hr", "hu", "it", "mk", "pl", "pt", "sq", "sr", "tr", "vi"]

# Shared cross-lingual test sets (same two variants as above).
_URLS_LIST += [
    ("crosslingual_test", "https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/test.jsonl.tar.gz"),
    (
        "crosslingual_with_para_test",
        "https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/test_with_para.jsonl.tar.gz",
    ),
]

# Per-language train/dev files; for each split the plain file comes first,
# then the "with paragraphs" variant — matching the original ordering.
for _lang in _CROSS_LANGUAGES:
    for _split in ("train", "dev"):
        _URLS_LIST.append(
            (
                f"crosslingual_{_lang}_{_split}",
                f"https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/{_split}_{_lang}.jsonl.tar.gz",
            )
        )
        _URLS_LIST.append(
            (
                f"crosslingual_with_para_{_lang}_{_split}",
                f"https://github.com/mhardalov/exams-qa/raw/main/data/exams/cross-lingual/with_paragraphs/{_split}_{_lang}_with_para.jsonl.tar.gz",
            )
        )

_URLs = dict(_URLS_LIST)
96
+
97
+
98
class ExamsConfig(datasets.BuilderConfig):
    """BuilderConfig for the EXAMS dataset.

    Args:
        lang: Language code of the subset ("" for the alignments config,
            "all" for the multilingual/crosslingual-test configs).
        with_para: Whether the subset ships Wikipedia support paragraphs.
    """

    def __init__(self, lang, with_para, **kwargs):
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.lang = lang
        # Filename suffix used when locating the jsonl data files.
        if with_para:
            self.with_para = "_with_para"
        else:
            self.with_para = ""
103
+
104
+
105
class Exams(datasets.GeneratorBasedBuilder):
    """EXAMS: a multilingual high school examination QA dataset.

    Configs:
        - "alignments": question-ID alignments across languages.
        - "multilingual[_with_para]": unified train/dev/test splits.
        - "crosslingual[_with_para]_test": shared cross-lingual test set.
        - "crosslingual[_with_para]_<lang>": per-language train/dev splits.
    """

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = ExamsConfig
    BUILDER_CONFIGS = [
        ExamsConfig(
            lang="",
            with_para=False,
            name="alignments",
            description="loads the alignment between question IDs across languages",
        ),
        ExamsConfig(
            lang="all",
            with_para=False,
            name="multilingual",
            description="Loads the unified multilingual train/dev/test split",
        ),
        ExamsConfig(
            lang="all",
            with_para=True,
            name="multilingual_with_para",
            description="Loads the unified multilingual train/dev/test split with Wikipedia support paragraphs",
        ),
        ExamsConfig(
            lang="all", with_para=False, name="crosslingual_test", description="Loads crosslingual test set only"
        ),
        ExamsConfig(
            lang="all",
            with_para=True,
            name="crosslingual_with_para_test",
            description="Loads crosslingual test set only with Wikipedia support paragraphs",
        ),
    ]
    # One plain and one with-paragraphs config per cross-lingual language.
    for ln in _CROSS_LANGUAGES:
        BUILDER_CONFIGS += [
            ExamsConfig(
                lang=ln,
                with_para=False,
                name=f"crosslingual_{ln}",
                description=f"Loads crosslingual train and dev set for {ln}",
            ),
            ExamsConfig(
                lang=ln,
                with_para=True,
                name=f"crosslingual_with_para_{ln}",
                description=f"Loads crosslingual train and dev set for {ln} with Wikipedia support paragraphs",
            ),
        ]

    DEFAULT_CONFIG_NAME = (
        "multilingual_with_para"  # It's not mandatory to have a default configuration. Just use one if it make sense.
    )

    def _info(self):
        """Return dataset metadata; the feature schema depends on the config."""
        if self.config.name == "alignments":
            # Alignment records map one source question ID to its translations.
            features = datasets.Features(
                {
                    "source_id": datasets.Value("string"),
                    "target_id_list": datasets.Sequence(datasets.Value("string")),
                }
            )
        else:
            # All QA configs share one schema; "para" is empty when the config
            # has no support paragraphs.
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question": {
                        "stem": datasets.Value("string"),
                        "choices": datasets.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "label": datasets.Value("string"),
                                "para": datasets.Value("string"),
                            }
                        ),
                    },
                    "answerKey": datasets.Value("string"),
                    "info": {
                        "grade": datasets.Value("int32"),
                        "subject": datasets.Value("string"),
                        "language": datasets.Value("string"),
                    },
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,  # differs between the alignments and QA configs
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download all data files and map the current config to its splits."""
        data_dir = dl_manager.download_and_extract(_URLs)
        if self.config.name == "alignments":
            # Single plain-jsonl file, exposed as one "full" split.
            return [
                datasets.SplitGenerator(
                    name="full",
                    gen_kwargs={
                        "filepath": data_dir["alignments"],
                    },
                ),
            ]
        elif self.config.name in ["multilingual", "multilingual_with_para"]:
            # Each split is a tar.gz archive containing one jsonl file.
            return [
                datasets.SplitGenerator(
                    name=spl_enum,
                    gen_kwargs={
                        "filepath": os.path.join(
                            data_dir[f"{self.config.name}_{spl}"], f"{spl}{self.config.with_para}.jsonl"
                        ),
                    },
                )
                for spl, spl_enum in [
                    ("train", datasets.Split.TRAIN),
                    ("dev", datasets.Split.VALIDATION),
                    ("test", datasets.Split.TEST),
                ]
            ]
        elif self.config.name in ["crosslingual_test", "crosslingual_with_para_test"]:
            # The cross-lingual configs share a single test set.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": os.path.join(
                            data_dir[self.config.name], f"test{self.config.with_para}.jsonl"
                        ),
                    },
                ),
            ]
        else:
            # Per-language cross-lingual configs: train and dev only.
            return [
                datasets.SplitGenerator(
                    name=spl_enum,
                    gen_kwargs={
                        "filepath": os.path.join(
                            data_dir[f"{self.config.name}_{spl}"],
                            f"{spl}_{self.config.lang}{self.config.with_para}.jsonl",
                        )
                    },
                )
                for spl, spl_enum in [
                    ("train", datasets.Split.TRAIN),
                    ("dev", datasets.Split.VALIDATION),
                ]
            ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from one jsonl file.

        Fix: the file is now opened with a context manager so the handle is
        always closed (the previous version leaked it).
        """
        with open(filepath, encoding="utf-8") as f:
            if self.config.name == "alignments":
                for id_, line in enumerate(f):
                    line_dict = json.loads(line.strip())
                    # Each record is a single {source_id: [target_ids]} mapping.
                    in_id, out_list = list(line_dict.items())[0]
                    yield id_, {"source_id": in_id, "target_id_list": out_list}
            else:
                for id_, line in enumerate(f):
                    line_dict = json.loads(line.strip())
                    # Configs without support paragraphs lack "para"; default it
                    # to "" so every config matches the declared schema.
                    for choice in line_dict["question"]["choices"]:
                        choice["para"] = choice.get("para", "")
                    yield id_, line_dict