holylovenia committed on
Commit
8918460
1 Parent(s): a154870

Upload tydiqa.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. tydiqa.py +437 -0
tydiqa.py ADDED
@@ -0,0 +1,437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ import datasets
4
+
5
+ from seacrowd.utils import schemas
6
+ from seacrowd.utils.configs import SEACrowdConfig
7
+ from seacrowd.utils.constants import Licenses, Tasks
8
+
9
# BibTeX for the original TyDi QA paper (TACL 2020) and the IndoNLG paper,
# which defines the TyDiQA-ID derivative subset loaded by this builder.
_CITATION = r"""\
@article{clark-etal-2020-tydi,
    title = "{T}y{D}i {QA}: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages",
    author = "Clark, Jonathan H. and
      Choi, Eunsol and
      Collins, Michael and
      Garrette, Dan and
      Kwiatkowski, Tom and
      Nikolaev, Vitaly and
      Palomaki, Jennimaria",
    editor = "Johnson, Mark and
      Roark, Brian and
      Nenkova, Ani",
    journal = "Transactions of the Association for Computational Linguistics",
    volume = "8",
    year = "2020",
    address = "Cambridge, MA",
    publisher = "MIT Press",
    url = "https://aclanthology.org/2020.tacl-1.30",
    doi = "10.1162/tacl_a_00317",
    pages = "454--470",
    abstract = "Confidently making progress on multilingual modeling requires challenging, trustworthy evaluations.
    We present TyDi QA{---}a question answering dataset covering 11 typologically diverse languages with 204K
    question-answer pairs. The languages of TyDi QA are diverse with regard to their typology{---}the set of
    linguistic features each language expresses{---}such that we expect models performing well on this set to
    generalize across a large number of the world{'}s languages. We present a quantitative analysis of the data
    quality and example-level qualitative linguistic analyses of observed language phenomena that would not be found
    in English-only corpora. To provide a realistic information-seeking task and avoid priming effects, questions are
    written by people who want to know the answer, but don{'}t know the answer yet, and the data is collected directly
    in each language without the use of translation.",
}

@inproceedings{cahyawijaya-etal-2021-indonlg,
    title = "{I}ndo{NLG}: Benchmark and Resources for Evaluating {I}ndonesian Natural Language Generation",
    author = "Cahyawijaya, Samuel and
      Winata, Genta Indra and
      Wilie, Bryan and
      Vincentio, Karissa and
      Li, Xiaohong and
      Kuncoro, Adhiguna and
      Ruder, Sebastian and
      Lim, Zhi Yuan and
      Bahar, Syafri and
      Khodra, Masayu and
      Purwarianti, Ayu and
      Fung, Pascale",
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.699",
    doi = "10.18653/v1/2021.emnlp-main.699",
    pages = "8875--8898"
}
"""

_DATASETNAME = "tydiqa"

_DESCRIPTION = """\
TyDi QA is a question answering dataset covering 11 typologically diverse languages with 204K question-answer pairs.
The languages of TyDi QA are diverse with regard to their typology -- the set of linguistic features that each language
expresses -- such that we expect models performing well on this set to generalize across a large number of the languages
in the world. It contains language phenomena that would not be found in English-only corpora. To provide a realistic
information-seeking task and avoid priming effects, questions are written by people who want to know the answer, but
don’t know the answer yet, (unlike SQuAD and its descendents) and the data is collected directly in each language
without the use of translation (unlike MLQA and XQuAD).
"""

_HOMEPAGE = "https://github.com/google-research-datasets/tydiqa"
_LICENSE = Licenses.APACHE_2_0.value
_HF_URL = "https://huggingface.co/datasets/tydiqa"
_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]
# SEA languages covered by this loader (ISO 639-3): Indonesian and Thai.
_LANGUAGES = ["ind", "tha"]
_LOCAL = False
# Upstream release versions: v1.0 for the primary tasks (SelectP / MinSpan),
# v1.1 for the secondary Gold Passage task.
_SOURCE_VERSION = "1.0.0"
_SOURCE_VERSION_P = "1.0.0"  # primary-task source version
_SOURCE_VERSION_S = "1.1.0"  # secondary (GoldP) source version
_SEACROWD_VERSION = "2024.06.20"

# Download locations on the official TyDi QA GCS bucket.
_URL = "https://storage.googleapis.com/tydiqa/"
_PRIMARY_URLS = {
    "train": _URL + "v1.0/tydiqa-v1.0-train.jsonl.gz",
    "dev": _URL + "v1.0/tydiqa-v1.0-dev.jsonl.gz",
}
_SECONDARY_URLS = {
    "train": _URL + "v1.1/tydiqa-goldp-v1.1-train.json",
    "dev": _URL + "v1.1/tydiqa-goldp-v1.1-dev.json",
}

# Human-readable task descriptions reused as config descriptions below.
_SELECTP_DESP = """Passage selection task (SelectP): Given a list of the passages in the article, return either (a) the index of
    the passage that answers the question or (b) NULL if no such passage exists.
"""
_MINSPAN_DESP = """Minimal answer span task (MinSpan): Given the full text of an article, return one of (a) the start and end
    byte indices of the minimal span that completely answers the question; (b) YES or NO if the question requires
    a yes/no answer and we can draw a conclusion from the passage; (c) NULL if it is not possible to produce a
    minimal answer for this question."""
_GOLDP_DESP = """Gold passage task (GoldP): Given a passage that is guaranteed to contain the
    answer, predict the single contiguous span of characters that answers the question. This is more similar to
    existing reading comprehension datasets (as opposed to the information-seeking task outlined above).
"""
_ID_DESP = """{I}ndo{NLG}: Benchmark and Resources for Evaluating {I}ndonesian Natural Language Generation, is a benchmark
    for evaluating Indonesian natural language generation (NLG) systems. The question-answer pairs are collected
    for each language without using translation services. It uses the Indonesian data from the secondary Gold
    passage task of the TyDiQA dataset. As the original dataset only provides training and validation sets,
    TydiQA-ID randomly split off 15% of the training data and use it as the test set.
"""
116
+
117
+
118
def config_constructor(subset_id, schema, desc, version):
    """Build one SEACrowdConfig named ``{_DATASETNAME}_{subset_id}_{schema}``.

    Args:
        subset_id: task subset identifier (e.g. "selectp", "goldp_ind", "id").
        schema: either "source" or "seacrowd_qa".
        desc: human-readable description attached to the config.
        version: version string passed to ``datasets.Version``.
    """
    config_name = f"{_DATASETNAME}_{subset_id}_{schema}"
    return SEACrowdConfig(
        name=config_name,
        description=desc,
        version=datasets.Version(version),
        schema=schema,
        subset_id=subset_id,
    )
120
+
121
+
122
class TydiqaDataset(datasets.GeneratorBasedBuilder):
    """
    SEACrowd dataloader for TyDi QA, a question answering dataset covering 11 typologically
    diverse languages with 204K question-answer pairs. The languages of TyDi QA are diverse
    with regard to their typology. This loader additionally exposes per-language splits of
    the primary (SelectP/MinSpan) and secondary (GoldP) tasks for the SEA languages
    Indonesian and Thai, plus the TyDiQA-ID subset defined by IndoNLG.
    """

    # One config per (task subset, schema) pair. "source" mirrors the raw
    # upstream format; "seacrowd_qa" maps onto the shared SEACrowd QA schema.
    BUILDER_CONFIGS = [
        # source schema
        # selectp source schema
        config_constructor(subset_id="selectp", schema="source", desc=_SELECTP_DESP, version=_SOURCE_VERSION_P),
        config_constructor(subset_id="selectp_ind", schema="source", desc=_SELECTP_DESP, version=_SOURCE_VERSION_P),
        config_constructor(subset_id="selectp_tha", schema="source", desc=_SELECTP_DESP, version=_SOURCE_VERSION_P),
        # minspan source schema
        config_constructor(subset_id="minspan", schema="source", desc=_MINSPAN_DESP, version=_SOURCE_VERSION_P),
        config_constructor(subset_id="minspan_ind", schema="source", desc=_MINSPAN_DESP, version=_SOURCE_VERSION_P),
        config_constructor(subset_id="minspan_tha", schema="source", desc=_MINSPAN_DESP, version=_SOURCE_VERSION_P),
        # goldp source schema
        config_constructor(subset_id="goldp", schema="source", desc=_GOLDP_DESP, version=_SOURCE_VERSION_S),
        config_constructor(subset_id="goldp_ind", schema="source", desc=_GOLDP_DESP, version=_SOURCE_VERSION_S),
        # tydiqa_id source schema
        config_constructor(subset_id="id", schema="source", desc=_ID_DESP, version=_SOURCE_VERSION_P),
        # seacrowd schema
        # selectp seacrowd schema
        config_constructor(subset_id="selectp", schema="seacrowd_qa", desc=_SELECTP_DESP, version=_SEACROWD_VERSION),
        config_constructor(subset_id="selectp_ind", schema="seacrowd_qa", desc=_SELECTP_DESP, version=_SEACROWD_VERSION),
        config_constructor(subset_id="selectp_tha", schema="seacrowd_qa", desc=_SELECTP_DESP, version=_SEACROWD_VERSION),
        # minspan seacrowd schema
        config_constructor(subset_id="minspan", schema="seacrowd_qa", desc=_MINSPAN_DESP, version=_SEACROWD_VERSION),
        config_constructor(subset_id="minspan_ind", schema="seacrowd_qa", desc=_MINSPAN_DESP, version=_SEACROWD_VERSION),
        config_constructor(subset_id="minspan_tha", schema="seacrowd_qa", desc=_MINSPAN_DESP, version=_SEACROWD_VERSION),
        # goldp seacrowd schema
        config_constructor(subset_id="goldp", schema="seacrowd_qa", desc=_GOLDP_DESP, version=_SEACROWD_VERSION),
        config_constructor(subset_id="goldp_ind", schema="seacrowd_qa", desc=_GOLDP_DESP, version=_SEACROWD_VERSION),
        # tydiqa_id seacrowd schema
        config_constructor(subset_id="id", schema="seacrowd_qa", desc=_ID_DESP, version=_SEACROWD_VERSION),
    ]
    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_id_source"

    def _info(self):
        """Return the DatasetInfo (feature schema) for the selected config.

        The schema is chosen purely by substring matching on the config name:
        primary tasks (selectp/minspan) use the byte-offset JSONL layout,
        secondary tasks (goldp / tydiqa_id) use the SQuAD-style layout.
        """
        if ("selectp" in self.config.name) or ("minspan" in self.config.name):
            if "source" in self.config.name:
                # Raw primary-task record: passage candidates and annotations
                # are expressed as byte offsets into document_plaintext.
                features = datasets.Features(
                    {
                        "passage_answer_candidates": datasets.features.Sequence(
                            {
                                "plaintext_start_byte": datasets.Value("int32"),
                                "plaintext_end_byte": datasets.Value("int32"),
                            }
                        ),
                        "question_text": datasets.Value("string"),
                        "document_title": datasets.Value("string"),
                        "language": datasets.Value("string"),
                        "annotations": datasets.features.Sequence(
                            {
                                "passage_answer_candidate_index": datasets.Value("int32"),
                                "minimal_answers_start_byte": datasets.Value("int32"),
                                "minimal_answers_end_byte": datasets.Value("int32"),
                                "yes_no_answer": datasets.Value("string"),
                            }
                        ),
                        "document_plaintext": datasets.Value("string"),
                        "document_url": datasets.Value("string"),
                    }
                )
            elif "seacrowd" in self.config.name:
                # NOTE(review): this assigns the shared schemas.qa_features
                # object and then mutates it in place by adding "meta" —
                # confirm qa_features is not reused unmutated elsewhere.
                features = schemas.qa_features
                features["meta"] = {
                    "passage_answer_candidates": datasets.features.Sequence(
                        {
                            "plaintext_start_byte": datasets.Value("int32"),
                            "plaintext_end_byte": datasets.Value("int32"),
                        }
                    ),
                    "annotations": datasets.features.Sequence(
                        {
                            "passage_answer_candidate_index": datasets.Value("int32"),
                            "minimal_answers_start_byte": datasets.Value("int32"),
                            "minimal_answers_end_byte": datasets.Value("int32"),
                            "yes_no_answer": datasets.Value("string"),
                        }
                    ),
                    "language": datasets.Value("string"),
                }

        elif ("goldp" in self.config.name) or ("tydiqa_id" in self.config.name):
            if "source" in self.config.name:
                # SQuAD-style record for the secondary (gold passage) task.
                features = datasets.Features(
                    {
                        "id": datasets.Value("string"),
                        "title": datasets.Value("string"),
                        "context": datasets.Value("string"),
                        "question": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    }
                )
            elif "seacrowd" in self.config.name:
                # NOTE(review): same in-place mutation of the shared
                # schemas.qa_features object as above.
                features = schemas.qa_features
                features["meta"] = {
                    "answer_start": datasets.Sequence(datasets.Value("int32")),
                }
        # NOTE(review): a config name matching none of the branches above
        # would leave `features` unbound and raise NameError here; all
        # declared BUILDER_CONFIGS do match one branch.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Both archives are downloaded regardless of config; each branch
        # below only reads the one it needs.
        primary_downloaded = dl_manager.download_and_extract(_PRIMARY_URLS)
        secondary_downloaded = dl_manager.download_and_extract(_SECONDARY_URLS)

        if ("selectp" in self.config.name) or ("minspan" in self.config.name):
            # Primary tasks: upstream train/dev JSONL files map directly.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": primary_downloaded["train"]},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": primary_downloaded["dev"]},
                ),
            ]

        elif "goldp" in self.config.name:
            # Secondary (GoldP) task: upstream train/dev JSON files.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": secondary_downloaded["train"]},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": secondary_downloaded["dev"]},
                ),
            ]
        elif "tydiqa_id" in self.config.name:
            # TyDiQA-ID: train and test are both carved out of the upstream
            # train file (split inside _generate_examples); upstream dev
            # becomes validation.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": secondary_downloaded["train"], "split": "train"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": secondary_downloaded["train"], "split": "test"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": secondary_downloaded["dev"], "split": "validation"},
                ),
            ]

    def _generate_examples(self, filepath, split=None):
        """Yields examples.

        Args:
            filepath: path to the downloaded JSONL (primary) or JSON
                (secondary) file for this split.
            split: only used by the tydiqa_id configs to carve train/test
                out of the same upstream train file.
        """

        if ("selectp" in self.config.name) or ("minspan" in self.config.name):
            # Primary tasks: one JSON object per line.
            with open(filepath, encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    data = json.loads(row)
                    passages = data["passage_answer_candidates"]
                    end_byte = [passage["plaintext_end_byte"] for passage in passages]
                    start_byte = [passage["plaintext_start_byte"] for passage in passages]
                    title = data["document_title"]
                    lang = data["language"]
                    question = data["question_text"]
                    annotations = data["annotations"]
                    yes_no_answers = [annotation["yes_no_answer"] for annotation in annotations]
                    min_answers_end_byte = [annotation["minimal_answer"]["plaintext_end_byte"] for annotation in annotations]
                    min_answers_start_byte = [annotation["minimal_answer"]["plaintext_start_byte"] for annotation in annotations]
                    passage_cand_answers = [annotation["passage_answer"]["candidate_index"] for annotation in annotations]
                    doc = data["document_plaintext"]
                    url = data["document_url"]
                    # _ind / _tha configs keep only rows whose "language"
                    # field matches; other rows are skipped (their line
                    # index id_ is still consumed).
                    if (self.config.name == "tydiqa_selectp_source") or (self.config.name == "tydiqa_minspan_source"):
                        yield id_, primary_source_helper(id_, start_byte, end_byte, question, title, lang, passage_cand_answers, min_answers_start_byte, min_answers_end_byte, yes_no_answers, doc, url)
                    elif (self.config.name == "tydiqa_selectp_ind_source") or (self.config.name == "tydiqa_minspan_ind_source"):
                        if lang == "indonesian":
                            yield id_, primary_source_helper(id_, start_byte, end_byte, question, title, lang, passage_cand_answers, min_answers_start_byte, min_answers_end_byte, yes_no_answers, doc, url)
                    elif (self.config.name == "tydiqa_selectp_tha_source") or (self.config.name == "tydiqa_minspan_tha_source"):
                        if lang == "thai":
                            yield id_, primary_source_helper(id_, start_byte, end_byte, question, title, lang, passage_cand_answers, min_answers_start_byte, min_answers_end_byte, yes_no_answers, doc, url)
                    # seacrowd
                    elif (self.config.name == "tydiqa_selectp_seacrowd_qa") or (self.config.name == "tydiqa_minspan_seacrowd_qa"):
                        yield id_, primary_seacrowd_helper(id_, title, question, doc, start_byte, end_byte, passage_cand_answers, min_answers_start_byte, min_answers_end_byte, yes_no_answers, lang)
                    elif (self.config.name == "tydiqa_selectp_ind_seacrowd_qa") or (self.config.name == "tydiqa_minspan_ind_seacrowd_qa"):
                        if lang == "indonesian":
                            yield id_, primary_seacrowd_helper(id_, title, question, doc, start_byte, end_byte, passage_cand_answers, min_answers_start_byte, min_answers_end_byte, yes_no_answers, lang)
                    elif (self.config.name == "tydiqa_selectp_tha_seacrowd_qa") or (self.config.name == "tydiqa_minspan_tha_seacrowd_qa"):
                        if lang == "thai":
                            yield id_, primary_seacrowd_helper(id_, title, question, doc, start_byte, end_byte, passage_cand_answers, min_answers_start_byte, min_answers_end_byte, yes_no_answers, lang)
                    else:
                        raise ValueError(f"No configs to match {self.config.name} in primary_task")

        elif ("goldp" in self.config.name) or ("tydiqa_id" in self.config.name):
            # Secondary task: one SQuAD-style JSON document.
            # NOTE(review): parenthesized `with` statement requires
            # Python >= 3.9 — confirm the project's minimum version.
            with (open(filepath, encoding="utf-8") as f):
                data = json.load(f)
                # Running count of Indonesian examples, used by the
                # tydiqa_id configs to split the upstream train file:
                # the first 855 matched examples (counter < 856) become
                # the test split, the rest stay in train (~15% held out,
                # per _ID_DESP).
                tydiqa_id_num = 0
                for article in data["data"]:
                    title = article.get("title", "").strip()
                    for paragraph in article["paragraphs"]:
                        context = paragraph["context"].strip()
                        for qa in paragraph["qas"]:
                            question = qa["question"].strip()
                            id_ = qa["id"]
                            answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                            answers = [answer["text"].strip() for answer in qa["answers"]]
                            if self.config.name == "tydiqa_goldp_source":
                                yield id_, second_source_helper(id_, title, context, question, answer_starts, answers)

                            elif self.config.name == "tydiqa_goldp_ind_source":
                                # Indonesian examples are identified by their
                                # upstream id prefix.
                                if id_.startswith("indonesian"):
                                    yield id_, second_source_helper(id_, title, context, question, answer_starts, answers)
                            elif self.config.name == "tydiqa_id_source":
                                if id_.startswith("indonesian"):
                                    tydiqa_id_num += 1
                                    if split == "train" and tydiqa_id_num >= 856:
                                        yield id_, second_source_helper(id_, title, context, question, answer_starts, answers)
                                    if split == "test" and tydiqa_id_num < 856:
                                        yield id_, second_source_helper(id_, title, context, question, answer_starts, answers)
                                    if split == "validation":
                                        yield id_, second_source_helper(id_, title, context, question, answer_starts, answers)

                            elif self.config.name == "tydiqa_goldp_seacrowd_qa":
                                yield id_, second_seacrowd_helper(id_, question, context, answers, answer_starts)
                            elif self.config.name == "tydiqa_goldp_ind_seacrowd_qa":
                                if id_.startswith("indonesian"):
                                    yield id_, second_seacrowd_helper(id_, question, context, answers, answer_starts)
                            elif self.config.name == "tydiqa_id_seacrowd_qa":
                                if id_.startswith("indonesian"):
                                    tydiqa_id_num += 1
                                    if split == "train" and tydiqa_id_num >= 856:
                                        yield id_, second_seacrowd_helper(id_, question, context, answers, answer_starts)
                                    if split == "test" and tydiqa_id_num < 856:
                                        yield id_, second_seacrowd_helper(id_, question, context, answers, answer_starts)
                                    if split == "validation":
                                        yield id_, second_seacrowd_helper(id_, question, context, answers, answer_starts)
                            else:
                                raise ValueError(f"No configs to match {self.config.name} in secondary_task")
365
+
366
+
367
def primary_source_helper(id_, start_byte, end_byte, question, title, lang, passage_cand_answers, min_answers_start_byte, min_answers_end_byte, yes_no_answers, doc, url):
    """Assemble one source-schema record for the primary (SelectP/MinSpan) tasks.

    Note: ``id_`` is accepted for signature symmetry with the other helpers
    but is not stored in the returned record.
    """
    candidate_spans = {
        "plaintext_start_byte": start_byte,
        "plaintext_end_byte": end_byte,
    }
    annotation_block = {
        "passage_answer_candidate_index": passage_cand_answers,
        "minimal_answers_start_byte": min_answers_start_byte,
        "minimal_answers_end_byte": min_answers_end_byte,
        "yes_no_answer": yes_no_answers,
    }
    record = {
        "passage_answer_candidates": candidate_spans,
        "question_text": question,
        "document_title": title,
        "language": lang,
        "annotations": annotation_block,
        "document_plaintext": doc,
        "document_url": url,
    }
    return record
385
+
386
+
387
def primary_seacrowd_helper(id_, title, question, doc, start_byte, end_byte, passage_cand_answers, min_answers_start_byte, min_answers_end_byte, yes_no_answers, lang):
    """Assemble one seacrowd_qa record for the primary (SelectP/MinSpan) tasks.

    The byte-offset annotations have no direct slot in the shared QA schema,
    so they are carried under ``meta``; ``choices`` and ``answer`` hold a
    single empty string as placeholders.
    """
    meta_block = {
        "passage_answer_candidates": {
            "plaintext_start_byte": start_byte,
            "plaintext_end_byte": end_byte,
        },
        "annotations": {
            "passage_answer_candidate_index": passage_cand_answers,
            "minimal_answers_start_byte": min_answers_start_byte,
            "minimal_answers_end_byte": min_answers_end_byte,
            "yes_no_answer": yes_no_answers,
        },
        "language": lang,
    }
    record = {
        "id": str(id_),
        "question_id": title,
        "document_id": title,
        "question": question,
        "type": "multiple_choice",
        "choices": [""],
        "context": doc,
        "answer": [""],
        "meta": meta_block,
    }
    return record
411
+
412
+
413
def second_source_helper(id_, title, context, question, answer_starts, answers):
    """Assemble one SQuAD-style source record for the secondary (GoldP / TyDiQA-ID) task."""
    answer_block = {
        "answer_start": answer_starts,
        "text": answers,
    }
    return {
        "title": title,
        "context": context,
        "question": question,
        "id": id_,
        "answers": answer_block,
    }
424
+
425
+
426
def second_seacrowd_helper(id_, question, context, answers, answer_starts):
    """Assemble one seacrowd_qa record for the secondary (GoldP / TyDiQA-ID) task.

    The upstream example id doubles as question and document id; answer start
    offsets are carried under ``meta``.
    """
    record = dict.fromkeys(["id", "question_id", "document_id"], id_)
    record["question"] = question
    record["type"] = "abstractive"
    record["choices"] = []
    record["context"] = context
    record["answer"] = answers
    record["meta"] = {"answer_start": answer_starts}
    return record