Modalities: Text
Formats: parquet
Sub-tasks: extractive-qa
Languages: English
Libraries: Datasets, Dask
albertvillanova committed
Commit 241b331
1 parent: df02e04

Delete loading script
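With the loading script removed, the dataset is served from the auto-converted Parquet files, so load_dataset no longer executes any remote code. A minimal sketch of loading it after this change (assuming this repository's "mrqa" dataset ID on the Hub):

from datasets import load_dataset

# Loads directly from the Parquet files; no loading script runs.
# "mrqa" is assumed to be this dataset's Hub ID.
dataset = load_dataset("mrqa")

print(dataset)  # DatasetDict with train, validation, and test splits
print(dataset["train"][0]["question"])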

Files changed (1)
  1. mrqa.py +0 -196
mrqa.py DELETED
@@ -1,196 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""MRQA 2019 Shared task dataset."""
-
-
-import json
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{fisch2019mrqa,
-    title={{MRQA} 2019 Shared Task: Evaluating Generalization in Reading Comprehension},
-    author={Adam Fisch and Alon Talmor and Robin Jia and Minjoon Seo and Eunsol Choi and Danqi Chen},
-    booktitle={Proceedings of 2nd Machine Reading for Reading Comprehension (MRQA) Workshop at EMNLP},
-    year={2019},
-}
-"""
-
-_DESCRIPTION = """\
-The MRQA 2019 Shared Task focuses on generalization in question answering.
-An effective question answering system should do more than merely
-interpolate from the training set to answer test examples drawn
-from the same distribution: it should also be able to extrapolate
-to out-of-distribution examples — a significantly harder challenge.
-
-The dataset is a collection of 18 existing QA dataset (carefully selected
-subset of them) and converted to the same format (SQuAD format). Among
-these 18 datasets, six datasets were made available for training,
-six datasets were made available for development, and the final six
-for testing. The dataset is released as part of the MRQA 2019 Shared Task.
-"""
-
-_HOMEPAGE = "https://mrqa.github.io/2019/shared.html"
-
-_LICENSE = "Unknwon"
-
-_URLs = {
-    # Train sub-datasets
-    "train+SQuAD": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/SQuAD.jsonl.gz",
-    "train+NewsQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/NewsQA.jsonl.gz",
-    "train+TriviaQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/TriviaQA-web.jsonl.gz",
-    "train+SearchQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/SearchQA.jsonl.gz",
-    "train+HotpotQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/HotpotQA.jsonl.gz",
-    "train+NaturalQuestions": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/NaturalQuestionsShort.jsonl.gz",
-    # Validation sub-datasets
-    "validation+SQuAD": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/SQuAD.jsonl.gz",
-    "validation+NewsQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/NewsQA.jsonl.gz",
-    "validation+TriviaQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/TriviaQA-web.jsonl.gz",
-    "validation+SearchQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/SearchQA.jsonl.gz",
-    "validation+HotpotQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/HotpotQA.jsonl.gz",
-    "validation+NaturalQuestions": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/NaturalQuestionsShort.jsonl.gz",
-    # Test sub-datasets
-    "test+BioASQ": "http://participants-area.bioasq.org/MRQA2019/",  # BioASQ.jsonl.gz
-    "test+DROP": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/DROP.jsonl.gz",
-    "test+DuoRC": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/DuoRC.ParaphraseRC.jsonl.gz",
-    "test+RACE": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/RACE.jsonl.gz",
-    "test+RelationExtraction": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/RelationExtraction.jsonl.gz",
-    "test+TextbookQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/TextbookQA.jsonl.gz",
-}
-
-
-class Mrqa(datasets.GeneratorBasedBuilder):
-    """MRQA 2019 Shared task dataset."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="plain_text", description="Plain text", version=VERSION),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            # Format is derived from https://github.com/mrqa/MRQA-Shared-Task-2019#mrqa-format
-            features=datasets.Features(
-                {
-                    "subset": datasets.Value("string"),
-                    "context": datasets.Value("string"),
-                    "context_tokens": datasets.Sequence(
-                        {
-                            "tokens": datasets.Value("string"),
-                            "offsets": datasets.Value("int32"),
-                        }
-                    ),
-                    "qid": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "question_tokens": datasets.Sequence(
-                        {
-                            "tokens": datasets.Value("string"),
-                            "offsets": datasets.Value("int32"),
-                        }
-                    ),
-                    "detected_answers": datasets.Sequence(
-                        {
-                            "text": datasets.Value("string"),
-                            "char_spans": datasets.Sequence(
-                                {
-                                    "start": datasets.Value("int32"),
-                                    "end": datasets.Value("int32"),
-                                }
-                            ),
-                            "token_spans": datasets.Sequence(
-                                {
-                                    "start": datasets.Value("int32"),
-                                    "end": datasets.Value("int32"),
-                                }
-                            ),
-                        }
-                    ),
-                    "answers": datasets.Sequence(datasets.Value("string")),
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_URLs)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepaths_dict": data_dir,
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepaths_dict": data_dir,
-                    "split": "test",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepaths_dict": data_dir,
-                    "split": "validation",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepaths_dict, split):
-        """Yields examples."""
-        for source, filepath in filepaths_dict.items():
-            if split not in source:
-                continue
-            with open(filepath, encoding="utf-8") as f:
-                header = next(f)
-                subset = json.loads(header)["header"]["dataset"]
-
-                for row in f:
-                    paragraph = json.loads(row)
-                    context = paragraph["context"].strip()
-                    context_tokens = [{"tokens": t[0], "offsets": t[1]} for t in paragraph["context_tokens"]]
-                    for qa in paragraph["qas"]:
-                        qid = qa["qid"]
-                        question = qa["question"].strip()
-                        question_tokens = [{"tokens": t[0], "offsets": t[1]} for t in qa["question_tokens"]]
-                        detected_answers = []
-                        for detect_ans in qa["detected_answers"]:
-                            detected_answers.append(
-                                {
-                                    "text": detect_ans["text"].strip(),
-                                    "char_spans": [{"start": t[0], "end": t[1]} for t in detect_ans["char_spans"]],
-                                    "token_spans": [{"start": t[0], "end": t[1]} for t in detect_ans["token_spans"]],
-                                }
-                            )
-                        answers = qa["answers"]
-                        yield f"{source}_{qid}", {
-                            "subset": subset,
-                            "context": context,
-                            "context_tokens": context_tokens,
-                            "qid": qid,
-                            "question": question,
-                            "question_tokens": question_tokens,
-                            "detected_answers": detected_answers,
-                            "answers": answers,
-                        }
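
For reference, the record structure the deleted script produced can still be inspected from the raw MRQA release files: each gzipped JSON-Lines file starts with a header line naming the sub-dataset, and every following line is one paragraph with its "qas". A minimal sketch using one of the _URLs above (the validation SQuAD file); the field names come straight from the script's _generate_examples:

import gzip
import json
import urllib.request

# One of the sub-dataset files from the deleted script's _URLs dict.
url = "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/SQuAD.jsonl.gz"

with urllib.request.urlopen(url) as resp:
    with gzip.open(resp, mode="rt", encoding="utf-8") as f:
        # First line is a header naming the sub-dataset, e.g. "SQuAD".
        subset = json.loads(next(f))["header"]["dataset"]
        # Each following line is one paragraph with its questions.
        paragraph = json.loads(next(f))
        context = paragraph["context"].strip()
        for qa in paragraph["qas"]:
            print(subset, qa["qid"], qa["question"].strip(), qa["answers"])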