Modalities: Text
Formats: parquet
Sub-tasks: extractive-qa
Languages: English
Libraries: Datasets, Dask
albertvillanova committed
Commit: f3178d9
Parent: 5effdb8

Convert dataset to Parquet (#6)


- Convert dataset to Parquet (df02e042b8345d44acdba68b3b268ecf497a9723)
- Delete loading script (241b331d6cea3479dcb46677f87cb3f3fd82008c)
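With the loading script deleted, the Hub serves this dataset straight from the Parquet shards added below. A minimal loading sketch after this commit, assuming the repo id `mrqa` and a `datasets` release that supports Parquet-backed repos:

```python
from datasets import load_dataset

# Repo id "mrqa" assumed from the card; plain_text is the default config,
# so no config name is needed.
ds = load_dataset("mrqa")
print(ds)                     # DatasetDict with train / validation / test
print(ds["train"][0]["qid"])  # fields follow the features block in the card
```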

README.md CHANGED
@@ -26,6 +26,7 @@ task_ids:
 paperswithcode_id: mrqa-2019
 pretty_name: MRQA 2019
 dataset_info:
+  config_name: plain_text
   features:
   - name: subset
     dtype: string
@@ -65,19 +66,28 @@ dataset_info:
       dtype: int32
   - name: answers
     sequence: string
-  config_name: plain_text
   splits:
   - name: train
-    num_bytes: 4090681873
+    num_bytes: 4090677713
     num_examples: 516819
-  - name: test
-    num_bytes: 57712177
-    num_examples: 9633
   - name: validation
-    num_bytes: 484107026
+    num_bytes: 484106546
     num_examples: 58221
-  download_size: 1479518355
-  dataset_size: 4632501076
+  - name: test
+    num_bytes: 57712097
+    num_examples: 9633
+  download_size: 1679161250
+  dataset_size: 4632496356
+configs:
+- config_name: plain_text
+  data_files:
+  - split: train
+    path: plain_text/train-*
+  - split: validation
+    path: plain_text/validation-*
+  - split: test
+    path: plain_text/test-*
+  default: true
 ---
 
 # Dataset Card for MRQA 2019
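The new `configs` block is what maps the Parquet shards to splits: each `path` is a glob resolved against the repo root, and `default: true` makes `plain_text` the config used when none is named. A rough hand-written equivalent of those globs, assuming a local clone of the repo and the generic `parquet` builder:

```python
from datasets import load_dataset

# Spelled-out version of the YAML data_files globs above, run from a
# local checkout of the repo.
ds = load_dataset(
    "parquet",
    data_files={
        "train": "plain_text/train-*",
        "validation": "plain_text/validation-*",
        "test": "plain_text/test-*",
    },
)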
mrqa.py DELETED
@@ -1,196 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""MRQA 2019 Shared task dataset."""
-
-
-import json
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{fisch2019mrqa,
-    title={{MRQA} 2019 Shared Task: Evaluating Generalization in Reading Comprehension},
-    author={Adam Fisch and Alon Talmor and Robin Jia and Minjoon Seo and Eunsol Choi and Danqi Chen},
-    booktitle={Proceedings of 2nd Machine Reading for Reading Comprehension (MRQA) Workshop at EMNLP},
-    year={2019},
-}
-"""
-
-_DESCRIPTION = """\
-The MRQA 2019 Shared Task focuses on generalization in question answering.
-An effective question answering system should do more than merely
-interpolate from the training set to answer test examples drawn
-from the same distribution: it should also be able to extrapolate
-to out-of-distribution examples — a significantly harder challenge.
-
-The dataset is a collection of 18 existing QA dataset (carefully selected
-subset of them) and converted to the same format (SQuAD format). Among
-these 18 datasets, six datasets were made available for training,
-six datasets were made available for development, and the final six
-for testing. The dataset is released as part of the MRQA 2019 Shared Task.
-"""
-
-_HOMEPAGE = "https://mrqa.github.io/2019/shared.html"
-
-_LICENSE = "Unknwon"
-
-_URLs = {
-    # Train sub-datasets
-    "train+SQuAD": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/SQuAD.jsonl.gz",
-    "train+NewsQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/NewsQA.jsonl.gz",
-    "train+TriviaQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/TriviaQA-web.jsonl.gz",
-    "train+SearchQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/SearchQA.jsonl.gz",
-    "train+HotpotQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/HotpotQA.jsonl.gz",
-    "train+NaturalQuestions": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/NaturalQuestionsShort.jsonl.gz",
-    # Validation sub-datasets
-    "validation+SQuAD": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/SQuAD.jsonl.gz",
-    "validation+NewsQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/NewsQA.jsonl.gz",
-    "validation+TriviaQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/TriviaQA-web.jsonl.gz",
-    "validation+SearchQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/SearchQA.jsonl.gz",
-    "validation+HotpotQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/HotpotQA.jsonl.gz",
-    "validation+NaturalQuestions": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/NaturalQuestionsShort.jsonl.gz",
-    # Test sub-datasets
-    "test+BioASQ": "http://participants-area.bioasq.org/MRQA2019/",  # BioASQ.jsonl.gz
-    "test+DROP": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/DROP.jsonl.gz",
-    "test+DuoRC": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/DuoRC.ParaphraseRC.jsonl.gz",
-    "test+RACE": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/RACE.jsonl.gz",
-    "test+RelationExtraction": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/RelationExtraction.jsonl.gz",
-    "test+TextbookQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/TextbookQA.jsonl.gz",
-}
-
-
-class Mrqa(datasets.GeneratorBasedBuilder):
-    """MRQA 2019 Shared task dataset."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="plain_text", description="Plain text", version=VERSION),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            # Format is derived from https://github.com/mrqa/MRQA-Shared-Task-2019#mrqa-format
-            features=datasets.Features(
-                {
-                    "subset": datasets.Value("string"),
-                    "context": datasets.Value("string"),
-                    "context_tokens": datasets.Sequence(
-                        {
-                            "tokens": datasets.Value("string"),
-                            "offsets": datasets.Value("int32"),
-                        }
-                    ),
-                    "qid": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "question_tokens": datasets.Sequence(
-                        {
-                            "tokens": datasets.Value("string"),
-                            "offsets": datasets.Value("int32"),
-                        }
-                    ),
-                    "detected_answers": datasets.Sequence(
-                        {
-                            "text": datasets.Value("string"),
-                            "char_spans": datasets.Sequence(
-                                {
-                                    "start": datasets.Value("int32"),
-                                    "end": datasets.Value("int32"),
-                                }
-                            ),
-                            "token_spans": datasets.Sequence(
-                                {
-                                    "start": datasets.Value("int32"),
-                                    "end": datasets.Value("int32"),
-                                }
-                            ),
-                        }
-                    ),
-                    "answers": datasets.Sequence(datasets.Value("string")),
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_URLs)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepaths_dict": data_dir,
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepaths_dict": data_dir,
-                    "split": "test",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepaths_dict": data_dir,
-                    "split": "validation",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepaths_dict, split):
-        """Yields examples."""
-        for source, filepath in filepaths_dict.items():
-            if split not in source:
-                continue
-            with open(filepath, encoding="utf-8") as f:
-                header = next(f)
-                subset = json.loads(header)["header"]["dataset"]
-
-                for row in f:
-                    paragraph = json.loads(row)
-                    context = paragraph["context"].strip()
-                    context_tokens = [{"tokens": t[0], "offsets": t[1]} for t in paragraph["context_tokens"]]
-                    for qa in paragraph["qas"]:
-                        qid = qa["qid"]
-                        question = qa["question"].strip()
-                        question_tokens = [{"tokens": t[0], "offsets": t[1]} for t in qa["question_tokens"]]
-                        detected_answers = []
-                        for detect_ans in qa["detected_answers"]:
-                            detected_answers.append(
-                                {
-                                    "text": detect_ans["text"].strip(),
-                                    "char_spans": [{"start": t[0], "end": t[1]} for t in detect_ans["char_spans"]],
-                                    "token_spans": [{"start": t[0], "end": t[1]} for t in detect_ans["token_spans"]],
-                                }
-                            )
-                        answers = qa["answers"]
-                        yield f"{source}_{qid}", {
-                            "subset": subset,
-                            "context": context,
-                            "context_tokens": context_tokens,
-                            "qid": qid,
-                            "question": question,
-                            "question_tokens": question_tokens,
-                            "detected_answers": detected_answers,
-                            "answers": answers,
-                        }
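For reference, the deleted builder streamed each gzipped JSONL file, read a header record naming the sub-dataset, then emitted one example per question. A minimal sketch of the same parse against a raw MRQA file, assuming one of the `_URLs` above has been downloaded locally (the script read files already decompressed by `dl_manager`, so `gzip` stands in here):

```python
import gzip
import json

# Assumed local download of the train SQuAD shard from the _URLs dict above.
path = "SQuAD.jsonl.gz"

with gzip.open(path, "rt", encoding="utf-8") as f:
    # First line is a header record, e.g. {"header": {"dataset": "SQuAD"}}
    subset = json.loads(next(f))["header"]["dataset"]
    for line in f:
        paragraph = json.loads(line)            # one context + its questions
        context = paragraph["context"].strip()
        for qa in paragraph["qas"]:
            print(subset, qa["qid"], qa["question"].strip(), qa["answers"])
        break  # stop after the first paragraph, for illustration
```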
plain_text/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f99b099a4701074612b2b98bb3c937b4e09cc72411d9a8915c6ec51186bbb5a2
+size 15483098

plain_text/train-00000-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7f3bdb15ac4179e7194c5f36e7b90754b2888361c7d5f5024f42bf920e7d76b
+size 40937968

plain_text/train-00001-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee901275a64a557398407475cc3c9f3c7ad27075939359b8dc649d1800bb63ad
+size 83288515

plain_text/train-00002-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:153cc1d83cb80b2fec592d2a06bcfd2d37778d0d9e33ecca82eebe0f0dff9012
+size 168678531

plain_text/train-00003-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b07c5521698aeaa7620c4595bbfb4dd0c2712d9af4604e4c5a24db055e2efff7
+size 330623073

plain_text/train-00004-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d22376ff3475fc42ae3873ef0576a0aa67712c9ca4ceeb1fc7b9e3f07314e504
+size 313450289

plain_text/train-00005-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9639612c7bb352b0544376f039810cfb048be63f620f8035f96d65e4fb8516c0
+size 295803474

plain_text/train-00006-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de662f1c252d1ce32f075de2ce4c3803e4595a0f9e95d9c38183f1ffc22ae3ed
+size 96192618

plain_text/train-00007-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:457c4aa13081b48baf58794bbdbbb89463018713498608ab086c2d5427c2798f
+size 80208968

plain_text/train-00008-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:064252dd7d9f8c2b83dae571bc608249f45822e649aaef102ad9566e46616a1d
+size 76950665

plain_text/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ebb638a2a152391211fc0b5ea13b635aa694ea8cdbf48999090ede84ab3f85d
+size 177544051
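Each three-line file above is a Git LFS pointer, not the Parquet bytes themselves; `git lfs pull` in a clone (or the Hub's resolve URLs) fetches the actual objects. Once fetched, the shards are ordinary Parquet. A hedged sketch of sanity-checking them locally, assuming pandas with pyarrow, and Dask for the sharded train split:

```python
import dask.dataframe as dd
import pandas as pd

# Single-shard splits read directly; len(val) should match the card's
# num_examples for validation (58221).
val = pd.read_parquet("plain_text/validation-00000-of-00001.parquet")
print(len(val), list(val.columns))

# The nine train shards read lazily as one frame via the same glob the
# README's data_files block uses.
train = dd.read_parquet("plain_text/train-*.parquet")
print(train.npartitions)
```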