Dataset: squad_v2

- Languages: English
- Multilinguality: monolingual
- Size categories: 100K<n<1M
- Language creators: crowdsourced
- Annotations creators: crowdsourced
- Source datasets: original
albertvillanova (HF staff) committed
Commit 80596c4, 1 parent: e4d7191

Convert dataset to Parquet (#6)

- Convert dataset to Parquet (167c717c7aed0d8300f27651433c35daff1440f2)
- Delete loading script (bc1432158a3d147e8bdda46a43cc84471ac2433d)
- Delete legacy dataset_infos.json (dccd52e142ef4ad27083cc78299b15ebccb8fd13)
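
After this commit the dataset is served straight from the Parquet shards, with no loading script involved. A minimal loading sketch, assuming the Hugging Face `datasets` library is installed and the hub repo id `squad_v2`:

```python
# Minimal sketch (assumes `pip install datasets` and repo id "squad_v2").
# With the loading script gone, load_dataset resolves the Parquet files listed
# under `configs.data_files` in the README instead of running squad_v2.py.
from datasets import load_dataset

squad_v2 = load_dataset("squad_v2")     # DatasetDict with "train" and "validation" splits
print(squad_v2["train"].num_rows)       # 130319, per the dataset card
print(squad_v2["train"][0].keys())      # id, title, context, question, answers
```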

README.md CHANGED
@@ -1,5 +1,4 @@
 ---
-pretty_name: SQuAD2.0
 annotations_creators:
 - crowdsourced
 language_creators:
@@ -20,23 +19,9 @@ task_ids:
 - open-domain-qa
 - extractive-qa
 paperswithcode_id: squad
-train-eval-index:
-- config: squad_v2
-  task: question-answering
-  task_id: extractive_question_answering
-  splits:
-    train_split: train
-    eval_split: validation
-  col_mapping:
-    question: question
-    context: context
-    answers:
-      text: text
-      answer_start: answer_start
-  metrics:
-  - type: squad_v2
-    name: SQuAD v2
+pretty_name: SQuAD2.0
 dataset_info:
+  config_name: squad_v2
   features:
   - name: id
     dtype: string
@@ -52,16 +37,39 @@ dataset_info:
     dtype: string
   - name: answer_start
     dtype: int32
-  config_name: squad_v2
   splits:
   - name: train
-    num_bytes: 116699950
+    num_bytes: 116732025
     num_examples: 130319
   - name: validation
-    num_bytes: 11660302
+    num_bytes: 11661091
     num_examples: 11873
-  download_size: 46494161
-  dataset_size: 128360252
+  download_size: 17720493
+  dataset_size: 128393116
+configs:
+- config_name: squad_v2
+  data_files:
+  - split: train
+    path: squad_v2/train-*
+  - split: validation
+    path: squad_v2/validation-*
+  default: true
+train-eval-index:
+- config: squad_v2
+  task: question-answering
+  task_id: extractive_question_answering
+  splits:
+    train_split: train
+    eval_split: validation
+  col_mapping:
+    question: question
+    context: context
+    answers:
+      text: text
+      answer_start: answer_start
+  metrics:
+  - type: squad_v2
+    name: SQuAD v2
 ---
 
 # Dataset Card for "squad_v2"
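
The new `configs` block maps the default `squad_v2` config to Parquet files via glob patterns (`squad_v2/train-*`, `squad_v2/validation-*`). A small sketch of reading those shards directly with pandas, assuming a local clone of the repository and pyarrow installed:

```python
# Sketch: reading the Parquet shards that the card's data_files globs point to,
# bypassing the `datasets` library entirely. Paths assume a local clone of the repo.
import glob

import pandas as pd

train_df = pd.concat(pd.read_parquet(p) for p in sorted(glob.glob("squad_v2/train-*")))
valid_df = pd.concat(pd.read_parquet(p) for p in sorted(glob.glob("squad_v2/validation-*")))

print(len(train_df), len(valid_df))  # expected 130319 and 11873, matching num_examples above
```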
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"squad_v2": {"description": "combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers\n to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but\n also determine when no answer is supported by the paragraph and abstain from answering.\n", "citation": "@article{2016arXiv160605250R,\n author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},\n Konstantin and {Liang}, Percy},\n title = \"{SQuAD: 100,000+ Questions for Machine Comprehension of Text}\",\n journal = {arXiv e-prints},\n year = 2016,\n eid = {arXiv:1606.05250},\n pages = {arXiv:1606.05250},\narchivePrefix = {arXiv},\n eprint = {1606.05250},\n}\n", "homepage": "https://rajpurkar.github.io/SQuAD-explorer/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "question-answering-extractive", "question_column": "question", "context_column": "context", "answers_column": "answers"}], "builder_name": "squad_v2", "config_name": "squad_v2", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 116699950, "num_examples": 130319, "dataset_name": "squad_v2"}, "validation": {"name": "validation", "num_bytes": 11660302, "num_examples": 11873, "dataset_name": "squad_v2"}}, "download_checksums": {"https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json": {"num_bytes": 42123633, "checksum": "68dcfbb971bd3e96d5b46c7177b16c1a4e7d4bdef19fb204502738552dede002"}, "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json": {"num_bytes": 4370528, "checksum": "80a5225e94905956a6446d296ca1093975c4d3b3260f1d6c8f68bc2ab77182d8"}}, "download_size": 46494161, "post_processing_size": null, "dataset_size": 128360252, "size_in_bytes": 174854413}}
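
The deleted dataset_infos.json recorded, among other metadata, sha256 checksums for the raw SQuAD v2 downloads. A standard-library sketch for re-checking one of them, using the URL and checksum copied from the JSON above:

```python
# Sketch: verifying the sha256 checksum recorded in the deleted dataset_infos.json
# for train-v2.0.json. Standard library only; values copied from the JSON above.
import hashlib
import urllib.request

URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json"
EXPECTED = "68dcfbb971bd3e96d5b46c7177b16c1a4e7d4bdef19fb204502738552dede002"

sha256 = hashlib.sha256()
with urllib.request.urlopen(URL) as response:
    for chunk in iter(lambda: response.read(1 << 20), b""):
        sha256.update(chunk)

print(sha256.hexdigest() == EXPECTED)  # True if the download matches the recorded checksum
```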
 
squad_v2.py DELETED
@@ -1,133 +0,0 @@
-"""TODO(squad_v2): Add a description here."""
-
-
-import json
-
-import datasets
-from datasets.tasks import QuestionAnsweringExtractive
-
-
-# TODO(squad_v2): BibTeX citation
-_CITATION = """\
-@article{2016arXiv160605250R,
-       author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
-                 Konstantin and {Liang}, Percy},
-        title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
-      journal = {arXiv e-prints},
-         year = 2016,
-          eid = {arXiv:1606.05250},
-        pages = {arXiv:1606.05250},
-archivePrefix = {arXiv},
-       eprint = {1606.05250},
-}
-"""
-
-_DESCRIPTION = """\
-combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers
- to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but
- also determine when no answer is supported by the paragraph and abstain from answering.
-"""
-
-_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
-_URLS = {
-    "train": _URL + "train-v2.0.json",
-    "dev": _URL + "dev-v2.0.json",
-}
-
-
-class SquadV2Config(datasets.BuilderConfig):
-    """BuilderConfig for SQUAD."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for SQUADV2.
-
-        Args:
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(SquadV2Config, self).__init__(**kwargs)
-
-
-class SquadV2(datasets.GeneratorBasedBuilder):
-    """TODO(squad_v2): Short description of my dataset."""
-
-    # TODO(squad_v2): Set up version.
-    BUILDER_CONFIGS = [
-        SquadV2Config(name="squad_v2", version=datasets.Version("2.0.0"), description="SQuAD plaint text version 2"),
-    ]
-
-    def _info(self):
-        # TODO(squad_v2): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "title": datasets.Value("string"),
-                    "context": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "answers": datasets.features.Sequence(
-                        {
-                            "text": datasets.Value("string"),
-                            "answer_start": datasets.Value("int32"),
-                        }
-                    ),
-                    # These are the features of your dataset like images, labels ...
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://rajpurkar.github.io/SQuAD-explorer/",
-            citation=_CITATION,
-            task_templates=[
-                QuestionAnsweringExtractive(
-                    question_column="question", context_column="context", answers_column="answers"
-                )
-            ],
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(squad_v2): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        urls_to_download = _URLS
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(squad_v2): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            squad = json.load(f)
-            for example in squad["data"]:
-                title = example.get("title", "")
-                for paragraph in example["paragraphs"]:
-                    context = paragraph["context"]  # do not strip leading blank spaces GH-2585
-                    for qa in paragraph["qas"]:
-                        question = qa["question"]
-                        id_ = qa["id"]
-
-                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                        answers = [answer["text"] for answer in qa["answers"]]
-
-                        # Features currently used are "context", "question", and "answers".
-                        # Others are extracted here for the ease of future expansions.
-                        yield id_, {
-                            "title": title,
-                            "context": context,
-                            "question": question,
-                            "id": id_,
-                            "answers": {
-                                "answer_start": answer_starts,
-                                "text": answers,
-                            },
-                        }
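
The deleted `_generate_examples` is where the raw SQuAD v2 JSON was flattened into one record per question. For reference, a plain-Python sketch of the same flattening, assuming a locally downloaded train-v2.0.json or dev-v2.0.json; the Parquet shards added below already contain exactly this output.

```python
# Sketch: the flattening formerly done by squad_v2.py's _generate_examples,
# written as a standalone generator over a locally downloaded SQuAD v2 JSON file.
import json


def iter_squad_examples(filepath):
    """Yield one dict per question, mirroring the deleted script's schema."""
    with open(filepath, encoding="utf-8") as f:
        squad = json.load(f)
    for article in squad["data"]:
        title = article.get("title", "")
        for paragraph in article["paragraphs"]:
            context = paragraph["context"]  # leading blanks intentionally kept (GH-2585)
            for qa in paragraph["qas"]:
                yield {
                    "id": qa["id"],
                    "title": title,
                    "context": context,
                    "question": qa["question"],
                    "answers": {
                        "text": [a["text"] for a in qa["answers"]],
                        "answer_start": [a["answer_start"] for a in qa["answers"]],
                    },
                }


# Example usage (path is hypothetical):
# print(next(iter_squad_examples("train-v2.0.json"))["question"])
```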
squad_v2/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6da32ffb482ff463ad056477740d1bb284b96a45db3a08bee6a225ca6abf291
+size 16369982

squad_v2/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0560174ab095c5ac0a8c8dc8da05f1625453c45a77e4ce9cabc6947ddfdd24cb
+size 1350511
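
The two added files are Git LFS pointers: `oid` is the sha256 of the actual Parquet shard and `size` its byte length. A sketch for checking a fetched shard against its pointer, assuming the file has been pulled into the working directory:

```python
# Sketch: validating a fetched Parquet shard against its Git LFS pointer
# (oid = sha256 of the blob, size = byte count). Values copied from the pointer above.
import hashlib
import os

PATH = "squad_v2/train-00000-of-00001.parquet"
EXPECTED_OID = "f6da32ffb482ff463ad056477740d1bb284b96a45db3a08bee6a225ca6abf291"
EXPECTED_SIZE = 16369982

with open(PATH, "rb") as f:
    oid = hashlib.sha256(f.read()).hexdigest()

print(os.path.getsize(PATH) == EXPECTED_SIZE and oid == EXPECTED_OID)
```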