albertvillanova (HF staff) committed
Commit 388097e
1 Parent(s): 1c468ff

Convert dataset to Parquet (#6)


- Convert dataset to Parquet (8c4888010de9b23a2d136ebd31d5f5ea9f3d90fa)
- Add additional data files (102463177d892baddd13db933a909a45faa02f64)
- Delete loading script (8230ac3f228b44cad2acd5038012ffed728badeb)
- Delete legacy dataset_infos.json (f7c7ec7688698d17b3876f6033a61b297fbbb0e5)
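
After this commit, both configs resolve to the Parquet shards added below and load without executing a script. A minimal sketch with 🤗 Datasets, assuming the canonical repo id `openbookqa`:

```python
from datasets import load_dataset

# "main" is the default config; "additional" carries the extra annotation
# columns (fact1, humanScore, clarity, turkIdAnonymized).
main = load_dataset("openbookqa", "main")
additional = load_dataset("openbookqa", "additional")

print(main)              # DatasetDict with train/validation/test splits
print(main["train"][0])  # id, question_stem, choices{text, label}, answerKey
```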

README.md CHANGED
@@ -10,7 +10,6 @@ license:
 - unknown
 multilinguality:
 - monolingual
-pretty_name: OpenBookQA
 size_categories:
 - 1K<n<10K
 source_datasets:
@@ -20,8 +19,9 @@ task_categories:
 task_ids:
 - open-domain-qa
 paperswithcode_id: openbookqa
+pretty_name: OpenBookQA
 dataset_info:
-- config_name: main
+- config_name: additional
   features:
   - name: id
     dtype: string
@@ -35,19 +35,27 @@ dataset_info:
     dtype: string
   - name: answerKey
     dtype: string
+  - name: fact1
+    dtype: string
+  - name: humanScore
+    dtype: float32
+  - name: clarity
+    dtype: float32
+  - name: turkIdAnonymized
+    dtype: string
   splits:
   - name: train
-    num_bytes: 896034
+    num_bytes: 1288577
     num_examples: 4957
   - name: validation
-    num_bytes: 95519
+    num_bytes: 135916
     num_examples: 500
   - name: test
-    num_bytes: 91850
+    num_bytes: 130701
     num_examples: 500
-  download_size: 1446098
-  dataset_size: 1083403
-- config_name: additional
+  download_size: 783789
+  dataset_size: 1555194
+- config_name: main
   features:
   - name: id
     dtype: string
@@ -61,26 +69,36 @@ dataset_info:
     dtype: string
   - name: answerKey
     dtype: string
-  - name: fact1
-    dtype: string
-  - name: humanScore
-    dtype: float32
-  - name: clarity
-    dtype: float32
-  - name: turkIdAnonymized
-    dtype: string
   splits:
   - name: train
-    num_bytes: 1290473
+    num_bytes: 895386
     num_examples: 4957
   - name: validation
-    num_bytes: 136141
+    num_bytes: 95428
     num_examples: 500
   - name: test
-    num_bytes: 130926
+    num_bytes: 91759
     num_examples: 500
-  download_size: 1446098
-  dataset_size: 1557540
+  download_size: 609613
+  dataset_size: 1082573
+configs:
+- config_name: additional
+  data_files:
+  - split: train
+    path: additional/train-*
+  - split: validation
+    path: additional/validation-*
+  - split: test
+    path: additional/test-*
+- config_name: main
+  data_files:
+  - split: train
+    path: main/train-*
+  - split: validation
+    path: main/validation-*
+  - split: test
+    path: main/test-*
+  default: true
 ---
 
 # Dataset Card for OpenBookQA
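
The new `configs:` block maps each config to Parquet globs relative to the repo root, which is what `load_dataset` and the Hub's dataset viewer now consume. As a sketch, a single shard can also be read directly with pandas over the `hf://` filesystem (requires `huggingface_hub`; the repo id `openbookqa` is assumed):

```python
import pandas as pd

# Resolves one of the shards added in this commit via the Hugging Face
# fsspec filesystem registered by huggingface_hub.
df = pd.read_parquet("hf://datasets/openbookqa/main/test-00000-of-00001.parquet")

print(len(df))              # 500, matching num_examples for the main/test split
print(df.columns.tolist())  # ['id', 'question_stem', 'choices', 'answerKey']
```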
additional/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33b318ea8e2354484868bc601c1b30a58149e9deb93162ff422bb8de980c7105
+size 72461
additional/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d16d719e87efb86ed0a2ac4c8cdf380f7bfb94b602088393674c0a64ce9ed3d3
+size 635446
additional/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92e5e68e4da7bec7d130d925385abf377c2d82b89a16de502b4e1b9cf3f50a26
+size 75882
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"main": {"description": "OpenBookQA aims to promote research in advanced question-answering, probing a deeper understanding of both the topic\n(with salient facts summarized as an open book, also provided with the dataset) and the language it is expressed in. In\nparticular, it contains questions that require multi-step reasoning, use of additional common and commonsense knowledge,\nand rich text comprehension.\nOpenBookQA is a new kind of question-answering dataset modeled after open book exams for assessing human understanding\nof a subject.\n", "citation": "@inproceedings{OpenBookQA2018,\n title={Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering},\n author={Todor Mihaylov and Peter Clark and Tushar Khot and Ashish Sabharwal},\n booktitle={EMNLP},\n year={2018}\n}\n", "homepage": "https://allenai.org/data/open-book-qa", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question_stem": {"dtype": "string", "id": null, "_type": "Value"}, "choices": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answerKey": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "openbookqa", "config_name": "main", "version": {"version_str": "1.0.1", "description": "", "major": 1, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 896034, "num_examples": 4957, "dataset_name": "openbookqa"}, "validation": {"name": "validation", "num_bytes": 95519, "num_examples": 500, "dataset_name": "openbookqa"}, "test": {"name": "test", "num_bytes": 91850, "num_examples": 500, "dataset_name": "openbookqa"}}, "download_checksums": {"https://s3-us-west-2.amazonaws.com/ai2-website/data/OpenBookQA-V1-Sep2018.zip": {"num_bytes": 1446098, "checksum": "82368cf05df2e3b309c17d162e10b888b4d768fad6e171e0a041954c8553be46"}}, "download_size": 1446098, "post_processing_size": null, "dataset_size": 1083403, "size_in_bytes": 2529501}, "additional": {"description": "OpenBookQA aims to promote research in advanced question-answering, probing a deeper understanding of both the topic\n(with salient facts summarized as an open book, also provided with the dataset) and the language it is expressed in. In\nparticular, it contains questions that require multi-step reasoning, use of additional common and commonsense knowledge,\nand rich text comprehension.\nOpenBookQA is a new kind of question-answering dataset modeled after open book exams for assessing human understanding\nof a subject.\n", "citation": "@inproceedings{OpenBookQA2018,\n title={Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering},\n author={Todor Mihaylov and Peter Clark and Tushar Khot and Ashish Sabharwal},\n booktitle={EMNLP},\n year={2018}\n}\n", "homepage": "https://allenai.org/data/open-book-qa", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question_stem": {"dtype": "string", "id": null, "_type": "Value"}, "choices": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answerKey": {"dtype": "string", "id": null, "_type": "Value"}, "fact1": {"dtype": "string", "id": null, "_type": "Value"}, "humanScore": {"dtype": "float32", "id": null, "_type": "Value"}, "clarity": {"dtype": "float32", "id": null, "_type": "Value"}, "turkIdAnonymized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "openbookqa", "config_name": "additional", "version": {"version_str": "1.0.1", "description": "", "major": 1, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 1290473, "num_examples": 4957, "dataset_name": "openbookqa"}, "validation": {"name": "validation", "num_bytes": 136141, "num_examples": 500, "dataset_name": "openbookqa"}, "test": {"name": "test", "num_bytes": 130926, "num_examples": 500, "dataset_name": "openbookqa"}}, "download_checksums": {"https://s3-us-west-2.amazonaws.com/ai2-website/data/OpenBookQA-V1-Sep2018.zip": {"num_bytes": 1446098, "checksum": "82368cf05df2e3b309c17d162e10b888b4d768fad6e171e0a041954c8553be46"}}, "download_size": 1446098, "post_processing_size": null, "dataset_size": 1557540, "size_in_bytes": 3003638}}
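
With `dataset_infos.json` removed, the same split and feature metadata is served from the YAML header of README.md. A sketch of inspecting it programmatically (repo id assumed to be `openbookqa`):

```python
from datasets import get_dataset_config_names, load_dataset_builder

print(get_dataset_config_names("openbookqa"))     # ['additional', 'main']

builder = load_dataset_builder("openbookqa", "main")
print(builder.info.splits["train"].num_examples)  # 4957
print(builder.info.features)                      # id, question_stem, choices, answerKey
```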
 
main/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd5483e366daa230c1c87bbdc512d8b7229f14f6dd04d19fc8b1a3855aaaa8a3
+size 55535
main/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98148f8a54e62eb862346a75192d5fb824d6cbb68f2f59aecd793d39ecb5cd8b
+size 495845
main/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35370b9cfee8c1ff325ccc74adc434d12c47ca0ac3244aa87f3fa77069285206
+size 58233
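
The entries above are Git LFS pointer files; the Parquet payloads themselves live in LFS storage. A sketch of fetching one shard into the local cache with `huggingface_hub` (repo id assumed to be `openbookqa`):

```python
from huggingface_hub import hf_hub_download

# Downloads the actual ~496 KB Parquet file, not the three-line pointer.
path = hf_hub_download(
    repo_id="openbookqa",
    repo_type="dataset",
    filename="main/train-00000-of-00001.parquet",
)
print(path)
```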
openbookqa.py DELETED
@@ -1,159 +0,0 @@
-"""OpenBookQA dataset."""
-
-
-import json
-import os
-import textwrap
-
-import datasets
-
-
-_HOMEPAGE = "https://allenai.org/data/open-book-qa"
-
-_DESCRIPTION = """\
-OpenBookQA aims to promote research in advanced question-answering, probing a deeper understanding of both the topic
-(with salient facts summarized as an open book, also provided with the dataset) and the language it is expressed in. In
-particular, it contains questions that require multi-step reasoning, use of additional common and commonsense knowledge,
-and rich text comprehension.
-OpenBookQA is a new kind of question-answering dataset modeled after open book exams for assessing human understanding
-of a subject.
-"""
-
-_CITATION = """\
-@inproceedings{OpenBookQA2018,
-    title={Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering},
-    author={Todor Mihaylov and Peter Clark and Tushar Khot and Ashish Sabharwal},
-    booktitle={EMNLP},
-    year={2018}
-}
-"""
-
-_URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/OpenBookQA-V1-Sep2018.zip"
-
-
-class OpenbookqaConfig(datasets.BuilderConfig):
-    def __init__(self, data_dir=None, filenames=None, version=datasets.Version("1.0.1", ""), **kwargs):
-        """BuilderConfig for openBookQA dataset
-
-        Args:
-            data_dir: directory for the given dataset name
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super().__init__(version=version, **kwargs)
-        self.data_dir = data_dir
-        self.filenames = filenames
-
-
-class Openbookqa(datasets.GeneratorBasedBuilder):
-    """OpenBookQA dataset."""
-
-    BUILDER_CONFIGS = [
-        OpenbookqaConfig(
-            name="main",
-            description=textwrap.dedent(
-                """\
-                It consists of 5,957 multiple-choice elementary-level science questions (4,957 train, 500 dev, 500 test),
-                which probe the understanding of a small “book” of 1,326 core science facts and the application of these facts to novel
-                situations. For training, the dataset includes a mapping from each question to the core science fact it was designed to
-                probe. Answering OpenBookQA questions requires additional broad common knowledge, not contained in the book. The questions,
-                by design, are answered incorrectly by both a retrieval-based algorithm and a word co-occurrence algorithm. Strong neural
-                baselines achieve around 50% on OpenBookQA, leaving a large gap to the 92% accuracy of crowd-workers.
-                """
-            ),
-            data_dir="Main",
-            filenames={
-                "train": "train.jsonl",
-                "validation": "dev.jsonl",
-                "test": "test.jsonl",
-            },
-        ),
-        OpenbookqaConfig(
-            name="additional",
-            description=textwrap.dedent(
-                """\
-                Additionally, we provide 5,167 crowd-sourced common knowledge facts, and an expanded version of the train/dev/test questions where
-                each question is associated with its originating core fact, a human accuracy score, a clarity score, and an anonymized crowd-worker
-                ID (in the 'Additional' folder).
-                """
-            ),
-            data_dir="Additional",
-            filenames={
-                "train": "train_complete.jsonl",
-                "validation": "dev_complete.jsonl",
-                "test": "test_complete.jsonl",
-            },
-        ),
-    ]
-    DEFAULT_CONFIG_NAME = "main"
-
-    def _info(self):
-        if self.config.name == "main":
-            features = datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "question_stem": datasets.Value("string"),
-                    "choices": datasets.features.Sequence(
-                        {
-                            "text": datasets.Value("string"),
-                            "label": datasets.Value("string"),
-                        }
-                    ),
-                    "answerKey": datasets.Value("string"),
-                }
-            )
-        else:
-            features = datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "question_stem": datasets.Value("string"),
-                    "choices": datasets.features.Sequence(
-                        {
-                            "text": datasets.Value("string"),
-                            "label": datasets.Value("string"),
-                        }
-                    ),
-                    "answerKey": datasets.Value("string"),
-                    "fact1": datasets.Value("string"),
-                    "humanScore": datasets.Value("float"),
-                    "clarity": datasets.Value("float"),
-                    "turkIdAnonymized": datasets.Value("string"),
-                }
-            )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        dl_dir = dl_manager.download_and_extract(_URL)
-        data_dir = os.path.join(dl_dir, "OpenBookQA-V1-Sep2018", "Data", self.config.data_dir)
-        splits = [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
-        return [
-            datasets.SplitGenerator(
-                name=split,
-                gen_kwargs={"filepath": os.path.join(data_dir, self.config.filenames[split])},
-            )
-            for split in splits
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        with open(filepath, encoding="utf-8") as f:
-            for uid, row in enumerate(f):
-                data = json.loads(row)
-                example = {
-                    "id": data["id"],
-                    "question_stem": data["question"]["stem"],
-                    "choices": {
-                        "text": [choice["text"] for choice in data["question"]["choices"]],
-                        "label": [choice["label"] for choice in data["question"]["choices"]],
-                    },
-                    "answerKey": data["answerKey"],
-                }
-                if self.config.name == "additional":
-                    for key in ["fact1", "humanScore", "clarity", "turkIdAnonymized"]:
-                        example[key] = data[key]
-                yield uid, example
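
The deleted script flattened each raw JSONL record into the schema above before yielding it; the Parquet shards added in this commit store exactly that flattened form. A self-contained sketch of the same per-row transformation, useful if you start from the original OpenBookQA-V1-Sep2018 files rather than the Parquet shards:

```python
import json

def to_example(line: str, additional: bool = False) -> dict:
    """Mirror of the deleted _generate_examples logic for one JSONL row."""
    data = json.loads(line)
    example = {
        "id": data["id"],
        "question_stem": data["question"]["stem"],
        "choices": {
            "text": [choice["text"] for choice in data["question"]["choices"]],
            "label": [choice["label"] for choice in data["question"]["choices"]],
        },
        "answerKey": data["answerKey"],
    }
    if additional:  # the *_complete.jsonl files carry the extra annotation fields
        for key in ("fact1", "humanScore", "clarity", "turkIdAnonymized"):
            example[key] = data[key]
    return example
```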