albertvillanova (HF staff) committed
Commit 2c94ad3
1 parent: 2781d61

Convert dataset to Parquet (#4)


- Convert dataset to Parquet (c7ea11f06ac799e1206befb10da77d7a34c8ce46)
- Delete loading script (04c3d428e8bfe0bfc89838e1f5efc20f8880e0e7)
- Delete legacy dataset_infos.json (ff6ff3901bdb539d0c822ddd27fff0301722c7f9)
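With this commit the data ships as Parquet and the Python loading script is gone, so `datasets` resolves splits from the card's YAML instead of executing repository code. A minimal sketch of loading after the change, assuming the repo ID is `sciq` (this repository's name):

```python
# Minimal sketch, assuming the repo ID is "sciq": after this commit,
# load_dataset() reads the Parquet shards directly instead of running sciq.py.
from datasets import load_dataset

ds = load_dataset("sciq")          # splits come from the README's `configs` block
print(ds["train"][0]["question"])  # fields: question, distractor1-3, correct_answer, support
```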

README.md CHANGED
@@ -35,16 +35,25 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 6556427
+    num_bytes: 6546183
     num_examples: 11679
   - name: validation
-    num_bytes: 555019
+    num_bytes: 554120
     num_examples: 1000
   - name: test
-    num_bytes: 564826
+    num_bytes: 563927
     num_examples: 1000
-  download_size: 2821345
-  dataset_size: 7676272
+  download_size: 4674410
+  dataset_size: 7664230
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for "sciq"
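The new `configs` block maps each split to a Parquet glob under `data/`, which also makes the shards directly addressable. A sketch using pandas over huggingface_hub's `hf://` filesystem, assuming that integration is installed and the repo ID is `sciq`:

```python
# Sketch, assuming huggingface_hub's fsspec integration is installed and
# the repo ID is "sciq": read one shard listed in the `configs` block.
import pandas as pd

df = pd.read_parquet("hf://datasets/sciq/data/validation-00000-of-00001.parquet")
print(df.shape)  # expect (1000, 6) per the split metadata above
```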
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a719356a29b127fc54ef3c7f51a034db4bd105d5717215e8c85d2aa58d60667
+size 342808
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19644360954006d06e9ad3df07bddb34f8535c081b831d48f604603c713ac167
+size 3993099
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:455dd9f1d725cd3ecbce369799a2fbbdbbfecf51ab84a86d56ba3370dc847b8a
+size 338503
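The three `ADDED` entries are git-LFS pointers: `oid` is the SHA-256 of the shard's bytes and `size` its byte length, so a download can be checked against them. A minimal sketch, using a hypothetical local path and the test shard's pointer values:

```python
# Minimal sketch: verify a downloaded shard against its LFS pointer.
# The local path is hypothetical; oid/size come from the test shard above.
import hashlib
from pathlib import Path

blob = Path("data/test-00000-of-00001.parquet").read_bytes()
assert len(blob) == 342808  # `size` from the pointer
assert hashlib.sha256(blob).hexdigest() == (
    "3a719356a29b127fc54ef3c7f51a034db4bd105d5717215e8c85d2aa58d60667"
)  # `oid sha256:...` from the pointer
```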
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "The SciQ dataset contains 13,679 crowdsourced science exam questions about Physics, Chemistry and Biology, among others. The questions are in multiple-choice format with 4 answer options each. For the majority of the questions, an additional paragraph with supporting evidence for the correct answer is provided.\n\n", "citation": "@inproceedings{SciQ,\n title={Crowdsourcing Multiple Choice Science Questions},\n author={Johannes Welbl, Nelson F. Liu, Matt Gardner},\n year={2017},\n journal={arXiv:1707.06209v1}\n}\n", "homepage": "https://allenai.org/data/sciq", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "distractor3": {"dtype": "string", "id": null, "_type": "Value"}, "distractor1": {"dtype": "string", "id": null, "_type": "Value"}, "distractor2": {"dtype": "string", "id": null, "_type": "Value"}, "correct_answer": {"dtype": "string", "id": null, "_type": "Value"}, "support": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "sciq", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 564826, "num_examples": 1000, "dataset_name": "sciq"}, "train": {"name": "train", "num_bytes": 6556427, "num_examples": 11679, "dataset_name": "sciq"}, "validation": {"name": "validation", "num_bytes": 555019, "num_examples": 1000, "dataset_name": "sciq"}}, "download_checksums": {"https://s3-us-west-2.amazonaws.com/ai2-website/data/SciQ.zip": {"num_bytes": 2821345, "checksum": "7f3312f6ac6b09970b32942d106a8c44ec0dad46a0369f17d635aff8e348a87c"}}, "download_size": 2821345, "dataset_size": 7676272, "size_in_bytes": 10497617}}
 
sciq.py DELETED
@@ -1,91 +0,0 @@
-"""TODO(sciQ): Add a description here."""
-
-
-import json
-import os
-
-import datasets
-
-
-# TODO(sciQ): BibTeX citation
-_CITATION = """\
-@inproceedings{SciQ,
-    title={Crowdsourcing Multiple Choice Science Questions},
-    author={Johannes Welbl, Nelson F. Liu, Matt Gardner},
-    year={2017},
-    journal={arXiv:1707.06209v1}
-}
-"""
-
-# TODO(sciQ):
-_DESCRIPTION = """\
-The SciQ dataset contains 13,679 crowdsourced science exam questions about Physics, Chemistry and Biology, among others. The questions are in multiple-choice format with 4 answer options each. For the majority of the questions, an additional paragraph with supporting evidence for the correct answer is provided.
-
-"""
-_URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/SciQ.zip"
-
-
-class Sciq(datasets.GeneratorBasedBuilder):
-    """TODO(sciQ): Short description of my dataset."""
-
-    # TODO(sciQ): Set up version.
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        # TODO(sciQ): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    # These are the features of your dataset like images, labels ...
-                    "question": datasets.Value("string"),
-                    "distractor3": datasets.Value("string"),
-                    "distractor1": datasets.Value("string"),
-                    "distractor2": datasets.Value("string"),
-                    "correct_answer": datasets.Value("string"),
-                    "support": datasets.Value("string"),
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://allenai.org/data/sciq",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(sciQ): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_URL)
-        data_dir = os.path.join(dl_dir, "SciQ dataset-2 3")
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "train.json")},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "valid.json")},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "test.json")},
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(sciQ): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            data = json.load(f)
-            for id_, row in enumerate(data):
-                yield id_, row
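For reference, the deleted builder amounted to downloading `SciQ.zip` and iterating each split's JSON array. A standalone sketch of that legacy path, with a hypothetical local extraction directory:

```python
# Standalone sketch of the deleted builder's logic. The extraction
# directory is hypothetical; file names come from _split_generators above.
import json
import os

data_dir = os.path.join("SciQ_extracted", "SciQ dataset-2 3")  # hypothetical path

def generate_examples(filepath):
    """Yield (key, example) pairs, mirroring Sciq._generate_examples."""
    with open(filepath, encoding="utf-8") as f:
        for id_, row in enumerate(json.load(f)):
            yield id_, row

for key, example in generate_examples(os.path.join(data_dir, "valid.json")):
    print(key, example["correct_answer"])
    break  # show just the first example
```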