Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: crowdsourced
Annotations Creators: no-annotation
Source Datasets: original
Tags:
License:
albertvillanova (HF staff) committed

Commit 04c3d42
1 parent: c7ea11f

Delete loading script

Files changed (1):
  1. sciq.py +0 -91
sciq.py DELETED
@@ -1,91 +0,0 @@
-"""TODO(sciQ): Add a description here."""
-
-
-import json
-import os
-
-import datasets
-
-
-# TODO(sciQ): BibTeX citation
-_CITATION = """\
-@inproceedings{SciQ,
-    title={Crowdsourcing Multiple Choice Science Questions},
-    author={Johannes Welbl, Nelson F. Liu, Matt Gardner},
-    year={2017},
-    journal={arXiv:1707.06209v1}
-}
-"""
-
-# TODO(sciQ):
-_DESCRIPTION = """\
-The SciQ dataset contains 13,679 crowdsourced science exam questions about Physics, Chemistry and Biology, among others. The questions are in multiple-choice format with 4 answer options each. For the majority of the questions, an additional paragraph with supporting evidence for the correct answer is provided.
-
-"""
-_URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/SciQ.zip"
-
-
-class Sciq(datasets.GeneratorBasedBuilder):
-    """TODO(sciQ): Short description of my dataset."""
-
-    # TODO(sciQ): Set up version.
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        # TODO(sciQ): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    # These are the features of your dataset like images, labels ...
-                    "question": datasets.Value("string"),
-                    "distractor3": datasets.Value("string"),
-                    "distractor1": datasets.Value("string"),
-                    "distractor2": datasets.Value("string"),
-                    "correct_answer": datasets.Value("string"),
-                    "support": datasets.Value("string"),
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://allenai.org/data/sciq",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(sciQ): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_URL)
-        data_dir = os.path.join(dl_dir, "SciQ dataset-2 3")
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "train.json")},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "valid.json")},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "test.json")},
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(sciQ): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            data = json.load(f)
-            for id_, row in enumerate(data):
-                yield id_, row
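
With the loading script deleted, the dataset is read straight from the data files hosted in the repository instead of by executing sciq.py. Below is a minimal usage sketch of the post-change workflow; the Hub repository id "allenai/sciq" is an assumption (the id is not stated on this page), while the split and field names come from the deleted script's DatasetInfo.

# Minimal sketch: loading SciQ without the deleted loading script.
# Assumes the Hub repository id "allenai/sciq"; adjust if the actual id differs.
from datasets import load_dataset

sciq = load_dataset("allenai/sciq")

# Splits and fields mirror the deleted script: train/validation/test with
# question, distractor1, distractor2, distractor3, correct_answer, support.
print(sciq)

example = sciq["train"][0]
print(example["question"])
print(example["correct_answer"])
print(example["support"][:200])  # supporting paragraph; may be empty for some questions

Because no repository code runs at load time, the same data files also back the Hub's dataset viewer, which is the usual motivation for removing a loading script.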