system HF staff committed on
Commit
6786cf6
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4) hide show
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/0.1.0/dummy_data.zip +3 -0
  4. sciq.py +92 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
1
+ {"default": {"description": "The SciQ dataset contains 13,679 crowdsourced science exam questions about Physics, Chemistry and Biology, among others. The questions are in multiple-choice format with 4 answer options each. For the majority of the questions, an additional paragraph with supporting evidence for the correct answer is provided.\n\n", "citation": "@inproceedings{SciQ,\n title={Crowdsourcing Multiple Choice Science Questions},\n author={Johannes Welbl, Nelson F. Liu, Matt Gardner},\n year={2017},\n journal={arXiv:1707.06209v1}\n}\n", "homepage": "https://allenai.org/data/sciq", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "distractor3": {"dtype": "string", "id": null, "_type": "Value"}, "distractor1": {"dtype": "string", "id": null, "_type": "Value"}, "distractor2": {"dtype": "string", "id": null, "_type": "Value"}, "correct_answer": {"dtype": "string", "id": null, "_type": "Value"}, "support": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "sciq", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 564826, "num_examples": 1000, "dataset_name": "sciq"}, "train": {"name": "train", "num_bytes": 6556427, "num_examples": 11679, "dataset_name": "sciq"}, "validation": {"name": "validation", "num_bytes": 555019, "num_examples": 1000, "dataset_name": "sciq"}}, "download_checksums": {"https://s3-us-west-2.amazonaws.com/ai2-website/data/SciQ.zip": {"num_bytes": 2821345, "checksum": "7f3312f6ac6b09970b32942d106a8c44ec0dad46a0369f17d635aff8e348a87c"}}, "download_size": 2821345, "dataset_size": 7676272, "size_in_bytes": 10497617}}
dummy/0.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e1cc62da6973055bdafa3d247bcdd7577779a8b2b9e2ff3b5337232c60138e6
3
+ size 3796
sciq.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """TODO(sciQ): Add a description here."""
2
+
3
+ from __future__ import absolute_import, division, print_function
4
+
5
+ import json
6
+ import os
7
+
8
+ import datasets
9
+
10
+
11
+ # TODO(sciQ): BibTeX citation
12
+ _CITATION = """\
13
+ @inproceedings{SciQ,
14
+ title={Crowdsourcing Multiple Choice Science Questions},
15
+ author={Johannes Welbl, Nelson F. Liu, Matt Gardner},
16
+ year={2017},
17
+ journal={arXiv:1707.06209v1}
18
+ }
19
+ """
20
+
21
+ # TODO(sciQ):
22
+ _DESCRIPTION = """\
23
+ The SciQ dataset contains 13,679 crowdsourced science exam questions about Physics, Chemistry and Biology, among others. The questions are in multiple-choice format with 4 answer options each. For the majority of the questions, an additional paragraph with supporting evidence for the correct answer is provided.
24
+
25
+ """
26
+ _URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/SciQ.zip"
27
+
28
+
class Sciq(datasets.GeneratorBasedBuilder):
    """Builder for the SciQ crowdsourced science-exam question dataset."""

    VERSION = datasets.Version("0.1.0")

    # Every column in SciQ is a plain string; order matters because it is
    # recorded in dataset_infos.json.
    _STRING_COLUMNS = (
        "question",
        "distractor3",
        "distractor1",
        "distractor2",
        "correct_answer",
        "support",
    )

    def _info(self):
        """Return the DatasetInfo describing features, homepage and citation."""
        string_features = {
            column: datasets.Value("string") for column in self._STRING_COLUMNS
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(string_features),
            # No canonical (input, target) pair for as_supervised=True.
            supervised_keys=None,
            homepage="https://allenai.org/data/sciq",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the SciQ archive and map each split to its JSON file."""
        extracted_root = dl_manager.download_and_extract(_URL)
        # The archive unpacks into this oddly named top-level directory.
        data_dir = os.path.join(extracted_root, "SciQ dataset-2 3")
        split_files = (
            (datasets.Split.TRAIN, "train.json"),
            (datasets.Split.VALIDATION, "valid.json"),
            (datasets.Split.TEST, "test.json"),
        )
        return [
            datasets.SplitGenerator(
                name=split_name,
                # Passed straight through to _generate_examples.
                gen_kwargs={"filepath": os.path.join(data_dir, filename)},
            )
            for split_name, filename in split_files
        ]

    def _generate_examples(self, filepath):
        """Yield (index, example-dict) pairs from one split's JSON file."""
        with open(filepath, encoding="utf-8") as handle:
            examples = json.load(handle)
        # Each record is already a flat dict matching the declared features.
        yield from enumerate(examples)