system (HF staff) committed on
Commit
0dcbec6
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4)
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/0.1.0/dummy_data.zip +3 -0
  4. quarel.py +103 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
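
The rules above tell Git LFS which file types to store as pointers rather than regular blobs; the dummy_data.zip added later in this commit is picked up by the *.zip rule. As a rough illustration (not part of the commit), the Python sketch below approximates the pattern matching with fnmatch, which is close to but not identical to Git's own gitattributes semantics:

from fnmatch import fnmatch

# Illustrative subset of the LFS patterns added above.
LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.zip", "*tfevents*"]

def tracked_by_lfs(path):
    """Return True if the path matches any of the (approximated) LFS glob patterns."""
    return any(fnmatch(path, pattern) for pattern in LFS_PATTERNS)

print(tracked_by_lfs("dummy/0.1.0/dummy_data.zip"))  # True -> stored as an LFS pointer
print(tracked_by_lfs("quarel.py"))                   # False -> stored as a normal Git blob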
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "\nQuaRel is a crowdsourced dataset of 2771 multiple-choice story questions, including their logical forms.\n", "citation": "@inproceedings{quarel_v1,\n title={QuaRel: A Dataset and Models for Answering Questions about Qualitative Relationships},\n author={Oyvind Tafjord, Peter Clark, Matt Gardner, Wen-tau Yih, Ashish Sabharwal},\n year={2018},\n journal={arXiv:1805.05377v1}\n}\n", "homepage": "https://allenai.org/data/quarel", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "answer_index": {"dtype": "int32", "id": null, "_type": "Value"}, "logical_forms": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "logical_form_pretty": {"dtype": "string", "id": null, "_type": "Value"}, "world_literals": {"feature": {"world1": {"dtype": "string", "id": null, "_type": "Value"}, "world2": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "question": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "quarel", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1072874, "num_examples": 1941, "dataset_name": "quarel"}, "test": {"name": "test", "num_bytes": 307588, "num_examples": 552, "dataset_name": "quarel"}, "validation": {"name": "validation", "num_bytes": 154308, "num_examples": 278, "dataset_name": "quarel"}}, "download_checksums": {"https://s3-us-west-2.amazonaws.com/ai2-website/data/quarel-dataset-v1-nov2018.zip": {"num_bytes": 631370, "checksum": "95437eb445467676a9b2fbda4b2021b88d788ff483ed31e63988c67ac4c6a27c"}}, "download_size": 631370, "dataset_size": 1534770, "size_in_bytes": 2166140}}
dummy/0.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4eb48145b3a5c3db853ac37d5e41b32e889104255d8af5587325acd61be2e9d
+ size 2655
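
Because *.zip is LFS-tracked by the .gitattributes above, only this three-line pointer (spec version, SHA-256 object id, and size in bytes) is committed; the actual archive lives in LFS storage. A small illustrative parser for the pointer format, not part of the commit:

def parse_lfs_pointer(text):
    """Split a Git LFS pointer file into a key -> value dict."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:b4eb48145b3a5c3db853ac37d5e41b32e889104255d8af5587325acd61be2e9d\n"
    "size 2655\n"
)
print(pointer["oid"])   # sha256:b4eb48...
print(pointer["size"])  # 2655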
quarel.py ADDED
@@ -0,0 +1,103 @@
+ """TODO(quarel): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO(quarel): BibTeX citation
+ _CITATION = """\
+ @inproceedings{quarel_v1,
+     title={QuaRel: A Dataset and Models for Answering Questions about Qualitative Relationships},
+     author={Oyvind Tafjord, Peter Clark, Matt Gardner, Wen-tau Yih, Ashish Sabharwal},
+     year={2018},
+     journal={arXiv:1805.05377v1}
+ }
+ """
+
+ # TODO(quarel):
+ _DESCRIPTION = """
+ QuaRel is a crowdsourced dataset of 2771 multiple-choice story questions, including their logical forms.
+ """
+ _URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/quarel-dataset-v1-nov2018.zip"
+
+
+ class Quarel(datasets.GeneratorBasedBuilder):
+     """TODO(quarel): Short description of my dataset."""
+
+     # TODO(quarel): Set up version.
+     VERSION = datasets.Version("0.1.0")
+
+     def _info(self):
+         # TODO(quarel): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     # These are the features of your dataset like images, labels ...
+                     "id": datasets.Value("string"),
+                     "answer_index": datasets.Value("int32"),
+                     "logical_forms": datasets.features.Sequence(datasets.Value("string")),
+                     "logical_form_pretty": datasets.Value("string"),
+                     "world_literals": datasets.features.Sequence(
+                         {"world1": datasets.Value("string"), "world2": datasets.Value("string")}
+                     ),
+                     "question": datasets.Value("string"),
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://allenai.org/data/quarel",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(quarel): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         dl_dir = dl_manager.download_and_extract(_URL)
+         data_dir = os.path.join(dl_dir, "quarel-dataset-v1")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": os.path.join(data_dir, "quarel-v1-train.jsonl")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": os.path.join(data_dir, "quarel-v1-test.jsonl")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": os.path.join(data_dir, "quarel-v1-dev.jsonl")},
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         # TODO(quarel): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             for id_, row in enumerate(f):
+                 data = json.loads(row)
+                 yield id_, {
+                     "id": data["id"],
+                     "answer_index": data["answer_index"],
+                     "logical_forms": data["logical_forms"],
+                     "world_literals": {
+                         "world1": [data["world_literals"]["world1"]],
+                         "world2": [data["world_literals"]["world2"]],
+                     },
+                     "logical_form_pretty": data["logical_form_pretty"],
+                     "question": data["question"],
+                 }
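
With the builder script in place, the dataset can be loaded through the standard datasets API; the split names and fields below follow what _split_generators and _info declare. A minimal usage sketch:

import datasets

# Downloads the QuaRel archive, runs the builder, and returns train/test/validation splits.
quarel = datasets.load_dataset("quarel")
print(quarel)

example = quarel["train"][0]
print(example["question"])
print(example["logical_forms"])
print(example["answer_index"])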