Datasets:
tau
/

Modalities:
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
system HF staff committed on
Commit
e7ccf29
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
commonsense_qa.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """TODO(commonsense_qa): Add a description here."""
2
+
3
+ from __future__ import absolute_import, division, print_function
4
+
5
+ import json
6
+ import os
7
+
8
+ import datasets
9
+
10
+
11
+ # TODO(commonsense_qa): BibTeX citation
12
+ _CITATION = """\
13
+ @InProceedings{commonsense_QA,
14
+ title={COMMONSENSEQA: A Question Answering Challenge Targeting Commonsense Knowledge},
15
+ author={Alon, Talmor and Jonathan, Herzig and Nicholas, Lourie and Jonathan ,Berant},
16
+ journal={arXiv preprint arXiv:1811.00937v2},
17
+ year={2019}
18
+
19
+ """
20
+
21
+ # TODO(commonsense_qa):
22
+ _DESCRIPTION = """\
23
+ CommonsenseQA is a new multiple-choice question answering dataset that requires different types of commonsense knowledge
24
+ to predict the correct answers . It contains 12,102 questions with one correct answer and four distractor answers.
25
+ The dataset is provided in two major training/validation/testing set splits: "Random split" which is the main evaluation
26
+ split, and "Question token split", see paper for details.
27
+ """
28
+ _URL = "https://s3.amazonaws.com/commensenseqa"
29
+ _TRAINING_FILE = "train_rand_split.jsonl"
30
+ _DEV_FILE = "dev_rand_split.jsonl"
31
+ _TEST_FILE = "test_rand_split_no_answers.jsonl"
32
+
33
+
class CommonsenseQa(datasets.GeneratorBasedBuilder):
    """Dataset builder for CommonsenseQA, a multiple-choice commonsense QA dataset.

    Each example holds a question stem, five answer choices (label + text),
    and an ``answerKey`` that is empty for the unlabeled test split.
    """

    # TODO(commonsense_qa): Set up version.
    VERSION = datasets.Version("0.1.0")

    def _info(self):
        """Return the DatasetInfo describing features, homepage and citation."""
        features = datasets.Features(
            {
                "answerKey": datasets.Value("string"),
                "question": datasets.Value("string"),
                "choices": datasets.features.Sequence(
                    {
                        "label": datasets.Value("string"),
                        "text": datasets.Value("string"),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No canonical (input, target) pair: the test split has no answer key.
            supervised_keys=None,
            # Fixed: dataset_infos.json in this repo records the homepage as
            # tau-nlp.org, not tau-datasets.org.
            homepage="https://www.tau-nlp.org/commonsenseqa",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three JSONL files and return one SplitGenerator per split."""
        # Build URLs with an explicit "/" separator: os.path.join would emit
        # backslashes on Windows and produce broken URLs.
        download_urls = {
            "train": _URL + "/" + _TRAINING_FILE,
            "test": _URL + "/" + _TEST_FILE,
            "dev": _URL + "/" + _DEV_FILE,
        }

        downloaded_files = dl_manager.download_and_extract(download_urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"], "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"], "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, example) pairs from one JSONL file.

        The test split is distributed without answer keys, so ``answerKey``
        is the empty string for ``split == "test"``.
        """
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                question = data["question"]
                choices = question["choices"]
                labels = [choice["label"] for choice in choices]
                texts = [choice["text"] for choice in choices]
                if split == "test":
                    answerkey = ""
                else:
                    answerkey = data["answerKey"]

                yield id_, {
                    "answerKey": answerkey,
                    "question": question["stem"],
                    "choices": {"label": labels, "text": texts},
                }
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"default": {"description": "CommonsenseQA is a new multiple-choice question answering dataset that requires different types of commonsense knowledge\n to predict the correct answers . It contains 12,102 questions with one correct answer and four distractor answers.\n The dataset is provided in two major training/validation/testing set splits: \"Random split\" which is the main evaluation\n split, and \"Question token split\", see paper for details.\n", "citation": "@InProceedings{commonsense_QA,\ntitle={COMMONSENSEQA: A Question Answering Challenge Targeting Commonsense Knowledge},\nauthor={Alon, Talmor and Jonathan, Herzig and Nicholas, Lourie and Jonathan ,Berant},\njournal={arXiv preprint arXiv:1811.00937v2},\nyear={2019}\n\n", "homepage": "https://www.tau-nlp.org/commonsenseqa", "license": "", "features": {"answerKey": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "choices": {"feature": {"label": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "commonsense_qa", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 205066, "num_examples": 1140, "dataset_name": "commonsense_qa"}, "train": {"name": "train", "num_bytes": 1755851, "num_examples": 9741, "dataset_name": "commonsense_qa"}, "validation": {"name": "validation", "num_bytes": 217503, "num_examples": 1221, "dataset_name": "commonsense_qa"}}, "download_checksums": {"https://s3.amazonaws.com/commensenseqa/train_rand_split.jsonl": {"num_bytes": 3785890, "checksum": "58ffa3c8472410e24b8c43f423d89c8a003d8284698a6ed7874355dedd09a2fb"}, "https://s3.amazonaws.com/commensenseqa/test_rand_split_no_answers.jsonl": {"num_bytes": 423148, "checksum": 
"b426896d71a9cd064cf01cfaf6e920817c51701ef66028883ac1af2e73ad5f29"}, "https://s3.amazonaws.com/commensenseqa/dev_rand_split.jsonl": {"num_bytes": 471653, "checksum": "3210497fdaae614ac085d9eb873dd7f4d49b6f965a93adadc803e1229fd8a02a"}}, "download_size": 4680691, "dataset_size": 2178420, "size_in_bytes": 6859111}}
dummy/0.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:335351ace2a443be2a7c4197e32fde850f8c6fd6c615de3caf4084296a92d940
3
+ size 2155