system (HF staff) committed
Commit 6323813 (0 parents)

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4):
  1. .gitattributes +27 -0
  2. cosmos_qa.py +125 -0
  3. dataset_infos.json +1 -0
  4. dummy/0.1.0/dummy_data.zip +3 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
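
These rules route matching files (archives, serialized model weights, Arrow tables, TensorBoard event files) through Git LFS, so the Git history stores only small pointer files. As a minimal sketch of what the file encodes, the snippet below (illustrative only, not part of the commit; it assumes the plain "pattern attr ..." layout above and ignores quoting and macro syntax) lists the LFS-tracked patterns:

def lfs_patterns(path=".gitattributes"):
    """Return the patterns whose filter attribute routes through Git LFS."""
    patterns = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            parts = line.split()
            if not parts or parts[0].startswith("#"):
                continue  # skip blank lines and comments
            if "filter=lfs" in parts[1:]:
                patterns.append(parts[0])
    return patterns

print(lfs_patterns())  # ['*.7z', '*.arrow', ..., '*tfevents*'] for the file above
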
cosmos_qa.py ADDED
@@ -0,0 +1,125 @@
+ """TODO(cosmos_qa): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import csv
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO(cosmos_qa): BibTeX citation
+ _CITATION = """\
+ @inproceedings{cosmos,
+     title={COSMOS QA: Machine Reading Comprehension
+     with Contextual Commonsense Reasoning},
+     author={Lifu Huang and Ronan Le Bras and Chandra Bhagavatula and Yejin Choi},
+     booktitle={arXiv:1909.00277v2},
+     year={2019}
+ }
+ """
+
+ # TODO(cosmos_qa):
+ _DESCRIPTION = """\
+ Cosmos QA is a large-scale dataset of 35.6K problems that require commonsense-based reading comprehension, formulated as multiple-choice questions. It focuses on reading between the lines over a diverse collection of people's everyday narratives, asking questions concerning the likely causes or effects of events that require reasoning beyond the exact text spans in the context.
+ """
+ _URL = "https://github.com/wilburOne/cosmosqa/raw/master/data/"
+ _TEST_FILE = "test.jsonl"
+ _TRAIN_FILE = "train.csv"
+ _DEV_FILE = "valid.csv"
+
+
+ class CosmosQa(datasets.GeneratorBasedBuilder):
+     """TODO(cosmos_qa): Short description of my dataset."""
+
+     # TODO(cosmos_qa): Set up version.
+     VERSION = datasets.Version("0.1.0")
+
+     def _info(self):
+         # TODO(cosmos_qa): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "context": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "answer0": datasets.Value("string"),
+                     "answer1": datasets.Value("string"),
+                     "answer2": datasets.Value("string"),
+                     "answer3": datasets.Value("string"),
+                     "label": datasets.Value("int32"),
+                     # These are the features of your dataset like images, labels ...
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://wilburone.github.io/cosmos/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(cosmos_qa): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         urls_to_download = {
+             "train": os.path.join(_URL, _TRAIN_FILE),
+             "test": os.path.join(_URL, _TEST_FILE),
+             "dev": os.path.join(_URL, _DEV_FILE),
+         }
+         dl_dir = dl_manager.download_and_extract(urls_to_download)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": dl_dir["train"], "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": dl_dir["test"], "split": "test"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": dl_dir["dev"], "split": "dev"},
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         """Yields examples."""
+         # TODO(cosmos_qa): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             if split == "test":
+                 # The test split ships as JSON Lines without gold labels.
+                 for id_, row in enumerate(f):
+                     data = json.loads(row)
+                     yield id_, {
+                         "id": data["id"],
+                         "context": data["context"],
+                         "question": data["question"],
+                         "answer0": data["answer0"],
+                         "answer1": data["answer1"],
+                         "answer2": data["answer2"],
+                         "answer3": data["answer3"],
+                         "label": int(data.get("label", -1)),
+                     }
+             else:
+                 # Train and validation ship as CSV with a header row.
+                 data = csv.DictReader(f)
+                 for id_, row in enumerate(data):
+                     yield id_, {
+                         "id": row["id"],
+                         "context": row["context"],
+                         "question": row["question"],
+                         "answer0": row["answer0"],
+                         "answer1": row["answer1"],
+                         "answer2": row["answer2"],
+                         "answer3": row["answer3"],
+                         "label": int(row.get("label", -1)),
+                     }
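
With this script committed, the dataset loads through the library's standard entry point. A minimal usage sketch, assuming datasets>=1.0.0 is installed and the GitHub data URLs above are reachable:

import datasets

# Downloads train.csv, valid.csv, and test.jsonl, then builds the three
# splits declared in _split_generators above.
cosmos = datasets.load_dataset("cosmos_qa")

sample = cosmos["train"][0]
print(sample["context"])
print(sample["question"])
# answer0..answer3 are the four candidate answers; label indexes the
# correct one (-1 in the unlabeled test split, per _generate_examples).
for i in range(4):
    print(i, sample[f"answer{i}"])
print("gold label:", sample["label"])
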
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "Cosmos QA is a large-scale dataset of 35.6K problems that require commonsense-based reading comprehension, formulated as multiple-choice questions. It focuses on reading between the lines over a diverse collection of people's everyday narratives, asking questions concerning the likely causes or effects of events that require reasoning beyond the exact text spans in the context.\n", "citation": "@inproceedings{cosmos,\n title={COSMOS QA: Machine Reading Comprehension\n with Contextual Commonsense Reasoning},\n author={Lifu Huang and Ronan Le Bras and Chandra Bhagavatula and Yejin Choi},\n booktitle={arXiv:1909.00277v2},\n year={2019}\n}\n", "homepage": "https://wilburone.github.io/cosmos/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answer0": {"dtype": "string", "id": null, "_type": "Value"}, "answer1": {"dtype": "string", "id": null, "_type": "Value"}, "answer2": {"dtype": "string", "id": null, "_type": "Value"}, "answer3": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "cosmos_qa", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 5128447, "num_examples": 6963, "dataset_name": "cosmos_qa"}, "train": {"name": "train", "num_bytes": 17185126, "num_examples": 25262, "dataset_name": "cosmos_qa"}, "validation": {"name": "validation", "num_bytes": 2189979, "num_examples": 2985, "dataset_name": "cosmos_qa"}}, "download_checksums": {"https://github.com/wilburOne/cosmosqa/raw/master/data/train.csv": {"num_bytes": 16660449, "checksum": "d8d5ca1f9f6534b6530550718591af89372d976a8fc419360fab4158dee4d0b2"}, "https://github.com/wilburOne/cosmosqa/raw/master/data/test.jsonl": {"num_bytes": 5610681, "checksum": "70005196dc2588b95de34f1657b25e2c1a4810cfe55b5bb0c0e15580c37b3ed0"}, "https://github.com/wilburOne/cosmosqa/raw/master/data/valid.csv": {"num_bytes": 2128345, "checksum": "a6a94fc1463ca82bb10f98ef68ed535405e6f5c36e044ff8e136b5c19dea63f3"}}, "download_size": 24399475, "dataset_size": 24503552, "size_in_bytes": 48903027}}
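
This generated metadata records the schema, split sizes, and download checksums the library verifies when preparing the dataset. A small sketch of inspecting it directly, using only the standard library:

import json

with open("dataset_infos.json", encoding="utf-8") as f:
    info = json.load(f)["default"]

# Split sizes as recorded above: train 25262, validation 2985, test 6963.
for name, split in info["splits"].items():
    print(name, split["num_examples"], "examples,", split["num_bytes"], "bytes")

# Each download URL carries the SHA-256 checksum used to verify the file.
for url, meta in info["download_checksums"].items():
    print(url, "->", meta["checksum"][:12] + "...")
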
dummy/0.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c83fe07c5e4cc1381a999258f8e787c735a0d763b10b9436ed0f0bafc0393f00
+ size 6688
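
Because the dummy zip is tracked by Git LFS, the repository stores only this three-line pointer: the spec version, the SHA-256 object id of the real file, and its size in bytes. A tiny sketch of decoding such a pointer (each line is "key value", separated by a single space, as shown above):

pointer_text = """\
version https://git-lfs.github.com/spec/v1
oid sha256:c83fe07c5e4cc1381a999258f8e787c735a0d763b10b9436ed0f0bafc0393f00
size 6688
"""

# Split each line once so the value (which may contain spaces) stays intact.
fields = dict(line.split(" ", 1) for line in pointer_text.splitlines())
print(fields["oid"])        # sha256:c83fe07c...
print(int(fields["size"]))  # 6688 -> size of the actual dummy_data.zip
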