Committed by system (HF staff)
Commit: f2fe64f
0 Parent(s)

Update files from the datasets library (from 1.2.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
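
For illustration, a rough Python sketch of what these rules mean: files whose names match any of the patterns below are stored through Git LFS rather than as regular Git blobs. fnmatch only approximates gitattributes matching, and the path-based saved_model/**/* rule is not covered here.

import fnmatch
import os

# Name-based LFS patterns copied from the .gitattributes above.
LFS_NAME_PATTERNS = [
    "*.7z", "*.arrow", "*.bin", "*.bin.*", "*.bz2", "*.ftz", "*.gz", "*.h5",
    "*.joblib", "*.lfs.*", "*.model", "*.msgpack", "*.onnx", "*.ot", "*.parquet",
    "*.pb", "*.pt", "*.pth", "*.rar", "*.tar.*", "*.tflite", "*.tgz", "*.xz",
    "*.zip", "*.zstandard", "*tfevents*",
]


def routed_to_lfs(path):
    """Rough check: would this file fall under the name-based LFS rules above?"""
    name = os.path.basename(path)
    return any(fnmatch.fnmatch(name, pattern) for pattern in LFS_NAME_PATTERNS)


print(routed_to_lfs("dummy/codah/1.0.0/dummy_data.zip"))  # True
print(routed_to_lfs("codah.py"))  # False
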
README.md ADDED
@@ -0,0 +1,140 @@
+ ---
+ annotations_creators:
+ - crowdsourced
+ language_creators:
+ - crowdsourced
+ languages:
+ - en
+ licenses:
+ - unknown
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 1K<n<10K
+ source_datasets:
+ - original
+ task_categories:
+ - question-answering
+ task_ids:
+ - multiple-choice-qa
+ ---
+
+ # Dataset Card for CODAH
+
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ - **Homepage:** [https://github.com/Websail-NU/CODAH](https://github.com/Websail-NU/CODAH)
+ - **Repository:** [https://github.com/Websail-NU/CODAH](https://github.com/Websail-NU/CODAH)
+ - **Paper:** CODAH: An Adversarially-Authored Question Answering Dataset for Common Sense (Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP, 2019)
+ - **Leaderboard:** [More Information Needed]
+ - **Point of Contact:** [More Information Needed]
+
+ ### Dataset Summary
+
+ The COmmonsense Dataset Adversarially-authored by Humans (CODAH) is an evaluation set for commonsense question-answering in the sentence completion style of SWAG. As opposed to other automatically generated NLI datasets, CODAH is adversarially constructed by humans who can view feedback from a pre-trained model and use this information to design challenging commonsense questions. The authors report that CODAH questions present a complementary extension to the SWAG dataset, testing additional modes of common sense.
+
+ ### Supported Tasks and Leaderboards
+
+ The dataset supports multiple-choice question answering: given a sentence-completion prompt, a system must select the correct completion from the candidate answers.
+
+ ### Languages
+
+ The text in the dataset is in English (`en`).
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ Each instance consists of a numeric identifier, a question category, a sentence-completion prompt, a list of candidate completions, and the index of the correct completion.
+
+ ### Data Fields
+
+ - `id`: an `int32` identifier for the example.
+ - `question_category`: a class label with the values `Idioms`, `Reference`, `Polysemy`, `Negation`, `Quantitative`, and `Others`.
+ - `question_propmt`: a `string` containing the sentence-completion prompt.
+ - `candidate_answers`: a sequence of `string` candidate completions.
+ - `correct_answer_idx`: the `int32` index of the correct completion within `candidate_answers`.
+
+ ### Data Splits
+
+ The `codah` configuration exposes the full dataset as a single `train` split of 2,776 examples. The cross-validation configurations `fold_0` through `fold_4` each provide `train`, `validation`, and `test` splits with 1,665, 556, and 555 examples respectively (in `fold_4`, validation has 555 examples and test has 556).
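+
+ The configurations can be loaded with the `datasets` library; a minimal sketch, assuming the dataset is published on the Hugging Face Hub under the name `codah`:
+
+ ```python
+ from datasets import load_dataset
+
+ # Full dataset: a single "train" split.
+ full = load_dataset("codah", "codah")
+ print(full["train"][0])
+
+ # One of the official cross-validation folds: train/validation/test splits.
+ fold = load_dataset("codah", "fold_0")
+ print({split: fold[split].num_rows for split in fold})
+ ```
+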
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ [More Information Needed]
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ [More Information Needed]
+
+ #### Annotation process
+
+ Questions were adversarially authored by human annotators who could view feedback from a pre-trained model and used that feedback to design challenging commonsense questions.
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ The dataset was introduced by Michael Chen, Mike D'Arcy, Alisa Liu, Jared Fernandez, and Doug Downey.
+
+ ### Licensing Information
+
+ The license is not specified; it is listed as `unknown` in the dataset tags above.
+
+ ### Citation Information
+
+     @inproceedings{chen2019codah,
+         title={CODAH: An Adversarially-Authored Question Answering Dataset for Common Sense},
+         author={Chen, Michael and D'Arcy, Mike and Liu, Alisa and Fernandez, Jared and Downey, Doug},
+         booktitle={Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP},
+         pages={63--69},
+         year={2019}
+     }
codah.py ADDED
@@ -0,0 +1,142 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """The COmmonsense Dataset Adversarially-authored by Humans (CODAH)"""
+
+ from __future__ import absolute_import, division, print_function
+
+ import csv
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{chen2019codah,
+     title={CODAH: An Adversarially-Authored Question Answering Dataset for Common Sense},
+     author={Chen, Michael and D'Arcy, Mike and Liu, Alisa and Fernandez, Jared and Downey, Doug},
+     booktitle={Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP},
+     pages={63--69},
+     year={2019}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The COmmonsense Dataset Adversarially-authored by Humans (CODAH) is an evaluation set for commonsense \
+ question-answering in the sentence completion style of SWAG. As opposed to other automatically \
+ generated NLI datasets, CODAH is adversarially constructed by humans who can view feedback \
+ from a pre-trained model and use this information to design challenging commonsense questions. \
+ Our experimental results show that CODAH questions present a complementary extension to the SWAG dataset, testing additional modes of common sense.
+ """
+
+ _URL = "https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/"
+ _FULL_DATA_URL = _URL + "full_data.tsv"
+
+ QUESTION_CATEGORIES_MAPPING = {
+     "i": "Idioms",
+     "r": "Reference",
+     "p": "Polysemy",
+     "n": "Negation",
+     "q": "Quantitative",
+     "o": "Others",
+ }
+
+
+ class CodahConfig(datasets.BuilderConfig):
+     """BuilderConfig for CODAH."""
+
+     def __init__(self, fold=None, **kwargs):
+         """BuilderConfig for CODAH.
+
+         Args:
+             fold: `string`, official cross validation fold.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(CodahConfig, self).__init__(**kwargs)
+         self.fold = fold
+
+
+ class Codah(datasets.GeneratorBasedBuilder):
+     """The COmmonsense Dataset Adversarially-authored by Humans (CODAH)"""
+
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIGS = [
+         CodahConfig(name="codah", version=datasets.Version("1.0.0"), description="Full CODAH dataset", fold=None),
+         CodahConfig(
+             name="fold_0", version=datasets.Version("1.0.0"), description="Official CV split (fold_0)", fold="fold_0"
+         ),
+         CodahConfig(
+             name="fold_1", version=datasets.Version("1.0.0"), description="Official CV split (fold_1)", fold="fold_1"
+         ),
+         CodahConfig(
+             name="fold_2", version=datasets.Version("1.0.0"), description="Official CV split (fold_2)", fold="fold_2"
+         ),
+         CodahConfig(
+             name="fold_3", version=datasets.Version("1.0.0"), description="Official CV split (fold_3)", fold="fold_3"
+         ),
+         CodahConfig(
+             name="fold_4", version=datasets.Version("1.0.0"), description="Official CV split (fold_4)", fold="fold_4"
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "question_category": datasets.features.ClassLabel(
+                         names=["Idioms", "Reference", "Polysemy", "Negation", "Quantitative", "Others"]
+                     ),
+                     "question_propmt": datasets.Value("string"),
+                     "candidate_answers": datasets.features.Sequence(datasets.Value("string")),
+                     "correct_answer_idx": datasets.Value("int32"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/Websail-NU/CODAH",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         if self.config.name == "codah":
+             data_file = dl_manager.download(_FULL_DATA_URL)
+             return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_file": data_file})]
+
+         base_url = f"{_URL}cv_split/{self.config.fold}/"
+         _urls = {
+             "train": base_url + "train.tsv",
+             "dev": base_url + "dev.tsv",
+             "test": base_url + "test.tsv",
+         }
+         downloaded_files = dl_manager.download_and_extract(_urls)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_file": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"data_file": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"data_file": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, data_file):
+         with open(data_file, encoding="utf-8") as f:
+             rows = csv.reader(f, delimiter="\t")
+             for i, row in enumerate(rows):
+                 # Row layout: category letter, prompt, candidate completions..., index of the correct completion.
+                 # An empty category field is mapped to -1, the ClassLabel convention for a missing label.
+                 question_category = QUESTION_CATEGORIES_MAPPING[row[0]] if row[0] != "" else -1
+                 example = {
+                     "id": i,
+                     "question_category": question_category,
+                     "question_propmt": row[1],
+                     "candidate_answers": row[2:-1],
+                     "correct_answer_idx": int(row[-1]),
+                 }
+                 yield i, example
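
For reference, a small illustrative sketch of the TSV row layout that `_generate_examples` assumes, using a made-up row rather than real dataset content; the field names mirror the script above.

import csv
import io

# Hypothetical TSV row: category letter, prompt, candidate completions..., index of the correct completion.
fake_row = "q\tThe recipe needs three eggs, so I\tbuy a dozen chickens.\tcrack three eggs into a bowl.\tskip the cake.\tpaint the kitchen.\t1\n"

row = next(csv.reader(io.StringIO(fake_row), delimiter="\t"))
example = {
    "id": 0,
    "question_category": row[0],  # the script maps "q" to "Quantitative" via QUESTION_CATEGORIES_MAPPING
    "question_propmt": row[1],
    "candidate_answers": row[2:-1],
    "correct_answer_idx": int(row[-1]),
}
print(example)
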
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"codah": {"description": "The COmmonsense Dataset Adversarially-authored by Humans (CODAH) is an evaluation set for commonsense question-answering in the sentence completion style of SWAG. As opposed to other automatically generated NLI datasets, CODAH is adversarially constructed by humans who can view feedback from a pre-trained model and use this information to design challenging commonsense questions. Our experimental results show that CODAH questions present a complementary extension to the SWAG dataset, testing additional modes of common sense.\n", "citation": "@inproceedings{chen2019codah,\n title={CODAH: An Adversarially-Authored Question Answering Dataset for Common Sense},\n author={Chen, Michael and D'Arcy, Mike and Liu, Alisa and Fernandez, Jared and Downey, Doug},\n booktitle={Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP},\n pages={63--69},\n year={2019}\n}\n", "homepage": "https://github.com/Websail-NU/CODAH", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "question_category": {"num_classes": 6, "names": ["Idioms", "Reference", "Polysemy", "Negation", "Quantitative", "Others"], "names_file": null, "id": null, "_type": "ClassLabel"}, "question_propmt": {"dtype": "string", "id": null, "_type": "Value"}, "candidate_answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "correct_answer_idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "codah", "config_name": "codah", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 571208, "num_examples": 2776, "dataset_name": "codah"}}, "download_checksums": {"https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/full_data.tsv": {"num_bytes": 485130, "checksum": "96689f2abf2f09eb91af50e2d8f92bb553e157601908a1cee83fe68670b5c3ea"}}, "download_size": 485130, "post_processing_size": null, "dataset_size": 571208, "size_in_bytes": 1056338}, "fold_0": {"description": "The COmmonsense Dataset Adversarially-authored by Humans (CODAH) is an evaluation set for commonsense question-answering in the sentence completion style of SWAG. As opposed to other automatically generated NLI datasets, CODAH is adversarially constructed by humans who can view feedback from a pre-trained model and use this information to design challenging commonsense questions. 
Our experimental results show that CODAH questions present a complementary extension to the SWAG dataset, testing additional modes of common sense.\n", "citation": "@inproceedings{chen2019codah,\n title={CODAH: An Adversarially-Authored Question Answering Dataset for Common Sense},\n author={Chen, Michael and D'Arcy, Mike and Liu, Alisa and Fernandez, Jared and Downey, Doug},\n booktitle={Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP},\n pages={63--69},\n year={2019}\n}\n", "homepage": "https://github.com/Websail-NU/CODAH", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "question_category": {"num_classes": 6, "names": ["Idioms", "Reference", "Polysemy", "Negation", "Quantitative", "Others"], "names_file": null, "id": null, "_type": "ClassLabel"}, "question_propmt": {"dtype": "string", "id": null, "_type": "Value"}, "candidate_answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "correct_answer_idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "codah", "config_name": "fold_0", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 344912, "num_examples": 1665, "dataset_name": "codah"}, "validation": {"name": "validation", "num_bytes": 114211, "num_examples": 556, "dataset_name": "codah"}, "test": {"name": "test", "num_bytes": 112109, "num_examples": 555, "dataset_name": "codah"}}, "download_checksums": {"https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_0/train.tsv": {"num_bytes": 293278, "checksum": "790edcdfcba7d07b66ae2f3074c9ed17239b1f0865962f1bea0e0768207f0d43"}, "https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_0/dev.tsv": {"num_bytes": 96963, "checksum": "96c69a4240e6b0ef1f2364aeaa9d549e1b36188814f63d52bbc678a57b855923"}, "https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_0/test.tsv": {"num_bytes": 94889, "checksum": "2065089015df59d7cf0328bdd32a9e2088ff66ab9639d926acf26a3b0203454e"}}, "download_size": 485130, "post_processing_size": null, "dataset_size": 571232, "size_in_bytes": 1056362}, "fold_1": {"description": "The COmmonsense Dataset Adversarially-authored by Humans (CODAH) is an evaluation set for commonsense question-answering in the sentence completion style of SWAG. As opposed to other automatically generated NLI datasets, CODAH is adversarially constructed by humans who can view feedback from a pre-trained model and use this information to design challenging commonsense questions. 
Our experimental results show that CODAH questions present a complementary extension to the SWAG dataset, testing additional modes of common sense.\n", "citation": "@inproceedings{chen2019codah,\n title={CODAH: An Adversarially-Authored Question Answering Dataset for Common Sense},\n author={Chen, Michael and D'Arcy, Mike and Liu, Alisa and Fernandez, Jared and Downey, Doug},\n booktitle={Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP},\n pages={63--69},\n year={2019}\n}\n", "homepage": "https://github.com/Websail-NU/CODAH", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "question_category": {"num_classes": 6, "names": ["Idioms", "Reference", "Polysemy", "Negation", "Quantitative", "Others"], "names_file": null, "id": null, "_type": "ClassLabel"}, "question_propmt": {"dtype": "string", "id": null, "_type": "Value"}, "candidate_answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "correct_answer_idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "codah", "config_name": "fold_1", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 340990, "num_examples": 1665, "dataset_name": "codah"}, "validation": {"name": "validation", "num_bytes": 114211, "num_examples": 556, "dataset_name": "codah"}, "test": {"name": "test", "num_bytes": 116031, "num_examples": 555, "dataset_name": "codah"}}, "download_checksums": {"https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_1/train.tsv": {"num_bytes": 289356, "checksum": "99841c58cbef1afb8aff8bf4ee8827d0824c3e787ff55d9a365a39284edde7ec"}, "https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_1/dev.tsv": {"num_bytes": 96963, "checksum": "96c69a4240e6b0ef1f2364aeaa9d549e1b36188814f63d52bbc678a57b855923"}, "https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_1/test.tsv": {"num_bytes": 98811, "checksum": "0f63cf4b30a7bd01f44f466cfa072bd99156e9f9a9042ac659f87b11e9bdbebb"}}, "download_size": 485130, "post_processing_size": null, "dataset_size": 571232, "size_in_bytes": 1056362}, "fold_2": {"description": "The COmmonsense Dataset Adversarially-authored by Humans (CODAH) is an evaluation set for commonsense question-answering in the sentence completion style of SWAG. As opposed to other automatically generated NLI datasets, CODAH is adversarially constructed by humans who can view feedback from a pre-trained model and use this information to design challenging commonsense questions. 
Our experimental results show that CODAH questions present a complementary extension to the SWAG dataset, testing additional modes of common sense.\n", "citation": "@inproceedings{chen2019codah,\n title={CODAH: An Adversarially-Authored Question Answering Dataset for Common Sense},\n author={Chen, Michael and D'Arcy, Mike and Liu, Alisa and Fernandez, Jared and Downey, Doug},\n booktitle={Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP},\n pages={63--69},\n year={2019}\n}\n", "homepage": "https://github.com/Websail-NU/CODAH", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "question_category": {"num_classes": 6, "names": ["Idioms", "Reference", "Polysemy", "Negation", "Quantitative", "Others"], "names_file": null, "id": null, "_type": "ClassLabel"}, "question_propmt": {"dtype": "string", "id": null, "_type": "Value"}, "candidate_answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "correct_answer_idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "codah", "config_name": "fold_2", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 342293, "num_examples": 1665, "dataset_name": "codah"}, "validation": {"name": "validation", "num_bytes": 114211, "num_examples": 556, "dataset_name": "codah"}, "test": {"name": "test", "num_bytes": 114728, "num_examples": 555, "dataset_name": "codah"}}, "download_checksums": {"https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_2/train.tsv": {"num_bytes": 290657, "checksum": "79ea621ba442cb8ebb64bf06179a038cfbccc875947a49fca1a487ce79bdc652"}, "https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_2/dev.tsv": {"num_bytes": 96963, "checksum": "96c69a4240e6b0ef1f2364aeaa9d549e1b36188814f63d52bbc678a57b855923"}, "https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_2/test.tsv": {"num_bytes": 97510, "checksum": "285996f708245858789a9a99740de07cb20c33b8fb071e772a047907727ed0ac"}}, "download_size": 485130, "post_processing_size": null, "dataset_size": 571232, "size_in_bytes": 1056362}, "fold_3": {"description": "The COmmonsense Dataset Adversarially-authored by Humans (CODAH) is an evaluation set for commonsense question-answering in the sentence completion style of SWAG. As opposed to other automatically generated NLI datasets, CODAH is adversarially constructed by humans who can view feedback from a pre-trained model and use this information to design challenging commonsense questions. 
Our experimental results show that CODAH questions present a complementary extension to the SWAG dataset, testing additional modes of common sense.\n", "citation": "@inproceedings{chen2019codah,\n title={CODAH: An Adversarially-Authored Question Answering Dataset for Common Sense},\n author={Chen, Michael and D'Arcy, Mike and Liu, Alisa and Fernandez, Jared and Downey, Doug},\n booktitle={Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP},\n pages={63--69},\n year={2019}\n}\n", "homepage": "https://github.com/Websail-NU/CODAH", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "question_category": {"num_classes": 6, "names": ["Idioms", "Reference", "Polysemy", "Negation", "Quantitative", "Others"], "names_file": null, "id": null, "_type": "ClassLabel"}, "question_propmt": {"dtype": "string", "id": null, "_type": "Value"}, "candidate_answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "correct_answer_idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "codah", "config_name": "fold_3", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 342844, "num_examples": 1665, "dataset_name": "codah"}, "validation": {"name": "validation", "num_bytes": 114211, "num_examples": 556, "dataset_name": "codah"}, "test": {"name": "test", "num_bytes": 114177, "num_examples": 555, "dataset_name": "codah"}}, "download_checksums": {"https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_3/train.tsv": {"num_bytes": 291210, "checksum": "27e6a6b55770bde68cedb97f8046af2fa469ca6082eb3ffc803ab61b39555bae"}, "https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_3/dev.tsv": {"num_bytes": 96963, "checksum": "96c69a4240e6b0ef1f2364aeaa9d549e1b36188814f63d52bbc678a57b855923"}, "https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_3/test.tsv": {"num_bytes": 96957, "checksum": "eb91713dd8aaca7bf7997168bd072ea0aa55c947c91e6f0869f48a2904a3c8fd"}}, "download_size": 485130, "post_processing_size": null, "dataset_size": 571232, "size_in_bytes": 1056362}, "fold_4": {"description": "The COmmonsense Dataset Adversarially-authored by Humans (CODAH) is an evaluation set for commonsense question-answering in the sentence completion style of SWAG. As opposed to other automatically generated NLI datasets, CODAH is adversarially constructed by humans who can view feedback from a pre-trained model and use this information to design challenging commonsense questions. 
Our experimental results show that CODAH questions present a complementary extension to the SWAG dataset, testing additional modes of common sense.\n", "citation": "@inproceedings{chen2019codah,\n title={CODAH: An Adversarially-Authored Question Answering Dataset for Common Sense},\n author={Chen, Michael and D'Arcy, Mike and Liu, Alisa and Fernandez, Jared and Downey, Doug},\n booktitle={Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP},\n pages={63--69},\n year={2019}\n}\n", "homepage": "https://github.com/Websail-NU/CODAH", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "question_category": {"num_classes": 6, "names": ["Idioms", "Reference", "Polysemy", "Negation", "Quantitative", "Others"], "names_file": null, "id": null, "_type": "ClassLabel"}, "question_propmt": {"dtype": "string", "id": null, "_type": "Value"}, "candidate_answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "correct_answer_idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "codah", "config_name": "fold_4", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 342844, "num_examples": 1665, "dataset_name": "codah"}, "validation": {"name": "validation", "num_bytes": 114177, "num_examples": 555, "dataset_name": "codah"}, "test": {"name": "test", "num_bytes": 114211, "num_examples": 556, "dataset_name": "codah"}}, "download_checksums": {"https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_4/train.tsv": {"num_bytes": 291210, "checksum": "27e6a6b55770bde68cedb97f8046af2fa469ca6082eb3ffc803ab61b39555bae"}, "https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_4/dev.tsv": {"num_bytes": 96957, "checksum": "eb91713dd8aaca7bf7997168bd072ea0aa55c947c91e6f0869f48a2904a3c8fd"}, "https://raw.githubusercontent.com/Websail-NU/CODAH/master/data/cv_split/fold_4/test.tsv": {"num_bytes": 96963, "checksum": "96c69a4240e6b0ef1f2364aeaa9d549e1b36188814f63d52bbc678a57b855923"}}, "download_size": 485130, "post_processing_size": null, "dataset_size": 571232, "size_in_bytes": 1056362}}
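
To inspect this metadata, a minimal sketch (assuming the file is saved locally as dataset_infos.json) that prints the number of examples per split for each configuration:

import json

# Load the metadata file from the current directory.
with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

# Top-level keys are configuration names; each entry lists its splits and their sizes.
for config_name, info in infos.items():
    sizes = {split: details["num_examples"] for split, details in info["splits"].items()}
    print(config_name, sizes)

For this commit it would print, for example, codah {'train': 2776} and fold_0 {'train': 1665, 'validation': 556, 'test': 555}.
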
dummy/codah/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fa659c7c3e15efef7f8030cbeacb3b9788904323a8967831081aa64efd283c0
+ size 676
dummy/fold_0/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac95759be699d59d4dda4a428a77c44ab9801dfb643f1ce33505e70043945791
+ size 1823
dummy/fold_1/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09ee7049cb5d4f0c0ebe01da71dc07aca92735d78f237a7887095567f395d266
+ size 1823
dummy/fold_2/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f81d0267835980ab15319f23252277ef5cbc8283c30b9c4c53821771a350a17
+ size 2009
dummy/fold_3/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49038c44f750eee9fe4a2965542fa4c776d0303af9abbb99a7befdf7662ab07c
+ size 1809
dummy/fold_4/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a9f48da854b45290db468b7050aa1fc51df39bcf50c94027a7ff0ec3da42da0
+ size 1809