Datasets: hans

Task Categories: text-classification
Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: expert-generated
Annotations Creators: expert-generated
Source Datasets: original
Licenses: unknown
system committed on
Commit bedff50
1 Parent(s): a6404e4

Update files from the datasets library (from 1.2.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

dataset_infos.json CHANGED
@@ -1 +1 @@
- {"plain_text": {"description": "The HANS dataset is an NLI evaluation set that tests specific hypotheses about invalid heuristics that NLI models are likely to learn.\n", "citation": "@article{DBLP:journals/corr/abs-1902-01007,\n author = {R. Thomas McCoy and\n Ellie Pavlick and\n Tal Linzen},\n title = {Right for the Wrong Reasons: Diagnosing Syntactic Heuristics in Natural\n Language Inference},\n journal = {CoRR},\n volume = {abs/1902.01007},\n year = {2019},\n url = {http://arxiv.org/abs/1902.01007},\n archivePrefix = {arXiv},\n eprint = {1902.01007},\n timestamp = {Tue, 21 May 2019 18:03:36 +0200},\n biburl = {https://dblp.org/rec/journals/corr/abs-1902-01007.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n", "homepage": "https://github.com/tommccoy1/hans", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "non-entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": {"features": null, "resources_checksums": {"train": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "hans", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3024446, "num_examples": 30000, "dataset_name": "hans"}, "validation": {"name": "validation", "num_bytes": 3019374, "num_examples": 30000, "dataset_name": "hans"}}, "download_checksums": {"https://raw.githubusercontent.com/tommccoy1/hans/master/heuristics_train_set.txt": {"num_bytes": 15485296, "checksum": "49245bd5fdb0b185dcbfbf48f0f16513c62ad5bc9fad0b8800dc48d6818ee5cf"}, "https://raw.githubusercontent.com/tommccoy1/hans/master/heuristics_evaluation_set.txt": {"num_bytes": 15462062, "checksum": "c55b62feef9913070e88f38938dc2492018c945ac81f70139346472494124e79"}}, "download_size": 30947358, "post_processing_size": 0, "dataset_size": 6043820, "size_in_bytes": 36991178}}
+ {"plain_text": {"description": "The HANS dataset is an NLI evaluation set that tests specific hypotheses about invalid heuristics that NLI models are likely to learn.\n", "citation": "@article{DBLP:journals/corr/abs-1902-01007,\n author = {R. Thomas McCoy and\n Ellie Pavlick and\n Tal Linzen},\n title = {Right for the Wrong Reasons: Diagnosing Syntactic Heuristics in Natural\n Language Inference},\n journal = {CoRR},\n volume = {abs/1902.01007},\n year = {2019},\n url = {http://arxiv.org/abs/1902.01007},\n archivePrefix = {arXiv},\n eprint = {1902.01007},\n timestamp = {Tue, 21 May 2019 18:03:36 +0200},\n biburl = {https://dblp.org/rec/journals/corr/abs-1902-01007.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n", "homepage": "https://github.com/tommccoy1/hans", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "non-entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "parse_premise": {"dtype": "string", "id": null, "_type": "Value"}, "parse_hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "binary_parse_premise": {"dtype": "string", "id": null, "_type": "Value"}, "binary_parse_hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "heuristic": {"dtype": "string", "id": null, "_type": "Value"}, "subcase": {"dtype": "string", "id": null, "_type": "Value"}, "template": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": {"features": null, "resources_checksums": {"train": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "hans", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 15916371, "num_examples": 30000, "dataset_name": "hans"}, "validation": {"name": "validation", "num_bytes": 15893137, "num_examples": 30000, "dataset_name": "hans"}}, "download_checksums": {"https://raw.githubusercontent.com/tommccoy1/hans/master/heuristics_train_set.txt": {"num_bytes": 15485296, "checksum": "49245bd5fdb0b185dcbfbf48f0f16513c62ad5bc9fad0b8800dc48d6818ee5cf"}, "https://raw.githubusercontent.com/tommccoy1/hans/master/heuristics_evaluation_set.txt": {"num_bytes": 15462062, "checksum": "c55b62feef9913070e88f38938dc2492018c945ac81f70139346472494124e79"}}, "download_size": 30947358, "post_processing_size": 0, "dataset_size": 31809508, "size_in_bytes": 62756866}}
dummy/plain_text/1.0.0/dummy_data.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6aa173a553448e1f186b8cede9e0755f2aaa5234c76704401655f755844270e6
- size 226
+ oid sha256:fbc6b4f4ab76a3cddfa874134b1daf4c9f77c8c1eccfcaea5b06070357b066a7
+ size 1295
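The dummy data archive is tracked with Git LFS, so the change above only rewrites the pointer file (new oid and size); the zip contents themselves live in LFS storage. As an illustrative sketch (not part of the commit), such a pointer can be parsed by splitting each line on the first space; the path below is hypothetical and assumes the pointer has not yet been replaced by the real file:

```python
def read_lfs_pointer(path):
    """Parse a Git LFS pointer file into a dict of its key/value lines."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# Hypothetical usage on an un-smudged checkout:
# ptr = read_lfs_pointer("dummy/plain_text/1.0.0/dummy_data.zip")
# print(ptr["oid"], ptr["size"])  # e.g. "sha256:fbc6b4f4..." and "1295"
```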
hans.py CHANGED
@@ -76,6 +76,13 @@ class Hans(datasets.GeneratorBasedBuilder):
  "premise": datasets.Value("string"),
  "hypothesis": datasets.Value("string"),
  "label": datasets.features.ClassLabel(names=["entailment", "non-entailment"]),
+ "parse_premise": datasets.Value("string"),
+ "parse_hypothesis": datasets.Value("string"),
+ "binary_parse_premise": datasets.Value("string"),
+ "binary_parse_hypothesis": datasets.Value("string"),
+ "heuristic": datasets.Value("string"),
+ "subcase": datasets.Value("string"),
+ "template": datasets.Value("string"),
  }
  ),
  # No default supervised_keys (as we have to pass both premise
@@ -122,4 +129,15 @@ class Hans(datasets.GeneratorBasedBuilder):
  if split_line[0] == "-":
  continue
  # Works for both splits even though dev has some extra human labels.
- yield idx, {"premise": split_line[5], "hypothesis": split_line[6], "label": split_line[0]}
+ yield idx, {
+ "premise": split_line[5],
+ "hypothesis": split_line[6],
+ "label": split_line[0],
+ "binary_parse_premise": split_line[1],
+ "binary_parse_hypothesis": split_line[2],
+ "parse_premise": split_line[3],
+ "parse_hypothesis": split_line[4],
+ "heuristic": split_line[8],
+ "subcase": split_line[9],
+ "template": split_line[10],
+ }
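The new yield builds each example positionally from the tab-separated HANS files. As a sketch of that mapping (the column names below follow the MNLI-style header of heuristics_*_set.txt and are an assumption here; index 7, the pairID, is not used by the script):

```python
# Assumed column order in heuristics_train_set.txt / heuristics_evaluation_set.txt:
#   0 gold_label, 1 sentence1_binary_parse, 2 sentence2_binary_parse,
#   3 sentence1_parse, 4 sentence2_parse, 5 sentence1, 6 sentence2,
#   7 pairID (unused), 8 heuristic, 9 subcase, 10 template
def parse_hans_line(line):
    split_line = line.rstrip("\n").split("\t")
    return {
        "premise": split_line[5],
        "hypothesis": split_line[6],
        "label": split_line[0],
        "binary_parse_premise": split_line[1],
        "binary_parse_hypothesis": split_line[2],
        "parse_premise": split_line[3],
        "parse_hypothesis": split_line[4],
        "heuristic": split_line[8],
        "subcase": split_line[9],
        "template": split_line[10],
    }
```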