Mario Šaško committed
Commit 6be40aa
1 Parent(s): 72fa009

Make HANS dataset streamable (#4155)


* Make HANS dataset streamable

* Fix tags

Commit from https://github.com/huggingface/datasets/commit/0060f4c7d3f8e4fb7a3694a925ca3b7f44e1f2ea
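"Streamable" here means the loader can be used with `streaming=True`, reading the remote TSV files lazily instead of downloading and preparing them up front. A minimal usage sketch with the standard `datasets` API (the example's field names come from the features listed in dataset_infos.json below):

from datasets import load_dataset

# Stream HANS: examples are yielded lazily from the remote text files,
# with no download-and-prepare step.
ds = load_dataset("hans", split="train", streaming=True)
print(next(iter(ds)))  # dict with premise, hypothesis, label, parses, ...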

Files changed (3):
  1. README.md +16 -0
  2. dataset_infos.json +1 -1
  3. hans.py +2 -2
README.md CHANGED
@@ -1,6 +1,22 @@
 ---
+annotations_creators:
+- expert-generated
+language_creators:
+- expert-generated
 languages:
 - en
+licenses:
+- unknown
+multilinguality:
+- monolingual
+size_categories:
+- 10K<n<100K
+source_datasets:
+- original
+task_categories:
+- text-classification
+task_ids:
+- natural-language-inference
 paperswithcode_id: hans
 pretty_name: Heuristic Analysis for NLI Systems
 ---
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"plain_text": {"description": "The HANS dataset is an NLI evaluation set that tests specific hypotheses about invalid heuristics that NLI models are likely to learn.\n", "citation": "@article{DBLP:journals/corr/abs-1902-01007,\n author = {R. Thomas McCoy and\n Ellie Pavlick and\n Tal Linzen},\n title = {Right for the Wrong Reasons: Diagnosing Syntactic Heuristics in Natural\n Language Inference},\n journal = {CoRR},\n volume = {abs/1902.01007},\n year = {2019},\n url = {http://arxiv.org/abs/1902.01007},\n archivePrefix = {arXiv},\n eprint = {1902.01007},\n timestamp = {Tue, 21 May 2019 18:03:36 +0200},\n biburl = {https://dblp.org/rec/journals/corr/abs-1902-01007.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n", "homepage": "https://github.com/tommccoy1/hans", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "non-entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "parse_premise": {"dtype": "string", "id": null, "_type": "Value"}, "parse_hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "binary_parse_premise": {"dtype": "string", "id": null, "_type": "Value"}, "binary_parse_hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "heuristic": {"dtype": "string", "id": null, "_type": "Value"}, "subcase": {"dtype": "string", "id": null, "_type": "Value"}, "template": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": {"features": null, "resources_checksums": {"train": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "hans", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 15916371, "num_examples": 30000, "dataset_name": "hans"}, "validation": {"name": "validation", "num_bytes": 15893137, "num_examples": 30000, "dataset_name": "hans"}}, "download_checksums": {"https://raw.githubusercontent.com/tommccoy1/hans/master/heuristics_train_set.txt": {"num_bytes": 15485296, "checksum": "49245bd5fdb0b185dcbfbf48f0f16513c62ad5bc9fad0b8800dc48d6818ee5cf"}, "https://raw.githubusercontent.com/tommccoy1/hans/master/heuristics_evaluation_set.txt": {"num_bytes": 15462062, "checksum": "c55b62feef9913070e88f38938dc2492018c945ac81f70139346472494124e79"}}, "download_size": 30947358, "post_processing_size": 0, "dataset_size": 31809508, "size_in_bytes": 62756866}}
+ {"plain_text": {"description": "The HANS dataset is an NLI evaluation set that tests specific hypotheses about invalid heuristics that NLI models are likely to learn.\n", "citation": "@article{DBLP:journals/corr/abs-1902-01007,\n author = {R. Thomas McCoy and\n Ellie Pavlick and\n Tal Linzen},\n title = {Right for the Wrong Reasons: Diagnosing Syntactic Heuristics in Natural\n Language Inference},\n journal = {CoRR},\n volume = {abs/1902.01007},\n year = {2019},\n url = {http://arxiv.org/abs/1902.01007},\n archivePrefix = {arXiv},\n eprint = {1902.01007},\n timestamp = {Tue, 21 May 2019 18:03:36 +0200},\n biburl = {https://dblp.org/rec/journals/corr/abs-1902-01007.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n", "homepage": "https://github.com/tommccoy1/hans", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "non-entailment"], "id": null, "_type": "ClassLabel"}, "parse_premise": {"dtype": "string", "id": null, "_type": "Value"}, "parse_hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "binary_parse_premise": {"dtype": "string", "id": null, "_type": "Value"}, "binary_parse_hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "heuristic": {"dtype": "string", "id": null, "_type": "Value"}, "subcase": {"dtype": "string", "id": null, "_type": "Value"}, "template": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": {"features": null, "resources_checksums": {"train": {}, "validation": {}}}, "supervised_keys": null, "task_templates": null, "builder_name": "hans", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 15916371, "num_examples": 30000, "dataset_name": "hans"}, "validation": {"name": "validation", "num_bytes": 15893137, "num_examples": 30000, "dataset_name": "hans"}}, "download_checksums": {"https://raw.githubusercontent.com/tommccoy1/hans/master/heuristics_train_set.txt": {"num_bytes": 15485296, "checksum": "49245bd5fdb0b185dcbfbf48f0f16513c62ad5bc9fad0b8800dc48d6818ee5cf"}, "https://raw.githubusercontent.com/tommccoy1/hans/master/heuristics_evaluation_set.txt": {"num_bytes": 15462062, "checksum": "c55b62feef9913070e88f38938dc2492018c945ac81f70139346472494124e79"}}, "download_size": 30947358, "post_processing_size": 0, "dataset_size": 31809508, "size_in_bytes": 62756866}}
hans.py CHANGED
@@ -118,10 +118,10 @@ class Hans(datasets.GeneratorBasedBuilder):
         Yields:
             dictionaries containing "premise", "hypothesis" and "label" strings
         """
-        for idx, line in enumerate(open(filepath, "rb")):
+        for idx, line in enumerate(open(filepath, "r", encoding="utf-8")):
             if idx == 0:
                 continue  # skip header
-            line = line.strip().decode("utf-8")
+            line = line.strip()
             split_line = line.split("\t")
             # Examples not marked with a three out of five consensus are marked with
             # "-" and should not be used in standard evaluations.