Datasets:
Sagnik Ray Choudhury
committed on
Commit
•
e89d7cb
1
Parent(s):
316d87e
chore: fix missing label + loader script
Browse files- snli-cf-kaushik.py +104 -0
- test.jsonl +2 -2
- train.jsonl +2 -2
- validation.jsonl +2 -2
snli-cf-kaushik.py
ADDED
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
|
3 |
+
import datasets
|
4 |
+
|
5 |
+
|
6 |
+
# Module-level logger, obtained via the `datasets` library's logging facade.
logger = datasets.logging.get_logger(__name__)
|
7 |
+
|
8 |
+
|
9 |
+
_CITATION = """\
|
10 |
+
@inproceedings{DBLP:conf/iclr/KaushikHL20,
|
11 |
+
author = {Divyansh Kaushik and
|
12 |
+
Eduard H. Hovy and
|
13 |
+
Zachary Chase Lipton},
|
14 |
+
title = {Learning The Difference That Makes {A} Difference With Counterfactually-Augmented
|
15 |
+
Data},
|
16 |
+
booktitle = {8th International Conference on Learning Representations, {ICLR} 2020,
|
17 |
+
Addis Ababa, Ethiopia, April 26-30, 2020},
|
18 |
+
publisher = {OpenReview.net},
|
19 |
+
year = {2020},
|
20 |
+
url = {https://openreview.net/forum?id=Sklgs0NFvr},
|
21 |
+
timestamp = {Thu, 07 May 2020 17:11:48 +0200},
|
22 |
+
biburl = {https://dblp.org/rec/conf/iclr/KaushikHL20.bib},
|
23 |
+
bibsource = {dblp computer science bibliography, https://dblp.org}
|
24 |
+
}
|
25 |
+
"""
|
26 |
+
|
27 |
+
_DESCRIPTION = """\
|
28 |
+
The SNLI corpus (version 1.0) is a collection of 570k human-written English sentence pairs manually labeled for balanced classification with the labels entailment, contradiction, and neutral, supporting the task of natural language inference (NLI), also known as recognizing textual entailment (RTE). In the ICLR 2020 paper [Learning the Difference that Makes a Difference with Counterfactually-Augmented Data](https://openreview.net/forum?id=Sklgs0NFvr), Kaushik et. al. provided a dataset with counterfactual perturbations on the SNLI and IMDB data. This repository contains the original and counterfactual perturbations for the SNLI data, which was generated after processing the original data from [here](https://github.com/acmi-lab/counterfactually-augmented-data)."""
|
29 |
+
|
30 |
+
_URL = "https://huggingface.co/datasets/sagnikrayc/snli-cf-kaushik/resolve/main"
|
31 |
+
_URLS = {
|
32 |
+
"train": f"{_URL}/train.jsonl",
|
33 |
+
"validation": f"{_URL}/validation.jsonl",
|
34 |
+
"test": f"{_URL}/test.jsonl",
|
35 |
+
}
|
36 |
+
|
37 |
+
|
38 |
+
class SnliCFConfig(datasets.BuilderConfig):
    """BuilderConfig for the SNLI counterfactual (SNLI-CF) dataset.

    Fixes copy-pasted docstrings that referred to "SQUAD" — this loader is
    for SNLI-CF, not SQuAD.
    """

    def __init__(self, **kwargs):
        """Construct a BuilderConfig for SNLI-CF.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        # Zero-argument super() — equivalent to super(SnliCFConfig, self) and
        # the idiomatic Python 3 form.
        super().__init__(**kwargs)
|
48 |
+
|
49 |
+
|
50 |
+
class SnliCF(datasets.GeneratorBasedBuilder):
    """SNLI-CF: SNLI with counterfactually-augmented examples (Kaushik et al., ICLR 2020)."""

    BUILDER_CONFIGS = [
        SnliCFConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    def _info(self):
        """Return the DatasetInfo: features, homepage, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("string"),
                    "premise": datasets.Value("string"),
                    "hypothesis": datasets.Value("string"),
                    "label": datasets.Value("string"),
                    "type": datasets.Value("string"),
                }
            ),
            # No default supervised_keys: both premise and hypothesis are inputs.
            supervised_keys=None,
            homepage="https://github.com/acmi-lab/counterfactually-augmented-data",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split JSONL files and declare the three splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}
            ),
            # BUG FIX: the test file was registered under Split.VALIDATION, which
            # both duplicated the validation split name and lost the test split.
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs from a JSONL file, one JSON object per line.

        Blank lines are skipped; the integer enumeration index is used as the
        example key.
        """
        logger.info("generating examples from = %s", filepath)
        # Removed a stray debug print(line) that spammed stdout for every row.
        with open(filepath, encoding="utf-8") as rf:
            for idx, line in enumerate(rf):
                if line:
                    _line = json.loads(line)
                    yield idx, {
                        "premise": _line["premise"],
                        "hypothesis": _line["hypothesis"],
                        "idx": _line["idx"],
                        # BUG FIX: "label" is declared in _info() features but was
                        # missing from the yielded example, so every generated row
                        # lacked the field the schema requires.
                        "label": _line["label"],
                        "type": _line["type"],
                    }
|
test.jsonl
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d56337df608b93433e65730078632d16b01af0f831efb9bd09958902c18935e0
|
3 |
+
size 437468
|
train.jsonl
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:97ca27b9afe6bb6591e1bae1eae76f2566612cb5005d451c88c8c92a2ab50a41
|
3 |
+
size 1771712
|
validation.jsonl
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d3a1511ce05dd960b858ab21e6cea532e17821a84c890329041931c8705828c2
|
3 |
+
size 217479
|