kiddothe2b committed on
Commit
6d3278d
1 Parent(s): 82ac621

Create new file

Files changed (1)
  1. contract-nli.py +218 -0
contract-nli.py ADDED
@@ -0,0 +1,218 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ContractNLI: A Benchmark Dataset for Document-level Natural Language Inference for Contracts in English."""
+
+ import json
+ import os
+ import textwrap
+
+ import datasets
+
+ MAIN_PATH = 'https://huggingface.co/datasets/cognitivplus/contract-nli/resolve/main'
+
+ MAIN_CITATION = """\
+ @inproceedings{koreeda-manning-2021-contractnli-dataset,
+     title = "{C}ontract{NLI}: A Dataset for Document-level Natural Language Inference for Contracts",
+     author = "Koreeda, Yuta and
+       Manning, Christopher",
+     booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
+     month = nov,
+     year = "2021",
+     address = "Punta Cana, Dominican Republic",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2021.findings-emnlp.164",
+     doi = "10.18653/v1/2021.findings-emnlp.164",
+     pages = "1907--1919",
+ }"""
+
+ _DESCRIPTION = """\
+ ContractNLI: A Benchmark Dataset for Document-level Natural Language Inference for Contracts in English
+ """
+
+
+ CONTRACTNLI_LABELS = ["contradiction", "entailment", "neutral"]
+
+
+ class ContractNLIConfig(datasets.BuilderConfig):
+     """BuilderConfig for ContractNLI."""
+
+     def __init__(
+         self,
+         url,
+         data_url,
+         data_file,
+         citation,
+         text_column=None,
+         label_classes=None,
+         **kwargs,
+     ):
+         """BuilderConfig for ContractNLI.
+
+         Args:
+           url: `string`, url for information about the data set
+           data_url: `string`, url to download the zip file from
+           data_file: `string`, filename for the data set
+           citation: `string`, citation for the data set
+           text_column: `string`, name of the column in the jsonl file corresponding
+             to the text
+           label_classes: `list[string]`, the list of classes if the label is
+             categorical. If not provided, then the label will be of type
+             `datasets.Value('float32')`.
+           **kwargs: keyword arguments forwarded to super.
+         """
+         super(ContractNLIConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+         self.label_classes = label_classes
+         self.url = url
+         self.data_url = data_url
+         self.data_file = data_file
+         self.citation = citation
+         self.text_column = text_column
+
+
+ class ContractNLI(datasets.GeneratorBasedBuilder):
+     """ContractNLI: A Benchmark Dataset for Document-level Natural Language Inference for Contracts in English. Version 1.0"""
+
+     BUILDER_CONFIGS = [
+         ContractNLIConfig(
+             name="contractnli_a",
+             description=textwrap.dedent(
+                 """\
+             The ContractNLI dataset consists of Non-Disclosure Agreements (NDAs). All NDAs have been labeled based
+             on several hypothesis templates as entailment, neutral or contradiction. In this version of the task
+             (Task A), the input consists of the part of the document that is relevant to the hypothesis.
+             """
+             ),
+             text_column="premise",
+             label_classes=CONTRACTNLI_LABELS,
+             data_url=f"{MAIN_PATH}/contract_nli.zip",
+             data_file="contract_nli_v1.jsonl",
+             url="https://stanfordnlp.github.io/contract-nli/",
+             citation=textwrap.dedent(
+                 """\
+             @inproceedings{koreeda-manning-2021-contractnli-dataset,
+                 title = "{C}ontract{NLI}: A Dataset for Document-level Natural Language Inference for Contracts",
+                 author = "Koreeda, Yuta and
+                   Manning, Christopher",
+                 booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
+                 month = nov,
+                 year = "2021",
+                 address = "Punta Cana, Dominican Republic",
+                 publisher = "Association for Computational Linguistics",
+                 url = "https://aclanthology.org/2021.findings-emnlp.164",
+                 doi = "10.18653/v1/2021.findings-emnlp.164",
+                 pages = "1907--1919",
+             }"""
+             ),
+         ),
+         ContractNLIConfig(
+             name="contractnli_b",
+             description=textwrap.dedent(
+                 """\
+             The ContractNLI dataset consists of Non-Disclosure Agreements (NDAs). All NDAs have been labeled based
+             on several hypothesis templates as entailment, neutral or contradiction. In this version of the task
+             (Task B), the input consists of the full document.
+             """
+             ),
+             text_column="premise",
+             label_classes=CONTRACTNLI_LABELS,
+             data_url=f"{MAIN_PATH}/contract_nli_long.zip",
+             data_file="contract_nli_long.jsonl",
+             url="https://stanfordnlp.github.io/contract-nli/",
+             citation=textwrap.dedent(
+                 """\
+             @inproceedings{koreeda-manning-2021-contractnli-dataset,
+                 title = "{C}ontract{NLI}: A Dataset for Document-level Natural Language Inference for Contracts",
+                 author = "Koreeda, Yuta and
+                   Manning, Christopher",
+                 booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
+                 month = nov,
+                 year = "2021",
+                 address = "Punta Cana, Dominican Republic",
+                 publisher = "Association for Computational Linguistics",
+                 url = "https://aclanthology.org/2021.findings-emnlp.164",
+                 doi = "10.18653/v1/2021.findings-emnlp.164",
+                 pages = "1907--1919",
+             }"""
+             ),
+         ),
+     ]
+
+     def _info(self):
+         features = {
+             "premise": datasets.Value("string"),
+             "hypothesis": datasets.Value("string"),
+             "label": datasets.ClassLabel(names=CONTRACTNLI_LABELS)
+         }
+
+         return datasets.DatasetInfo(
+             description=self.config.description,
+             features=datasets.Features(features),
+             homepage=self.config.url,
+             citation=self.config.citation + "\n" + MAIN_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = dl_manager.download_and_extract(self.config.data_url)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": os.path.join(data_dir, self.config.data_file), "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": os.path.join(data_dir, self.config.data_file), "split": "test"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, self.config.data_file),
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         """This function returns the examples in the raw (text) form."""
+         if self.config.name == "contractnli_a":
+             with open(filepath, "r", encoding="utf-8") as f:
+                 for id_, row in enumerate(f):
+                     data = json.loads(row)
+                     if data["subset"] == split:
+                         yield id_, {
+                             "premise": data["premise"],
+                             "hypothesis": data["hypothesis"],
+                             "label": data["label"],
+                         }
+         elif self.config.name == "contractnli_b":
+             with open(filepath, "r", encoding="utf-8") as f:
+                 # Each Task B record holds a full document and a list of
+                 # hypothesis/label pairs, so emit one example per pair.
+                 sid = -1
+                 for id_, row in enumerate(f):
+                     data = json.loads(row)
+                     if data["subset"] == split:
+                         for sample in data['hypothesises/labels']:
+                             sid += 1
+                             yield sid, {
+                                 "premise": data["premise"],
+                                 "hypothesis": sample['hypothesis'],
+                                 "label": sample['label'],
+                             }
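
A minimal usage sketch (not part of the commit), showing how the two configurations could be loaded with the `datasets` library. Passing the local script path to `load_dataset` and the availability of the zip archives under MAIN_PATH are assumptions; the record layouts in the comments are inferred from the parsing logic in `_generate_examples`.

# Minimal usage sketch; local script path and reachable data archives are assumptions.
from datasets import load_dataset

# Task A: "premise" is the span of the NDA relevant to the hypothesis.
# Expected JSONL record (inferred from _generate_examples):
#   {"subset": "train"|"dev"|"test", "premise": ..., "hypothesis": ..., "label": ...}
task_a = load_dataset("contract-nli.py", "contractnli_a")

# Task B: "premise" is the full NDA; each record carries a list of
# hypothesis/label pairs under "hypothesises/labels", and the loader
# yields one example per pair.
task_b = load_dataset("contract-nli.py", "contractnli_b")

print(task_a)               # DatasetDict with "train", "test" and "validation" splits
print(task_a["train"][0])   # {"premise": ..., "hypothesis": ..., "label": 0|1|2}

Newer releases of `datasets` restrict or drop support for loading scripts, so an older release or `trust_remote_code=True` may be required.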