chenz16 committed
Commit
93689a9
1 Parent(s): f95ef43

Upload curriculum_benchmark.py

Files changed (1)
  1. curriculum_benchmark.py +226 -0
curriculum_benchmark.py ADDED
@@ -0,0 +1,226 @@
+ # Lint as: python3
+ """CURRICULUM Benchmark"""
+
+ import json
+ import os
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @misc{https://doi.org/10.48550/arxiv.2204.06283,
+   doi = {10.48550/ARXIV.2204.06283},
+   url = {https://arxiv.org/abs/2204.06283},
+   author = {Chen, Zeming and Gao, Qiyue},
+   keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
+   title = {Curriculum: A Broad-Coverage Benchmark for Linguistic Phenomena in Natural Language Understanding},
+   publisher = {arXiv},
+   year = {2022},
+   copyright = {Creative Commons Attribution 4.0 International}
+ }
+ """
+
+ _DESCRIPTION = """\
+ We introduce Curriculum as a new format of NLI benchmark for evaluation of broad-coverage linguistic phenomena.
+ Curriculum contains a collection of datasets that covers 36 types of major linguistic phenomena and an evaluation procedure
+ for diagnosing how well a language model captures reasoning skills for distinct types of linguistic phenomena.
+ We show that this linguistic-phenomena-driven benchmark can serve as an effective tool for diagnosing
+ model behavior and verifying model learning quality.
+ """
+
+ _HOMEPAGE = "https://github.com/eric11eca/curriculum-ling"
+ _LICENSE = "CC BY-SA 3.0"
+ _URL = "https://github.com/eric11eca/curriculum-ling/blob/main/benchmark/tasks/"
+
+
+ _DESCRIPTION_MAP = {
+     "analytic": "analytical thinking.",
+     "atomic": "reasoning on commonsense knowledge graph.",
+ }
+
+ # The 36 linguistic-phenomenon tasks; one builder config is created per name.
+ _TASK_NAMES = ["analytic", "defeasible", "boolean", "comparative",
+                "conditional", "context_align", "control", "coreference",
+                "cosmoqa", "counterfactual", "counting", "drop",
+                "entailment_tree", "ester", "hellaswag", "hypernymy",
+                "hyponymy", "kg_relations", "lexical", "logiqa",
+                "monotonicity_infer", "negation", "ner", "physicalqa",
+                "puns", "quantifier", "sentiment", "socialqa",
+                "spatial", "sprl", "syntactic_alternation", "syntactic_variation",
+                "temporal", "transitive", "verbcorner", "verbnet"]
+
+ task_label_dict = {
+     "lexical": ["entailed", "not-entailed"],
+     "transitive": ["entailed", "not-entailed"],
+     "hypernymy": ["entailed", "not-entailed"],
+     "hyponymy": ["entailed", "not-entailed"],
+     "ner": ["entailed", "not-entailed"],
+     "verbnet": ["entailed", "not-entailed"],
+     "verbcorner": ["entailed", "not-entailed"],
+     "syntactic_alternation": ["entailed", "not-entailed"],
+     "syntactic_variation": ["entailed", "not-entailed"],
+     "boolean": ["entailment", "contradiction", "neutral"],
+     "comparative": ["entailment", "contradiction", "neutral"],
+     "conditional": ["entailment", "contradiction", "neutral"],
+     "counting": ["entailment", "contradiction", "neutral"],
+     "negation": ["entailment", "contradiction", "neutral"],
+     "quantifier": ["entailment", "contradiction", "neutral"],
+     "monotonicity_infer": ["entailed", "not-entailed"],
+     "sentiment": ["entailed", "not-entailed"],
+     "kg_relations": ["entailed", "not-entailed"],
+     "puns": ["entailed", "not-entailed"],
+     "coreference": ["entailed", "not-entailed"],
+     "context_align": ["entailed", "not-entailed"],
+     "sprl": ["entailed", "not-entailed"],
+     "analytic": ["entailed", "not-entailed"],
+     "entailment_tree": ["entailed", "not-entailed"],
+     "socialqa": ["entailed", "not-entailed"],
+     "physicalqa": ["entailed", "not-entailed"],
+     "hellaswag": ["entailed", "not-entailed"],
+     "cosmoqa": ["entailed", "not-entailed"],
+     "logiqa": ["entailed", "not-entailed"],
+     "ester": ["entailed", "not-entailed"],
+     "drop": ["entailed", "not-entailed"],
+     "control": ["entailment", "contradiction", "neutral"],
+     "spatial": ["entailed", "not-entailed"],
+     "temporal": ["entailed", "not-entailed"],
+     "defeasible": ["entailed", "not-entailed"],
+     "counterfactual": ["entailed", "not-entailed"]
+ }
+
+
+ def read_file(path, mode="r", **kwargs):
+     with open(path, mode=mode, **kwargs) as f:
+         return f.read()
+
+
+ def write_file(data, path, mode="w", **kwargs):
+     with open(path, mode=mode, **kwargs) as f:
+         f.write(data)
+
+
+ def read_json(path, mode="r", **kwargs):
+     return json.loads(read_file(path, mode=mode, **kwargs))
+
+
+ def write_json(data, path):
+     return write_file(json.dumps(data, indent=2), path)
+
+
+ def read_jsonl(path, mode="r", **kwargs):
+     # Manually open because .splitlines is different from iterating over lines
+     ls = []
+     with open(path, mode, **kwargs) as f:
+         for line in f:
+             ls.append(json.loads(line))
+     return ls
+
+
+ def write_jsonl(data, path):
+     assert isinstance(data, list)
+     lines = [to_jsonl(elem) for elem in data]
+     write_file("\n".join(lines), path)
+
+
+ def to_jsonl(data):
+     return json.dumps(data).replace("\n", "")
+
+
+ class CurriculumConfig(datasets.BuilderConfig):
+     """BuilderConfig for Curriculum."""
+
+     def __init__(self, features, data_url, citation, url, label_classes=["entailed", "not-entailed"], **kwargs):
+         """BuilderConfig for Curriculum.
+         Args:
+           features: `list[string]`, list of the features that will appear in the
+             feature dict. Should not include "label".
+           data_url: `string`, url to download the zip file from.
+           citation: `string`, citation for the data set.
+           url: `string`, url for information about the data set.
+           label_classes: `list[string]`, the list of classes for the label if the
+             label is present as a string. Non-string labels will be cast to either
+             'False' or 'True'.
+           **kwargs: keyword arguments forwarded to super.
+         """
+         # Version history:
+         # 1.0.0: Initial version.
+         super(CurriculumConfig, self).__init__(
+             version=datasets.Version("1.0.0"), **kwargs)
+         self.features = features
+         self.label_classes = label_classes
+         self.data_url = data_url
+         self.citation = citation
+         self.url = url
+
+
+ class CurriculumBenchmark(datasets.GeneratorBasedBuilder):
+     """Curriculum Benchmark. Version 1.0.0"""
+
+     BUILDER_CONFIGS = [
+         CurriculumConfig(
+             name=task_name,
+             description=_DESCRIPTION,
+             label_classes=task_label_dict[task_name],
+             features=["premise", "hypothesis", "idx", "gold_label"],
+             data_url=f"https://github.com/eric11eca/curriculum-ling/raw/main/benchmark/tasks/{task_name}.zip",
+             citation=_CITATION,
+             url="https://github.com/eric11eca/curriculum-ling/",
+         ) for task_name in _TASK_NAMES
+     ]
+
+     def _info(self):
+         features = {feature: datasets.Value(
+             "string") for feature in self.config.features}
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(features),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     @staticmethod
+     def _get_filepath(dl_dir, split):
+         return os.path.join(dl_dir, split + ".jsonl")
+
+     def _split_generators(self, dl_manager):
+         dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
+         task_name = _get_task_name_from_data_url(self.config.data_url)
+         dl_dir = os.path.join(dl_dir, task_name)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_file": os.path.join(dl_dir, "train.jsonl"),
+                     "split": datasets.Split.TRAIN,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "data_file": os.path.join(dl_dir, "val.jsonl"),
+                     "split": datasets.Split.VALIDATION,
+                 },
+             )
+         ]
+
+     def _generate_examples(self, data_file, split):
+         """This function returns the examples in the raw (text) form."""
+         logger.info("generating examples from = %s", data_file)
+
+         dataset = read_jsonl(data_file)
+         for id_, data in enumerate(dataset):
+
+             yield id_, {
+                 "premise": data["premise"],
+                 "hypothesis": data["hypothesis"],
+                 "gold_label": data["gold_label"],
+                 "idx": id_
+             }
+
+
+ def _get_task_name_from_data_url(data_url):
+     return data_url.split("/")[-1].split(".")[0]
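For reference, a minimal usage sketch of how this loading script might be invoked with the `datasets` library. It is not part of the committed file; the local script path, the choice of the "boolean" config, and the assumption of a `datasets` release that still supports script-based loading are all illustrative.

from datasets import load_dataset

# Any name from _TASK_NAMES can be passed as the config; "boolean" is just an example.
dataset = load_dataset("./curriculum_benchmark.py", "boolean")

# The script defines "train" and "validation" splits; each example carries
# "premise", "hypothesis", "gold_label", and "idx" fields.
print(dataset["train"][0])
print(dataset["validation"].num_rows)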