albertvillanova (HF staff) committed
Commit: ca822b4
Parent: 2adbb30

Delete loading script

Files changed (1)
  1. adv_glue.py +0 -330
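
With the loading script gone, the dataset is expected to be read directly from data files hosted on the Hub rather than built by adv_glue.py. A minimal usage sketch, assuming the repository keeps the configuration names defined in the deleted script below (e.g. adv_sst2); this is illustrative and not part of the commit:

from datasets import load_dataset

# Load the adversarial SST-2 validation split without a loading script.
# The "adv_sst2" config name and the "validation" split follow the
# deleted script shown below.
adv_sst2 = load_dataset("adv_glue", "adv_sst2", split="validation")
print(adv_sst2[0])  # expected keys: the text feature(s), "label", "idx"
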
adv_glue.py DELETED
@@ -1,330 +0,0 @@
- """The Adversarial GLUE (AdvGLUE) benchmark.
- Homepage: https://adversarialglue.github.io/
- """
- import json
- import os
- import textwrap
-
- import datasets
-
-
- _ADV_GLUE_CITATION = """\
- @article{Wang2021AdversarialGA,
-   title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},
-   author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},
-   journal={ArXiv},
-   year={2021},
-   volume={abs/2111.02840}
- }
- """
-
- _ADV_GLUE_DESCRIPTION = """\
- Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark
- that focuses on the adversarial robustness evaluation of language models. It covers five
- natural language understanding tasks from the famous GLUE tasks and is an adversarial
- version of GLUE benchmark.
- """
-
- _MNLI_BASE_KWARGS = dict(
-     text_features={
-         "premise": "premise",
-         "hypothesis": "hypothesis",
-     },
-     label_classes=["entailment", "neutral", "contradiction"],
-     label_column="label",
-     data_url="https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
-     data_dir="MNLI",
-     citation=textwrap.dedent(
-         """\
-         @InProceedings{N18-1101,
-           author = "Williams, Adina
-                     and Nangia, Nikita
-                     and Bowman, Samuel",
-           title = "A Broad-Coverage Challenge Corpus for
-                    Sentence Understanding through Inference",
-           booktitle = "Proceedings of the 2018 Conference of
-                        the North American Chapter of the
-                        Association for Computational Linguistics:
-                        Human Language Technologies, Volume 1 (Long
-                        Papers)",
-           year = "2018",
-           publisher = "Association for Computational Linguistics",
-           pages = "1112--1122",
-           location = "New Orleans, Louisiana",
-           url = "http://aclweb.org/anthology/N18-1101"
-         }
-         @article{bowman2015large,
-           title={A large annotated corpus for learning natural language inference},
-           author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
-           journal={arXiv preprint arXiv:1508.05326},
-           year={2015}
-         }"""
-     ),
-     url="http://www.nyu.edu/projects/bowman/multinli/",
- )
-
- ADVGLUE_DEV_URL = "https://adversarialglue.github.io/dataset/dev.zip"
-
-
- class AdvGlueConfig(datasets.BuilderConfig):
-     """BuilderConfig for Adversarial GLUE."""
-
-     def __init__(
-         self,
-         text_features,
-         label_column,
-         data_url,
-         data_dir,
-         citation,
-         url,
-         label_classes=None,
-         process_label=lambda x: x,
-         **kwargs,
-     ):
-         """BuilderConfig for Adversarial GLUE.
-
-         Args:
-           text_features: `dict[string, string]`, map from the name of the feature
-             dict for each text field to the name of the column in the tsv file
-           label_column: `string`, name of the column in the tsv file corresponding
-             to the label
-           data_url: `string`, url to download the zip file from
-           data_dir: `string`, the path to the folder containing the tsv files in the
-             downloaded zip
-           citation: `string`, citation for the data set
-           url: `string`, url for information about the data set
-           label_classes: `list[string]`, the list of classes if the label is
-             categorical. If not provided, then the label will be of type
-             `datasets.Value('float32')`.
-           process_label: `Function[string, any]`, function taking in the raw value
-             of the label and processing it to the form required by the label feature
-           **kwargs: keyword arguments forwarded to super.
-         """
-         super(AdvGlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-         self.text_features = text_features
-         self.label_column = label_column
-         self.label_classes = label_classes
-         self.data_url = data_url
-         self.data_dir = data_dir
-         self.citation = citation
-         self.url = url
-         self.process_label = process_label
-
-
- ADVGLUE_BUILDER_CONFIGS = [
-     AdvGlueConfig(
-         name="adv_sst2",
-         description=textwrap.dedent(
-             """Adversarial version of SST-2.
-             The Stanford Sentiment Treebank consists of sentences from movie reviews and
-             human annotations of their sentiment. The task is to predict the sentiment of a
-             given sentence. We use the two-way (positive/negative) class split, and use only
-             sentence-level labels."""
-         ),
-         text_features={"sentence": "sentence"},
-         label_classes=["negative", "positive"],
-         label_column="label",
-         data_url="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip",
-         data_dir="SST-2",
-         citation=textwrap.dedent(
-             """\
-             @inproceedings{socher2013recursive,
-               title={Recursive deep models for semantic compositionality over a sentiment treebank},
-               author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
-               booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
-               pages={1631--1642},
-               year={2013}
-             }"""
-         ),
-         url="https://datasets.stanford.edu/sentiment/index.html",
-     ),
-     AdvGlueConfig(
-         name="adv_qqp",
-         description=textwrap.dedent(
-             """Adversarial version of QQP.
-             The Quora Question Pairs2 dataset is a collection of question pairs from the
-             community question-answering website Quora. The task is to determine whether a
-             pair of questions are semantically equivalent."""
-         ),
-         text_features={
-             "question1": "question1",
-             "question2": "question2",
-         },
-         label_classes=["not_duplicate", "duplicate"],
-         label_column="label",
-         data_url="https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip",
-         data_dir="QQP",
-         citation=textwrap.dedent(
-             """\
-             @online{WinNT,
-               author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
-               title = {First Quora Dataset Release: Question Pairs},
-               year = {2017},
-               url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
-               urldate = {2019-04-03}
-             }"""
-         ),
-         url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
-     ),
-     AdvGlueConfig(
-         name="adv_mnli",
-         description=textwrap.dedent(
-             """Adversarial version of MNLI.
-             The Multi-Genre Natural Language Inference Corpus is a crowdsourced
-             collection of sentence pairs with textual entailment annotations. Given a premise sentence
-             and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
-             (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
-             gathered from ten different sources, including transcribed speech, fiction, and government reports.
-             We use the standard test set, for which we obtained private labels from the authors, and evaluate
-             on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
-             the SNLI corpus as 550k examples of auxiliary training data."""
-         ),
-         **_MNLI_BASE_KWARGS,
-     ),
-     AdvGlueConfig(
-         name="adv_mnli_mismatched",
-         description=textwrap.dedent(
-             """Adversarial version of MNLI-mismatched.
-             The mismatched validation and test splits from MNLI.
-             See the "mnli" BuilderConfig for additional information."""
-         ),
-         **_MNLI_BASE_KWARGS,
-     ),
-     AdvGlueConfig(
-         name="adv_qnli",
-         description=textwrap.dedent(
-             """Adversarial version of QNLI.
-             The Stanford Question Answering Dataset is a question-answering
-             dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
-             from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
-             convert the task into sentence pair classification by forming a pair between each question and each
-             sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
-             question and the context sentence. The task is to determine whether the context sentence contains
-             the answer to the question. This modified version of the original task removes the requirement that
-             the model select the exact answer, but also removes the simplifying assumptions that the answer
-             is always present in the input and that lexical overlap is a reliable cue."""
-         ),  # pylint: disable=line-too-long
-         text_features={
-             "question": "question",
-             "sentence": "sentence",
-         },
-         label_classes=["entailment", "not_entailment"],
-         label_column="label",
-         data_url="https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip",
-         data_dir="QNLI",
-         citation=textwrap.dedent(
-             """\
-             @article{rajpurkar2016squad,
-               title={Squad: 100,000+ questions for machine comprehension of text},
-               author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
-               journal={arXiv preprint arXiv:1606.05250},
-               year={2016}
-             }"""
-         ),
-         url="https://rajpurkar.github.io/SQuAD-explorer/",
-     ),
-     AdvGlueConfig(
-         name="adv_rte",
-         description=textwrap.dedent(
-             """Adversarial version of RTE.
-             The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
-             entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
-             et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009).4 Examples are
-             constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
-             for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
-         ),  # pylint: disable=line-too-long
-         text_features={
-             "sentence1": "sentence1",
-             "sentence2": "sentence2",
-         },
-         label_classes=["entailment", "not_entailment"],
-         label_column="label",
-         data_url="https://dl.fbaipublicfiles.com/glue/data/RTE.zip",
-         data_dir="RTE",
-         citation=textwrap.dedent(
-             """\
-             @inproceedings{dagan2005pascal,
-               title={The PASCAL recognising textual entailment challenge},
-               author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
-               booktitle={Machine Learning Challenges Workshop},
-               pages={177--190},
-               year={2005},
-               organization={Springer}
-             }
-             @inproceedings{bar2006second,
-               title={The second pascal recognising textual entailment challenge},
-               author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
-               booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
-               volume={6},
-               number={1},
-               pages={6--4},
-               year={2006},
-               organization={Venice}
-             }
-             @inproceedings{giampiccolo2007third,
-               title={The third pascal recognizing textual entailment challenge},
-               author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
-               booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
-               pages={1--9},
-               year={2007},
-               organization={Association for Computational Linguistics}
-             }
-             @inproceedings{bentivogli2009fifth,
-               title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
-               author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
-               booktitle={TAC},
-               year={2009}
-             }"""
-         ),
-         url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
-     ),
- ]
-
-
- class AdvGlue(datasets.GeneratorBasedBuilder):
-     """The Adversarial GLUE (AdvGLUE) benchmark."""
-
-     DATASETS = ["adv_sst2", "adv_qqp", "adv_mnli", "adv_mnli_mismatched", "adv_qnli", "adv_rte"]
-     BUILDER_CONFIGS = ADVGLUE_BUILDER_CONFIGS
-
-     def _info(self):
-         features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
-         if self.config.label_classes:
-             features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
-         else:
-             features["label"] = datasets.Value("float32")
-         features["idx"] = datasets.Value("int32")
-         return datasets.DatasetInfo(
-             description=_ADV_GLUE_DESCRIPTION,
-             features=datasets.Features(features),
-             homepage="https://adversarialglue.github.io/",
-             citation=_ADV_GLUE_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         assert self.config.name in AdvGlue.DATASETS
-         data_dir = dl_manager.download_and_extract(ADVGLUE_DEV_URL)
-         data_file = os.path.join(data_dir, "dev", "dev.json")
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "data_file": data_file,
-                 },
-             )
-         ]
-
-     def _generate_examples(self, data_file):
-         # We name splits 'adv_sst2' instead of 'sst2' so as not to be confused
-         # with the original SST-2. Here they're named like 'sst2' so we have to
-         # remove the 'adv_' prefix.
-         config_key = self.config.name.replace("adv_", "")
-         if config_key == "mnli_mismatched":
-             # and they name this split differently.
-             config_key = "mnli-mm"
-         data = json.loads(open(data_file).read())
-         for row in data[config_key]:
-             example = {feat: row[col] for feat, col in self.config.text_features.items()}
-             example["label"] = self.config.process_label(row[self.config.label_column])
-             example["idx"] = row["idx"]
-             yield example["idx"], example
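
For reference, the example generation above reduces to indexing dev.json by task name and copying the text columns plus "label" and "idx". A rough standalone sketch of the same logic, assuming a locally extracted dev.zip with the layout used in _split_generators (the path and the "sst2" key are illustrative, not part of the commit):

import json

# Read the AdvGLUE dev file the way the deleted script did.
with open("dev/dev.json", encoding="utf-8") as f:
    data = json.load(f)

# "sst2" is what the script derives by stripping the "adv_" prefix from
# the config name ("mnli_mismatched" maps to "mnli-mm" instead).
for row in data["sst2"]:
    example = {"sentence": row["sentence"], "label": row["label"], "idx": row["idx"]}
    # the builder yields (idx, example) pairs
    print(example["idx"], example)
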