Languages: English
Multilinguality: monolingual
Size Categories: n<1K
Language Creators: machine-generated
Annotations Creators: other
Source Datasets: extended|glue
ArXiv: 2111.02840
albertvillanova committed
Commit: e1abda0
Parent: 6b7a3b0

Convert dataset to Parquet (#5)

- Convert dataset to Parquet (507a95bba7688cf0c371bfba6214ccfcf98cdae4)
- Add adv_mnli_mismatched data files (554ee79e9e36cd68fdf2186dfe3aa5210d792960)
- Add adv_qnli data files (62511019569faff06f42a52013bf09c7dc4f2209)
- Add adv_qqp data files (e7175c30bec332676f524e47f8a46b76beed3c41)
- Add adv_rte data files (2ee6e8f724b73473927890ccbb243ef39ab49342)
- Add adv_sst2 data files (2adbb307e32d68934c1e3524d801c4d4ced59c15)
- Delete loading script (ca822b4843e57c4592b3458921df43b58deea606)
- Delete legacy dataset_infos.json (340a2fbb3550f0951dbfdd1d8eb82f0e779d46f3)

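The conversion changes only the storage format; the loading API is unchanged. A minimal sketch, assuming the datasets library is installed and the repo id is adv_glue (the dataset shown on this page):

from datasets import load_dataset

# Each config ships a single "validation" split, per the card metadata below.
ds = load_dataset("adv_glue", "adv_sst2", split="validation")
print(ds.features)  # sentence (string), label (negative/positive ClassLabel), idx (int32)
print(ds[0])        # first adversarial SST-2 example
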
README.md CHANGED
@@ -19,48 +19,17 @@ task_ids:
 - natural-language-inference
 - sentiment-classification
 pretty_name: Adversarial GLUE
+config_names:
+- adv_mnli
+- adv_mnli_mismatched
+- adv_qnli
+- adv_qqp
+- adv_rte
+- adv_sst2
 tags:
 - paraphrase-identification
 - qa-nli
 dataset_info:
-- config_name: adv_sst2
-  features:
-  - name: sentence
-    dtype: string
-  - name: label
-    dtype:
-      class_label:
-        names:
-          '0': negative
-          '1': positive
-  - name: idx
-    dtype: int32
-  splits:
-  - name: validation
-    num_bytes: 16595
-    num_examples: 148
-  download_size: 40662
-  dataset_size: 16595
-- config_name: adv_qqp
-  features:
-  - name: question1
-    dtype: string
-  - name: question2
-    dtype: string
-  - name: label
-    dtype:
-      class_label:
-        names:
-          '0': not_duplicate
-          '1': duplicate
-  - name: idx
-    dtype: int32
-  splits:
-  - name: validation
-    num_bytes: 9926
-    num_examples: 78
-  download_size: 40662
-  dataset_size: 9926
 - config_name: adv_mnli
   features:
   - name: premise
@@ -78,10 +47,10 @@ dataset_info:
     dtype: int32
   splits:
   - name: validation
-    num_bytes: 23736
+    num_bytes: 23712
     num_examples: 121
-  download_size: 40662
-  dataset_size: 23736
+  download_size: 13485
+  dataset_size: 23712
 - config_name: adv_mnli_mismatched
   features:
   - name: premise
@@ -99,10 +68,10 @@ dataset_info:
     dtype: int32
   splits:
   - name: validation
-    num_bytes: 40982
+    num_bytes: 40953
     num_examples: 162
-  download_size: 40662
-  dataset_size: 40982
+  download_size: 25166
+  dataset_size: 40953
 - config_name: adv_qnli
   features:
   - name: question
@@ -119,10 +88,30 @@ dataset_info:
     dtype: int32
   splits:
   - name: validation
-    num_bytes: 34877
+    num_bytes: 34850
     num_examples: 148
-  download_size: 40662
-  dataset_size: 34877
+  download_size: 19111
+  dataset_size: 34850
+- config_name: adv_qqp
+  features:
+  - name: question1
+    dtype: string
+  - name: question2
+    dtype: string
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': not_duplicate
+          '1': duplicate
+  - name: idx
+    dtype: int32
+  splits:
+  - name: validation
+    num_bytes: 9908
+    num_examples: 78
+  download_size: 7705
+  dataset_size: 9908
 - config_name: adv_rte
   features:
   - name: sentence1
@@ -139,17 +128,53 @@ dataset_info:
     dtype: int32
   splits:
   - name: validation
-    num_bytes: 25998
+    num_bytes: 25979
     num_examples: 81
-  download_size: 40662
-  dataset_size: 25998
-config_names:
-- adv_mnli
-- adv_mnli_mismatched
-- adv_qnli
-- adv_qqp
-- adv_rte
-- adv_sst2
+  download_size: 15872
+  dataset_size: 25979
+- config_name: adv_sst2
+  features:
+  - name: sentence
+    dtype: string
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': negative
+          '1': positive
+  - name: idx
+    dtype: int32
+  splits:
+  - name: validation
+    num_bytes: 16572
+    num_examples: 148
+  download_size: 10833
+  dataset_size: 16572
+configs:
+- config_name: adv_mnli
+  data_files:
+  - split: validation
+    path: adv_mnli/validation-*
+- config_name: adv_mnli_mismatched
+  data_files:
+  - split: validation
+    path: adv_mnli_mismatched/validation-*
+- config_name: adv_qnli
+  data_files:
+  - split: validation
+    path: adv_qnli/validation-*
+- config_name: adv_qqp
+  data_files:
+  - split: validation
+    path: adv_qqp/validation-*
+- config_name: adv_rte
+  data_files:
+  - split: validation
+    path: adv_rte/validation-*
+- config_name: adv_sst2
+  data_files:
+  - split: validation
+    path: adv_sst2/validation-*
 ---
 
 # Dataset Card for Adversarial GLUE
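
The new configs section above maps each config name to its Parquet shards, so the files can also be read directly. A sketch that cross-checks the card's declared num_examples against an actual shard, assuming huggingface_hub and pandas (with a Parquet engine) are available and the repo id is adv_glue:

import pandas as pd
from huggingface_hub import hf_hub_download

# Fetch the Parquet shard that backs the adv_sst2 validation split.
path = hf_hub_download(
    repo_id="adv_glue",
    filename="adv_sst2/validation-00000-of-00001.parquet",
    repo_type="dataset",
)
df = pd.read_parquet(path)
assert len(df) == 148               # num_examples declared in the card
print(df.columns.tolist())          # ['sentence', 'label', 'idx']; label is stored as an int class id
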
adv_glue.py DELETED
@@ -1,330 +0,0 @@
-"""The Adversarial GLUE (AdvGLUE) benchmark.
-Homepage: https://adversarialglue.github.io/
-"""
-import json
-import os
-import textwrap
-
-import datasets
-
-
-_ADV_GLUE_CITATION = """\
-@article{Wang2021AdversarialGA,
-  title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},
-  author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},
-  journal={ArXiv},
-  year={2021},
-  volume={abs/2111.02840}
-}
-"""
-
-_ADV_GLUE_DESCRIPTION = """\
-Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark
-that focuses on the adversarial robustness evaluation of language models. It covers five
-natural language understanding tasks from the famous GLUE tasks and is an adversarial
-version of GLUE benchmark.
-"""
-
-_MNLI_BASE_KWARGS = dict(
-    text_features={
-        "premise": "premise",
-        "hypothesis": "hypothesis",
-    },
-    label_classes=["entailment", "neutral", "contradiction"],
-    label_column="label",
-    data_url="https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
-    data_dir="MNLI",
-    citation=textwrap.dedent(
-        """\
-      @InProceedings{N18-1101,
-        author = "Williams, Adina
-                  and Nangia, Nikita
-                  and Bowman, Samuel",
-        title = "A Broad-Coverage Challenge Corpus for
-                 Sentence Understanding through Inference",
-        booktitle = "Proceedings of the 2018 Conference of
-                     the North American Chapter of the
-                     Association for Computational Linguistics:
-                     Human Language Technologies, Volume 1 (Long
-                     Papers)",
-        year = "2018",
-        publisher = "Association for Computational Linguistics",
-        pages = "1112--1122",
-        location = "New Orleans, Louisiana",
-        url = "http://aclweb.org/anthology/N18-1101"
-      }
-      @article{bowman2015large,
-        title={A large annotated corpus for learning natural language inference},
-        author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
-        journal={arXiv preprint arXiv:1508.05326},
-        year={2015}
-      }"""
-    ),
-    url="http://www.nyu.edu/projects/bowman/multinli/",
-)
-
-ADVGLUE_DEV_URL = "https://adversarialglue.github.io/dataset/dev.zip"
-
-
-class AdvGlueConfig(datasets.BuilderConfig):
-    """BuilderConfig for Adversarial GLUE."""
-
-    def __init__(
-        self,
-        text_features,
-        label_column,
-        data_url,
-        data_dir,
-        citation,
-        url,
-        label_classes=None,
-        process_label=lambda x: x,
-        **kwargs,
-    ):
-        """BuilderConfig for Adversarial GLUE.
-
-        Args:
-          text_features: `dict[string, string]`, map from the name of the feature
-            dict for each text field to the name of the column in the tsv file
-          label_column: `string`, name of the column in the tsv file corresponding
-            to the label
-          data_url: `string`, url to download the zip file from
-          data_dir: `string`, the path to the folder containing the tsv files in the
-            downloaded zip
-          citation: `string`, citation for the data set
-          url: `string`, url for information about the data set
-          label_classes: `list[string]`, the list of classes if the label is
-            categorical. If not provided, then the label will be of type
-            `datasets.Value('float32')`.
-          process_label: `Function[string, any]`, function taking in the raw value
-            of the label and processing it to the form required by the label feature
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(AdvGlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-        self.text_features = text_features
-        self.label_column = label_column
-        self.label_classes = label_classes
-        self.data_url = data_url
-        self.data_dir = data_dir
-        self.citation = citation
-        self.url = url
-        self.process_label = process_label
-
-
-ADVGLUE_BUILDER_CONFIGS = [
-    AdvGlueConfig(
-        name="adv_sst2",
-        description=textwrap.dedent(
-            """Adversarial version of SST-2.
-            The Stanford Sentiment Treebank consists of sentences from movie reviews and
-            human annotations of their sentiment. The task is to predict the sentiment of a
-            given sentence. We use the two-way (positive/negative) class split, and use only
-            sentence-level labels."""
-        ),
-        text_features={"sentence": "sentence"},
-        label_classes=["negative", "positive"],
-        label_column="label",
-        data_url="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip",
-        data_dir="SST-2",
-        citation=textwrap.dedent(
-            """\
-          @inproceedings{socher2013recursive,
-            title={Recursive deep models for semantic compositionality over a sentiment treebank},
-            author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
-            booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
-            pages={1631--1642},
-            year={2013}
-          }"""
-        ),
-        url="https://datasets.stanford.edu/sentiment/index.html",
-    ),
-    AdvGlueConfig(
-        name="adv_qqp",
-        description=textwrap.dedent(
-            """Adversarial version of QQP.
-            The Quora Question Pairs2 dataset is a collection of question pairs from the
-            community question-answering website Quora. The task is to determine whether a
-            pair of questions are semantically equivalent."""
-        ),
-        text_features={
-            "question1": "question1",
-            "question2": "question2",
-        },
-        label_classes=["not_duplicate", "duplicate"],
-        label_column="label",
-        data_url="https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip",
-        data_dir="QQP",
-        citation=textwrap.dedent(
-            """\
-          @online{WinNT,
-            author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
-            title = {First Quora Dataset Release: Question Pairs},
-            year = {2017},
-            url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
-            urldate = {2019-04-03}
-          }"""
-        ),
-        url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
-    ),
-    AdvGlueConfig(
-        name="adv_mnli",
-        description=textwrap.dedent(
-            """Adversarial version of MNLI.
-            The Multi-Genre Natural Language Inference Corpus is a crowdsourced
-            collection of sentence pairs with textual entailment annotations. Given a premise sentence
-            and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
-            (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
-            gathered from ten different sources, including transcribed speech, fiction, and government reports.
-            We use the standard test set, for which we obtained private labels from the authors, and evaluate
-            on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
-            the SNLI corpus as 550k examples of auxiliary training data."""
-        ),
-        **_MNLI_BASE_KWARGS,
-    ),
-    AdvGlueConfig(
-        name="adv_mnli_mismatched",
-        description=textwrap.dedent(
-            """Adversarial version of MNLI-mismatched.
-            The mismatched validation and test splits from MNLI.
-            See the "mnli" BuilderConfig for additional information."""
-        ),
-        **_MNLI_BASE_KWARGS,
-    ),
-    AdvGlueConfig(
-        name="adv_qnli",
-        description=textwrap.dedent(
-            """Adversarial version of QNLI.
-            The Stanford Question Answering Dataset is a question-answering
-            dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
-            from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
-            convert the task into sentence pair classification by forming a pair between each question and each
-            sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
-            question and the context sentence. The task is to determine whether the context sentence contains
-            the answer to the question. This modified version of the original task removes the requirement that
-            the model select the exact answer, but also removes the simplifying assumptions that the answer
-            is always present in the input and that lexical overlap is a reliable cue."""
-        ),  # pylint: disable=line-too-long
-        text_features={
-            "question": "question",
-            "sentence": "sentence",
-        },
-        label_classes=["entailment", "not_entailment"],
-        label_column="label",
-        data_url="https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip",
-        data_dir="QNLI",
-        citation=textwrap.dedent(
-            """\
-          @article{rajpurkar2016squad,
-            title={Squad: 100,000+ questions for machine comprehension of text},
-            author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
-            journal={arXiv preprint arXiv:1606.05250},
-            year={2016}
-          }"""
-        ),
-        url="https://rajpurkar.github.io/SQuAD-explorer/",
-    ),
-    AdvGlueConfig(
-        name="adv_rte",
-        description=textwrap.dedent(
-            """Adversarial version of RTE.
-            The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
-            entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
-            et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009).4 Examples are
-            constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
-            for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
-        ),  # pylint: disable=line-too-long
-        text_features={
-            "sentence1": "sentence1",
-            "sentence2": "sentence2",
-        },
-        label_classes=["entailment", "not_entailment"],
-        label_column="label",
-        data_url="https://dl.fbaipublicfiles.com/glue/data/RTE.zip",
-        data_dir="RTE",
-        citation=textwrap.dedent(
-            """\
-          @inproceedings{dagan2005pascal,
-            title={The PASCAL recognising textual entailment challenge},
-            author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
-            booktitle={Machine Learning Challenges Workshop},
-            pages={177--190},
-            year={2005},
-            organization={Springer}
-          }
-          @inproceedings{bar2006second,
-            title={The second pascal recognising textual entailment challenge},
-            author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
-            booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
-            volume={6},
-            number={1},
-            pages={6--4},
-            year={2006},
-            organization={Venice}
-          }
-          @inproceedings{giampiccolo2007third,
-            title={The third pascal recognizing textual entailment challenge},
-            author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
-            booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
-            pages={1--9},
-            year={2007},
-            organization={Association for Computational Linguistics}
-          }
-          @inproceedings{bentivogli2009fifth,
-            title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
-            author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
-            booktitle={TAC},
-            year={2009}
-          }"""
-        ),
-        url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
-    ),
-]
-
-
-class AdvGlue(datasets.GeneratorBasedBuilder):
-    """The General Language Understanding Evaluation (GLUE) benchmark."""
-
-    DATASETS = ["adv_sst2", "adv_qqp", "adv_mnli", "adv_mnli_mismatched", "adv_qnli", "adv_rte"]
-    BUILDER_CONFIGS = ADVGLUE_BUILDER_CONFIGS
-
-    def _info(self):
-        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
-        if self.config.label_classes:
-            features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
-        else:
-            features["label"] = datasets.Value("float32")
-        features["idx"] = datasets.Value("int32")
-        return datasets.DatasetInfo(
-            description=_ADV_GLUE_DESCRIPTION,
-            features=datasets.Features(features),
-            homepage="https://adversarialglue.github.io/",
-            citation=_ADV_GLUE_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        assert self.config.name in AdvGlue.DATASETS
-        data_dir = dl_manager.download_and_extract(ADVGLUE_DEV_URL)
-        data_file = os.path.join(data_dir, "dev", "dev.json")
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "data_file": data_file,
-                },
-            )
-        ]
-
-    def _generate_examples(self, data_file):
-        # We name splits 'adv_sst2' instead of 'sst2' so as not to be confused
-        # with the original SST-2. Here they're named like 'sst2' so we have to
-        # remove the 'adv_' prefix.
-        config_key = self.config.name.replace("adv_", "")
-        if config_key == "mnli_mismatched":
-            # and they name this split differently.
-            config_key = "mnli-mm"
-        data = json.loads(open(data_file).read())
-        for row in data[config_key]:
-            example = {feat: row[col] for feat, col in self.config.text_features.items()}
-            example["label"] = self.config.process_label(row[self.config.label_column])
-            example["idx"] = row["idx"]
-            yield example["idx"], example
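
For reference, a standalone sketch of what the deleted script did: fetch dev.zip, read dev/dev.json, and select the task's rows. The key renaming mirrors _generate_examples above; the archive layout is assumed to match the script.

import io
import json
import urllib.request
import zipfile

ADVGLUE_DEV_URL = "https://adversarialglue.github.io/dataset/dev.zip"

def load_adv_glue_rows(config_key="sst2"):
    # dev.json keys omit the "adv_" prefix and call the mismatched split "mnli-mm".
    if config_key == "mnli_mismatched":
        config_key = "mnli-mm"
    with urllib.request.urlopen(ADVGLUE_DEV_URL) as resp:
        archive = zipfile.ZipFile(io.BytesIO(resp.read()))
    data = json.loads(archive.read("dev/dev.json"))
    return data[config_key]  # list of dicts with the text fields, "label" and "idx"

rows = load_adv_glue_rows("sst2")
print(len(rows), rows[0]["idx"])
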
adv_mnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60d669bb175d2309ace88c1b4408b203109128a021f8ae282949411a5d968d00
+size 13485

adv_mnli_mismatched/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c65be9dc6735d5c1a9c4ec9b3fe6e822839d28d6da8707c2a1b2ad217b8bcf12
+size 25166

adv_qnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d52ef0ffe9ee286082ff90ff44095ae742fc7b767d2c611b5e40941543016d9f
+size 19111

adv_qqp/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f2667932727a841edce13086f65802d760101010552f594361e68afe4e002d3
+size 7705

adv_rte/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:84020811fda28c120cf2aff59f289a457823444b5e560177b2910a34418af3c8
+size 15872

adv_sst2/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e06cc9a2ecdd5cee621d7ec0558e3114a8565c7429d4eb4eacd8e758fff158c
+size 10833
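
The three-line files above are Git LFS pointers rather than the Parquet bytes themselves: oid is the SHA-256 of the stored file and size its byte length. A sketch that verifies a downloaded shard against its pointer (hash and size taken from the adv_mnli pointer above; assumes huggingface_hub and repo id adv_glue):

import hashlib
import os
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="adv_glue",
    filename="adv_mnli/validation-00000-of-00001.parquet",
    repo_type="dataset",
)
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == "60d669bb175d2309ace88c1b4408b203109128a021f8ae282949411a5d968d00"
assert os.path.getsize(path) == 13485
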
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"adv_sst2": {"description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n", "citation": "@article{Wang2021AdversarialGA,\n title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},\n author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},\n journal={ArXiv},\n year={2021},\n volume={abs/2111.02840}\n}\n", "homepage": "https://adversarialglue.github.io/", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["negative", "positive"], "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "adv_glue", "config_name": "adv_sst2", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 16595, "num_examples": 148, "dataset_name": "adv_glue"}}, "download_checksums": {"https://adversarialglue.github.io/dataset/dev.zip": {"num_bytes": 40662, "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"}}, "download_size": 40662, "post_processing_size": null, "dataset_size": 16595, "size_in_bytes": 57257}, "adv_qqp": {"description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n", "citation": "@article{Wang2021AdversarialGA,\n title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},\n author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},\n journal={ArXiv},\n year={2021},\n volume={abs/2111.02840}\n}\n", "homepage": "https://adversarialglue.github.io/", "license": "", "features": {"question1": {"dtype": "string", "id": null, "_type": "Value"}, "question2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_duplicate", "duplicate"], "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "adv_glue", "config_name": "adv_qqp", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 9926, "num_examples": 78, "dataset_name": "adv_glue"}}, "download_checksums": {"https://adversarialglue.github.io/dataset/dev.zip": {"num_bytes": 40662, "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"}}, "download_size": 40662, "post_processing_size": null, "dataset_size": 9926, "size_in_bytes": 50588}, "adv_mnli": {"description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n", "citation": "@article{Wang2021AdversarialGA,\n title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},\n author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},\n journal={ArXiv},\n year={2021},\n volume={abs/2111.02840}\n}\n", "homepage": "https://adversarialglue.github.io/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "adv_glue", "config_name": "adv_mnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 23736, "num_examples": 121, "dataset_name": "adv_glue"}}, "download_checksums": {"https://adversarialglue.github.io/dataset/dev.zip": {"num_bytes": 40662, "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"}}, "download_size": 40662, "post_processing_size": null, "dataset_size": 23736, "size_in_bytes": 64398}, "adv_mnli_mismatched": {"description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n", "citation": "@article{Wang2021AdversarialGA,\n title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},\n author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},\n journal={ArXiv},\n year={2021},\n volume={abs/2111.02840}\n}\n", "homepage": "https://adversarialglue.github.io/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "adv_glue", "config_name": "adv_mnli_mismatched", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 40982, "num_examples": 162, "dataset_name": "adv_glue"}}, "download_checksums": {"https://adversarialglue.github.io/dataset/dev.zip": {"num_bytes": 40662, "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"}}, "download_size": 40662, "post_processing_size": null, "dataset_size": 40982, "size_in_bytes": 81644}, "adv_qnli": {"description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n", "citation": "@article{Wang2021AdversarialGA,\n title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},\n author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},\n journal={ArXiv},\n year={2021},\n volume={abs/2111.02840}\n}\n", "homepage": "https://adversarialglue.github.io/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "adv_glue", "config_name": "adv_qnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 34877, "num_examples": 148, "dataset_name": "adv_glue"}}, "download_checksums": {"https://adversarialglue.github.io/dataset/dev.zip": {"num_bytes": 40662, "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"}}, "download_size": 40662, "post_processing_size": null, "dataset_size": 34877, "size_in_bytes": 75539}, "adv_rte": {"description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n", "citation": "@article{Wang2021AdversarialGA,\n title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},\n author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},\n journal={ArXiv},\n year={2021},\n volume={abs/2111.02840}\n}\n", "homepage": "https://adversarialglue.github.io/", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "adv_glue", "config_name": "adv_rte", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 25998, "num_examples": 81, "dataset_name": "adv_glue"}}, "download_checksums": {"https://adversarialglue.github.io/dataset/dev.zip": {"num_bytes": 40662, "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"}}, "download_size": 40662, "post_processing_size": null, "dataset_size": 25998, "size_in_bytes": 66660}}
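
With dataset_infos.json deleted, the same per-config metadata now lives in the README's dataset_info YAML, which the datasets library reads back. A sketch, assuming a recent datasets version and repo id adv_glue:

from datasets import load_dataset_builder

# Reads the exported metadata from the dataset card; no data download needed.
builder = load_dataset_builder("adv_glue", "adv_qqp")
info = builder.info
print(info.splits["validation"].num_examples)  # 78
print(info.features["label"].names)            # ['not_duplicate', 'duplicate']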