Languages: English
Multilinguality: monolingual
Size Categories: n<1K
Language Creators: machine-generated
Annotations Creators: other
Source Datasets: extended|glue
ArXiv:
License:
albertvillanova committed e7175c3 (1 parent: 6251101)

Add adv_qqp data files

README.md CHANGED
@@ -108,10 +108,10 @@ dataset_info:
     dtype: int32
   splits:
   - name: validation
-    num_bytes: 9926
+    num_bytes: 9908
     num_examples: 78
-  download_size: 40662
-  dataset_size: 9926
+  download_size: 7705
+  dataset_size: 9908
 - config_name: adv_rte
   features:
   - name: sentence1
@@ -163,6 +163,10 @@ configs:
   data_files:
   - split: validation
     path: adv_qnli/validation-*
+- config_name: adv_qqp
+  data_files:
+  - split: validation
+    path: adv_qqp/validation-*
 ---
 
 # Dataset Card for Adversarial GLUE
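
Once this configs entry points at the adv_qqp parquet file added below, the new subset should be loadable through the standard datasets API. A minimal sketch, assuming the canonical Hub repo id adv_glue:

    from datasets import load_dataset

    # Load the newly added adv_qqp config; it ships only a validation split (78 examples).
    adv_qqp = load_dataset("adv_glue", "adv_qqp", split="validation")
    print(adv_qqp)      # features: question1, question2, label, idx
    print(adv_qqp[0])   # first adversarial QQP pair
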
adv_qqp/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f2667932727a841edce13086f65802d760101010552f594361e68afe4e002d3
+size 7705
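
The parquet data itself lives in Git LFS; the repository tracks only this pointer (a sha256 and a size of 7705 bytes, matching the download_size recorded in dataset_infos.json below). After fetching the actual file, for example with git lfs pull in a local checkout, it can be inspected directly; a sketch assuming pyarrow and the repository-relative path:

    import pyarrow.parquet as pq

    # Repository-relative path to the added file (assumes a local checkout with LFS content fetched).
    table = pq.read_table("adv_qqp/validation-00000-of-00001.parquet")
    print(table.num_rows)   # expected: 78
    print(table.schema)     # question1, question2, label, idx
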
dataset_infos.json CHANGED
@@ -64,37 +64,29 @@
     "features": {
       "question1": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "question2": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
-        "num_classes": 2,
         "names": [
           "not_duplicate",
           "duplicate"
         ],
-        "id": null,
         "_type": "ClassLabel"
       },
       "idx": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
-    "builder_name": "adv_glue",
+    "builder_name": "parquet",
+    "dataset_name": "adv_glue",
     "config_name": "adv_qqp",
     "version": {
       "version_str": "1.0.0",
-      "description": "",
       "major": 1,
       "minor": 0,
       "patch": 0
@@ -102,21 +94,14 @@
     "splits": {
       "validation": {
         "name": "validation",
-        "num_bytes": 9926,
+        "num_bytes": 9908,
         "num_examples": 78,
-        "dataset_name": "adv_glue"
-      }
-    },
-    "download_checksums": {
-      "https://adversarialglue.github.io/dataset/dev.zip": {
-        "num_bytes": 40662,
-        "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"
+        "dataset_name": null
       }
     },
-    "download_size": 40662,
-    "post_processing_size": null,
-    "dataset_size": 9926,
-    "size_in_bytes": 50588
+    "download_size": 7705,
+    "dataset_size": 9908,
+    "size_in_bytes": 17613
   },
   "adv_mnli": {
     "description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n",