Commit 6251101
Parent(s): 554ee79

Add adv_qnli data files

Files changed:
- README.md (+7 -3)
- adv_qnli/validation-00000-of-00001.parquet (+3 -0)
- dataset_infos.json (+7 -22)
README.md
CHANGED
@@ -88,10 +88,10 @@ dataset_info:
     dtype: int32
   splits:
   - name: validation
-    num_bytes:
+    num_bytes: 34850
     num_examples: 148
-  download_size:
-  dataset_size:
+  download_size: 19111
+  dataset_size: 34850
 - config_name: adv_qqp
   features:
   - name: question1
@@ -159,6 +159,10 @@ configs:
   data_files:
   - split: validation
     path: adv_mnli_mismatched/validation-*
+- config_name: adv_qnli
+  data_files:
+  - split: validation
+    path: adv_qnli/validation-*
 ---
 
 # Dataset Card for Adversarial GLUE
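With the new configs: entry, the adv_qnli split can be loaded straight from the parquet shard via the datasets library. A minimal sketch, assuming this repository is the one served on the Hub under the adv_glue name:

from datasets import load_dataset

# The configs entry added above maps the adv_qnli config to the parquet shard,
# so no loading script is needed.
qnli = load_dataset("adv_glue", "adv_qnli", split="validation")

print(qnli.num_rows)   # 148, matching num_examples above
print(qnli.features)   # question, sentence, label (ClassLabel), idx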
adv_qnli/validation-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d52ef0ffe9ee286082ff90ff44095ae742fc7b767d2c611b5e40941543016d9f
+size 19111
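Note that the three added lines are a Git LFS pointer rather than the parquet data itself; the real shard lives in LFS storage. A quick sanity check, sketched under the assumption that the shard has been fetched to the same relative path, is that the downloaded copy matches the pointer's oid and size:

import hashlib
from pathlib import Path

# Assumed local path of the fetched shard (not the LFS pointer file).
shard = Path("adv_qnli/validation-00000-of-00001.parquet")
blob = shard.read_bytes()

# "size" and "oid sha256:..." come from the pointer above.
assert len(blob) == 19111
assert hashlib.sha256(blob).hexdigest() == (
    "d52ef0ffe9ee286082ff90ff44095ae742fc7b767d2c611b5e40941543016d9f"
)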
dataset_infos.json
CHANGED
@@ -222,37 +222,29 @@
     "features": {
       "question": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "sentence": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
-        "num_classes": 2,
         "names": [
           "entailment",
           "not_entailment"
         ],
-        "id": null,
         "_type": "ClassLabel"
       },
       "idx": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       }
     },
-    "
-    "
-    "task_templates": null,
-    "builder_name": "adv_glue",
+    "builder_name": "parquet",
+    "dataset_name": "adv_glue",
     "config_name": "adv_qnli",
     "version": {
       "version_str": "1.0.0",
-      "description": "",
       "major": 1,
       "minor": 0,
       "patch": 0
@@ -260,21 +252,14 @@
     "splits": {
       "validation": {
         "name": "validation",
-        "num_bytes":
+        "num_bytes": 34850,
         "num_examples": 148,
-        "dataset_name":
-      }
-    },
-    "download_checksums": {
-      "https://adversarialglue.github.io/dataset/dev.zip": {
-        "num_bytes": 40662,
-        "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"
+        "dataset_name": null
       }
     },
-    "download_size":
-    "
-    "
-    "size_in_bytes": 75539
+    "download_size": 19111,
+    "dataset_size": 34850,
+    "size_in_bytes": 53961
   },
   "adv_rte": {
     "description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n",
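The updated size fields are mutually consistent: in datasets metadata, dataset_size is the sum of the splits' num_bytes (a single validation split here), and size_in_bytes is dataset_size + download_size, i.e. 34850 + 19111 = 53961. A small check against a local checkout (the file path is an assumption):

import json

with open("dataset_infos.json") as f:
    qnli_info = json.load(f)["adv_qnli"]

assert qnli_info["splits"]["validation"]["num_bytes"] == 34850
assert qnli_info["dataset_size"] == 34850            # sum of split num_bytes
assert qnli_info["download_size"] == 19111           # size of the parquet shard
assert qnli_info["size_in_bytes"] == 34850 + 19111   # dataset_size + download_size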