Commit bed0457 (parent: 169a67d)
Add stsb data files

Files changed:
- README.md (+15 -7)
- dataset_infos.json (+15 -27)
- stsb/test-00000-of-00001.parquet (+3 -0)
- stsb/train-00000-of-00001.parquet (+3 -0)
- stsb/validation-00000-of-00001.parquet (+3 -0)
README.md
CHANGED

@@ -306,17 +306,17 @@ dataset_info:
   - name: idx
     dtype: int32
   splits:
-  - name: test
-    num_bytes: 170847
-    num_examples: 1379
   - name: train
-    num_bytes: …
+    num_bytes: 754791
     num_examples: 5749
   - name: validation
-    num_bytes: …
+    num_bytes: 216064
     num_examples: 1500
-  download_size: …
-  dataset_size: …
+  - name: test
+    num_bytes: 169974
+    num_examples: 1379
+  download_size: 766983
+  dataset_size: 1140829
 - config_name: wnli
   features:
   - name: sentence1

@@ -376,6 +376,14 @@ configs:
     path: sst2/validation-*
   - split: test
     path: sst2/test-*
+- config_name: stsb
+  data_files:
+  - split: train
+    path: stsb/train-*
+  - split: validation
+    path: stsb/validation-*
+  - split: test
+    path: stsb/test-*
 train-eval-index:
 - config: cola
   task: text-classification
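The `configs` entry added above is what lets the Hub viewer and the `datasets` library resolve the stsb splits directly from the parquet shards, with no loading script. A minimal sketch of the effect, assuming this is the canonical `glue` dataset repo and a parquet-aware `datasets` release (the repo id is an assumption, not stated in the diff):

from datasets import load_dataset

# The data_files globs map each split to its parquet shard(s),
# e.g. stsb/train-* -> stsb/train-00000-of-00001.parquet.
stsb = load_dataset("glue", "stsb")
print(stsb["train"].num_rows)       # 5749, matching num_examples above
print(stsb["validation"].num_rows)  # 1500
print(stsb["test"].num_rows)        # 1379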
dataset_infos.json
CHANGED

@@ -233,34 +233,29 @@
   },
   "stsb": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
-    "citation": "@article{cer2017semeval,\n title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},\n author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},\n journal={arXiv preprint arXiv:1708.00055},\n year={2017}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
+    "citation": "@article{cer2017semeval,\n title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},\n author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},\n journal={arXiv preprint arXiv:1708.00055},\n year={2017}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
     "homepage": "http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
     "license": "",
     "features": {
       "sentence1": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "sentence2": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
         "dtype": "float32",
-        "id": null,
         "_type": "Value"
       },
       "idx": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
     "builder_name": "glue",
+    "dataset_name": "glue",
     "config_name": "stsb",
     "version": {
       "version_str": "1.0.0",
@@ -270,35 +265,28 @@
       "patch": 0
     },
     "splits": {
-      "test": {
-        "name": "test",
-        "num_bytes": 170847,
-        "num_examples": 1379,
-        "dataset_name": "glue"
-      },
       "train": {
         "name": "train",
-        "num_bytes": …,
+        "num_bytes": 754791,
         "num_examples": 5749,
-        "dataset_name": "glue"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": …,
+        "num_bytes": 216064,
         "num_examples": 1500,
-        "dataset_name": "glue"
-      }
-      …
+        "dataset_name": null
+      },
+      "test": {
+        "name": "test",
+        "num_bytes": 169974,
+        "num_examples": 1379,
+        "dataset_name": null
       }
     },
-    "download_size": …,
-    …
-    "size_in_bytes": 1949125
+    "download_size": 766983,
+    "dataset_size": 1140829,
+    "size_in_bytes": 1907812
   },
   "mnli": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
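The size fields in the rewritten stsb entry are mutually consistent, and `download_size` is exactly the total of the three parquet shards added in this commit. A quick check, with all values copied from this commit's diffs:

# All values below are copied from this commit's diffs.
split_num_bytes = {"train": 754791, "validation": 216064, "test": 169974}
parquet_sizes = [502065, 150622, 114296]  # from the LFS pointers below

assert sum(split_num_bytes.values()) == 1140829  # dataset_size
assert sum(parquet_sizes) == 766983              # download_size
assert 766983 + 1140829 == 1907812               # size_in_bytes

Note that `num_bytes` counts uncompressed in-memory (Arrow) bytes, which is why `dataset_size` is larger than the compressed parquet `download_size`.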
stsb/test-00000-of-00001.parquet
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04fa2561f1ff3c395cf8980e3eed5d133c194abf636d5e1870d765c861087bd9
+size 114296

stsb/train-00000-of-00001.parquet
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbd93bbb988fd18437e02185fe3b2bd9a18350376c392e7820de9df1b247ed1f
+size 502065

stsb/validation-00000-of-00001.parquet
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:152de7cf1fa34ee4df1c243bd209b02ade21a1d5c4fb3b7da5240f78e4000aa9
+size 150622
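Each ADDED file above is a git-lfs pointer rather than the parquet bytes themselves: `oid` is the SHA-256 of the real blob and `size` is its byte length, which git-lfs uses to fetch and verify the file. A minimal sketch of that verification, assuming an LFS-enabled local checkout (the path is relative to the repo root); the oid and size are copied from the test-split pointer:

import hashlib
from pathlib import Path

def matches_pointer(path: str, oid: str, size: int) -> bool:
    # A blob satisfies its LFS pointer iff its byte length and sha256 both match.
    data = Path(path).read_bytes()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

print(matches_pointer(
    "stsb/test-00000-of-00001.parquet",
    "04fa2561f1ff3c395cf8980e3eed5d133c194abf636d5e1870d765c861087bd9",
    114296,
))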