Commit b04c7fe
Parent(s): 4c50f6d

Add mnli_mismatched data files

Files changed:
- README.md  +12 -6
- dataset_infos.json  +13 -27
- mnli_mismatched/test-00000-of-00001.parquet  +3 -0
- mnli_mismatched/validation-00000-of-00001.parquet  +3 -0
README.md
CHANGED
@@ -159,14 +159,14 @@ dataset_info:
   - name: idx
     dtype: int32
   splits:
-  - name: test
-    num_bytes: 1956866
-    num_examples: 9847
   - name: validation
-    num_bytes:
+    num_bytes: 1949231
     num_examples: 9832
-
-
+  - name: test
+    num_bytes: 1950703
+    num_examples: 9847
+  download_size: 2509009
+  dataset_size: 3899934
 - config_name: mrpc
   features:
   - name: sentence1
@@ -364,6 +364,12 @@ configs:
     path: mnli/test_matched-*
   - split: test_mismatched
     path: mnli/test_mismatched-*
+- config_name: mnli_mismatched
+  data_files:
+  - split: validation
+    path: mnli_mismatched/validation-*
+  - split: test
+    path: mnli_mismatched/test-*
 - config_name: mrpc
   data_files:
   - split: train
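The new configs entry above points the mnli_mismatched config at the two parquet shards added in this commit, so its splits can be loaded directly from the Hub's parquet files. A minimal sketch of loading them, assuming the canonical glue dataset repository on the Hugging Face Hub and a datasets release that honors parquet-backed configs (the repo id is an assumption, not stated in this diff):

from datasets import load_dataset

# Assumption: short repo id "glue"; substitute the fully qualified dataset id if needed.
ds = load_dataset("glue", "mnli_mismatched")

print(ds["validation"].num_rows)  # expected 9832, per the split metadata above
print(ds["test"].num_rows)        # expected 9847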
dataset_infos.json
CHANGED
@@ -363,40 +363,33 @@
   },
   "mnli_mismatched": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
-    "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n
+    "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
     "homepage": "http://www.nyu.edu/projects/bowman/multinli/",
     "license": "",
     "features": {
       "premise": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "hypothesis": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
-        "num_classes": 3,
         "names": [
           "entailment",
           "neutral",
           "contradiction"
         ],
-        "names_file": null,
-        "id": null,
         "_type": "ClassLabel"
       },
       "idx": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
     "builder_name": "glue",
+    "dataset_name": "glue",
     "config_name": "mnli_mismatched",
     "version": {
       "version_str": "1.0.0",
@@ -406,29 +399,22 @@
       "patch": 0
     },
     "splits": {
-      "test": {
-        "name": "test",
-        "num_bytes": 1956866,
-        "num_examples": 9847,
-        "dataset_name": "glue"
-      },
       "validation": {
         "name": "validation",
-        "num_bytes":
+        "num_bytes": 1949231,
         "num_examples": 9832,
-        "dataset_name":
-      }
-
-
-
-        "
-        "
+        "dataset_name": null
+      },
+      "test": {
+        "name": "test",
+        "num_bytes": 1950703,
+        "num_examples": 9847,
+        "dataset_name": null
       }
     },
-    "download_size":
-    "
-    "
-    "size_in_bytes": 316695757
+    "download_size": 2509009,
+    "dataset_size": 3899934,
+    "size_in_bytes": 6408943
   },
   "mnli_matched": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
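The updated size fields are internally consistent with the parquet shards added below: download_size 2509009 is the sum of the two parquet file sizes (1257857 + 1251152), dataset_size 3899934 is the sum of the two split num_bytes values (1949231 + 1950703), and size_in_bytes 6408943 is their total. A quick sketch of that cross-check, using only values that appear in this diff:

# Values taken from the LFS pointers and the updated split metadata in this commit.
parquet_sizes = {"test": 1257857, "validation": 1251152}   # "size" in the LFS pointer files
split_bytes = {"validation": 1949231, "test": 1950703}     # "num_bytes" per split

download_size = sum(parquet_sizes.values())    # 2509009
dataset_size = sum(split_bytes.values())       # 3899934
size_in_bytes = download_size + dataset_size   # 6408943

assert (download_size, dataset_size, size_in_bytes) == (2509009, 3899934, 6408943)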
mnli_mismatched/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5078398d5c83d183578b1bdafe94e4491ed28ad1cf8d98ee8846afcec651f16
+size 1257857
mnli_mismatched/validation-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04aba92823a954be36fe1b69b61eed334c9eb1009daba0dd79f69d77b87c535c
+size 1251152
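Both parquet files are committed as Git LFS pointers: the repository stores only the LFS spec version, the sha256 of the real shard, and its size in bytes, while the data itself lives in LFS storage. A minimal sketch of fetching one shard and inspecting it, assuming huggingface_hub and pyarrow are installed and that the dataset lives under the repo id glue (an assumption; use the fully qualified id if that name does not resolve):

import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# hf_hub_download resolves the LFS pointer and returns a local path to the real file.
path = hf_hub_download(
    repo_id="glue",                 # assumption: adjust to the actual dataset repo id
    repo_type="dataset",
    filename="mnli_mismatched/validation-00000-of-00001.parquet",
)

table = pq.read_table(path)
print(table.num_rows)       # expected 9832 rows for the validation shard
print(table.schema.names)   # premise, hypothesis, label, idx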