albertvillanova (HF staff) committed
Commit 4c50f6d
Parent: bed0457

Add mnli data files

README.md CHANGED
@@ -102,23 +102,23 @@ dataset_info:
   - name: idx
     dtype: int32
   splits:
-  - name: test_matched
-    num_bytes: 1854787
-    num_examples: 9796
-  - name: test_mismatched
-    num_bytes: 1956866
-    num_examples: 9847
   - name: train
-    num_bytes: 74865118
+    num_bytes: 74619646
     num_examples: 392702
   - name: validation_matched
-    num_bytes: 1839926
+    num_bytes: 1833783
     num_examples: 9815
   - name: validation_mismatched
-    num_bytes: 1955384
+    num_bytes: 1949231
     num_examples: 9832
-  download_size: 312783507
-  dataset_size: 82472081
+  - name: test_matched
+    num_bytes: 1848654
+    num_examples: 9796
+  - name: test_mismatched
+    num_bytes: 1950703
+    num_examples: 9847
+  download_size: 57168425
+  dataset_size: 82202017
 - config_name: mnli_matched
   features:
   - name: premise
@@ -352,6 +352,18 @@ configs:
     path: cola/validation-*
   - split: test
     path: cola/test-*
+- config_name: mnli
+  data_files:
+  - split: train
+    path: mnli/train-*
+  - split: validation_matched
+    path: mnli/validation_matched-*
+  - split: validation_mismatched
+    path: mnli/validation_mismatched-*
+  - split: test_matched
+    path: mnli/test_matched-*
+  - split: test_mismatched
+    path: mnli/test_mismatched-*
 - config_name: mrpc
   data_files:
   - split: train
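
The new configs entry above is what maps the mnli config to the Parquet shards added in this commit. A minimal loading sketch, assuming the datasets library is installed and that the repo id is "glue" (the dataset may also be served as "nyu-mll/glue"):

from datasets import load_dataset

# Resolves the data_files mapping added above and returns one split per entry.
mnli = load_dataset("glue", "mnli")
print(mnli)                    # train, validation_matched, validation_mismatched, test_matched, test_mismatched
print(mnli["train"].num_rows)  # 392702, matching the split metadata in this commit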
dataset_infos.json CHANGED
@@ -290,40 +290,33 @@
   },
   "mnli": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
-    "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+    "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
     "homepage": "http://www.nyu.edu/projects/bowman/multinli/",
     "license": "",
     "features": {
       "premise": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "hypothesis": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
-        "num_classes": 3,
         "names": [
           "entailment",
           "neutral",
           "contradiction"
         ],
-        "names_file": null,
-        "id": null,
         "_type": "ClassLabel"
       },
       "idx": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
     "builder_name": "glue",
+    "dataset_name": "glue",
     "config_name": "mnli",
     "version": {
       "version_str": "1.0.0",
@@ -333,47 +326,40 @@
       "patch": 0
     },
     "splits": {
-      "test_matched": {
-        "name": "test_matched",
-        "num_bytes": 1854787,
-        "num_examples": 9796,
-        "dataset_name": "glue"
-      },
-      "test_mismatched": {
-        "name": "test_mismatched",
-        "num_bytes": 1956866,
-        "num_examples": 9847,
-        "dataset_name": "glue"
-      },
       "train": {
         "name": "train",
-        "num_bytes": 74865118,
+        "num_bytes": 74619646,
         "num_examples": 392702,
-        "dataset_name": "glue"
+        "dataset_name": null
       },
       "validation_matched": {
         "name": "validation_matched",
-        "num_bytes": 1839926,
+        "num_bytes": 1833783,
         "num_examples": 9815,
-        "dataset_name": "glue"
+        "dataset_name": null
       },
       "validation_mismatched": {
         "name": "validation_mismatched",
-        "num_bytes": 1955384,
+        "num_bytes": 1949231,
         "num_examples": 9832,
-        "dataset_name": "glue"
-      }
-    },
-    "download_checksums": {
-      "https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {
-        "num_bytes": 312783507,
-        "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"
+        "dataset_name": null
+      },
+      "test_matched": {
+        "name": "test_matched",
+        "num_bytes": 1848654,
+        "num_examples": 9796,
+        "dataset_name": null
+      },
+      "test_mismatched": {
+        "name": "test_mismatched",
+        "num_bytes": 1950703,
+        "num_examples": 9847,
+        "dataset_name": null
       }
     },
-    "download_size": 312783507,
-    "post_processing_size": null,
-    "dataset_size": 82472081,
-    "size_in_bytes": 395255588
+    "download_size": 57168425,
+    "dataset_size": 82202017,
+    "size_in_bytes": 139370442
   },
   "mnli_mismatched": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
mnli/test_matched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a330c4f2aeb0bc92f1b4b133fbbaf51bf9c7d0f5cac3d06f49ef63af47dbb822
+size 1220119
mnli/test_mismatched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5078398d5c83d183578b1bdafe94e4491ed28ad1cf8d98ee8846afcec651f16
+size 1257857
mnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49a4a5508b89b8fed2c6e81d2c47d00f4759050a7048c6cc5d95d31122ced3c1
+size 52224361
mnli/validation_matched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f918c09d9c35446b8e8f06a5672f8ab704e2897fecbf52e2e154141f3d7c421
+size 1214936
mnli/validation_mismatched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04aba92823a954be36fe1b69b61eed334c9eb1009daba0dd79f69d77b87c535c
+size 1251152
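
Each file above is tracked through Git LFS, so the repository stores only a pointer (version, oid, size) while the Parquet payload lives in LFS storage. A sketch for reading one shard directly, assuming pandas and huggingface_hub (which provides the hf:// filesystem) are installed, and assuming the repo id is "glue":

import pandas as pd

# The hf:// path is resolved by the huggingface_hub fsspec integration.
df = pd.read_parquet("hf://datasets/glue/mnli/validation_matched-00000-of-00001.parquet")
print(list(df.columns))  # premise, hypothesis, label, idx
print(len(df))           # 9815, matching the split metadata above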