Upload dataset_infos.json
dataset_infos.json ADDED (+38 -0)
@@ -0,0 +1,38 @@
+{"loulely--glue_cola_processed": {
+    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
+    "citation": "@article{warstadt2018neural,\n title={Neural Network Acceptability Judgments},\n author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1805.12471},\n year={2018}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
+    "homepage": "https://nyu-mll.github.io/CoLA/",
+    "license": "",
+    "features": {
+        "text": {
+            "dtype": "string",
+            "id": null,
+            "_type": "Value"
+        }
+    },
+    "post_processed": null,
+    "supervised_keys": null,
+    "task_templates": null,
+    "builder_name": "glue",
+    "config_name": "cola",
+    "version": {
+        "version_str": "1.0.0",
+        "description": "",
+        "major": 1,
+        "minor": 0,
+        "patch": 0
+    },
+    "splits": {
+        "train": {
+            "name": "train",
+            "num_bytes": 372995,
+            "num_examples": 8551,
+            "dataset_name": "glue_cola_processed"
+        }
+    },
+    "download_checksums": null,
+    "download_size": 191308,
+    "post_processing_size": null,
+    "dataset_size": 372995,
+    "size_in_bytes": 564303
+}}
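As a quick sanity check (a minimal sketch, assuming the file lives in a Hub dataset repo whose id matches the key above, i.e. loulely/glue_cola_processed), the schema and split size recorded in dataset_infos.json can be compared against the loaded data with the datasets library:

    from datasets import load_dataset

    # Load the single "train" split described in dataset_infos.json
    ds = load_dataset("loulely/glue_cola_processed", split="train")

    # The schema should expose one string column named "text"
    print(ds.features)   # expected: {'text': Value(dtype='string', id=None)}

    # The row count should match "num_examples": 8551
    print(ds.num_rows)   # expected: 8551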