Upload 2 files

- dataset_infos.json +875 -0
- glue.py +624 -0
dataset_infos.json
ADDED
@@ -0,0 +1,875 @@
{
  "cola": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@article{warstadt2018neural,\n title={Neural Network Acceptability Judgments},\n author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1805.12471},\n year={2018}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "https://nyu-mll.github.io/CoLA/",
    "license": "",
    "features": {
      "sentence": {"dtype": "string", "id": null, "_type": "Value"},
      "label": {"num_classes": 2, "names": ["unacceptable", "acceptable"], "names_file": null, "id": null, "_type": "ClassLabel"},
      "idx": {"dtype": "int32", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "cola",
    "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "test": {"name": "test", "num_bytes": 61049, "num_examples": 1063, "dataset_name": "glue"},
      "train": {"name": "train", "num_bytes": 489149, "num_examples": 8551, "dataset_name": "glue"},
      "validation": {"name": "validation", "num_bytes": 60850, "num_examples": 1043, "dataset_name": "glue"}
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/CoLA.zip": {"num_bytes": 376971, "checksum": "f212fcd832b8f7b435fb991f101abf89f96b933ab400603bf198960dfc32cbff"}
    },
    "download_size": 376971,
    "post_processing_size": null,
    "dataset_size": 611048,
    "size_in_bytes": 988019
  },
  "sst2": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@inproceedings{socher2013recursive,\n title={Recursive deep models for semantic compositionality over a sentiment treebank},\n author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},\n booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},\n pages={1631--1642},\n year={2013}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "https://nlp.stanford.edu/sentiment/index.html",
    "license": "",
    "features": {
      "sentence": {"dtype": "string", "id": null, "_type": "Value"},
      "label": {"num_classes": 2, "names": ["negative", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"},
      "idx": {"dtype": "int32", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "sst2",
    "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "test": {"name": "test", "num_bytes": 217556, "num_examples": 1821, "dataset_name": "glue"},
      "train": {"name": "train", "num_bytes": 4715283, "num_examples": 67349, "dataset_name": "glue"},
      "validation": {"name": "validation", "num_bytes": 106692, "num_examples": 872, "dataset_name": "glue"}
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip": {"num_bytes": 7439277, "checksum": "d67e16fb55739c1b32cdce9877596db1c127dc322d93c082281f64057c16deaa"}
    },
    "download_size": 7439277,
    "post_processing_size": null,
    "dataset_size": 5039531,
    "size_in_bytes": 12478808
  },
  "mrpc": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@inproceedings{dolan2005automatically,\n title={Automatically constructing a corpus of sentential paraphrases},\n author={Dolan, William B and Brockett, Chris},\n booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},\n year={2005}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "https://www.microsoft.com/en-us/download/details.aspx?id=52398",
    "license": "",
    "features": {
      "sentence1": {"dtype": "string", "id": null, "_type": "Value"},
      "sentence2": {"dtype": "string", "id": null, "_type": "Value"},
      "label": {"num_classes": 2, "names": ["not_equivalent", "equivalent"], "names_file": null, "id": null, "_type": "ClassLabel"},
      "idx": {"dtype": "int32", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "mrpc",
    "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "test": {"name": "test", "num_bytes": 443498, "num_examples": 1725, "dataset_name": "glue"},
      "train": {"name": "train", "num_bytes": 946146, "num_examples": 3668, "dataset_name": "glue"},
      "validation": {"name": "validation", "num_bytes": 106142, "num_examples": 408, "dataset_name": "glue"}
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv": {"num_bytes": 6222, "checksum": "971d7767d81b997fd9060ade0ec23c4fc31cbb226a55d1bd4a1bac474eb81dc7"},
      "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt": {"num_bytes": 1047044, "checksum": "60a9b09084528f0673eedee2b69cb941920f0b8cd0eeccefc464a98768457f89"},
      "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt": {"num_bytes": 441275, "checksum": "a04e271090879aaba6423d65b94950c089298587d9c084bf9cd7439bd785f784"}
    },
    "download_size": 1494541,
    "post_processing_size": null,
    "dataset_size": 1495786,
    "size_in_bytes": 2990327
  },
  "qqp": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@online{WinNT,\n author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},\n title = {First Quora Dataset Release: Question Pairs},\n year = {2017},\n url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},\n urldate = {2019-04-03}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
    "homepage": "https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
    "license": "",
    "features": {
      "question1": {"dtype": "string", "id": null, "_type": "Value"},
      "question2": {"dtype": "string", "id": null, "_type": "Value"},
      "label": {"num_classes": 2, "names": ["not_duplicate", "duplicate"], "names_file": null, "id": null, "_type": "ClassLabel"},
      "idx": {"dtype": "int32", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "qqp",
    "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "train": {"name": "train", "num_bytes": 50901116, "num_examples": 363846, "dataset_name": "glue"},
      "validation": {"name": "validation", "num_bytes": 5653794, "num_examples": 40430, "dataset_name": "glue"},
      "test": {"name": "test", "num_bytes": 55171431, "num_examples": 390965, "dataset_name": "glue"}
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip": {"num_bytes": 41696084, "checksum": "40e7c862c04eb26ee04b67fd900e76c45c6ba8e6d8fab4f8f1f8072a1a3fbae0"}
    },
    "download_size": 41696084,
    "post_processing_size": null,
    "dataset_size": 111726341,
    "size_in_bytes": 153422425
  },
  "stsb": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@article{cer2017semeval,\n title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},\n author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},\n journal={arXiv preprint arXiv:1708.00055},\n year={2017}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
    "license": "",
    "features": {
      "sentence1": {"dtype": "string", "id": null, "_type": "Value"},
      "sentence2": {"dtype": "string", "id": null, "_type": "Value"},
      "label": {"dtype": "float32", "id": null, "_type": "Value"},
      "idx": {"dtype": "int32", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "stsb",
    "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "test": {"name": "test", "num_bytes": 170847, "num_examples": 1379, "dataset_name": "glue"},
      "train": {"name": "train", "num_bytes": 758394, "num_examples": 5749, "dataset_name": "glue"},
      "validation": {"name": "validation", "num_bytes": 217012, "num_examples": 1500, "dataset_name": "glue"}
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/STS-B.zip": {"num_bytes": 802872, "checksum": "e60a6393de5a8b5b9bac5020a1554b54e3691f9d600b775bd131e613ac179c85"}
    },
    "download_size": 802872,
    "post_processing_size": null,
    "dataset_size": 1146253,
    "size_in_bytes": 1949125
  },
  "mnli": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "http://www.nyu.edu/projects/bowman/multinli/",
    "license": "",
    "features": {
      "premise": {"dtype": "string", "id": null, "_type": "Value"},
      "hypothesis": {"dtype": "string", "id": null, "_type": "Value"},
      "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"},
      "idx": {"dtype": "int32", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "mnli",
    "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "test_matched": {"name": "test_matched", "num_bytes": 1854787, "num_examples": 9796, "dataset_name": "glue"},
      "test_mismatched": {"name": "test_mismatched", "num_bytes": 1956866, "num_examples": 9847, "dataset_name": "glue"},
      "train": {"name": "train", "num_bytes": 74865118, "num_examples": 392702, "dataset_name": "glue"},
      "validation_matched": {"name": "validation_matched", "num_bytes": 1839926, "num_examples": 9815, "dataset_name": "glue"},
      "validation_mismatched": {"name": "validation_mismatched", "num_bytes": 1955384, "num_examples": 9832, "dataset_name": "glue"}
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}
    },
    "download_size": 312783507,
    "post_processing_size": null,
    "dataset_size": 82472081,
    "size_in_bytes": 395255588
  },
  "mnli_mismatched": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "http://www.nyu.edu/projects/bowman/multinli/",
    "license": "",
    "features": {
      "premise": {"dtype": "string", "id": null, "_type": "Value"},
      "hypothesis": {"dtype": "string", "id": null, "_type": "Value"},
      "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"},
      "idx": {"dtype": "int32", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "mnli_mismatched",
    "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "test": {"name": "test", "num_bytes": 1956866, "num_examples": 9847, "dataset_name": "glue"},
      "validation": {"name": "validation", "num_bytes": 1955384, "num_examples": 9832, "dataset_name": "glue"}
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}
    },
    "download_size": 312783507,
    "post_processing_size": null,
    "dataset_size": 3912250,
    "size_in_bytes": 316695757
  },
  "mnli_matched": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "http://www.nyu.edu/projects/bowman/multinli/",
    "license": "",
    "features": {
      "premise": {"dtype": "string", "id": null, "_type": "Value"},
      "hypothesis": {"dtype": "string", "id": null, "_type": "Value"},
      "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"},
      "idx": {"dtype": "int32", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "mnli_matched",
    "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "test": {"name": "test", "num_bytes": 1854787, "num_examples": 9796, "dataset_name": "glue"},
      "validation": {"name": "validation", "num_bytes": 1839926, "num_examples": 9815, "dataset_name": "glue"}
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}
    },
    "download_size": 312783507,
    "post_processing_size": null,
    "dataset_size": 3694713,
    "size_in_bytes": 316478220
  },
  "qnli": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@article{rajpurkar2016squad,\n title={Squad: 100,000+ questions for machine comprehension of text},\n author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},\n journal={arXiv preprint arXiv:1606.05250},\n year={2016}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "https://rajpurkar.github.io/SQuAD-explorer/",
    "license": "",
    "features": {
      "question": {"dtype": "string", "id": null, "_type": "Value"},
      "sentence": {"dtype": "string", "id": null, "_type": "Value"},
      "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"},
      "idx": {"dtype": "int32", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "qnli",
    "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "test": {"name": "test", "num_bytes": 1376516, "num_examples": 5463, "dataset_name": "glue"},
      "train": {"name": "train", "num_bytes": 25677924, "num_examples": 104743, "dataset_name": "glue"},
      "validation": {"name": "validation", "num_bytes": 1371727, "num_examples": 5463, "dataset_name": "glue"}
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip": {"num_bytes": 10627589, "checksum": "e634e78627a29adaecd4f955359b22bf5e70f2cbd93b493f2d624138a0c0e5f5"}
    },
    "download_size": 10627589,
    "post_processing_size": null,
    "dataset_size": 28426167,
    "size_in_bytes": 39053756
  },
  "rte": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@inproceedings{dagan2005pascal,\n title={The PASCAL recognising textual entailment challenge},\n author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},\n booktitle={Machine Learning Challenges Workshop},\n pages={177--190},\n year={2005},\n organization={Springer}\n}\n@inproceedings{bar2006second,\n title={The second pascal recognising textual entailment challenge},\n author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},\n booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},\n volume={6},\n number={1},\n pages={6--4},\n year={2006},\n organization={Venice}\n}\n@inproceedings{giampiccolo2007third,\n title={The third pascal recognizing textual entailment challenge},\n author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},\n booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},\n pages={1--9},\n year={2007},\n organization={Association for Computational Linguistics}\n}\n@inproceedings{bentivogli2009fifth,\n title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},\n author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},\n booktitle={TAC},\n year={2009}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
    "license": "",
    "features": {
      "sentence1": {"dtype": "string", "id": null, "_type": "Value"},
      "sentence2": {"dtype": "string", "id": null, "_type": "Value"},
      "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"},
      "idx": {"dtype": "int32", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "rte",
    "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "test": {"name": "test", "num_bytes": 975936, "num_examples": 3000, "dataset_name": "glue"},
      "train": {"name": "train", "num_bytes": 848888, "num_examples": 2490, "dataset_name": "glue"},
      "validation": {"name": "validation", "num_bytes": 90911, "num_examples": 277, "dataset_name": "glue"}
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/RTE.zip": {"num_bytes": 697150, "checksum": "6bf86de103ecd335f3441bd43574d23fef87ecc695977a63b82d5efb206556ee"}
    },
    "download_size": 697150,
    "post_processing_size": null,
    "dataset_size": 1915735,
    "size_in_bytes": 2612885
  },
  "wnli": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "@inproceedings{levesque2012winograd,\n title={The winograd schema challenge},\n author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},\n booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},\n year={2012}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
    "license": "",
    "features": {
      "sentence1": {"dtype": "string", "id": null, "_type": "Value"},
      "sentence2": {"dtype": "string", "id": null, "_type": "Value"},
      "label": {"num_classes": 2, "names": ["not_entailment", "entailment"], "names_file": null, "id": null, "_type": "ClassLabel"},
      "idx": {"dtype": "int32", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "wnli",
    "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "test": {"name": "test", "num_bytes": 37992, "num_examples": 146, "dataset_name": "glue"},
      "train": {"name": "train", "num_bytes": 107517, "num_examples": 635, "dataset_name": "glue"},
      "validation": {"name": "validation", "num_bytes": 12215, "num_examples": 71, "dataset_name": "glue"}
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/WNLI.zip": {"num_bytes": 28999, "checksum": "ae0e8e4d16f4d46d4a0a566ec7ecceccfd3fbfaa4a7a4b4e02848c0f2561ac46"}
    },
    "download_size": 28999,
    "post_processing_size": null,
    "dataset_size": 157724,
    "size_in_bytes": 186723
  },
  "ax": {
    "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
    "citation": "\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
    "homepage": "https://gluebenchmark.com/diagnostics",
    "license": "",
    "features": {
      "premise": {"dtype": "string", "id": null, "_type": "Value"},
      "hypothesis": {"dtype": "string", "id": null, "_type": "Value"},
      "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"},
      "idx": {"dtype": "int32", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "glue",
    "config_name": "ax",
    "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "test": {"name": "test", "num_bytes": 238392, "num_examples": 1104, "dataset_name": "glue"}
    },
    "download_checksums": {
      "https://dl.fbaipublicfiles.com/glue/data/AX.tsv": {"num_bytes": 222257, "checksum": "0e13510b1bb14436ff7e2ee82338f0efb0133ecf2e73507a697dc210db3f05fd"}
    },
    "download_size": 222257,
    "post_processing_size": null,
    "dataset_size": 238392,
    "size_in_bytes": 460649
  }
}
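The metadata above is exactly what the `datasets` library consults at load time: `splits` gives the expected example counts, `download_checksums` guards the source archives, and `features` fixes the column types. As a minimal sketch (not part of this upload; it assumes a `datasets` release that still resolves the script-based "glue" loader), the values printed below come straight from the entries above:

# Sketch: load the "cola" config and check it against dataset_infos.json.
from datasets import load_dataset

cola = load_dataset("glue", "cola")

# Split sizes match the "splits" entries above.
assert cola["train"].num_rows == 8551
assert cola["validation"].num_rows == 1043

# Feature types match the "features" entries: a string sentence,
# a two-class ClassLabel, and an int32 index.
print(cola["train"].features["label"].names)  # ['unacceptable', 'acceptable']
print(cola["train"][0])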
glue.py
ADDED
@@ -0,0 +1,624 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
# Lint as: python3
|
17 |
+
"""The General Language Understanding Evaluation (GLUE) benchmark."""
|
18 |
+
|
19 |
+
import csv
|
20 |
+
import os
|
21 |
+
import textwrap
|
22 |
+
|
23 |
+
import numpy as np
|
24 |
+
|
25 |
+
import datasets
|
26 |
+
|
27 |
+
_GLUE_CITATION = """\
|
28 |
+
@inproceedings{wang2019glue,
|
29 |
+
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
|
30 |
+
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
|
31 |
+
note={In the Proceedings of ICLR.},
|
32 |
+
year={2019}
|
33 |
+
}
|
34 |
+
"""
|
35 |
+
|
36 |
+
_GLUE_DESCRIPTION = """\
|
37 |
+
GLUE, the General Language Understanding Evaluation benchmark
|
38 |
+
(https://gluebenchmark.com/) is a collection of resources for training,
|
39 |
+
evaluating, and analyzing natural language understanding systems.
|
40 |
+
"""
|
41 |
+
|
42 |
+
_MRPC_DEV_IDS = "https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv"
|
43 |
+
_MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt"
|
44 |
+
_MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt"
|
45 |
+
|
46 |
+
_MNLI_BASE_KWARGS = dict(
|
47 |
+
text_features={
|
48 |
+
"premise": "sentence1",
|
49 |
+
"hypothesis": "sentence2",
|
50 |
+
},
|
51 |
+
label_classes=["entailment", "neutral", "contradiction"],
|
52 |
+
label_column="gold_label",
|
53 |
+
data_url="https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
|
54 |
+
data_dir="MNLI",
|
55 |
+
citation=textwrap.dedent(
|
56 |
+
"""\
|
57 |
+
@InProceedings{N18-1101,
|
58 |
+
author = "Williams, Adina
|
59 |
+
and Nangia, Nikita
|
60 |
+
and Bowman, Samuel",
|
61 |
+
title = "A Broad-Coverage Challenge Corpus for
|
62 |
+
Sentence Understanding through Inference",
|
63 |
+
booktitle = "Proceedings of the 2018 Conference of
|
64 |
+
the North American Chapter of the
|
65 |
+
Association for Computational Linguistics:
|
66 |
+
Human Language Technologies, Volume 1 (Long
|
67 |
+
Papers)",
|
68 |
+
year = "2018",
|
69 |
+
publisher = "Association for Computational Linguistics",
|
70 |
+
pages = "1112--1122",
|
71 |
+
location = "New Orleans, Louisiana",
|
72 |
+
url = "http://aclweb.org/anthology/N18-1101"
|
73 |
+
}
|
74 |
+
@article{bowman2015large,
|
75 |
+
title={A large annotated corpus for learning natural language inference},
|
76 |
+
author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
|
77 |
+
journal={arXiv preprint arXiv:1508.05326},
|
78 |
+
year={2015}
|
79 |
+
}"""
|
80 |
+
),
|
81 |
+
url="http://www.nyu.edu/projects/bowman/multinli/",
|
82 |
+
)
|
83 |
+
|
84 |
+
|
85 |
+
class GlueConfig(datasets.BuilderConfig):
|
86 |
+
"""BuilderConfig for GLUE."""
|
87 |
+
|
88 |
+
def __init__(
|
89 |
+
self,
|
90 |
+
text_features,
|
91 |
+
label_column,
|
92 |
+
data_url,
|
93 |
+
data_dir,
|
94 |
+
citation,
|
95 |
+
url,
|
96 |
+
label_classes=None,
|
97 |
+
process_label=lambda x: x,
|
98 |
+
**kwargs,
|
99 |
+
):
|
100 |
+
"""BuilderConfig for GLUE.
|
101 |
+
Args:
|
102 |
+
text_features: `dict[string, string]`, map from the name of the feature
|
103 |
+
dict for each text field to the name of the column in the tsv file
|
104 |
+
label_column: `string`, name of the column in the tsv file corresponding
|
105 |
+
to the label
|
106 |
+
data_url: `string`, url to download the zip file from
|
107 |
+
data_dir: `string`, the path to the folder containing the tsv files in the
|
108 |
+
downloaded zip
|
109 |
+
citation: `string`, citation for the data set
|
110 |
+
url: `string`, url for information about the data set
|
111 |
+
label_classes: `list[string]`, the list of classes if the label is
|
112 |
+
categorical. If not provided, then the label will be of type
|
113 |
+
`datasets.Value('float32')`.
|
114 |
+
process_label: `Function[string, any]`, function taking in the raw value
|
115 |
+
of the label and processing it to the form required by the label feature
|
116 |
+
**kwargs: keyword arguments forwarded to super.
|
117 |
+
"""
|
118 |
+
super(GlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
|
119 |
+
self.text_features = text_features
|
120 |
+
self.label_column = label_column
|
121 |
+
self.label_classes = label_classes
|
122 |
+
self.data_url = data_url
|
123 |
+
self.data_dir = data_dir
|
124 |
+
self.citation = citation
|
125 |
+
self.url = url
|
126 |
+
self.process_label = process_label
|
127 |
+
|
128 |
+
|
129 |
+
class Glue(datasets.GeneratorBasedBuilder):
|
130 |
+
"""The General Language Understanding Evaluation (GLUE) benchmark."""
|
131 |
+
|
132 |
+
BUILDER_CONFIGS = [
|
133 |
+
GlueConfig(
|
134 |
+
name="cola",
|
135 |
+
description=textwrap.dedent(
|
136 |
+
"""\
|
137 |
+
The Corpus of Linguistic Acceptability consists of English
|
138 |
+
acceptability judgments drawn from books and journal articles on
|
139 |
+
linguistic theory. Each example is a sequence of words annotated
|
140 |
+
with whether it is a grammatical English sentence."""
|
141 |
+
),
|
142 |
+
text_features={"sentence": "sentence"},
|
143 |
+
label_classes=["unacceptable", "acceptable"],
|
144 |
+
label_column="is_acceptable",
|
145 |
+
data_url="https://dl.fbaipublicfiles.com/glue/data/CoLA.zip",
|
146 |
+
data_dir="CoLA",
|
147 |
+
citation=textwrap.dedent(
|
148 |
+
"""\
|
149 |
+
@article{warstadt2018neural,
|
150 |
+
title={Neural Network Acceptability Judgments},
|
151 |
+
author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
|
152 |
+
journal={arXiv preprint arXiv:1805.12471},
|
153 |
+
year={2018}
|
154 |
+
}"""
|
155 |
+
),
|
156 |
+
url="https://nyu-mll.github.io/CoLA/",
|
157 |
+
),
|
158 |
+
GlueConfig(
|
159 |
+
name="sst2",
|
160 |
+
description=textwrap.dedent(
|
161 |
+
"""\
|
162 |
+
The Stanford Sentiment Treebank consists of sentences from movie reviews and
|
163 |
+
human annotations of their sentiment. The task is to predict the sentiment of a
|
164 |
+
given sentence. We use the two-way (positive/negative) class split, and use only
|
165 |
+
sentence-level labels."""
|
166 |
+
),
|
167 |
+
text_features={"sentence": "sentence"},
|
168 |
+
label_classes=["negative", "positive"],
|
169 |
+
label_column="label",
|
170 |
+
data_url="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip",
|
171 |
+
data_dir="SST-2",
|
172 |
+
citation=textwrap.dedent(
|
173 |
+
"""\
|
174 |
+
@inproceedings{socher2013recursive,
|
175 |
+
title={Recursive deep models for semantic compositionality over a sentiment treebank},
|
176 |
+
author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
|
177 |
+
booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
|
178 |
+
pages={1631--1642},
|
179 |
+
year={2013}
|
180 |
+
}"""
|
181 |
+
),
|
182 |
+
url="https://datasets.stanford.edu/sentiment/index.html",
|
183 |
+
),
|
184 |
+
GlueConfig(
|
185 |
+
name="mrpc",
|
186 |
+
description=textwrap.dedent(
|
187 |
+
"""\
|
188 |
+
The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
|
189 |
+
sentence pairs automatically extracted from online news sources, with human annotations
|
190 |
+
for whether the sentences in the pair are semantically equivalent."""
|
191 |
+
), # pylint: disable=line-too-long
|
192 |
+
text_features={"sentence1": "", "sentence2": ""},
|
193 |
+
label_classes=["not_equivalent", "equivalent"],
|
194 |
+
label_column="Quality",
|
195 |
+
data_url="", # MRPC isn't hosted by GLUE.
|
196 |
+
data_dir="MRPC",
|
197 |
+
citation=textwrap.dedent(
|
198 |
+
"""\
|
199 |
+
@inproceedings{dolan2005automatically,
|
200 |
+
title={Automatically constructing a corpus of sentential paraphrases},
|
201 |
+
author={Dolan, William B and Brockett, Chris},
|
202 |
+
booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
|
203 |
+
year={2005}
|
204 |
+
}"""
|
205 |
+
),
|
206 |
+
url="https://www.microsoft.com/en-us/download/details.aspx?id=52398",
|
207 |
+
),
|
208 |
+
GlueConfig(
|
209 |
+
name="qqp",
|
210 |
+
description=textwrap.dedent(
|
211 |
+
"""\
|
212 |
+
The Quora Question Pairs2 dataset is a collection of question pairs from the
|
213 |
+
community question-answering website Quora. The task is to determine whether a
|
214 |
+
pair of questions are semantically equivalent."""
|
215 |
+
),
|
216 |
+
text_features={
|
217 |
+
"question1": "question1",
|
218 |
+
"question2": "question2",
|
219 |
+
},
|
220 |
+
label_classes=["not_duplicate", "duplicate"],
|
221 |
+
label_column="is_duplicate",
|
222 |
+
data_url="https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip",
|
223 |
+
data_dir="QQP",
|
224 |
+
citation=textwrap.dedent(
|
225 |
+
"""\
|
226 |
+
@online{WinNT,
|
227 |
+
author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
|
228 |
+
title = {First Quora Dataset Release: Question Pairs},
|
229 |
+
year = {2017},
|
230 |
+
url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
|
231 |
+
urldate = {2019-04-03}
|
232 |
+
}"""
|
233 |
+
),
|
234 |
+
url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
|
235 |
+
),
|
236 |
+
GlueConfig(
|
237 |
+
name="stsb",
|
238 |
+
description=textwrap.dedent(
|
239 |
+
"""\
|
240 |
+
The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
|
241 |
+
sentence pairs drawn from news headlines, video and image captions, and natural
|
242 |
+
language inference data. Each pair is human-annotated with a similarity score
|
243 |
+
from 1 to 5."""
|
244 |
+
),
|
245 |
+
text_features={
|
246 |
+
"sentence1": "sentence1",
|
247 |
+
"sentence2": "sentence2",
|
248 |
+
},
|
249 |
+
label_column="score",
|
250 |
+
data_url="https://dl.fbaipublicfiles.com/glue/data/STS-B.zip",
|
251 |
+
data_dir="STS-B",
|
252 |
+
citation=textwrap.dedent(
|
253 |
+
"""\
|
254 |
+
@article{cer2017semeval,
|
255 |
+
title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
|
256 |
+
author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
|
257 |
+
journal={arXiv preprint arXiv:1708.00055},
|
258 |
+
year={2017}
|
259 |
+
}"""
|
260 |
+
),
|
261 |
+
url="http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
|
262 |
+
process_label=np.float32,
|
263 |
+
),
|
264 |
+
GlueConfig(
|
265 |
+
name="mnli",
|
266 |
+
description=textwrap.dedent(
|
267 |
+
"""\
|
268 |
+
The Multi-Genre Natural Language Inference Corpus is a crowdsourced
|
269 |
+
collection of sentence pairs with textual entailment annotations. Given a premise sentence
|
270 |
+
and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
|
271 |
+
(entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
|
272 |
+
gathered from ten different sources, including transcribed speech, fiction, and government reports.
|
273 |
+
We use the standard test set, for which we obtained private labels from the authors, and evaluate
|
274 |
+
on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
|
275 |
+
the SNLI corpus as 550k examples of auxiliary training data."""
|
276 |
+
),
|
277 |
+
**_MNLI_BASE_KWARGS,
|
278 |
+
),
|
279 |
+
GlueConfig(
|
280 |
+
name="mnli_mismatched",
|
281 |
+
description=textwrap.dedent(
|
282 |
+
"""\
|
283 |
+
The mismatched validation and test splits from MNLI.
|
284 |
+
See the "mnli" BuilderConfig for additional information."""
|
285 |
+
),
|
286 |
+
**_MNLI_BASE_KWARGS,
|
287 |
+
),
|
288 |
+
GlueConfig(
|
289 |
+
name="mnli_matched",
|
290 |
+
description=textwrap.dedent(
|
291 |
+
"""\
|
292 |
+
The matched validation and test splits from MNLI.
|
293 |
+
See the "mnli" BuilderConfig for additional information."""
|
294 |
+
),
|
295 |
+
**_MNLI_BASE_KWARGS,
|
296 |
+
),
+        GlueConfig(
+            name="qnli",
+            description=textwrap.dedent(
+                """\
+            The Stanford Question Answering Dataset is a question-answering
+            dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
+            from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
+            convert the task into sentence pair classification by forming a pair between each question and each
+            sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
+            question and the context sentence. The task is to determine whether the context sentence contains
+            the answer to the question. This modified version of the original task removes the requirement that
+            the model select the exact answer, but also removes the simplifying assumptions that the answer
+            is always present in the input and that lexical overlap is a reliable cue."""
+            ),  # pylint: disable=line-too-long
+            text_features={
+                "question": "question",
+                "sentence": "sentence",
+            },
+            label_classes=["entailment", "not_entailment"],
+            label_column="label",
+            data_url="https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip",
+            data_dir="QNLI",
+            citation=textwrap.dedent(
+                """\
+            @article{rajpurkar2016squad,
+              title={Squad: 100,000+ questions for machine comprehension of text},
+              author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
+              journal={arXiv preprint arXiv:1606.05250},
+              year={2016}
+            }"""
+            ),
+            url="https://rajpurkar.github.io/SQuAD-explorer/",
+        ),
+        GlueConfig(
+            name="rte",
+            description=textwrap.dedent(
+                """\
+            The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
+            entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
+            et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009). Examples are
+            constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
+            for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
+            ),  # pylint: disable=line-too-long
+            text_features={
+                "sentence1": "sentence1",
+                "sentence2": "sentence2",
+            },
+            label_classes=["entailment", "not_entailment"],
+            label_column="label",
+            data_url="https://dl.fbaipublicfiles.com/glue/data/RTE.zip",
+            data_dir="RTE",
+            citation=textwrap.dedent(
+                """\
+            @inproceedings{dagan2005pascal,
+              title={The PASCAL recognising textual entailment challenge},
+              author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
+              booktitle={Machine Learning Challenges Workshop},
+              pages={177--190},
+              year={2005},
+              organization={Springer}
+            }
+            @inproceedings{bar2006second,
+              title={The second pascal recognising textual entailment challenge},
+              author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
+              booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
+              volume={6},
+              number={1},
+              pages={6--4},
+              year={2006},
+              organization={Venice}
+            }
+            @inproceedings{giampiccolo2007third,
+              title={The third pascal recognizing textual entailment challenge},
+              author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
+              booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
+              pages={1--9},
+              year={2007},
+              organization={Association for Computational Linguistics}
+            }
+            @inproceedings{bentivogli2009fifth,
+              title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
+              author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
+              booktitle={TAC},
+              year={2009}
+            }"""
+            ),
+            url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
+        ),
+        GlueConfig(
+            name="wnli",
+            description=textwrap.dedent(
+                """\
+            The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
+            in which a system must read a sentence with a pronoun and select the referent of that pronoun from
+            a list of choices. The examples are manually constructed to foil simple statistical methods: Each
+            one is contingent on contextual information provided by a single word or phrase in the sentence.
+            To convert the problem into sentence pair classification, we construct sentence pairs by replacing
+            the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
+            pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
+            new examples derived from fiction books that was shared privately by the authors of the original
+            corpus. While the included training set is balanced between two classes, the test set is imbalanced
+            between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
+            hypotheses are sometimes shared between training and development examples, so if a model memorizes the
+            training examples, it will predict the wrong label on the corresponding development set
+            example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
+            between a model's score on this task and its score on the unconverted original task. We
+            call the converted dataset WNLI (Winograd NLI)."""
+            ),
+            text_features={
+                "sentence1": "sentence1",
+                "sentence2": "sentence2",
+            },
+            label_classes=["not_entailment", "entailment"],
+            label_column="label",
+            data_url="https://dl.fbaipublicfiles.com/glue/data/WNLI.zip",
+            data_dir="WNLI",
+            citation=textwrap.dedent(
+                """\
+            @inproceedings{levesque2012winograd,
+              title={The winograd schema challenge},
+              author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
+              booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
+              year={2012}
+            }"""
+            ),
+            url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
+        ),
+        GlueConfig(
+            name="ax",
+            description=textwrap.dedent(
+                """\
+            A manually-curated evaluation dataset for fine-grained analysis of
+            system performance on a broad range of linguistic phenomena. This
+            dataset evaluates sentence understanding through Natural Language
+            Inference (NLI) problems. Use a model trained on MultiNLI to produce
+            predictions for this dataset."""
+            ),
+            text_features={
+                "premise": "sentence1",
+                "hypothesis": "sentence2",
+            },
+            label_classes=["entailment", "neutral", "contradiction"],
+            label_column="",  # No label since we only have the test set.
+            # The URL on the GLUE site for this file is very long and causes
+            # issues in some tooling, so we download the tsv from a direct
+            # mirror instead.
+            data_url="https://dl.fbaipublicfiles.com/glue/data/AX.tsv",
+            data_dir="",  # We are downloading a tsv.
+            citation="",  # The GLUE citation is sufficient.
+            url="https://gluebenchmark.com/diagnostics",
+        ),
+    ]
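
Each GlueConfig above maps one GLUE task onto the same loading pipeline; only the tsv columns (text_features), the label handling, and the download location differ. A minimal usage sketch, assuming this script is resolved as the "glue" loader by the `datasets` library (split names come from `_split_generators` below):

import datasets

# Classification config: "label" becomes a ClassLabel built from label_classes.
rte = datasets.load_dataset("glue", "rte", split="validation")
print(rte.features["label"].names)  # ['entailment', 'not_entailment']

# Regression config: "stsb" sets no label_classes, so "label" is a float32
# produced by process_label=np.float32.
stsb = datasets.load_dataset("glue", "stsb", split="train")
print(stsb[0]["label"])  # e.g. 5.0

# "ax" ships only a test split; its labels are the dummy value -1.
ax = datasets.load_dataset("glue", "ax", split="test")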
+
+    def _info(self):
+        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
+        if self.config.label_classes:
+            features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
+        else:
+            features["label"] = datasets.Value("float32")
+        features["idx"] = datasets.Value("int32")
+        return datasets.DatasetInfo(
+            description=_GLUE_DESCRIPTION,
+            features=datasets.Features(features),
+            homepage=self.config.url,
+            citation=self.config.citation + "\n" + _GLUE_CITATION,
+        )
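
For a concrete sense of what `_info` builds: with the "qnli" config above, the constructed schema is equivalent to the following (a hand-written sketch of the resulting `Features`, not library output):

import datasets

features = datasets.Features(
    {
        "question": datasets.Value("string"),
        "sentence": datasets.Value("string"),
        "label": datasets.features.ClassLabel(names=["entailment", "not_entailment"]),
        "idx": datasets.Value("int32"),
    }
)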
+
+    def _split_generators(self, dl_manager):
+        if self.config.name == "ax":
+            data_file = dl_manager.download(self.config.data_url)
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "data_file": data_file,
+                        "split": "test",
+                    },
+                )
+            ]
+
+        if self.config.name == "mrpc":
+            data_dir = None
+            mrpc_files = dl_manager.download(
+                {
+                    "dev_ids": _MRPC_DEV_IDS,
+                    "train": _MRPC_TRAIN,
+                    "test": _MRPC_TEST,
+                }
+            )
+        else:
+            dl_dir = dl_manager.download_and_extract(self.config.data_url)
+            data_dir = os.path.join(dl_dir, self.config.data_dir)
+            mrpc_files = None
+        train_split = datasets.SplitGenerator(
+            name=datasets.Split.TRAIN,
+            gen_kwargs={
+                "data_file": os.path.join(data_dir or "", "train.tsv"),
+                "split": "train",
+                "mrpc_files": mrpc_files,
+            },
+        )
+        if self.config.name == "mnli":
+            return [
+                train_split,
+                _mnli_split_generator("validation_matched", data_dir, "dev", matched=True),
+                _mnli_split_generator("validation_mismatched", data_dir, "dev", matched=False),
+                _mnli_split_generator("test_matched", data_dir, "test", matched=True),
+                _mnli_split_generator("test_mismatched", data_dir, "test", matched=False),
+            ]
+        elif self.config.name == "mnli_matched":
+            return [
+                _mnli_split_generator("validation", data_dir, "dev", matched=True),
+                _mnli_split_generator("test", data_dir, "test", matched=True),
+            ]
+        elif self.config.name == "mnli_mismatched":
+            return [
+                _mnli_split_generator("validation", data_dir, "dev", matched=False),
+                _mnli_split_generator("test", data_dir, "test", matched=False),
+            ]
+        else:
+            return [
+                train_split,
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "data_file": os.path.join(data_dir or "", "dev.tsv"),
+                        "split": "dev",
+                        "mrpc_files": mrpc_files,
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "data_file": os.path.join(data_dir or "", "test.tsv"),
+                        "split": "test",
+                        "mrpc_files": mrpc_files,
+                    },
+                ),
+            ]
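
Note the `data_dir or ""` guard in the path joins: it exists only for MRPC, where `data_dir` is None and the real inputs come from `mrpc_files` instead; without the guard, `os.path.join` would raise. A quick illustration:

import os

data_dir = None  # as in the MRPC branch above
print(os.path.join(data_dir or "", "train.tsv"))  # 'train.tsv' (harmless; MRPC never opens this path)
# os.path.join(data_dir, "train.tsv") would raise TypeError for a None data_dir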
+
+    def _generate_examples(self, data_file, split, mrpc_files=None):
+        if self.config.name == "mrpc":
+            # We have to prepare the MRPC dataset from the original sources ourselves.
+            examples = self._generate_example_mrpc_files(mrpc_files=mrpc_files, split=split)
+            for example in examples:
+                yield example["idx"], example
+        else:
+            process_label = self.config.process_label
+            label_classes = self.config.label_classes
+
+            # The train and dev files for CoLA are the only tsv files without a
+            # header.
+            is_cola_non_test = self.config.name == "cola" and split != "test"
+
+            with open(data_file, encoding="utf8") as f:
+                reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                if is_cola_non_test:
+                    reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+
+                for n, row in enumerate(reader):
+                    if is_cola_non_test:
+                        row = {
+                            "sentence": row[3],
+                            "is_acceptable": row[1],
+                        }
+
+                    example = {feat: row[col] for feat, col in self.config.text_features.items()}
+                    example["idx"] = n
+
+                    if self.config.label_column in row:
+                        label = row[self.config.label_column]
+                        # For some tasks, the label is represented as 0 and 1 in the tsv
+                        # files and needs to be cast to integer to work with the feature.
+                        if label_classes and label not in label_classes:
+                            label = int(label) if label else None
+                        example["label"] = process_label(label)
+                    else:
+                        example["label"] = process_label(-1)
+
+                    # Filter out corrupted rows.
+                    for value in example.values():
+                        if value is None:
+                            break
+                    else:
+                        yield example["idx"], example
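
The trailing for/else is Python's loop-else idiom: the `else` clause runs only when the loop finishes without hitting `break`, i.e. only when no field is None. An equivalent, more explicit sketch:

def has_no_corrupted_fields(example):
    # True when every value is present; mirrors the for/else filter above.
    return all(value is not None for value in example.values())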
+
+    def _generate_example_mrpc_files(self, mrpc_files, split):
+        if split == "test":
+            with open(mrpc_files["test"], encoding="utf8") as f:
+                # The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
+                # the Quality key.
+                f.seek(3)
+                reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                for n, row in enumerate(reader):
+                    yield {
+                        "sentence1": row["#1 String"],
+                        "sentence2": row["#2 String"],
+                        "label": int(row["Quality"]),
+                        "idx": n,
+                    }
+        else:
+            with open(mrpc_files["dev_ids"], encoding="utf8") as f:
+                reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                dev_ids = [[row[0], row[1]] for row in reader]
+            with open(mrpc_files["train"], encoding="utf8") as f:
+                # The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
+                # the Quality key.
+                f.seek(3)
+                reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                for n, row in enumerate(reader):
+                    is_row_in_dev = [row["#1 ID"], row["#2 ID"]] in dev_ids
+                    if is_row_in_dev == (split == "dev"):
+                        yield {
+                            "sentence1": row["#1 String"],
+                            "sentence2": row["#2 String"],
+                            "label": int(row["Quality"]),
+                            "idx": n,
+                        }
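
The `f.seek(3)` calls skip the three-byte UTF-8 byte-order mark so that `csv.DictReader` sees "Quality" rather than "\ufeffQuality" as the first header name. An equivalent, arguably more self-documenting alternative (a sketch, not what this loader does) is to let the codec strip the BOM:

import csv

def read_mrpc_rows(path):
    # "utf-8-sig" consumes a leading BOM if present, so no manual seek is needed.
    with open(path, encoding="utf-8-sig") as f:
        yield from csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)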
+
+
+def _mnli_split_generator(name, data_dir, split, matched):
+    return datasets.SplitGenerator(
+        name=name,
+        gen_kwargs={
+            "data_file": os.path.join(data_dir, "%s_%s.tsv" % (split, "matched" if matched else "mismatched")),
+            "split": split,
+            "mrpc_files": None,
+        },
+    )
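
`_mnli_split_generator` only varies the tsv filename and the split name. For the "mnli" config, the four calls in `_split_generators` resolve to dev_matched.tsv, dev_mismatched.tsv, test_matched.tsv, and test_mismatched.tsv inside the extracted MNLI directory. For example:

# Mirrors the filename logic above.
split, matched = "dev", False
print("%s_%s.tsv" % (split, "matched" if matched else "mismatched"))  # dev_mismatched.tsv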