Datasets:
Update README.md
Browse files
README.md
CHANGED
@@ -88,4 +88,129 @@ size_categories:
    "wmt16_translate_tr-en": null,
    "yelp_polarity_reviews": null
}
+```
+
+```python
+text_classification = [
+    "ag_news_subset",
+    "glue_cola",
+    "glue_sst2",
+    "imdb_reviews_plain_text",
+    "yelp_polarity_reviews"
+]
+
+question_answering = [
+    "ai2_arc_ARC-Challenge",
+    "ai2_arc_ARC-Easy",
+    "bool_q",
+    "coqa",
+    "cosmos_qa",
+    "drop",
+    "natural_questions_open",
+    "openbookqa",
+    "quac",
+    "squad_v1_1",
+    "squad_v2_0",
+    "trivia_qa_rc"
+]
+
+text_generation = [
+    "aeslc",
+    "cnn_dailymail",
+    "gem_common_gen",
+    "gem_dart",
+    "gem_e2e_nlg",
+    "gem_web_nlg_en",
+    "gem_wiki_lingua_english_en",
+    "gigaword",
+    "huggingface_xsum",
+    "lambada",
+    "multi_news",
+    "newsroom",
+    "samsum"
+]
+
+translation = [
+    "wmt14_translate_fr-en",
+    "wmt16_translate_cs-en",
+    "wmt16_translate_de-en",
+    "wmt16_translate_fi-en",
+    "wmt16_translate_ro-en",
+    "wmt16_translate_ru-en",
+    "wmt16_translate_tr-en"
+]
+
+sentiment_analysis = [
+    "sentiment140"
+]
+
+textual_entailment = [
+    "anli_r1",
+    "anli_r2",
+    "anli_r3",
+    "glue_mnli",
+    "glue_rte",
+    "snli",
+    "super_glue_cb",
+    "super_glue_copa",
+    "super_glue_rte"
+]
+
+paraphrase_detection = [
+    "glue_mrpc",
+    "glue_qqp",
+    "paws_wiki"
+]
+
+commonsense_reasoning = [
+    "hellaswag",
+    "piqa",
+    "super_glue_multirc",
+    "super_glue_record",
+    "super_glue_wic",
+    "super_glue_wsc_fixed",
+    "winogrande"
+]
+
+textual_similarity = [
+    "glue_stsb"
+]
+
+named_entity_recognition = [
+    "glue_wnli"
+]
+
+text_correction = [
+    "fix_punct",
+    "true_case"
+]
+
+text_segmentation = [
+    "word_segment"
+]
+
+argument_mining = [
+    "opinion_abstracts_idebate",
+    "opinion_abstracts_rotten_tomatoes"
+]
+
+machine_reading_comprehension = [
+    "glue_qnli"
+]
+
+text_summarization = [
+    "trec"
+]
+
+language_modelling = [
+    "story_cloze_2016"
+]
+
+math_problem_solving = [
+    "math_dataset_algebra__linear_1d",
+    "unified_qa_science_inst"
+]
+
+cross_lingual_information_retrieval = [
+    "para_crawl_enes"
+]
 ```
|