Omnibus committed on
Commit
8db1ad2
1 Parent(s): b43a77a

Create tasks.json

Browse files
Files changed (1) hide show
  1. tasks.json +545 -0
tasks.json ADDED
@@ -0,0 +1,545 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ PIPELINE_DATA = [
2
+ "text-classification": {
3
+ name: "Text Classification",
4
+ subtasks: [
5
+ {
6
+ type: "acceptability-classification",
7
+ name: "Acceptability Classification",
8
+ },
9
+ {
10
+ type: "entity-linking-classification",
11
+ name: "Entity Linking Classification",
12
+ },
13
+ {
14
+ type: "fact-checking",
15
+ name: "Fact Checking",
16
+ },
17
+ {
18
+ type: "intent-classification",
19
+ name: "Intent Classification",
20
+ },
21
+ {
22
+ type: "language-identification",
23
+ name: "Language Identification",
24
+ },
25
+ {
26
+ type: "multi-class-classification",
27
+ name: "Multi Class Classification",
28
+ },
29
+ {
30
+ type: "multi-label-classification",
31
+ name: "Multi Label Classification",
32
+ },
33
+ {
34
+ type: "multi-input-text-classification",
35
+ name: "Multi-input Text Classification",
36
+ },
37
+ {
38
+ type: "natural-language-inference",
39
+ name: "Natural Language Inference",
40
+ },
41
+ {
42
+ type: "semantic-similarity-classification",
43
+ name: "Semantic Similarity Classification",
44
+ },
45
+ {
46
+ type: "sentiment-classification",
47
+ name: "Sentiment Classification",
48
+ },
49
+ {
50
+ type: "topic-classification",
51
+ name: "Topic Classification",
52
+ },
53
+ {
54
+ type: "semantic-similarity-scoring",
55
+ name: "Semantic Similarity Scoring",
56
+ },
57
+ {
58
+ type: "sentiment-scoring",
59
+ name: "Sentiment Scoring",
60
+ },
61
+ {
62
+ type: "sentiment-analysis",
63
+ name: "Sentiment Analysis",
64
+ },
65
+ {
66
+ type: "hate-speech-detection",
67
+ name: "Hate Speech Detection",
68
+ },
69
+ {
70
+ type: "text-scoring",
71
+ name: "Text Scoring",
72
+ },
73
+ ],
74
+ modality: "nlp",
75
+ color: "orange",
76
+ },
77
+ "token-classification": {
78
+ name: "Token Classification",
79
+ subtasks: [
80
+ {
81
+ type: "named-entity-recognition",
82
+ name: "Named Entity Recognition",
83
+ },
84
+ {
85
+ type: "part-of-speech",
86
+ name: "Part of Speech",
87
+ },
88
+ {
89
+ type: "parsing",
90
+ name: "Parsing",
91
+ },
92
+ {
93
+ type: "lemmatization",
94
+ name: "Lemmatization",
95
+ },
96
+ {
97
+ type: "word-sense-disambiguation",
98
+ name: "Word Sense Disambiguation",
99
+ },
100
+ {
101
+ type: "coreference-resolution",
102
+ name: "Coreference-resolution",
103
+ },
104
+ ],
105
+ modality: "nlp",
106
+ color: "blue",
107
+ },
108
+ "table-question-answering": {
109
+ name: "Table Question Answering",
110
+ modality: "nlp",
111
+ color: "green",
112
+ },
113
+ "question-answering": {
114
+ name: "Question Answering",
115
+ subtasks: [
116
+ {
117
+ type: "extractive-qa",
118
+ name: "Extractive QA",
119
+ },
120
+ {
121
+ type: "open-domain-qa",
122
+ name: "Open Domain QA",
123
+ },
124
+ {
125
+ type: "closed-domain-qa",
126
+ name: "Closed Domain QA",
127
+ },
128
+ ],
129
+ modality: "nlp",
130
+ color: "blue",
131
+ },
132
+ "zero-shot-classification": {
133
+ name: "Zero-Shot Classification",
134
+ modality: "nlp",
135
+ color: "yellow",
136
+ },
137
+ "translation": {
138
+ name: "Translation",
139
+ modality: "nlp",
140
+ color: "green",
141
+ },
142
+ "summarization": {
143
+ name: "Summarization",
144
+ subtasks: [
145
+ {
146
+ type: "news-articles-summarization",
147
+ name: "News Articles Summarization",
148
+ },
149
+ {
150
+ type: "news-articles-headline-generation",
151
+ name: "News Articles Headline Generation",
152
+ },
153
+ ],
154
+ modality: "nlp",
155
+ color: "indigo",
156
+ },
157
+ "conversational": {
158
+ name: "Conversational",
159
+ subtasks: [
160
+ {
161
+ type: "dialogue-generation",
162
+ name: "Dialogue Generation",
163
+ },
164
+ ],
165
+ modality: "nlp",
166
+ color: "green",
167
+ },
168
+ "feature-extraction": {
169
+ name: "Feature Extraction",
170
+ modality: "multimodal",
171
+ color: "red",
172
+ },
173
+ "text-generation": {
174
+ name: "Text Generation",
175
+ subtasks: [
176
+ {
177
+ type: "dialogue-modeling",
178
+ name: "Dialogue Modeling",
179
+ },
180
+ {
181
+ type: "language-modeling",
182
+ name: "Language Modeling",
183
+ },
184
+ ],
185
+ modality: "nlp",
186
+ color: "indigo",
187
+ },
188
+ "text2text-generation": {
189
+ name: "Text2Text Generation",
190
+ subtasks: [
191
+ {
192
+ type: "text-simplification",
193
+ name: "Text simplification",
194
+ },
195
+ {
196
+ type: "explanation-generation",
197
+ name: "Explanation Generation",
198
+ },
199
+ {
200
+ type: "abstractive-qa",
201
+ name: "Abstractive QA",
202
+ },
203
+ {
204
+ type: "open-domain-abstractive-qa",
205
+ name: "Open Domain Abstractive QA",
206
+ },
207
+ {
208
+ type: "closed-domain-qa",
209
+ name: "Closed Domain QA",
210
+ },
211
+ {
212
+ type: "open-book-qa",
213
+ name: "Open Book QA",
214
+ },
215
+ {
216
+ type: "closed-book-qa",
217
+ name: "Closed Book QA",
218
+ },
219
+ ],
220
+ modality: "nlp",
221
+ color: "indigo",
222
+ },
223
+ "fill-mask": {
224
+ name: "Fill-Mask",
225
+ subtasks: [
226
+ {
227
+ type: "slot-filling",
228
+ name: "Slot Filling",
229
+ },
230
+ {
231
+ type: "masked-language-modeling",
232
+ name: "Masked Language Modeling",
233
+ },
234
+ ],
235
+ modality: "nlp",
236
+ color: "red",
237
+ },
238
+ "sentence-similarity": {
239
+ name: "Sentence Similarity",
240
+ modality: "nlp",
241
+ color: "yellow",
242
+ },
243
+ "text-to-speech": {
244
+ name: "Text-to-Speech",
245
+ modality: "audio",
246
+ color: "yellow",
247
+ },
248
+ "text-to-audio": {
249
+ name: "Text-to-Audio",
250
+ modality: "audio",
251
+ color: "yellow",
252
+ },
253
+ "automatic-speech-recognition": {
254
+ name: "Automatic Speech Recognition",
255
+ modality: "audio",
256
+ color: "yellow",
257
+ },
258
+ "audio-to-audio": {
259
+ name: "Audio-to-Audio",
260
+ modality: "audio",
261
+ color: "blue",
262
+ },
263
+ "audio-classification": {
264
+ name: "Audio Classification",
265
+ subtasks: [
266
+ {
267
+ type: "keyword-spotting",
268
+ name: "Keyword Spotting",
269
+ },
270
+ {
271
+ type: "speaker-identification",
272
+ name: "Speaker Identification",
273
+ },
274
+ {
275
+ type: "audio-intent-classification",
276
+ name: "Audio Intent Classification",
277
+ },
278
+ {
279
+ type: "audio-emotion-recognition",
280
+ name: "Audio Emotion Recognition",
281
+ },
282
+ {
283
+ type: "audio-language-identification",
284
+ name: "Audio Language Identification",
285
+ },
286
+ ],
287
+ modality: "audio",
288
+ color: "green",
289
+ },
290
+ "voice-activity-detection": {
291
+ name: "Voice Activity Detection",
292
+ modality: "audio",
293
+ color: "red",
294
+ },
295
+ "depth-estimation": {
296
+ name: "Depth Estimation",
297
+ modality: "cv",
298
+ color: "yellow",
299
+ },
300
+ "image-classification": {
301
+ name: "Image Classification",
302
+ subtasks: [
303
+ {
304
+ type: "multi-label-image-classification",
305
+ name: "Multi Label Image Classification",
306
+ },
307
+ {
308
+ type: "multi-class-image-classification",
309
+ name: "Multi Class Image Classification",
310
+ },
311
+ ],
312
+ modality: "cv",
313
+ color: "blue",
314
+ },
315
+ "object-detection": {
316
+ name: "Object Detection",
317
+ subtasks: [
318
+ {
319
+ type: "face-detection",
320
+ name: "Face Detection",
321
+ },
322
+ {
323
+ type: "vehicle-detection",
324
+ name: "Vehicle Detection",
325
+ },
326
+ ],
327
+ modality: "cv",
328
+ color: "yellow",
329
+ },
330
+ "image-segmentation": {
331
+ name: "Image Segmentation",
332
+ subtasks: [
333
+ {
334
+ type: "instance-segmentation",
335
+ name: "Instance Segmentation",
336
+ },
337
+ {
338
+ type: "semantic-segmentation",
339
+ name: "Semantic Segmentation",
340
+ },
341
+ {
342
+ type: "panoptic-segmentation",
343
+ name: "Panoptic Segmentation",
344
+ },
345
+ ],
346
+ modality: "cv",
347
+ color: "green",
348
+ },
349
+ "text-to-image": {
350
+ name: "Text-to-Image",
351
+ modality: "multimodal",
352
+ color: "yellow",
353
+ },
354
+ "image-to-text": {
355
+ name: "Image-to-Text",
356
+ subtasks: [
357
+ {
358
+ type: "image-captioning",
359
+ name: "Image Captioning",
360
+ },
361
+ ],
362
+ modality: "multimodal",
363
+ color: "red",
364
+ },
365
+ "image-to-image": {
366
+ name: "Image-to-Image",
367
+ modality: "cv",
368
+ color: "indigo",
369
+ },
370
+ "unconditional-image-generation": {
371
+ name: "Unconditional Image Generation",
372
+ modality: "cv",
373
+ color: "green",
374
+ },
375
+ "video-classification": {
376
+ name: "Video Classification",
377
+ modality: "cv",
378
+ color: "blue",
379
+ },
380
+ "reinforcement-learning": {
381
+ name: "Reinforcement Learning",
382
+ modality: "rl",
383
+ color: "red",
384
+ },
385
+ "robotics": {
386
+ name: "Robotics",
387
+ modality: "rl",
388
+ subtasks: [
389
+ {
390
+ type: "grasping",
391
+ name: "Grasping",
392
+ },
393
+ {
394
+ type: "task-planning",
395
+ name: "Task Planning",
396
+ },
397
+ ],
398
+ color: "blue",
399
+ },
400
+ "tabular-classification": {
401
+ name: "Tabular Classification",
402
+ modality: "tabular",
403
+ subtasks: [
404
+ {
405
+ type: "tabular-multi-class-classification",
406
+ name: "Tabular Multi Class Classification",
407
+ },
408
+ {
409
+ type: "tabular-multi-label-classification",
410
+ name: "Tabular Multi Label Classification",
411
+ },
412
+ ],
413
+ color: "blue",
414
+ },
415
+ "tabular-regression": {
416
+ name: "Tabular Regression",
417
+ modality: "tabular",
418
+ subtasks: [
419
+ {
420
+ type: "tabular-single-column-regression",
421
+ name: "Tabular Single Column Regression",
422
+ },
423
+ ],
424
+ color: "blue",
425
+ },
426
+ "tabular-to-text": {
427
+ name: "Tabular to Text",
428
+ modality: "tabular",
429
+ subtasks: [
430
+ {
431
+ type: "rdf-to-text",
432
+ name: "RDF to text",
433
+ },
434
+ ],
435
+ color: "blue",
436
+ hideInModels: true,
437
+ },
438
+ "table-to-text": {
439
+ name: "Table to Text",
440
+ modality: "nlp",
441
+ color: "blue",
442
+ hideInModels: true,
443
+ },
444
+ "multiple-choice": {
445
+ name: "Multiple Choice",
446
+ subtasks: [
447
+ {
448
+ type: "multiple-choice-qa",
449
+ name: "Multiple Choice QA",
450
+ },
451
+ {
452
+ type: "multiple-choice-coreference-resolution",
453
+ name: "Multiple Choice Coreference Resolution",
454
+ },
455
+ ],
456
+ modality: "nlp",
457
+ color: "blue",
458
+ hideInModels: true,
459
+ },
460
+ "text-retrieval": {
461
+ name: "Text Retrieval",
462
+ subtasks: [
463
+ {
464
+ type: "document-retrieval",
465
+ name: "Document Retrieval",
466
+ },
467
+ {
468
+ type: "utterance-retrieval",
469
+ name: "Utterance Retrieval",
470
+ },
471
+ {
472
+ type: "entity-linking-retrieval",
473
+ name: "Entity Linking Retrieval",
474
+ },
475
+ {
476
+ type: "fact-checking-retrieval",
477
+ name: "Fact Checking Retrieval",
478
+ },
479
+ ],
480
+ modality: "nlp",
481
+ color: "indigo",
482
+ hideInModels: true,
483
+ },
484
+ "time-series-forecasting": {
485
+ name: "Time Series Forecasting",
486
+ modality: "tabular",
487
+ subtasks: [
488
+ {
489
+ type: "univariate-time-series-forecasting",
490
+ name: "Univariate Time Series Forecasting",
491
+ },
492
+ {
493
+ type: "multivariate-time-series-forecasting",
494
+ name: "Multivariate Time Series Forecasting",
495
+ },
496
+ ],
497
+ color: "blue",
498
+ hideInModels: true,
499
+ },
500
+ "text-to-video": {
501
+ name: "Text-to-Video",
502
+ modality: "multimodal",
503
+ color: "green",
504
+ },
505
+ "visual-question-answering": {
506
+ name: "Visual Question Answering",
507
+ subtasks: [
508
+ {
509
+ type: "visual-question-answering",
510
+ name: "Visual Question Answering",
511
+ },
512
+ ],
513
+ modality: "multimodal",
514
+ color: "red",
515
+ },
516
+ "document-question-answering": {
517
+ name: "Document Question Answering",
518
+ subtasks: [
519
+ {
520
+ type: "document-question-answering",
521
+ name: "Document Question Answering",
522
+ },
523
+ ],
524
+ modality: "multimodal",
525
+ color: "blue",
526
+ hideInDatasets: true,
527
+ },
528
+ "zero-shot-image-classification": {
529
+ name: "Zero-Shot Image Classification",
530
+ modality: "cv",
531
+ color: "yellow",
532
+ },
533
+ "graph-ml": {
534
+ name: "Graph Machine Learning",
535
+ modality: "multimodal",
536
+ color: "green",
537
+ },
538
+ "other": {
539
+ name: "Other",
540
+ modality: "other",
541
+ color: "blue",
542
+ hideInModels: true,
543
+ hideInDatasets: true,
544
+ },
545
+ ]