import type { ModelLibraryKey } from "./model-libraries";
import type { PipelineType } from "./pipelines";

/**
 * Mapping from library name to its supported tasks.
 * Inference API (serverless) should be disabled for any (library, task) pair not in this mapping.
 * This mapping is partially auto-generated by the "python-api-export-tasks" action in the
 * huggingface/api-inference-community repo upon merge. For transformers, the mapping is maintained
 * manually, based on api-inference (hf_types.rs).
 */
export const LIBRARY_TASK_MAPPING: Partial<Record<ModelLibraryKey, PipelineType[]>> = {
	"adapter-transformers": ["question-answering", "text-classification", "token-classification"],
	allennlp: ["question-answering"],
	asteroid: [
		// "audio-source-separation",
		"audio-to-audio",
	],
	bertopic: ["text-classification"],
	diffusers: ["image-to-image", "text-to-image"],
	doctr: ["object-detection"],
	espnet: ["text-to-speech", "automatic-speech-recognition"],
	fairseq: ["text-to-speech", "audio-to-audio"],
	fastai: ["image-classification"],
	fasttext: ["feature-extraction", "text-classification"],
	flair: ["token-classification"],
	k2: ["automatic-speech-recognition"],
	keras: ["image-classification"],
	mindspore: ["image-classification"],
	nemo: ["automatic-speech-recognition"],
	open_clip: ["zero-shot-classification", "zero-shot-image-classification"],
	paddlenlp: ["fill-mask", "summarization", "zero-shot-classification"],
	peft: ["text-generation"],
	"pyannote-audio": ["automatic-speech-recognition"],
	"sentence-transformers": ["feature-extraction", "sentence-similarity"],
	setfit: ["text-classification"],
	sklearn: ["tabular-classification", "tabular-regression", "text-classification"],
	spacy: ["token-classification", "text-classification", "sentence-similarity"],
	"span-marker": ["token-classification"],
	speechbrain: [
		"audio-classification",
		"audio-to-audio",
		"automatic-speech-recognition",
		"text-to-speech",
		"text2text-generation",
	],
	stanza: ["token-classification"],
	timm: ["image-classification"],
	transformers: [
		"audio-classification",
		"automatic-speech-recognition",
		"depth-estimation",
		"document-question-answering",
		"feature-extraction",
		"fill-mask",
		"image-classification",
		"image-segmentation",
		"image-to-image",
		"image-to-text",
		"object-detection",
		"question-answering",
		"summarization",
		"table-question-answering",
		"text2text-generation",
		"text-classification",
		"text-generation",
		"text-to-audio",
		"text-to-speech",
		"token-classification",
		"translation",
		"video-classification",
		"visual-question-answering",
		"zero-shot-classification",
		"zero-shot-image-classification",
	],
};
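
/**
 * Example usage (an illustrative sketch, not part of the upstream file): a hypothetical
 * helper that checks whether the serverless Inference API should be enabled for a given
 * (library, task) pair, based solely on LIBRARY_TASK_MAPPING above.
 */
export function isInferenceEnabled(library: ModelLibraryKey, task: PipelineType): boolean {
	// Pairs absent from the mapping default to disabled, per the comment on the mapping.
	return LIBRARY_TASK_MAPPING[library]?.includes(task) ?? false;
}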