machineuser committed
Commit • 7026e84
1 Parent(s): 60fd1b8
Sync widgets demo
- packages/tasks/README.md +1 -1
- packages/tasks/package.json +1 -1
- packages/tasks/src/default-widget-inputs.ts +2 -2
- packages/tasks/src/index.ts +5 -5
- packages/tasks/src/library-to-tasks.ts +2 -2
- packages/tasks/src/model-data.ts +2 -0
- packages/tasks/src/model-libraries-downloads.ts +20 -0
- packages/tasks/src/{library-ui-elements.ts → model-libraries-snippets.ts} +43 -291
- packages/tasks/src/model-libraries.ts +375 -44
- packages/tasks/src/pipelines.ts +29 -19
- packages/tasks/src/snippets/curl.ts +0 -1
- packages/tasks/src/snippets/inputs.ts +0 -8
- packages/tasks/src/snippets/js.ts +0 -1
- packages/tasks/src/snippets/python.ts +0 -1
- packages/tasks/src/tasks/conversational/about.md +0 -50
- packages/tasks/src/tasks/conversational/data.ts +0 -66
- packages/tasks/src/tasks/index.ts +6 -4
- packages/tasks/src/tokenizer-data.ts +24 -0
- packages/widgets/package.json +3 -2
- packages/widgets/pnpm-lock.yaml +3 -0
- packages/widgets/src/lib/components/Icons/IconImageAndTextToText.svelte +28 -0
- packages/widgets/src/lib/components/Icons/IconImageFeatureExtraction.svelte +22 -0
- packages/widgets/src/lib/components/InferenceWidget/InferenceWidget.svelte +7 -5
- packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputConvo/WidgetOutputConvo.svelte +9 -6
- packages/widgets/src/lib/components/InferenceWidget/widgets/ConversationalWidget/ConversationalWidget.svelte +90 -61
- packages/widgets/src/lib/components/PipelineIcon/PipelineIcon.svelte +6 -2
- packages/widgets/src/routes/+page.svelte +18 -1
packages/tasks/README.md
CHANGED
@@ -25,7 +25,7 @@ This package contains the definition files (written in Typescript) for the huggi

 - **pipeline types** a.k.a. **task types** (used to determine which widget to display on the model page, and which inference API to run)
 - **default widget inputs** (when they aren't provided in the model card)
-- definitions and UI elements for **
+- definitions and UI elements for **model libraries** (and soon for **dataset libraries**).

 Please add to any of those definitions by opening a PR. Thanks 🔥
packages/tasks/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@huggingface/tasks",
   "packageManager": "pnpm@8.10.5",
-  "version": "0.
+  "version": "0.3.2",
   "description": "List of ML tasks for huggingface.co/tasks",
   "repository": "https://github.com/huggingface/huggingface.js.git",
   "publishConfig": {
packages/tasks/src/default-widget-inputs.ts
CHANGED
@@ -1,9 +1,9 @@
 import type { WidgetExample } from "./widget-example";
-import type {
+import type { WidgetType } from "./pipelines";

 type LanguageCode = string;

-type PerLanguageMapping = Map<
+type PerLanguageMapping = Map<WidgetType, string[] | WidgetExample[]>;

 /// NOTE TO CONTRIBUTORS:
 ///
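Note: a minimal sketch of how this per-language mapping can be consumed; the mapping literal and the lookup helper below are illustrative and not part of this commit.

  import type { WidgetExample } from "./widget-example";
  import type { WidgetType } from "./pipelines";

  type LanguageCode = string;
  type PerLanguageMapping = Map<WidgetType, string[] | WidgetExample[]>;

  // Hypothetical default inputs: English fill-mask models get a masked sentence.
  const defaults: Map<LanguageCode, PerLanguageMapping> = new Map([
    ["en", new Map<WidgetType, string[] | WidgetExample[]>([["fill-mask", ["Paris is the <mask> of France."]]])],
  ]);

  // Pick the first default input for a (language, widget type) pair, if any.
  function defaultInputFor(lang: LanguageCode, widget: WidgetType): string | WidgetExample | undefined {
    return defaults.get(lang)?.get(widget)?.[0];
  }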
packages/tasks/src/index.ts
CHANGED
@@ -1,11 +1,11 @@
 export { LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS } from "./library-to-tasks";
-export { MODEL_LIBRARIES_UI_ELEMENTS } from "./library-ui-elements";
 export { MAPPING_DEFAULT_WIDGET } from "./default-widget-inputs";
 export type { TaskData, TaskDemo, TaskDemoEntry, ExampleRepo } from "./tasks";
 export * from "./tasks";
 export {
   PIPELINE_DATA,
   PIPELINE_TYPES,
+  type WidgetType,
   type PipelineType,
   type PipelineData,
   type Modality,
@@ -14,9 +14,10 @@ export {
   SUBTASK_TYPES,
   PIPELINE_TYPES_SET,
 } from "./pipelines";
-export {
-export type { ModelLibraryKey } from "./model-libraries";
+export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ALL_MODEL_LIBRARY_KEYS, MODEL_LIBRARIES_UI_ELEMENTS } from "./model-libraries";
+export type { LibraryUiElement, ModelLibraryKey } from "./model-libraries";
 export type { ModelData, TransformersInfo } from "./model-data";
+export type { SpecialTokensMap, TokenizerConfig } from "./tokenizer-data";
 export type {
   WidgetExample,
   WidgetExampleAttribute,
@@ -38,8 +39,7 @@ export type {
   WidgetExampleOutputText,
 } from "./widget-example";
 export { InferenceDisplayability } from "./model-data";
+export { SPECIAL_TOKENS_ATTRIBUTES } from "./tokenizer-data";

 import * as snippets from "./snippets";
 export { snippets };
-
-export type { LibraryUiElement } from "./library-ui-elements";
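Note: an illustrative consumer-side import of the re-exported symbols (the package specifier and usage below are assumptions, not part of this commit):

  import {
    MODEL_LIBRARIES_UI_ELEMENTS,
    ALL_DISPLAY_MODEL_LIBRARY_KEYS,
    type ModelLibraryKey,
  } from "@huggingface/tasks";

  // Libraries that should show up in the hf.co/models library filter.
  console.log(ALL_DISPLAY_MODEL_LIBRARY_KEYS);

  // UI metadata for one library key.
  const key: ModelLibraryKey = "timm";
  console.log(MODEL_LIBRARIES_UI_ELEMENTS[key].repoUrl);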
packages/tasks/src/library-to-tasks.ts
CHANGED
@@ -3,7 +3,7 @@ import type { PipelineType } from "./pipelines";

 /**
  * Mapping from library name (excluding Transformers) to its supported tasks.
- * Inference
+ * Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
  * As an exception, we assume Transformers supports all inference tasks.
  * This mapping is generated automatically by "python-api-export-tasks" action in huggingface/api-inference-community repo upon merge.
  * Ref: https://github.com/huggingface/api-inference-community/pull/158
@@ -27,7 +27,7 @@ export const LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS: Partial<Record<ModelLi
   keras: ["image-classification"],
   nemo: ["automatic-speech-recognition"],
   open_clip: ["zero-shot-classification", "zero-shot-image-classification"],
-  paddlenlp: ["
+  paddlenlp: ["fill-mask", "summarization", "zero-shot-classification"],
   peft: ["text-generation"],
   "pyannote-audio": ["automatic-speech-recognition"],
   "sentence-transformers": ["feature-extraction", "sentence-similarity"],
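Note: a minimal sketch of how this mapping can gate serverless inference; the helper below is illustrative, not part of the library.

  import { LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS } from "@huggingface/tasks";
  import type { ModelLibraryKey, PipelineType } from "@huggingface/tasks";

  // Transformers is assumed to support every task; other libraries only the listed ones.
  function isServerlessInferenceEnabled(library: ModelLibraryKey, task: PipelineType): boolean {
    if (library === "transformers") return true;
    return LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS[library]?.includes(task) ?? false;
  }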
packages/tasks/src/model-data.ts
CHANGED
@@ -1,5 +1,6 @@
 import type { PipelineType } from "./pipelines";
 import type { WidgetExample } from "./widget-example";
+import type { TokenizerConfig } from "./tokenizer-data";

 export enum InferenceDisplayability {
   /**
@@ -53,6 +54,7 @@ export interface ModelData {
       base_model_name?: string;
       task_type?: string;
     };
+    tokenizer?: TokenizerConfig;
   };
   /**
    * all the model tags
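Note: a hypothetical sketch of a model config carrying the new optional tokenizer field. tokenizer-data.ts is not shown in this diff, so the concrete keys below (bos_token, eos_token, chat_template) are assumptions.

  // Illustrative config object only; field names are not confirmed by this commit.
  const exampleConfig = {
    architectures: ["SomeForCausalLM"],
    tokenizer: {
      bos_token: "<s>",
      eos_token: "</s>",
      chat_template: "{% for message in messages %}...{% endfor %}",
    },
  };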
packages/tasks/src/model-libraries-downloads.ts
ADDED
@@ -0,0 +1,20 @@
+/**
+ * This file contains the (simplified) types used
+ * to represent queries that are made to Elastic
+ * in order to count number of model downloads
+ *
+ * Read this doc about download stats on the Hub:
+ *
+ * https://huggingface.co/docs/hub/models-download-stats
+ *
+ * see also:
+ * https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-bool-query.html
+ */
+
+export type ElasticBoolQueryFilter =
+  // match a single filename
+  | { term?: { path: string } }
+  // match multiple possible filenames
+  | { terms?: { path: string[] } }
+  // match a wildcard
+  | { wildcard?: { path: string } };
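Note: the three filter shapes this union allows, using path values that appear in the library entries later in this commit (the variable names are illustrative):

  import type { ElasticBoolQueryFilter } from "./model-libraries-downloads";

  // Count downloads by a single file, a fixed set of files, or a filename pattern.
  const single: ElasticBoolQueryFilter = { term: { path: "adapter_config.json" } };
  const several: ElasticBoolQueryFilter = { terms: { path: ["pytorch_model.bin", "model.safetensors"] } };
  const pattern: ElasticBoolQueryFilter = { wildcard: { path: "*.nemo" } };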
packages/tasks/src/{library-ui-elements.ts → model-libraries-snippets.ts}
RENAMED
@@ -1,35 +1,7 @@
 import type { ModelData } from "./model-data";
-import type { ModelLibraryKey } from "./model-libraries";

 const TAG_CUSTOM_CODE = "custom_code";

-/**
- * Elements configurable by a model library.
- */
-export interface LibraryUiElement {
-  /**
-   * Name displayed on the main
-   * call-to-action button on the model page.
-   */
-  btnLabel: string;
-  /**
-   * Repo name
-   */
-  repoName: string;
-  /**
-   * URL to library's repo
-   */
-  repoUrl: string;
-  /**
-   * URL to library's docs
-   */
-  docsUrl?: string;
-  /**
-   * Code snippet displayed on model page
-   */
-  snippets: (model: ModelData) => string[];
-}
-
 function nameWithoutNamespace(modelId: string): string {
   const splitted = modelId.split("/");
   return splitted.length === 1 ? splitted[0] : splitted[1];
@@ -37,7 +9,7 @@ function nameWithoutNamespace(modelId: string): string {

 //#region snippets

-const adapters = (model: ModelData) => [
+export const adapters = (model: ModelData): string[] => [
   `from adapters import AutoAdapterModel

 model = AutoAdapterModel.from_pretrained("${model.config?.adapter_transformers?.model_name}")
@@ -60,14 +32,14 @@ predictor_input = {"passage": "My name is Wolfgang and I live in Berlin", "quest
 predictions = predictor.predict_json(predictor_input)`,
 ];

-const allennlp = (model: ModelData) => {
+export const allennlp = (model: ModelData): string[] => {
   if (model.tags?.includes("question-answering")) {
     return allennlpQuestionAnswering(model);
   }
   return allennlpUnknown(model);
 };

-const asteroid = (model: ModelData) => [
+export const asteroid = (model: ModelData): string[] => [
   `from asteroid.models import BaseModel

 model = BaseModel.from_pretrained("${model.id}")`,
@@ -77,7 +49,7 @@ function get_base_diffusers_model(model: ModelData): string {
   return model.cardData?.base_model?.toString() ?? "fill-in-base-model";
 }

-const bertopic = (model: ModelData) => [
+export const bertopic = (model: ModelData): string[] => [
   `from bertopic import BERTopic

 model = BERTopic.load("${model.id}")`,
@@ -112,7 +84,7 @@ pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}
 pipeline.load_textual_inversion("${model.id}")`,
 ];

-const diffusers = (model: ModelData) => {
+export const diffusers = (model: ModelData): string[] => {
   if (model.tags?.includes("controlnet")) {
     return diffusers_controlnet(model);
   } else if (model.tags?.includes("lora")) {
@@ -124,7 +96,7 @@ const diffusers = (model: ModelData) => {
   }
 };

-const espnetTTS = (model: ModelData) => [
+export const espnetTTS = (model: ModelData): string[] => [
   `from espnet2.bin.tts_inference import Text2Speech

 model = Text2Speech.from_pretrained("${model.id}")
@@ -132,7 +104,7 @@ model = Text2Speech.from_pretrained("${model.id}")
 speech, *_ = model("text to generate speech from")`,
 ];

-const espnetASR = (model: ModelData) => [
+export const espnetASR = (model: ModelData): string[] => [
   `from espnet2.bin.asr_inference import Speech2Text

 model = Speech2Text.from_pretrained(
@@ -145,7 +117,7 @@ text, *_ = model(speech)[0]`,

 const espnetUnknown = () => [`unknown model type (must be text-to-speech or automatic-speech-recognition)`];

-const espnet = (model: ModelData) => {
+export const espnet = (model: ModelData): string[] => {
   if (model.tags?.includes("text-to-speech")) {
     return espnetTTS(model);
   } else if (model.tags?.includes("automatic-speech-recognition")) {
@@ -154,7 +126,7 @@ const espnet = (model: ModelData) => {
   return espnetUnknown();
 };

-const fairseq = (model: ModelData) => [
+export const fairseq = (model: ModelData): string[] => [
   `from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub

 models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
@@ -162,27 +134,27 @@ models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
 )`,
 ];

-const flair = (model: ModelData) => [
+export const flair = (model: ModelData): string[] => [
   `from flair.models import SequenceTagger

 tagger = SequenceTagger.load("${model.id}")`,
 ];

-const keras = (model: ModelData) => [
+export const keras = (model: ModelData): string[] => [
   `from huggingface_hub import from_pretrained_keras

 model = from_pretrained_keras("${model.id}")
 `,
 ];

-const open_clip = (model: ModelData) => [
+export const open_clip = (model: ModelData): string[] => [
   `import open_clip

 model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms('hf-hub:${model.id}')
 tokenizer = open_clip.get_tokenizer('hf-hub:${model.id}')`,
 ];

-const paddlenlp = (model: ModelData) => {
+export const paddlenlp = (model: ModelData): string[] => {
   if (model.config?.architectures?.[0]) {
     const architecture = model.config.architectures[0];
     return [
@@ -206,7 +178,7 @@ const paddlenlp = (model: ModelData) => {
   }
 };

-const pyannote_audio_pipeline = (model: ModelData) => [
+export const pyannote_audio_pipeline = (model: ModelData): string[] => [
   `from pyannote.audio import Pipeline

 pipeline = Pipeline.from_pretrained("${model.id}")
@@ -223,7 +195,7 @@ waveform, sample_rate = Audio().crop("file.wav", excerpt)
 pipeline({"waveform": waveform, "sample_rate": sample_rate})`,
 ];

-const pyannote_audio_model = (model: ModelData) => [
+const pyannote_audio_model = (model: ModelData): string[] => [
   `from pyannote.audio import Model, Inference

 model = Model.from_pretrained("${model.id}")
@@ -238,14 +210,14 @@ excerpt = Segment(start=2.0, end=5.0)
 inference.crop("file.wav", excerpt)`,
 ];

-const pyannote_audio = (model: ModelData) => {
+export const pyannote_audio = (model: ModelData): string[] => {
   if (model.tags?.includes("pyannote-audio-pipeline")) {
     return pyannote_audio_pipeline(model);
   }
   return pyannote_audio_model(model);
 };

-const tensorflowttsTextToMel = (model: ModelData) => [
+const tensorflowttsTextToMel = (model: ModelData): string[] => [
   `from tensorflow_tts.inference import AutoProcessor, TFAutoModel

 processor = AutoProcessor.from_pretrained("${model.id}")
@@ -253,7 +225,7 @@ model = TFAutoModel.from_pretrained("${model.id}")
 `,
 ];

-const tensorflowttsMelToWav = (model: ModelData) => [
+const tensorflowttsMelToWav = (model: ModelData): string[] => [
   `from tensorflow_tts.inference import TFAutoModel

 model = TFAutoModel.from_pretrained("${model.id}")
@@ -261,14 +233,14 @@ audios = model.inference(mels)
 `,
 ];

-const tensorflowttsUnknown = (model: ModelData) => [
+const tensorflowttsUnknown = (model: ModelData): string[] => [
   `from tensorflow_tts.inference import TFAutoModel

 model = TFAutoModel.from_pretrained("${model.id}")
 `,
 ];

-const tensorflowtts = (model: ModelData) => {
+export const tensorflowtts = (model: ModelData): string[] => {
   if (model.tags?.includes("text-to-mel")) {
     return tensorflowttsTextToMel(model);
   } else if (model.tags?.includes("mel-to-wav")) {
@@ -277,7 +249,7 @@ const tensorflowtts = (model: ModelData) => {
   return tensorflowttsUnknown(model);
 };

-const timm = (model: ModelData) => [
+export const timm = (model: ModelData): string[] => [
   `import timm

 model = timm.create_model("hf_hub:${model.id}", pretrained=True)`,
@@ -319,7 +291,7 @@ model = joblib.load(
   ];
 };

-const sklearn = (model: ModelData) => {
+export const sklearn = (model: ModelData): string[] => {
   if (model.tags?.includes("skops")) {
     const skopsmodelFile = model.config?.sklearn?.filename;
     const skopssaveFormat = model.config?.sklearn?.model_format;
@@ -336,29 +308,29 @@ const sklearn = (model: ModelData) => {
   }
 };

-const fastai = (model: ModelData) => [
+export const fastai = (model: ModelData): string[] => [
   `from huggingface_hub import from_pretrained_fastai

 learn = from_pretrained_fastai("${model.id}")`,
 ];

-const sampleFactory = (model: ModelData) => [
+export const sampleFactory = (model: ModelData): string[] => [
   `python -m sample_factory.huggingface.load_from_hub -r ${model.id} -d ./train_dir`,
 ];

-const sentenceTransformers = (model: ModelData) => [
+export const sentenceTransformers = (model: ModelData): string[] => [
   `from sentence_transformers import SentenceTransformer

 model = SentenceTransformer("${model.id}")`,
 ];

-const setfit = (model: ModelData) => [
+export const setfit = (model: ModelData): string[] => [
   `from setfit import SetFitModel

 model = SetFitModel.from_pretrained("${model.id}")`,
 ];

-const spacy = (model: ModelData) => [
+export const spacy = (model: ModelData): string[] => [
   `!pip install https://huggingface.co/${model.id}/resolve/main/${nameWithoutNamespace(model.id)}-any-py3-none-any.whl

 # Using spacy.load().
@@ -370,13 +342,13 @@ import ${nameWithoutNamespace(model.id)}
 nlp = ${nameWithoutNamespace(model.id)}.load()`,
 ];

-const span_marker = (model: ModelData) => [
+export const span_marker = (model: ModelData): string[] => [
   `from span_marker import SpanMarkerModel

 model = SpanMarkerModel.from_pretrained("${model.id}")`,
 ];

-const stanza = (model: ModelData) => [
+export const stanza = (model: ModelData): string[] => [
   `import stanza

 stanza.download("${nameWithoutNamespace(model.id).replace("stanza-", "")}")
@@ -399,7 +371,7 @@ const speechBrainMethod = (speechbrainInterface: string) => {
   }
 };

-const speechbrain = (model: ModelData) => {
+export const speechbrain = (model: ModelData): string[] => {
   const speechbrainInterface = model.config?.speechbrain?.interface;
   if (speechbrainInterface === undefined) {
     return [`# interface not specified in config.json`];
@@ -419,7 +391,7 @@ model.${speechbrainMethod}("file.wav")`,
   ];
 };

-const transformers = (model: ModelData) => {
+export const transformers = (model: ModelData): string[] => {
   const info = model.transformersInfo;
   if (!info) {
     return [`# ⚠️ Type of model unknown`];
@@ -461,7 +433,7 @@ const transformers = (model: ModelData) => {
   return [autoSnippet];
 };

-const transformersJS = (model: ModelData) => {
+export const transformersJS = (model: ModelData): string[] => {
   if (!model.pipeline_tag) {
     return [`// ⚠️ Unknown pipeline tag`];
   }
@@ -492,7 +464,7 @@ const peftTask = (peftTaskType?: string) => {
   }
 };

-const peft = (model: ModelData) => {
+export const peft = (model: ModelData): string[] => {
   const { base_model_name: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {};
   const pefttask = peftTask(peftTaskType);
   if (!pefttask) {
@@ -512,14 +484,14 @@ model = PeftModel.from_pretrained(model, "${model.id}")`,
   ];
 };

-const fasttext = (model: ModelData) => [
+export const fasttext = (model: ModelData): string[] => [
   `from huggingface_hub import hf_hub_download
 import fasttext

 model = fasttext.load_model(hf_hub_download("${model.id}", "model.bin"))`,
 ];

-const stableBaselines3 = (model: ModelData) => [
+export const stableBaselines3 = (model: ModelData): string[] => [
   `from huggingface_sb3 import load_from_hub
 checkpoint = load_from_hub(
   repo_id="${model.id}",
@@ -541,9 +513,11 @@ transcriptions = asr_model.transcribe(["file.wav"])`,
   }
 };

-const mlAgents = (model: ModelData) => [
+export const mlAgents = (model: ModelData): string[] => [
+  `mlagents-load-from-hf --repo-id="${model.id}" --local-dir="./downloads"`,
+];

-const sentis = (/* model: ModelData */) => [
+export const sentis = (/* model: ModelData */): string[] => [
   `string modelName = "[Your model name here].sentis";
 Model model = ModelLoader.Load(Application.streamingAssetsPath + "/" + modelName);
 IWorker engine = WorkerFactory.CreateWorker(BackendType.GPUCompute, model);
@@ -551,14 +525,14 @@ IWorker engine = WorkerFactory.CreateWorker(BackendType.GPUCompute, model);
 `,
 ];

-const mlx = (model: ModelData) => [
+export const mlx = (model: ModelData): string[] => [
   `pip install huggingface_hub hf_transfer

-export
+export HF_HUB_ENABLE_HF_TRANSFER=1
 huggingface-cli download --local-dir ${nameWithoutNamespace(model.id)} ${model.id}`,
 ];

-const nemo = (model: ModelData) => {
+export const nemo = (model: ModelData): string[] => {
   let command: string[] | undefined = undefined;
   // Resolve the tag to a nemo domain/sub-domain
   if (model.tags?.includes("automatic-speech-recognition")) {
@@ -568,232 +542,10 @@ const nemo = (model: ModelData) => {
   return command ?? [`# tag did not correspond to a valid NeMo domain.`];
 };

-const pythae = (model: ModelData) => [
+export const pythae = (model: ModelData): string[] => [
   `from pythae.models import AutoModel

 model = AutoModel.load_from_hf_hub("${model.id}")`,
 ];

 //#endregion
-
-export const MODEL_LIBRARIES_UI_ELEMENTS: Partial<Record<ModelLibraryKey, LibraryUiElement>> = {
-  "adapter-transformers": { btnLabel: "Adapters", repoName: "adapters", repoUrl: "https://github.com/Adapter-Hub/adapters", docsUrl: "https://huggingface.co/docs/hub/adapters", snippets: adapters },
-  allennlp: { btnLabel: "AllenNLP", repoName: "AllenNLP", repoUrl: "https://github.com/allenai/allennlp", docsUrl: "https://huggingface.co/docs/hub/allennlp", snippets: allennlp },
-  asteroid: { btnLabel: "Asteroid", repoName: "Asteroid", repoUrl: "https://github.com/asteroid-team/asteroid", docsUrl: "https://huggingface.co/docs/hub/asteroid", snippets: asteroid },
-  bertopic: { btnLabel: "BERTopic", repoName: "BERTopic", repoUrl: "https://github.com/MaartenGr/BERTopic", snippets: bertopic },
-  diffusers: { btnLabel: "Diffusers", repoName: "🤗/diffusers", repoUrl: "https://github.com/huggingface/diffusers", docsUrl: "https://huggingface.co/docs/hub/diffusers", snippets: diffusers },
-  espnet: { btnLabel: "ESPnet", repoName: "ESPnet", repoUrl: "https://github.com/espnet/espnet", docsUrl: "https://huggingface.co/docs/hub/espnet", snippets: espnet },
-  fairseq: { btnLabel: "Fairseq", repoName: "fairseq", repoUrl: "https://github.com/pytorch/fairseq", snippets: fairseq },
-  flair: { btnLabel: "Flair", repoName: "Flair", repoUrl: "https://github.com/flairNLP/flair", docsUrl: "https://huggingface.co/docs/hub/flair", snippets: flair },
-  keras: { btnLabel: "Keras", repoName: "Keras", repoUrl: "https://github.com/keras-team/keras", docsUrl: "https://huggingface.co/docs/hub/keras", snippets: keras },
-  mlx: { btnLabel: "MLX", repoName: "MLX", repoUrl: "https://github.com/ml-explore/mlx-examples/tree/main", snippets: mlx },
-  nemo: { btnLabel: "NeMo", repoName: "NeMo", repoUrl: "https://github.com/NVIDIA/NeMo", snippets: nemo },
-  open_clip: { btnLabel: "OpenCLIP", repoName: "OpenCLIP", repoUrl: "https://github.com/mlfoundations/open_clip", snippets: open_clip },
-  paddlenlp: { btnLabel: "paddlenlp", repoName: "PaddleNLP", repoUrl: "https://github.com/PaddlePaddle/PaddleNLP", docsUrl: "https://huggingface.co/docs/hub/paddlenlp", snippets: paddlenlp },
-  peft: { btnLabel: "PEFT", repoName: "PEFT", repoUrl: "https://github.com/huggingface/peft", snippets: peft },
-  "pyannote-audio": { btnLabel: "pyannote.audio", repoName: "pyannote-audio", repoUrl: "https://github.com/pyannote/pyannote-audio", snippets: pyannote_audio },
-  "sentence-transformers": { btnLabel: "sentence-transformers", repoName: "sentence-transformers", repoUrl: "https://github.com/UKPLab/sentence-transformers", docsUrl: "https://huggingface.co/docs/hub/sentence-transformers", snippets: sentenceTransformers },
-  setfit: { btnLabel: "setfit", repoName: "setfit", repoUrl: "https://github.com/huggingface/setfit", docsUrl: "https://huggingface.co/docs/hub/setfit", snippets: setfit },
-  sklearn: { btnLabel: "Scikit-learn", repoName: "Scikit-learn", repoUrl: "https://github.com/scikit-learn/scikit-learn", snippets: sklearn },
-  fastai: { btnLabel: "fastai", repoName: "fastai", repoUrl: "https://github.com/fastai/fastai", docsUrl: "https://huggingface.co/docs/hub/fastai", snippets: fastai },
-  spacy: { btnLabel: "spaCy", repoName: "spaCy", repoUrl: "https://github.com/explosion/spaCy", docsUrl: "https://huggingface.co/docs/hub/spacy", snippets: spacy },
-  "span-marker": { btnLabel: "SpanMarker", repoName: "SpanMarkerNER", repoUrl: "https://github.com/tomaarsen/SpanMarkerNER", docsUrl: "https://huggingface.co/docs/hub/span_marker", snippets: span_marker },
-  speechbrain: { btnLabel: "speechbrain", repoName: "speechbrain", repoUrl: "https://github.com/speechbrain/speechbrain", docsUrl: "https://huggingface.co/docs/hub/speechbrain", snippets: speechbrain },
-  stanza: { btnLabel: "Stanza", repoName: "stanza", repoUrl: "https://github.com/stanfordnlp/stanza", docsUrl: "https://huggingface.co/docs/hub/stanza", snippets: stanza },
-  tensorflowtts: { btnLabel: "TensorFlowTTS", repoName: "TensorFlowTTS", repoUrl: "https://github.com/TensorSpeech/TensorFlowTTS", snippets: tensorflowtts },
-  timm: { btnLabel: "timm", repoName: "pytorch-image-models", repoUrl: "https://github.com/rwightman/pytorch-image-models", docsUrl: "https://huggingface.co/docs/hub/timm", snippets: timm },
-  transformers: { btnLabel: "Transformers", repoName: "🤗/transformers", repoUrl: "https://github.com/huggingface/transformers", docsUrl: "https://huggingface.co/docs/hub/transformers", snippets: transformers },
-  "transformers.js": { btnLabel: "Transformers.js", repoName: "transformers.js", repoUrl: "https://github.com/xenova/transformers.js", docsUrl: "https://huggingface.co/docs/hub/transformers-js", snippets: transformersJS },
-  fasttext: { btnLabel: "fastText", repoName: "fastText", repoUrl: "https://fasttext.cc/", snippets: fasttext },
-  "sample-factory": { btnLabel: "sample-factory", repoName: "sample-factory", repoUrl: "https://github.com/alex-petrenko/sample-factory", docsUrl: "https://huggingface.co/docs/hub/sample-factory", snippets: sampleFactory },
-  "stable-baselines3": { btnLabel: "stable-baselines3", repoName: "stable-baselines3", repoUrl: "https://github.com/huggingface/huggingface_sb3", docsUrl: "https://huggingface.co/docs/hub/stable-baselines3", snippets: stableBaselines3 },
-  "ml-agents": { btnLabel: "ml-agents", repoName: "ml-agents", repoUrl: "https://github.com/Unity-Technologies/ml-agents", docsUrl: "https://huggingface.co/docs/hub/ml-agents", snippets: mlAgents },
-  "unity-sentis": { btnLabel: "unity-sentis", repoName: "unity-sentis", repoUrl: "https://github.com/Unity-Technologies/sentis-samples", snippets: sentis },
-  pythae: { btnLabel: "pythae", repoName: "pythae", repoUrl: "https://github.com/clementchadebec/benchmark_VAE", snippets: pythae },
-} as const;
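Note: a quick illustration of what these snippet builders return; the model id below is made up and the call shape assumes the exported functions above.

  import * as snippets from "./model-libraries-snippets";
  import type { ModelData } from "./model-data";

  // Minimal ModelData-like input; only the fields the timm snippet reads.
  const model = { id: "my-org/my-vision-model" } as ModelData;

  // Produces: ['import timm\n\nmodel = timm.create_model("hf_hub:my-org/my-vision-model", pretrained=True)']
  console.log(snippets.timm(model));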
packages/tasks/src/model-libraries.ts
CHANGED
@@ -1,52 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
/**
|
2 |
* Add your new library here.
|
3 |
*
|
4 |
* This is for modeling (= architectures) libraries, not for file formats (like ONNX, etc).
|
5 |
-
*
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
*/
|
7 |
-
export enum ModelLibrary {
|
8 |
-
"adapter-transformers" = "Adapters",
|
9 |
-
"allennlp" = "allenNLP",
|
10 |
-
"asteroid" = "Asteroid",
|
11 |
-
"bertopic" = "BERTopic",
|
12 |
-
"diffusers" = "Diffusers",
|
13 |
-
"doctr" = "docTR",
|
14 |
-
"espnet" = "ESPnet",
|
15 |
-
"fairseq" = "Fairseq",
|
16 |
-
"flair" = "Flair",
|
17 |
-
"keras" = "Keras",
|
18 |
-
"k2" = "K2",
|
19 |
-
"mlx" = "MLX",
|
20 |
-
"nemo" = "NeMo",
|
21 |
-
"open_clip" = "OpenCLIP",
|
22 |
-
"paddlenlp" = "PaddleNLP",
|
23 |
-
"peft" = "PEFT",
|
24 |
-
"pyannote-audio" = "pyannote.audio",
|
25 |
-
"sample-factory" = "Sample Factory",
|
26 |
-
"sentence-transformers" = "Sentence Transformers",
|
27 |
-
"setfit" = "SetFit",
|
28 |
-
"sklearn" = "Scikit-learn",
|
29 |
-
"spacy" = "spaCy",
|
30 |
-
"span-marker" = "SpanMarker",
|
31 |
-
"speechbrain" = "speechbrain",
|
32 |
-
"tensorflowtts" = "TensorFlowTTS",
|
33 |
-
"timm" = "Timm",
|
34 |
-
"fastai" = "fastai",
|
35 |
-
"transformers" = "Transformers",
|
36 |
-
"transformers.js" = "Transformers.js",
|
37 |
-
"stanza" = "Stanza",
|
38 |
-
"fasttext" = "fastText",
|
39 |
-
"stable-baselines3" = "Stable-Baselines3",
|
40 |
-
"ml-agents" = "Unity ML-Agents",
|
41 |
-
"pythae" = "Pythae",
|
42 |
-
"mindspore" = "MindSpore",
|
43 |
-
"unity-sentis" = "Unity Sentis",
|
44 |
-
}
|
45 |
|
46 |
-
export
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
47 |
|
48 |
-
export const ALL_MODEL_LIBRARY_KEYS = Object.keys(
|
49 |
|
50 |
-
export const ALL_DISPLAY_MODEL_LIBRARY_KEYS =
|
51 |
-
(
|
52 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import * as snippets from "./model-libraries-snippets";
|
2 |
+
import type { ModelData } from "./model-data";
|
3 |
+
import type { ElasticBoolQueryFilter } from "./model-libraries-downloads";
|
4 |
+
|
5 |
+
/**
|
6 |
+
* Elements configurable by a model library.
|
7 |
+
*/
|
8 |
+
export interface LibraryUiElement {
|
9 |
+
/**
|
10 |
+
* Pretty name of the library.
|
11 |
+
* displayed in tags, and on the main
|
12 |
+
* call-to-action button on the model page.
|
13 |
+
*/
|
14 |
+
prettyLabel: string;
|
15 |
+
/**
|
16 |
+
* Repo name of the library's (usually on GitHub) code repo
|
17 |
+
*/
|
18 |
+
repoName: string;
|
19 |
+
/**
|
20 |
+
* URL to library's (usually on GitHub) code repo
|
21 |
+
*/
|
22 |
+
repoUrl: string;
|
23 |
+
/**
|
24 |
+
* URL to library's docs
|
25 |
+
*/
|
26 |
+
docsUrl?: string;
|
27 |
+
/**
|
28 |
+
* Code snippet(s) displayed on model page
|
29 |
+
*/
|
30 |
+
snippets?: (model: ModelData) => string[];
|
31 |
+
/**
|
32 |
+
* Elastic query used to count this library's model downloads
|
33 |
+
*
|
34 |
+
* By default, those files are counted:
|
35 |
+
* "config.json", "config.yaml", "hyperparams.yaml", "meta.yaml"
|
36 |
+
*/
|
37 |
+
countDownloads?: ElasticBoolQueryFilter;
|
38 |
+
/**
|
39 |
+
* should we display this library in hf.co/models filter
|
40 |
+
* (only for popular libraries with > 100 models)
|
41 |
+
*/
|
42 |
+
filter?: boolean;
|
43 |
+
}
|
44 |
+
|
45 |
/**
|
46 |
* Add your new library here.
|
47 |
*
|
48 |
* This is for modeling (= architectures) libraries, not for file formats (like ONNX, etc).
|
49 |
+
* (unlike libraries, file formats live in an enum inside the internal codebase.)
|
50 |
+
*
|
51 |
+
* Doc on how to add a library to the Hub:
|
52 |
+
*
|
53 |
+
* https://huggingface.co/docs/hub/models-adding-libraries
|
54 |
+
*
|
55 |
+
* /!\ IMPORTANT
|
56 |
+
*
|
57 |
+
* The key you choose is the tag your models have in their library_name on the Hub.
|
58 |
*/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
|
60 |
+
export const MODEL_LIBRARIES_UI_ELEMENTS = {
|
61 |
+
"adapter-transformers": {
|
62 |
+
prettyLabel: "Adapters",
|
63 |
+
repoName: "adapters",
|
64 |
+
repoUrl: "https://github.com/Adapter-Hub/adapters",
|
65 |
+
docsUrl: "https://huggingface.co/docs/hub/adapters",
|
66 |
+
snippets: snippets.adapters,
|
67 |
+
filter: true,
|
68 |
+
countDownloads: {
|
69 |
+
term: { path: "adapter_config.json" },
|
70 |
+
},
|
71 |
+
},
|
72 |
+
allennlp: {
|
73 |
+
prettyLabel: "AllenNLP",
|
74 |
+
repoName: "AllenNLP",
|
75 |
+
repoUrl: "https://github.com/allenai/allennlp",
|
76 |
+
docsUrl: "https://huggingface.co/docs/hub/allennlp",
|
77 |
+
snippets: snippets.allennlp,
|
78 |
+
filter: true,
|
79 |
+
},
|
80 |
+
asteroid: {
|
81 |
+
prettyLabel: "Asteroid",
|
82 |
+
repoName: "Asteroid",
|
83 |
+
repoUrl: "https://github.com/asteroid-team/asteroid",
|
84 |
+
docsUrl: "https://huggingface.co/docs/hub/asteroid",
|
85 |
+
snippets: snippets.asteroid,
|
86 |
+
filter: true,
|
87 |
+
countDownloads: {
|
88 |
+
term: { path: "pytorch_model.bin" },
|
89 |
+
},
|
90 |
+
},
|
91 |
+
bertopic: {
|
92 |
+
prettyLabel: "BERTopic",
|
93 |
+
repoName: "BERTopic",
|
94 |
+
repoUrl: "https://github.com/MaartenGr/BERTopic",
|
95 |
+
snippets: snippets.bertopic,
|
96 |
+
filter: true,
|
97 |
+
},
|
98 |
+
diffusers: {
|
99 |
+
prettyLabel: "Diffusers",
|
100 |
+
repoName: "π€/diffusers",
|
101 |
+
repoUrl: "https://github.com/huggingface/diffusers",
|
102 |
+
docsUrl: "https://huggingface.co/docs/hub/diffusers",
|
103 |
+
snippets: snippets.diffusers,
|
104 |
+
filter: true,
|
105 |
+
/// diffusers has its own more complex "countDownloads" query
|
106 |
+
},
|
107 |
+
doctr: {
|
108 |
+
prettyLabel: "docTR",
|
109 |
+
repoName: "doctr",
|
110 |
+
repoUrl: "https://github.com/mindee/doctr",
|
111 |
+
},
|
112 |
+
espnet: {
|
113 |
+
prettyLabel: "ESPnet",
|
114 |
+
repoName: "ESPnet",
|
115 |
+
repoUrl: "https://github.com/espnet/espnet",
|
116 |
+
docsUrl: "https://huggingface.co/docs/hub/espnet",
|
117 |
+
snippets: snippets.espnet,
|
118 |
+
filter: true,
|
119 |
+
},
|
120 |
+
fairseq: {
|
121 |
+
prettyLabel: "Fairseq",
|
122 |
+
repoName: "fairseq",
|
123 |
+
repoUrl: "https://github.com/pytorch/fairseq",
|
124 |
+
snippets: snippets.fairseq,
|
125 |
+
filter: true,
|
126 |
+
},
|
127 |
+
fastai: {
|
128 |
+
prettyLabel: "fastai",
|
129 |
+
repoName: "fastai",
|
130 |
+
repoUrl: "https://github.com/fastai/fastai",
|
131 |
+
docsUrl: "https://huggingface.co/docs/hub/fastai",
|
132 |
+
snippets: snippets.fastai,
|
133 |
+
filter: true,
|
134 |
+
},
|
135 |
+
fasttext: {
|
136 |
+
prettyLabel: "fastText",
|
137 |
+
repoName: "fastText",
|
138 |
+
repoUrl: "https://fasttext.cc/",
|
139 |
+
snippets: snippets.fasttext,
|
140 |
+
filter: true,
|
141 |
+
},
|
142 |
+
flair: {
|
143 |
+
prettyLabel: "Flair",
|
144 |
+
repoName: "Flair",
|
145 |
+
repoUrl: "https://github.com/flairNLP/flair",
|
146 |
+
docsUrl: "https://huggingface.co/docs/hub/flair",
|
147 |
+
snippets: snippets.flair,
|
148 |
+
filter: true,
|
149 |
+
countDownloads: {
|
150 |
+
term: { path: "pytorch_model.bin" },
|
151 |
+
},
|
152 |
+
},
|
153 |
+
keras: {
|
154 |
+
prettyLabel: "Keras",
|
155 |
+
repoName: "Keras",
|
156 |
+
repoUrl: "https://github.com/keras-team/keras",
|
157 |
+
docsUrl: "https://huggingface.co/docs/hub/keras",
|
158 |
+
snippets: snippets.keras,
|
159 |
+
filter: true,
|
160 |
+
countDownloads: { term: { path: "saved_model.pb" } },
|
161 |
+
},
|
162 |
+
k2: {
|
163 |
+
prettyLabel: "K2",
|
164 |
+
repoName: "k2",
|
165 |
+
repoUrl: "https://github.com/k2-fsa/k2",
|
166 |
+
},
|
167 |
+
mindspore: {
|
168 |
+
prettyLabel: "MindSpore",
|
169 |
+
repoName: "mindspore",
|
170 |
+
repoUrl: "https://github.com/mindspore-ai/mindspore",
|
171 |
+
},
|
172 |
+
"ml-agents": {
|
173 |
+
prettyLabel: "ml-agents",
|
174 |
+
repoName: "ml-agents",
|
175 |
+
repoUrl: "https://github.com/Unity-Technologies/ml-agents",
|
176 |
+
docsUrl: "https://huggingface.co/docs/hub/ml-agents",
|
177 |
+
snippets: snippets.mlAgents,
|
178 |
+
filter: true,
|
179 |
+
countDownloads: { wildcard: { path: "*.onnx" } },
|
180 |
+
},
|
181 |
+
mlx: {
|
182 |
+
prettyLabel: "MLX",
|
183 |
+
repoName: "MLX",
|
184 |
+
repoUrl: "https://github.com/ml-explore/mlx-examples/tree/main",
|
185 |
+
snippets: snippets.mlx,
|
186 |
+
filter: true,
|
187 |
+
},
|
188 |
+
nemo: {
|
189 |
+
prettyLabel: "NeMo",
|
190 |
+
repoName: "NeMo",
|
191 |
+
repoUrl: "https://github.com/NVIDIA/NeMo",
|
192 |
+
snippets: snippets.nemo,
|
193 |
+
filter: true,
|
194 |
+
countDownloads: { wildcard: { path: "*.nemo" } },
|
195 |
+
},
|
196 |
+
open_clip: {
|
197 |
+
prettyLabel: "OpenCLIP",
|
198 |
+
repoName: "OpenCLIP",
|
199 |
+
repoUrl: "https://github.com/mlfoundations/open_clip",
|
200 |
+
snippets: snippets.open_clip,
|
201 |
+
filter: true,
|
202 |
+
countDownloads: { wildcard: { path: "*pytorch_model.bin" } },
|
203 |
+
},
|
204 |
+
paddlenlp: {
|
205 |
+
prettyLabel: "paddlenlp",
|
206 |
+
repoName: "PaddleNLP",
|
207 |
+
repoUrl: "https://github.com/PaddlePaddle/PaddleNLP",
|
208 |
+
docsUrl: "https://huggingface.co/docs/hub/paddlenlp",
|
209 |
+
snippets: snippets.paddlenlp,
|
210 |
+
filter: true,
|
211 |
+
countDownloads: {
|
212 |
+
term: { path: "model_config.json" },
|
213 |
+
},
|
214 |
+
},
|
215 |
+
peft: {
|
216 |
+
prettyLabel: "PEFT",
|
217 |
+
repoName: "PEFT",
|
218 |
+
repoUrl: "https://github.com/huggingface/peft",
|
219 |
+
snippets: snippets.peft,
|
220 |
+
filter: true,
|
221 |
+
countDownloads: {
|
222 |
+
term: { path: "adapter_config.json" },
|
223 |
+
},
|
224 |
+
},
|
225 |
+
"pyannote-audio": {
|
226 |
+
prettyLabel: "pyannote.audio",
|
227 |
+
repoName: "pyannote-audio",
|
228 |
+
repoUrl: "https://github.com/pyannote/pyannote-audio",
|
229 |
+
snippets: snippets.pyannote_audio,
|
230 |
+
filter: true,
|
231 |
+
},
|
232 |
+
pythae: {
|
233 |
+
prettyLabel: "pythae",
|
234 |
+
repoName: "pythae",
|
235 |
+
repoUrl: "https://github.com/clementchadebec/benchmark_VAE",
|
236 |
+
snippets: snippets.pythae,
|
237 |
+
filter: true,
|
238 |
+
},
|
239 |
+
"sample-factory": {
|
240 |
+
prettyLabel: "sample-factory",
|
241 |
+
repoName: "sample-factory",
|
242 |
+
repoUrl: "https://github.com/alex-petrenko/sample-factory",
|
243 |
+
docsUrl: "https://huggingface.co/docs/hub/sample-factory",
|
244 |
+
snippets: snippets.sampleFactory,
|
245 |
+
+		filter: true,
+		countDownloads: { term: { path: "cfg.json" } },
+	},
+	"sentence-transformers": {
+		prettyLabel: "sentence-transformers",
+		repoName: "sentence-transformers",
+		repoUrl: "https://github.com/UKPLab/sentence-transformers",
+		docsUrl: "https://huggingface.co/docs/hub/sentence-transformers",
+		snippets: snippets.sentenceTransformers,
+		filter: true,
+	},
+	setfit: {
+		prettyLabel: "setfit",
+		repoName: "setfit",
+		repoUrl: "https://github.com/huggingface/setfit",
+		docsUrl: "https://huggingface.co/docs/hub/setfit",
+		snippets: snippets.setfit,
+		filter: true,
+	},
+	sklearn: {
+		prettyLabel: "Scikit-learn",
+		repoName: "Scikit-learn",
+		repoUrl: "https://github.com/scikit-learn/scikit-learn",
+		snippets: snippets.sklearn,
+		filter: true,
+		countDownloads: {
+			term: { path: "sklearn_model.joblib" },
+		},
+	},
+	spacy: {
+		prettyLabel: "spaCy",
+		repoName: "spaCy",
+		repoUrl: "https://github.com/explosion/spaCy",
+		docsUrl: "https://huggingface.co/docs/hub/spacy",
+		snippets: snippets.spacy,
+		filter: true,
+		countDownloads: {
+			wildcard: { path: "*.whl" },
+		},
+	},
+	"span-marker": {
+		prettyLabel: "SpanMarker",
+		repoName: "SpanMarkerNER",
+		repoUrl: "https://github.com/tomaarsen/SpanMarkerNER",
+		docsUrl: "https://huggingface.co/docs/hub/span_marker",
+		snippets: snippets.span_marker,
+		filter: true,
+	},
+	speechbrain: {
+		prettyLabel: "speechbrain",
+		repoName: "speechbrain",
+		repoUrl: "https://github.com/speechbrain/speechbrain",
+		docsUrl: "https://huggingface.co/docs/hub/speechbrain",
+		snippets: snippets.speechbrain,
+		filter: true,
+		countDownloads: {
+			term: { path: "hyperparams.yaml" },
+		},
+	},
+	"stable-baselines3": {
+		prettyLabel: "stable-baselines3",
+		repoName: "stable-baselines3",
+		repoUrl: "https://github.com/huggingface/huggingface_sb3",
+		docsUrl: "https://huggingface.co/docs/hub/stable-baselines3",
+		snippets: snippets.stableBaselines3,
+		filter: true,
+		countDownloads: {
+			wildcard: { path: "*.zip" },
+		},
+	},
+	stanza: {
+		prettyLabel: "Stanza",
+		repoName: "stanza",
+		repoUrl: "https://github.com/stanfordnlp/stanza",
+		docsUrl: "https://huggingface.co/docs/hub/stanza",
+		snippets: snippets.stanza,
+		filter: true,
+		countDownloads: {
+			term: { path: "models/default.zip" },
+		},
+	},
+	tensorflowtts: {
+		prettyLabel: "TensorFlowTTS",
+		repoName: "TensorFlowTTS",
+		repoUrl: "https://github.com/TensorSpeech/TensorFlowTTS",
+		snippets: snippets.tensorflowtts,
+	},
+	timm: {
+		prettyLabel: "timm",
+		repoName: "pytorch-image-models",
+		repoUrl: "https://github.com/rwightman/pytorch-image-models",
+		docsUrl: "https://huggingface.co/docs/hub/timm",
+		snippets: snippets.timm,
+		filter: true,
+		countDownloads: {
+			terms: { path: ["pytorch_model.bin", "model.safetensors"] },
+		},
+	},
+	transformers: {
+		prettyLabel: "Transformers",
+		repoName: "🤗/transformers",
+		repoUrl: "https://github.com/huggingface/transformers",
+		docsUrl: "https://huggingface.co/docs/hub/transformers",
+		snippets: snippets.transformers,
+		filter: true,
+	},
+	"transformers.js": {
+		prettyLabel: "Transformers.js",
+		repoName: "transformers.js",
+		repoUrl: "https://github.com/xenova/transformers.js",
+		docsUrl: "https://huggingface.co/docs/hub/transformers-js",
+		snippets: snippets.transformersJS,
+		filter: true,
+	},
+	"unity-sentis": {
+		prettyLabel: "unity-sentis",
+		repoName: "unity-sentis",
+		repoUrl: "https://github.com/Unity-Technologies/sentis-samples",
+		snippets: snippets.sentis,
+		filter: true,
+		countDownloads: {
+			wildcard: { path: "*.sentis" },
+		},
+	},
+} satisfies Record<string, LibraryUiElement>;
+
+export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;
 
+export const ALL_MODEL_LIBRARY_KEYS = Object.keys(MODEL_LIBRARIES_UI_ELEMENTS) as ModelLibraryKey[];
 
+export const ALL_DISPLAY_MODEL_LIBRARY_KEYS = (
+	Object.entries(MODEL_LIBRARIES_UI_ELEMENTS as Record<ModelLibraryKey, LibraryUiElement>) as [
+		ModelLibraryKey,
+		LibraryUiElement,
+	][]
+)
+	// eslint-disable-next-line @typescript-eslint/no-unused-vars
+	.filter(([_, v]) => v.filter)
+	.map(([k]) => k);
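Note on the `countDownloads` fields added above: they are Elasticsearch-style `term` / `terms` / `wildcard` filters over repo file paths, telling the Hub which file fetch should count as a download for that library. The sketch below only illustrates how such a rule could be matched against a filename; the helper names are hypothetical and this is not the Hub's actual counting code.

```ts
// Hypothetical sketch: evaluate a countDownloads rule of the shape used above.
type CountDownloadsRule =
	| { term: { path: string } }
	| { terms: { path: string[] } }
	| { wildcard: { path: string } };

function escapeRegExp(s: string): string {
	return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
}

function matchesRule(rule: CountDownloadsRule, filename: string): boolean {
	if ("term" in rule) {
		return filename === rule.term.path;
	}
	if ("terms" in rule) {
		return rule.terms.path.includes(filename);
	}
	// Translate the ES-style wildcard ("*" = any run of characters) into a RegExp.
	const pattern = "^" + rule.wildcard.path.split("*").map(escapeRegExp).join(".*") + "$";
	return new RegExp(pattern).test(filename);
}

// Example: the stable-baselines3 rule above counts any "*.zip" file.
console.log(matchesRule({ wildcard: { path: "*.zip" } }, "ppo-LunarLander-v2.zip")); // true
```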
packages/tasks/src/pipelines.ts
CHANGED
@@ -225,20 +225,9 @@ export const PIPELINE_DATA = {
 		modality: "nlp",
 		color: "indigo",
 	},
-	conversational: {
-		name: "Conversational",
-		subtasks: [
-			{
-				type: "dialogue-generation",
-				name: "Dialogue Generation",
-			},
-		],
-		modality: "nlp",
-		color: "green",
-	},
 	"feature-extraction": {
 		name: "Feature Extraction",
-		modality: "
+		modality: "nlp",
 		color: "red",
 	},
 	"text-generation": {
@@ -248,6 +237,14 @@
 			type: "dialogue-modeling",
 			name: "Dialogue Modeling",
 		},
+		{
+			type: "dialogue-generation",
+			name: "Dialogue Generation",
+		},
+		{
+			type: "conversational",
+			name: "Conversational",
+		},
 		{
 			type: "language-modeling",
 			name: "Language Modeling",
@@ -419,7 +416,7 @@
 	},
 	"text-to-image": {
 		name: "Text-to-Image",
-		modality: "
+		modality: "cv",
 		color: "yellow",
 	},
 	"image-to-text": {
@@ -430,7 +427,7 @@
 			name: "Image Captioning",
 		},
 	],
-		modality: "
+		modality: "cv",
 		color: "red",
 	},
 	"image-to-image": {
@@ -454,7 +451,7 @@
 	},
 	"image-to-video": {
 		name: "Image-to-Video",
-		modality: "
+		modality: "cv",
 		color: "indigo",
 	},
 	"unconditional-image-generation": {
@@ -589,9 +586,15 @@
 	},
 	"text-to-video": {
 		name: "Text-to-Video",
-		modality: "
+		modality: "cv",
 		color: "green",
 	},
+	"image-text-to-text": {
+		name: "Image + Text to Text (VLLMs)",
+		modality: "multimodal",
+		color: "red",
+		hideInDatasets: true,
+	},
 	"visual-question-answering": {
 		name: "Visual Question Answering",
 		subtasks: [
@@ -622,7 +625,7 @@
 	},
 	"graph-ml": {
 		name: "Graph Machine Learning",
-		modality: "
+		modality: "other",
 		color: "green",
 	},
 	"mask-generation": {
@@ -637,14 +640,19 @@
 	},
 	"text-to-3d": {
 		name: "Text-to-3D",
-		modality: "
+		modality: "cv",
 		color: "yellow",
 	},
 	"image-to-3d": {
 		name: "Image-to-3D",
-		modality: "
+		modality: "cv",
 		color: "green",
 	},
+	"image-feature-extraction": {
+		name: "Image Feature Extraction",
+		modality: "cv",
+		color: "indigo",
+	},
 	other: {
 		name: "Other",
 		modality: "other",
@@ -656,6 +664,8 @@
 
 export type PipelineType = keyof typeof PIPELINE_DATA;
 
+export type WidgetType = PipelineType | "conversational";
+
 export const PIPELINE_TYPES = Object.keys(PIPELINE_DATA) as PipelineType[];
 
 export const SUBTASK_TYPES = Object.values(PIPELINE_DATA)
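As the hunks above show, `conversational` stops being a standalone pipeline and survives only as a widget-only key via the new `WidgetType`. A minimal sketch of how a consumer might branch on it, assuming the exported types above (the helper itself is hypothetical and simply mirrors the widget logic further down in this diff):

```ts
import type { PipelineType, WidgetType } from "@huggingface/tasks";

// Hypothetical helper: decide which widget type to render for a model.
function widgetTypeFor(pipelineTag: PipelineType, tags: string[] = []): WidgetType {
	// "conversational" is no longer a pipeline of its own; it is inferred
	// from a text-generation model carrying the "conversational" tag.
	if (pipelineTag === "text-generation" && tags.includes("conversational")) {
		return "conversational";
	}
	return pipelineTag;
}
```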
packages/tasks/src/snippets/curl.ts
CHANGED
@@ -34,7 +34,6 @@ export const curlSnippets: Partial<Record<PipelineType, (model: ModelData, acces
 	"zero-shot-classification": snippetZeroShotClassification,
 	translation: snippetBasic,
 	summarization: snippetBasic,
-	conversational: snippetBasic,
 	"feature-extraction": snippetBasic,
 	"text-generation": snippetBasic,
 	"text2text-generation": snippetBasic,
packages/tasks/src/snippets/inputs.ts
CHANGED
@@ -9,13 +9,6 @@ const inputsTranslation = () => `"Меня зовут Вольфганг и я
 const inputsSummarization = () =>
 	`"The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."`;
 
-const inputsConversational = () =>
-	`{
-	"past_user_inputs": ["Which movie is the best ?"],
-	"generated_responses": ["It is Die Hard for sure."],
-	"text": "Can you explain why ?"
-}`;
-
 const inputsTableQuestionAnswering = () =>
 	`{
 	"query": "How many stars does the transformers repository have?",
@@ -96,7 +89,6 @@ const modelInputSnippets: {
 	"audio-to-audio": inputsAudioToAudio,
 	"audio-classification": inputsAudioClassification,
 	"automatic-speech-recognition": inputsAutomaticSpeechRecognition,
-	conversational: inputsConversational,
 	"document-question-answering": inputsVisualQuestionAnswering,
 	"feature-extraction": inputsFeatureExtraction,
 	"fill-mask": inputsFillMask,
packages/tasks/src/snippets/js.ts
CHANGED
@@ -121,7 +121,6 @@ export const jsSnippets: Partial<Record<PipelineType, (model: ModelData, accessT
 	"zero-shot-classification": snippetZeroShotClassification,
 	translation: snippetBasic,
 	summarization: snippetBasic,
-	conversational: snippetBasic,
 	"feature-extraction": snippetBasic,
 	"text-generation": snippetBasic,
 	"text2text-generation": snippetBasic,
packages/tasks/src/snippets/python.ts
CHANGED
@@ -116,7 +116,6 @@ export const pythonSnippets: Partial<Record<PipelineType, (model: ModelData) =>
 	"zero-shot-classification": snippetZeroShotClassification,
 	translation: snippetBasic,
 	summarization: snippetBasic,
-	conversational: snippetBasic,
 	"feature-extraction": snippetBasic,
 	"text-generation": snippetBasic,
 	"text2text-generation": snippetBasic,
packages/tasks/src/tasks/conversational/about.md
DELETED
@@ -1,50 +0,0 @@
-## Use Cases
-
-### Chatbot 💬
-
-Chatbots are used to have conversations instead of providing direct contact with a live human. They are used to provide customer service, sales, and can even be used to play games (see [ELIZA](https://en.wikipedia.org/wiki/ELIZA) from 1966 for one of the earliest examples).
-
-## Voice Assistants 🎙️
-
-Conversational response models are used as part of voice assistants to provide appropriate responses to voice based queries.
-
-## Inference
-
-You can infer with Conversational models with the 🤗 Transformers library using the `conversational` pipeline. This pipeline takes a conversation prompt or a list of conversations and generates responses for each prompt. The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task (see https://huggingface.co/models?filter=conversational for a list of updated Conversational models).
-
-```python
-from transformers import pipeline, Conversation
-converse = pipeline("conversational")
-
-conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
-conversation_2 = Conversation("What's the last book you have read?")
-converse([conversation_1, conversation_2])
-
-## Output:
-## Conversation 1
-## user >> Going to the movies tonight - any suggestions?
-## bot >> The Big Lebowski ,
-## Conversation 2
-## user >> What's the last book you have read?
-## bot >> The Last Question
-```
-
-You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer with conversational models on Hugging Face Hub.
-
-```javascript
-import { HfInference } from "@huggingface/inference";
-
-const inference = new HfInference(HF_TOKEN);
-await inference.conversational({
-	model: "facebook/blenderbot-400M-distill",
-	inputs: "Going to the movies tonight - any suggestions?",
-});
-```
-
-## Useful Resources
-
-- Learn how ChatGPT and InstructGPT work in this blog: [Illustrating Reinforcement Learning from Human Feedback (RLHF)](https://huggingface.co/blog/rlhf)
-- [Reinforcement Learning from Human Feedback From Zero to ChatGPT](https://www.youtube.com/watch?v=EAd4oQtEJOM)
-- [A guide on Dialog Agents](https://huggingface.co/blog/dialog-agents)
-
-This page was made possible thanks to the efforts of [Viraat Aryabumi](https://huggingface.co/viraat).
packages/tasks/src/tasks/conversational/data.ts
DELETED
@@ -1,66 +0,0 @@
-import type { TaskDataCustom } from "..";
-
-const taskData: TaskDataCustom = {
-	datasets: [
-		{
-			description:
-				"A dataset of 7k conversations explicitly designed to exhibit multiple conversation modes: displaying personality, having empathy, and demonstrating knowledge.",
-			id: "blended_skill_talk",
-		},
-		{
-			description:
-				"ConvAI is a dataset of human-to-bot conversations labeled for quality. This data can be used to train a metric for evaluating dialogue systems",
-			id: "conv_ai_2",
-		},
-		{
-			description: "EmpatheticDialogues, is a dataset of 25k conversations grounded in emotional situations",
-			id: "empathetic_dialogues",
-		},
-	],
-	demo: {
-		inputs: [
-			{
-				label: "Input",
-				content: "Hey my name is Julien! How are you?",
-				type: "text",
-			},
-		],
-		outputs: [
-			{
-				label: "Answer",
-				content: "Hi Julien! My name is Julia! I am well.",
-				type: "text",
-			},
-		],
-	},
-	metrics: [
-		{
-			description:
-				"BLEU score is calculated by counting the number of shared single or subsequent tokens between the generated sequence and the reference. Subsequent n tokens are called “n-grams”. Unigram refers to a single token while bi-gram refers to token pairs and n-grams refer to n subsequent tokens. The score ranges from 0 to 1, where 1 means the translation perfectly matched and 0 did not match at all",
-			id: "bleu",
-		},
-	],
-	models: [
-		{
-			description: "A faster and smaller model than the famous BERT model.",
-			id: "facebook/blenderbot-400M-distill",
-		},
-		{
-			description:
-				"DialoGPT is a large-scale pretrained dialogue response generation model for multiturn conversations.",
-			id: "microsoft/DialoGPT-large",
-		},
-	],
-	spaces: [
-		{
-			description: "A chatbot based on Blender model.",
-			id: "EXFINITE/BlenderBot-UI",
-		},
-	],
-	summary:
-		"Conversational response modelling is the task of generating conversational text that is relevant, coherent and knowledgable given a prompt. These models have applications in chatbots, and as a part of voice assistants",
-	widgetModels: ["facebook/blenderbot-400M-distill"],
-	youtubeId: "",
-};
-
-export default taskData;
packages/tasks/src/tasks/index.ts
CHANGED
@@ -1,9 +1,9 @@
-import {
+import type { PipelineType } from "../pipelines";
+import { PIPELINE_DATA } from "../pipelines";
 
 import audioClassification from "./audio-classification/data";
 import audioToAudio from "./audio-to-audio/data";
 import automaticSpeechRecognition from "./automatic-speech-recognition/data";
-import conversational from "./conversational/data";
 import documentQuestionAnswering from "./document-question-answering/data";
 import featureExtraction from "./feature-extraction/data";
 import fillMask from "./fill-mask/data";
@@ -45,14 +45,15 @@ export const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]> = {
 	"audio-classification": ["speechbrain", "transformers", "transformers.js"],
 	"audio-to-audio": ["asteroid", "speechbrain"],
 	"automatic-speech-recognition": ["espnet", "nemo", "speechbrain", "transformers", "transformers.js"],
-	conversational: ["transformers"],
 	"depth-estimation": ["transformers", "transformers.js"],
 	"document-question-answering": ["transformers", "transformers.js"],
 	"feature-extraction": ["sentence-transformers", "transformers", "transformers.js"],
 	"fill-mask": ["transformers", "transformers.js"],
 	"graph-ml": ["transformers"],
 	"image-classification": ["keras", "timm", "transformers", "transformers.js"],
+	"image-feature-extraction": ["timm", "transformers"],
 	"image-segmentation": ["transformers", "transformers.js"],
+	"image-text-to-text": ["transformers"],
 	"image-to-image": ["diffusers", "transformers", "transformers.js"],
 	"image-to-text": ["transformers", "transformers.js"],
 	"image-to-video": ["diffusers"],
@@ -122,7 +123,6 @@ export const TASKS_DATA: Record<PipelineType, TaskData | undefined> = {
 	"audio-classification": getData("audio-classification", audioClassification),
 	"audio-to-audio": getData("audio-to-audio", audioToAudio),
 	"automatic-speech-recognition": getData("automatic-speech-recognition", automaticSpeechRecognition),
-	conversational: getData("conversational", conversational),
 	"depth-estimation": getData("depth-estimation", depthEstimation),
 	"document-question-answering": getData("document-question-answering", documentQuestionAnswering),
 	"feature-extraction": getData("feature-extraction", featureExtraction),
@@ -130,6 +130,7 @@
 	"graph-ml": undefined,
 	"image-classification": getData("image-classification", imageClassification),
 	"image-segmentation": getData("image-segmentation", imageSegmentation),
+	"image-text-to-text": undefined,
 	"image-to-image": getData("image-to-image", imageToImage),
 	"image-to-text": getData("image-to-text", imageToText),
 	"image-to-video": undefined,
@@ -167,6 +168,7 @@
 	"zero-shot-object-detection": getData("zero-shot-object-detection", zeroShotObjectDetection),
 	"text-to-3d": getData("text-to-3d", placeholder),
 	"image-to-3d": getData("image-to-3d", placeholder),
+	"image-feature-extraction": getData("image-feature-extraction", placeholder),
 } as const;
 
 export interface ExampleRepo {
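Each entry in `TASKS_MODEL_LIBRARIES` lists the libraries whose snippets the Hub can show for that task. A tiny usage sketch, assuming these constants are re-exported from the package root like the other exports in this package (if not, import them from the tasks module directly):

```ts
import { TASKS_MODEL_LIBRARIES } from "@huggingface/tasks";

// Libraries suggested for the two tasks introduced in this commit.
console.log(TASKS_MODEL_LIBRARIES["image-text-to-text"]); // ["transformers"]
console.log(TASKS_MODEL_LIBRARIES["image-feature-extraction"]); // ["timm", "transformers"]
```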
packages/tasks/src/tokenizer-data.ts
ADDED
@@ -0,0 +1,24 @@
+export const SPECIAL_TOKENS_ATTRIBUTES = [
+	"bos_token",
+	"eos_token",
+	"unk_token",
+	"sep_token",
+	"pad_token",
+	"cls_token",
+	"mask_token",
+	// additional_special_tokens (TODO)
+] as const;
+
+/**
+ * Public interface for a tokenizer's special tokens mapping
+ */
+export type SpecialTokensMap = {
+	[key in (typeof SPECIAL_TOKENS_ATTRIBUTES)[number]]?: string;
+};
+/**
+ * Public interface for tokenizer config
+ */
+export interface TokenizerConfig extends SpecialTokensMap {
+	use_default_system_prompt?: boolean;
+	chat_template?: string;
+}
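The new `TokenizerConfig` shape mirrors the fields a model's `tokenizer_config.json` already carries. A sketch of how a consumer could load one into this shape; the fetch URL follows the Hub's usual resolve path but the helper itself is only an illustration, not part of this package:

```ts
import { SPECIAL_TOKENS_ATTRIBUTES, type TokenizerConfig } from "@huggingface/tasks";

// Illustrative only: read a repo's tokenizer_config.json and keep the fields
// declared in TokenizerConfig (special tokens, chat_template, system-prompt flag).
async function loadTokenizerConfig(repoId: string): Promise<TokenizerConfig> {
	const res = await fetch(`https://huggingface.co/${repoId}/resolve/main/tokenizer_config.json`);
	const raw = (await res.json()) as Record<string, unknown>;

	const config: TokenizerConfig = {};
	for (const key of SPECIAL_TOKENS_ATTRIBUTES) {
		const value = raw[key];
		// Some configs store special tokens as objects; only plain strings are kept here.
		if (typeof value === "string") {
			config[key] = value;
		}
	}
	if (typeof raw.chat_template === "string") {
		config.chat_template = raw.chat_template;
	}
	if (typeof raw.use_default_system_prompt === "boolean") {
		config.use_default_system_prompt = raw.use_default_system_prompt;
	}
	return config;
}
```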
packages/widgets/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
 	"name": "@huggingface/widgets",
 	"packageManager": "pnpm@8.10.5",
-	"version": "0.1.
+	"version": "0.1.4",
 	"publishConfig": {
 		"access": "public"
 	},
@@ -45,7 +45,8 @@
 		"static/audioProcessor.js"
 	],
 	"dependencies": {
-		"@huggingface/tasks": "workspace:^"
+		"@huggingface/tasks": "workspace:^",
+		"@huggingface/jinja": "workspace:^"
 	},
 	"peerDependencies": {
 		"svelte": "^3.59.2"
packages/widgets/pnpm-lock.yaml
CHANGED
@@ -5,6 +5,9 @@ settings:
   excludeLinksFromLockfile: false
 
 dependencies:
+  '@huggingface/jinja':
+    specifier: workspace:^
+    version: link:../jinja
   '@huggingface/tasks':
     specifier: workspace:^
     version: link:../tasks
packages/widgets/src/lib/components/Icons/IconImageAndTextToText.svelte
ADDED
@@ -0,0 +1,28 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		d="M29.707 19.293l-3-3a1 1 0 0 0-1.414 0L16 25.586V30h4.414l9.293-9.293a1 1 0 0 0 0-1.414zM19.586 28H18v-1.586l5-5L24.586 23zM26 21.586L24.414 20L26 18.414L27.586 20z"
+		fill="currentColor"
+	/>
+	<path
+		d="M20 13v-2h-2.142a3.94 3.94 0 0 0-.425-1.019l1.517-1.517l-1.414-1.414l-1.517 1.517A3.944 3.944 0 0 0 15 8.142V6h-2v2.142a3.944 3.944 0 0 0-1.019.425L10.464 7.05L9.05 8.464l1.517 1.517A3.94 3.94 0 0 0 10.142 11H8v2h2.142a3.94 3.94 0 0 0 .425 1.019L9.05 15.536l1.414 1.414l1.517-1.517a3.944 3.944 0 0 0 1.019.425V18h2v-2.142a3.944 3.944 0 0 0 1.019-.425l1.517 1.517l1.414-1.414l-1.517-1.517A3.94 3.94 0 0 0 17.858 13zm-6 1a2 2 0 1 1 2-2a2.002 2.002 0 0 1-2 2z"
+		fill="currentColor"
+	/>
+	<path
+		d="M12 30H6a2.002 2.002 0 0 1-2-2V4a2.002 2.002 0 0 1 2-2h16a2.002 2.002 0 0 1 2 2v10h-2V4H6v24h6z"
+		fill="currentColor"
+	/>
+</svg>
packages/widgets/src/lib/components/Icons/IconImageFeatureExtraction.svelte
ADDED
@@ -0,0 +1,22 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 24 24"
+>
+	<path
+		fill="currentColor"
+		d="M5 21q-.825 0-1.412-.587T3 19V5q0-.825.588-1.412T5 3h5v2H5v14h14v-5.35l2 2V19q0 .825-.587 1.413T19 21H5Zm1-4l3-4l2.25 3l3-4L18 17H6Zm15.55-3.6l-3.1-3.1q-.525.35-1.125.525T16.05 11q-1.85 0-3.15-1.312T11.6 6.5q0-1.875 1.313-3.187T16.1 2q1.875 0 3.188 1.313T20.6 6.5q0 .675-.2 1.3t-.5 1.15L22.95 12l-1.4 1.4ZM16.1 9q1.05 0 1.775-.725T18.6 6.5q0-1.05-.725-1.775T16.1 4q-1.05 0-1.775.725T13.6 6.5q0 1.05.725 1.775T16.1 9Z"
+	/>
+</svg>
packages/widgets/src/lib/components/InferenceWidget/InferenceWidget.svelte
CHANGED
@@ -26,7 +26,7 @@
 	import VisualQuestionAnsweringWidget from "./widgets/VisualQuestionAnsweringWidget/VisualQuestionAnsweringWidget.svelte";
 	import ZeroShotClassificationWidget from "./widgets/ZeroShotClassificationWidget/ZeroShotClassificationWidget.svelte";
 	import ZeroShotImageClassificationWidget from "./widgets/ZeroShotImageClassificationWidget/ZeroShotImageClassificationWidget.svelte";
-	import type {
+	import type { WidgetType } from "@huggingface/tasks";
 	import WidgetInfo from "./shared/WidgetInfo/WidgetInfo.svelte";
 
 	export let apiToken: WidgetProps["apiToken"] = undefined;
@@ -44,7 +44,7 @@
 	// In the future it may be useful / easier to maintain if we created
 	// a single dedicated widget for each pipeline type.
 	const WIDGET_COMPONENTS: {
-		[key in
+		[key in WidgetType]?: typeof SvelteComponent;
 	} = {
 		"audio-to-audio": AudioToAudioWidget,
 		"audio-classification": AudioClassificationWidget,
@@ -79,9 +79,11 @@
 	};
 
 	$: widgetComponent =
-		model.pipeline_tag && model.
-			?
-			:
+		model.pipeline_tag === "text-generation" && model.tags?.includes("conversational")
+			? (ConversationalWidget as typeof SvelteComponent)
+			: model.pipeline_tag && model.pipeline_tag in WIDGET_COMPONENTS
+			  ? WIDGET_COMPONENTS[model.pipeline_tag as keyof typeof WIDGET_COMPONENTS]
+			  : undefined;
 
 	// prettier-ignore
 	$: widgetProps = ({
packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputConvo/WidgetOutputConvo.svelte
CHANGED
@@ -5,9 +5,9 @@
 	import WidgetOutputConvoBubble from "../WidgetOuputConvoBubble/WidgetOutputConvoBubble.svelte";
 
 	export let modelId: string;
-	export let
+	export let messages: Array<{
+		role: string;
+		content: string;
 	}>;
 
 	let wrapperEl: HTMLElement;
@@ -25,9 +25,12 @@
 		<strong>{modelId}</strong>.
 	</div>
 	<div class="flex flex-col items-end space-y-4 p-3">
-		{#each
+		{#each messages as message}
+			{#if message.role === "user"}
+				<WidgetOutputConvoBubble position="right" text={message.content} />
+			{:else}
+				<WidgetOutputConvoBubble position="left" text={message.content} />
+			{/if}
 		{/each}
 	</div>
 </div>
|
packages/widgets/src/lib/components/InferenceWidget/widgets/ConversationalWidget/ConversationalWidget.svelte
CHANGED
@@ -1,6 +1,9 @@
|
|
1 |
<script lang="ts">
|
|
|
2 |
import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "../../shared/types.js";
|
3 |
-
import
|
|
|
|
|
4 |
|
5 |
import WidgetOutputConvo from "../../shared/WidgetOutputConvo/WidgetOutputConvo.svelte";
|
6 |
import WidgetQuickInput from "../../shared/WidgetQuickInput/WidgetQuickInput.svelte";
|
@@ -19,58 +22,91 @@
|
|
19 |
|
20 |
$: isDisabled = $widgetStates?.[model.id]?.isDisabled;
|
21 |
|
22 |
-
interface
|
23 |
-
|
24 |
-
|
25 |
}
|
26 |
-
interface Response {
|
27 |
-
conversation: Conversation;
|
28 |
-
generated_text: string;
|
29 |
-
}
|
30 |
-
|
31 |
-
type Output = Array<{
|
32 |
-
input: string;
|
33 |
-
response: string;
|
34 |
-
}>;
|
35 |
|
36 |
let computeTime = "";
|
37 |
-
let
|
38 |
-
generated_responses: string[];
|
39 |
-
past_user_inputs: string[];
|
40 |
-
} = {
|
41 |
-
generated_responses: [],
|
42 |
-
past_user_inputs: [],
|
43 |
-
};
|
44 |
let error: string = "";
|
45 |
let isLoading = false;
|
46 |
let modelLoading = {
|
47 |
isLoading: false,
|
48 |
estimatedTime: 0,
|
49 |
};
|
50 |
-
let output: Output = [];
|
51 |
let outputJson: string;
|
52 |
let text = "";
|
53 |
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
const
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
60 |
|
|
|
|
|
|
|
|
|
|
|
|
|
61 |
if (!trimmedText) {
|
62 |
return;
|
63 |
}
|
64 |
|
65 |
-
if (shouldUpdateUrl && !
|
66 |
updateUrl({ text: trimmedText });
|
67 |
}
|
68 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
69 |
const requestBody = {
|
70 |
-
inputs:
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
},
|
75 |
};
|
76 |
addInferenceParameters(requestBody, model);
|
@@ -82,7 +118,7 @@
|
|
82 |
model.id,
|
83 |
requestBody,
|
84 |
apiToken,
|
85 |
-
parseOutput,
|
86 |
withModelLoading,
|
87 |
includeCredentials,
|
88 |
isOnLoadCall
|
@@ -99,8 +135,7 @@
|
|
99 |
computeTime = res.computeTime;
|
100 |
outputJson = res.outputJson;
|
101 |
if (res.output) {
|
102 |
-
|
103 |
-
output = res.output.output;
|
104 |
}
|
105 |
// Emptying input value
|
106 |
text = "";
|
@@ -115,34 +150,28 @@
|
|
115 |
}
|
116 |
}
|
117 |
|
118 |
-
function
|
119 |
-
|
120 |
-
|
121 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
122 |
}
|
123 |
|
124 |
-
function
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
const generatedResponses = conversation.generated_responses;
|
132 |
-
const output = pastUserInputs
|
133 |
-
.filter(
|
134 |
-
(x, i) =>
|
135 |
-
x !== null && x !== undefined && generatedResponses[i] !== null && generatedResponses[i] !== undefined
|
136 |
-
)
|
137 |
-
.map((x, i) => ({
|
138 |
-
input: x ?? "",
|
139 |
-
response: generatedResponses[i] ?? "",
|
140 |
-
}));
|
141 |
-
return { conversation, output };
|
142 |
}
|
143 |
-
|
144 |
-
"Invalid output: output must be of type <conversation: <generated_responses:Array; past_user_inputs:Array>>"
|
145 |
-
);
|
146 |
}
|
147 |
|
148 |
function applyWidgetExample(sample: WidgetExampleTextInput, opts: ExampleRunOpts = {}) {
|
@@ -165,7 +194,7 @@
|
|
165 |
{applyWidgetExample}
|
166 |
validateExample={isTextInput}
|
167 |
/>
|
168 |
-
<WidgetOutputConvo modelId={model.id} {
|
169 |
|
170 |
<WidgetQuickInput
|
171 |
bind:value={text}
|
|
|
1 |
<script lang="ts">
|
2 |
+
import { onMount } from "svelte";
|
3 |
import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "../../shared/types.js";
|
4 |
+
import { Template } from "@huggingface/jinja";
|
5 |
+
import type { SpecialTokensMap, TokenizerConfig, WidgetExampleTextInput } from "@huggingface/tasks";
|
6 |
+
import { SPECIAL_TOKENS_ATTRIBUTES } from "@huggingface/tasks";
|
7 |
|
8 |
import WidgetOutputConvo from "../../shared/WidgetOutputConvo/WidgetOutputConvo.svelte";
|
9 |
import WidgetQuickInput from "../../shared/WidgetQuickInput/WidgetQuickInput.svelte";
|
|
|
22 |
|
23 |
$: isDisabled = $widgetStates?.[model.id]?.isDisabled;
|
24 |
|
25 |
+
interface Message {
|
26 |
+
role: string;
|
27 |
+
content: string;
|
28 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
|
30 |
let computeTime = "";
|
31 |
+
let messages: Message[] = [];
|
|
|
|
|
|
|
|
|
|
|
|
|
32 |
let error: string = "";
|
33 |
let isLoading = false;
|
34 |
let modelLoading = {
|
35 |
isLoading: false,
|
36 |
estimatedTime: 0,
|
37 |
};
|
|
|
38 |
let outputJson: string;
|
39 |
let text = "";
|
40 |
|
41 |
+
let compiledTemplate: Template;
|
42 |
+
let tokenizerConfig: TokenizerConfig;
|
43 |
+
|
44 |
+
// Check config and compile template
|
45 |
+
onMount(() => {
|
46 |
+
const config = model.config;
|
47 |
+
if (config === undefined) {
|
48 |
+
error = "Model config not found";
|
49 |
+
return;
|
50 |
+
}
|
51 |
+
|
52 |
+
if (config.tokenizer === undefined) {
|
53 |
+
error = "Tokenizer config not found";
|
54 |
+
return;
|
55 |
+
}
|
56 |
+
tokenizerConfig = config.tokenizer;
|
57 |
+
|
58 |
+
const chatTemplate = tokenizerConfig.chat_template;
|
59 |
+
if (chatTemplate === undefined) {
|
60 |
+
error = "No chat template found in tokenizer config";
|
61 |
+
return;
|
62 |
+
}
|
63 |
+
try {
|
64 |
+
compiledTemplate = new Template(chatTemplate);
|
65 |
+
} catch (e) {
|
66 |
+
error = `Invalid chat template: "${(e as Error).message}"`;
|
67 |
+
return;
|
68 |
+
}
|
69 |
+
});
|
70 |
|
71 |
+
async function getOutput({ withModelLoading = false, isOnLoadCall = false }: InferenceRunOpts = {}) {
|
72 |
+
if (!compiledTemplate) {
|
73 |
+
return;
|
74 |
+
}
|
75 |
+
|
76 |
+
const trimmedText = text.trim();
|
77 |
if (!trimmedText) {
|
78 |
return;
|
79 |
}
|
80 |
|
81 |
+
if (shouldUpdateUrl && !messages.length) {
|
82 |
updateUrl({ text: trimmedText });
|
83 |
}
|
84 |
|
85 |
+
if (!withModelLoading) {
|
86 |
+
// Add user message to chat
|
87 |
+
messages = [...messages, { role: "user", content: trimmedText }];
|
88 |
+
}
|
89 |
+
|
90 |
+
// Render chat template
|
91 |
+
const special_tokens_map = extractSpecialTokensMap(tokenizerConfig);
|
92 |
+
|
93 |
+
let chatText;
|
94 |
+
try {
|
95 |
+
chatText = compiledTemplate.render({
|
96 |
+
messages,
|
97 |
+
add_generation_prompt: true,
|
98 |
+
...special_tokens_map,
|
99 |
+
});
|
100 |
+
} catch (e) {
|
101 |
+
error = `An error occurred while rendering the chat template: "${(e as Error).message}"`;
|
102 |
+
return;
|
103 |
+
}
|
104 |
+
|
105 |
const requestBody = {
|
106 |
+
inputs: chatText,
|
107 |
+
parameters: {
|
108 |
+
return_full_text: false,
|
109 |
+
max_new_tokens: 100,
|
110 |
},
|
111 |
};
|
112 |
addInferenceParameters(requestBody, model);
|
|
|
118 |
model.id,
|
119 |
requestBody,
|
120 |
apiToken,
|
121 |
+
(body) => parseOutput(body, messages),
|
122 |
withModelLoading,
|
123 |
includeCredentials,
|
124 |
isOnLoadCall
|
|
|
135 |
computeTime = res.computeTime;
|
136 |
outputJson = res.outputJson;
|
137 |
if (res.output) {
|
138 |
+
messages = res.output;
|
|
|
139 |
}
|
140 |
// Emptying input value
|
141 |
text = "";
|
|
|
150 |
}
|
151 |
}
|
152 |
|
153 |
+
function parseOutput(body: unknown, chat: Message[]): Message[] {
|
154 |
+
if (Array.isArray(body) && body.length) {
|
155 |
+
const text = body[0]?.generated_text ?? "";
|
156 |
+
|
157 |
+
if (!text.length) {
|
158 |
+
throw new Error("Model did not generate a response.");
|
159 |
+
}
|
160 |
+
|
161 |
+
return [...chat, { role: "assistant", content: text }];
|
162 |
+
}
|
163 |
+
throw new TypeError("Invalid output: output must be of type Array & non-empty");
|
164 |
}
|
165 |
|
166 |
+
function extractSpecialTokensMap(tokenizerConfig: TokenizerConfig): SpecialTokensMap {
|
167 |
+
const specialTokensMap = Object.create(null);
|
168 |
+
for (const key of SPECIAL_TOKENS_ATTRIBUTES) {
|
169 |
+
const value = tokenizerConfig[key];
|
170 |
+
if (typeof value === "string") {
|
171 |
+
specialTokensMap[key] = value;
|
172 |
+
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
173 |
}
|
174 |
+
return specialTokensMap;
|
|
|
|
|
175 |
}
|
176 |
|
177 |
function applyWidgetExample(sample: WidgetExampleTextInput, opts: ExampleRunOpts = {}) {
|
|
|
194 |
{applyWidgetExample}
|
195 |
validateExample={isTextInput}
|
196 |
/>
|
197 |
+
<WidgetOutputConvo modelId={model.id} {messages} />
|
198 |
|
199 |
<WidgetQuickInput
|
200 |
bind:value={text}
|
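For reference, this is the core of what the rewritten widget does at request time: compile the model's `chat_template` once, then render the running `messages` array into a flat prompt string that is sent as `inputs` to the text-generation API. A standalone sketch using the zephyr-7b-beta template added to the demo page below; the exact rendered string is model-specific, so it is not spelled out here:

```ts
import { Template } from "@huggingface/jinja";

// The chat template shipped in zephyr-7b-beta's tokenizer_config.json
// (the same string used in the demo page further down in this diff).
const chatTemplate =
	"{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}";

// Compile once (the widget does this in onMount), then render per request.
const compiled = new Template(chatTemplate);
const prompt = compiled.render({
	messages: [{ role: "user", content: "Going to the movies tonight - any suggestions?" }],
	add_generation_prompt: true,
	eos_token: "</s>",
});

// `prompt` is the flat string the widget sends as `inputs`, together with
// parameters { return_full_text: false, max_new_tokens: 100 }.
console.log(prompt);
```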
packages/widgets/src/lib/components/PipelineIcon/PipelineIcon.svelte
CHANGED
@@ -7,6 +7,7 @@
 	import IconConversational from "../Icons/IconConversational.svelte";
 	import IconFeatureExtraction from "../Icons/IconFeatureExtraction.svelte";
 	import IconFillMask from "../Icons/IconFillMask.svelte";
+	import IconImageAndTextToText from "../Icons/IconImageAndTextToText.svelte";
 	import IconImageClassification from "../Icons/IconImageClassification.svelte";
 	import IconImageSegmentation from "../Icons/IconImageSegmentation.svelte";
 	import IconObjectDetection from "../Icons/IconObjectDetection.svelte";
@@ -40,13 +41,14 @@
 	import IconMaskGeneration from "../Icons/IconMaskGeneration.svelte";
 	import IconTextTo3D from "../Icons/IconTextTo3D.svelte";
 	import IconImageTo3D from "../Icons/IconImageTo3D.svelte";
-	import
+	import IconImageFeatureExtraction from "../Icons/IconImageFeatureExtraction.svelte";
+	import type { WidgetType } from "@huggingface/tasks";
 
 	export let classNames = "";
 	export let pipeline = "";
 
 	const ICON_COMPONENTS: {
-		[key in
+		[key in WidgetType]?: typeof SvelteComponent;
 	} = {
 		/// Keep same order as in huggingface_hub/Types.ts
 		/// for easy mapping.
@@ -75,6 +77,7 @@
 		"video-classification": IconVideoClassification,
 		"image-segmentation": IconImageSegmentation,
 		"text-to-image": IconTextToImage,
+		"image-text-to-text": IconImageAndTextToText,
 		"image-to-text": IconImageToText,
 		"image-to-image": IconImageToImage,
 		"image-to-video": IconImageToVideo,
@@ -90,6 +93,7 @@
 		"zero-shot-object-detection": IconZeroShotObjectDetection,
 		"text-to-3d": IconTextTo3D,
 		"image-to-3d": IconImageTo3D,
+		"image-feature-extraction": IconImageFeatureExtraction,
 	};
 
 	$: iconComponent =
packages/widgets/src/routes/+page.svelte
CHANGED
@@ -30,6 +30,23 @@
 	});
 
 	const models: ModelData[] = [
+		{
+			id: "HuggingFaceH4/zephyr-7b-beta",
+			pipeline_tag: "text-generation",
+			tags: ["conversational"],
+			inference: InferenceDisplayability.Yes,
+			config: {
+				tokenizer: {
+					bos_token: "<s>",
+					chat_template:
+						"{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+					eos_token: "</s>",
+					pad_token: "</s>",
+					unk_token: "<unk>",
+					use_default_system_prompt: true,
+				},
+			},
+		},
 		{
 			id: "WizardLM/WizardLM-70B-V1.0",
 			pipeline_tag: "text-generation",
@@ -315,7 +332,7 @@
 		},
 		{
 			id: "facebook/blenderbot-400M-distill",
-			pipeline_tag: "
+			pipeline_tag: "text2text-generation",
 			inference: InferenceDisplayability.Yes,
 			widgetData: [{ text: "Hey my name is Julien! How are you?" }],
 		},
|