Commit 86574c0 (1 parent: e9a4671), committed by mishig (HF staff)

Settings dynamically change (#27)

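In short, the commit replaces the two-way `bind:` on the model selector with a dispatched `modelIdxChange` event plus an immutable reassignment of `conversations[0]`, so that reactive statements (such as the new `$:` snippet generation) re-run when the model changes. The following is a minimal sketch of that Svelte pattern with hypothetical `ModelPicker.svelte` / parent names, not the playground's actual components:

<!-- ModelPicker.svelte (hypothetical child): dispatch the selected index instead of binding -->
<script lang="ts">
	import { createEventDispatcher } from 'svelte';

	export let models: { id: string }[] = [];
	const dispatch = createEventDispatcher<{ modelIdxChange: number }>();
</script>

<select on:change={(e) => dispatch('modelIdxChange', e.currentTarget.selectedIndex)}>
	{#each models as model}
		<option value={model.id}>{model.id}</option>
	{/each}
</select>

<!-- Parent (hypothetical): reassign the object so `$:` dependents recompute -->
<script lang="ts">
	import ModelPicker from './ModelPicker.svelte';

	let models = [{ id: 'model-a' }, { id: 'model-b' }];
	let conversation = { model: models[0] };

	// Reassignment (not mutation) is what triggers Svelte reactivity here,
	// so anything declared with `$:` over `conversation` updates as well.
	$: selectedModelId = conversation.model.id;
</script>

<ModelPicker {models} on:modelIdxChange={(e) => (conversation = { ...conversation, model: models[e.detail] })} />
<p>{selectedModelId}</p>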
src/lib/components/InferencePlayground/InferencePlayground.svelte CHANGED
@@ -194,6 +194,10 @@
 			submit();
 		}
 	}
+
+	function changeSelectedModel(modelIdx: number) {
+		conversations[0] = { ...conversations[0], model: models[modelIdx] };
+	}
 </script>
 
 <svelte:window on:keydown={onKeydown} />
@@ -364,7 +368,7 @@
 			>
 				<PlaygroundModelSelector
 					compatibleModels={models}
-					bind:currentModel={conversations[0].model}
+					on:modelIdxChange={(e) => changeSelectedModel(e.detail)}
 				/>
 				<div
 					class="group relative -mt-4 flex h-[26px] w-full items-center justify-center gap-2 rounded-lg bg-black px-5 text-sm text-white hover:bg-gray-900 focus:outline-none focus:ring-4 focus:ring-gray-300 dark:border-gray-700 dark:bg-blue-600 dark:hover:bg-blue-700 dark:focus:ring-gray-700"
src/lib/components/InferencePlayground/InferencePlaygroundCodeSnippets.svelte CHANGED
@@ -25,10 +25,10 @@
 		language?: Language;
 	}
 
-	const snippetsByLanguage: Record<Language, Snippet[]> = {
-		javascript: getJavascriptSnippets(),
-		python: getPythonSnippets(),
-		bash: getBashSnippets()
+	$: snippetsByLanguage = {
+		javascript: getJavascriptSnippets(conversation),
+		python: getPythonSnippets(conversation),
+		bash: getBashSnippets(conversation)
 	};
 
 	let selectedLanguage: Language = 'javascript';
@@ -46,7 +46,7 @@
 		return hljs.highlight(code, { language }).value;
 	}
 
-	function getJavascriptSnippets() {
+	function getJavascriptSnippets(conversation: Conversation) {
 		const messagesStr = getMessages().replace(/"([^"]+)":/g, '$1:');
 		const snippets: Snippet[] = [];
 		snippets.push({
@@ -66,7 +66,7 @@ const inference = new HfInference("your HF token")
 let out = "";
 
 for await (const chunk of inference.chatCompletionStream({
-	model: "${conversation.model}",
+	model: "${conversation.model.id}",
 	messages: ${messagesStr},
 	temperature: ${conversation.config.temperature},
 	max_tokens: ${conversation.config.maxTokens},
@@ -89,7 +89,7 @@ for await (const chunk of inference.chatCompletionStream({
 const inference = new HfInference("your access token")
 
 const out = await inference.chatCompletion({
-	model: "${conversation.model}",
+	model: "${conversation.model.id}",
 	messages: ${messagesStr},
 	temperature: ${conversation.config.temperature},
 	max_tokens: ${conversation.config.maxTokens},
@@ -103,7 +103,7 @@ console.log(out.choices[0].message);`
 		return snippets;
 	}
 
-	function getPythonSnippets() {
+	function getPythonSnippets(conversation: Conversation) {
 		const messagesStr = getMessages();
 		const snippets: Snippet[] = [];
 		snippets.push({
@@ -116,7 +116,7 @@ console.log(out.choices[0].message);`
 			label: 'Streaming API',
 			code: `from huggingface_hub import InferenceClient
 
-model_id="${conversation.model}"
+model_id="${conversation.model.id}"
 hf_token = "your HF token"
 inference_client = InferenceClient(model_id, token=hf_token)
 
@@ -135,7 +135,7 @@ for token in client.chat_completion(messages, stream=True, temperature=${convers
 			label: 'Non-Streaming API',
 			code: `from huggingface_hub import InferenceClient
 
-model_id="${conversation.model}"
+model_id="${conversation.model.id}"
 hf_token = "your HF token"
 inference_client = InferenceClient(model_id, token=hf_token)
 
@@ -150,14 +150,14 @@ print(output.choices[0].message)`
 		return snippets;
 	}
 
-	function getBashSnippets() {
+	function getBashSnippets(conversation: Conversation) {
 		const messagesStr = getMessages();
 		const snippets: Snippet[] = [];
 
 		if (conversation.config.streaming) {
 			snippets.push({
 				label: 'Streaming API',
-				code: `curl 'https://api-inference.huggingface.co/models/${conversation.model}/v1/chat/completions' \
+				code: `curl 'https://api-inference.huggingface.co/models/${conversation.model.id}/v1/chat/completions' \
 --header "Authorization: Bearer {YOUR_HF_TOKEN}" \
 --header 'Content-Type: application/json' \
 --data '{
@@ -172,7 +172,7 @@ print(output.choices[0].message)`
 			// non-streaming
 			snippets.push({
 				label: 'Non-Streaming API',
-				code: `curl 'https://api-inference.huggingface.co/models/${conversation.model}/v1/chat/completions' \
+				code: `curl 'https://api-inference.huggingface.co/models/${conversation.model.id}/v1/chat/completions' \
 --header "Authorization: Bearer {YOUR_HF_TOKEN}" \
 --header 'Content-Type: application/json' \
 --data '{
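Why the snippet builders now take `conversation` as a parameter: Svelte's `$:` declarations only track variables referenced lexically in the reactive statement itself, so the dependency has to appear there rather than only inside the function body. A minimal sketch with hypothetical names (not the playground's code):

<script lang="ts">
	export let conversation: { model: { id: string } };

	// Re-runs whenever `conversation` is reassigned, because it is referenced here.
	$: snippet = buildSnippet(conversation);

	// Would NOT re-run if written as `$: snippet = buildSnippet();`
	// with `conversation` only read inside the function body.
	function buildSnippet(c: { model: { id: string } }): string {
		return `model: "${c.model.id}"`;
	}
</script>

<pre>{snippet}</pre>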
src/lib/components/InferencePlayground/InferencePlaygroundModelSelector.svelte CHANGED
@@ -1,9 +1,11 @@
 <script lang="ts">
 	import { type ModelEntry } from '@huggingface/hub';
+	import { createEventDispatcher } from 'svelte';
 
 	export let compatibleModels: ModelEntry[] = [];
-	export let currentModel = compatibleModels[0];
 	export let disabled = false;
+
+	const dispatch = createEventDispatcher<{ modelIdxChange: number }>();
 </script>
 
 <div>
@@ -14,8 +16,8 @@
 	</label>
 	<select
 		{disabled}
-		bind:value={currentModel}
 		class="block w-full rounded-lg border border-gray-300 bg-gray-50 p-2.5 text-sm text-gray-900 focus:border-blue-500 focus:ring-blue-500 dark:border-gray-600 dark:bg-gray-700 dark:text-white dark:placeholder-gray-400 dark:focus:border-blue-500 dark:focus:ring-blue-500"
+		on:change={(e) => dispatch('modelIdxChange', e.currentTarget.selectedIndex)}
 	>
 		{#each compatibleModels as model}
 			<option value={model.id}>{model.id}</option>