[system prompts] Support default system prompts (#56)
src/lib/components/InferencePlayground/InferencePlayground.svelte
CHANGED
@@ -13,7 +13,7 @@
 	} from "./inferencePlaygroundUtils";
 
 	import { onDestroy, onMount } from "svelte";
-	import GenerationConfig from "./InferencePlaygroundGenerationConfig.svelte";
+	import GenerationConfig, { defaultSystemMessage } from "./InferencePlaygroundGenerationConfig.svelte";
 	import HFTokenModal from "./InferencePlaygroundHFTokenModal.svelte";
 	import ModelSelector from "./InferencePlaygroundModelSelector.svelte";
 	import PlaygroundConversation from "./InferencePlaygroundConversation.svelte";
@@ -29,10 +29,12 @@
 	export let models: ModelEntryWithTokenizer[];
 
 	const startMessageUser: ChatCompletionInputMessage = { role: "user", content: "" };
-	const startMessageSystem: ChatCompletionInputMessage = { role: "system", content: "" };
-
 	const modelIdsFromQueryParam = $page.url.searchParams.get("modelId")?.split(",");
 	const modelsFromQueryParam = modelIdsFromQueryParam?.map(id => models.find(model => model.id === id));
+	const systemMessage: ChatCompletionInputMessage = {
+		role: "system",
+		content: modelIdsFromQueryParam ? (defaultSystemMessage?.[modelIdsFromQueryParam[0]] ?? "") : "",
+	};
 
 	let session: Session = {
 		conversations: [
@@ -40,7 +42,7 @@
 				model: models.find(m => FEATURED_MODELS_IDS.includes(m.id)) ?? models[0],
 				config: { ...defaultGenerationConfig },
 				messages: [{ ...startMessageUser }],
-				systemMessage: { ...startMessageSystem },
+				systemMessage,
 				streaming: true,
 			},
 		],
@@ -52,7 +54,7 @@
 			model,
 			config: { ...defaultGenerationConfig },
 			messages: [{ ...startMessageUser }],
-			systemMessage: { ...startMessageSystem },
+			systemMessage,
 			streaming: true,
 		};
 	}) as [Conversation] | [Conversation, Conversation];
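Note on the change above: the single empty startMessageSystem constant is removed, and the initial systemMessage is instead seeded from the first modelId in the "?modelId=" query parameter, falling back to an empty prompt for models without an entry in defaultSystemMessage. A minimal standalone TypeScript sketch of that lookup follows; the initialSystemMessage helper and the inline message type are illustrative, not part of the PR, while the map entry is copied from the diff below.

// Illustrative helper, not in the PR: resolves the initial system message from a query string.
type ChatCompletionInputMessage = { role: "system" | "user" | "assistant"; content: string };

const defaultSystemMessage: { [key: string]: string } = {
	"Qwen/QwQ-32B-Preview":
		"You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.",
};

function initialSystemMessage(search: string): ChatCompletionInputMessage {
	const modelIds = new URLSearchParams(search).get("modelId")?.split(",");
	return {
		role: "system",
		// Only the first modelId is consulted; unknown models fall back to an empty prompt.
		content: modelIds ? (defaultSystemMessage?.[modelIds[0]] ?? "") : "",
	};
}

console.log(initialSystemMessage("?modelId=Qwen/QwQ-32B-Preview").content); // Qwen default prompt
console.log(initialSystemMessage("?modelId=some/other-model").content); // ""
console.log(initialSystemMessage("").content); // ""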
src/lib/components/InferencePlayground/InferencePlaygroundGenerationConfig.svelte
CHANGED
@@ -1,3 +1,10 @@
+<script context="module" lang="ts">
+	export const defaultSystemMessage: { [key: string]: string } = {
+		"Qwen/QwQ-32B-Preview":
+			"You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.",
+	} as const;
+</script>
+
 <script lang="ts">
 	import type { Conversation } from "$lib/components/InferencePlayground/types";
 
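The map lives in a context="module" script, so it is module-level state shared by all instances of the component and can be imported by name from the .svelte file alongside the component's default export. A short sketch of how the export is consumed, assuming a Svelte build that resolves .svelte imports; the import line matches the one added to InferencePlayground.svelte above, and the lookup mirrors the call sites:

// The named export from the module-context script sits next to the component's default export.
import GenerationConfig, { defaultSystemMessage } from "./InferencePlaygroundGenerationConfig.svelte";

// Unlisted model ids resolve to undefined at runtime, hence the `?? ""` fallbacks at every call site.
const prompt: string = defaultSystemMessage["Qwen/QwQ-32B-Preview"] ?? "";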
src/lib/components/InferencePlayground/InferencePlaygroundModelSelector.svelte
CHANGED
@@ -7,6 +7,7 @@
 	import IconCaret from "../Icons/IconCaret.svelte";
 	import ModelSelectorModal from "./InferencePlaygroundModelSelectorModal.svelte";
 	import Avatar from "../Avatar.svelte";
+	import { defaultSystemMessage } from "./InferencePlaygroundGenerationConfig.svelte";
 
 	export let models: ModelEntryWithTokenizer[] = [];
 	export let conversation: Conversation;
@@ -19,6 +20,7 @@
 			return;
 		}
 		conversation.model = model;
+		conversation.systemMessage = { role: "system", content: defaultSystemMessage?.[modelId] ?? "" };
 
 		const url = new URL($page.url);
 		url.searchParams.set("modelId", model.id);
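With the selector change above, switching models also overwrites the conversation's current system prompt with the new model's default, or clears it when the model has no entry. A hedged sketch of that behaviour as a plain function; onModelChange, its parameters, and the trimmed-down ConversationLike type are simplifications, and only the systemMessage assignment is taken verbatim from the diff:

import { defaultSystemMessage } from "./InferencePlaygroundGenerationConfig.svelte";

type Message = { role: "system" | "user" | "assistant"; content: string };

interface ConversationLike {
	model: { id: string };
	systemMessage: Message;
}

// Simplified stand-in for the selector's model-change handler.
function onModelChange(conversation: ConversationLike, model: { id: string }): void {
	const modelId = model.id;
	conversation.model = model;
	// Picking a new model replaces the existing system prompt with that model's default, or "" if none is defined.
	conversation.systemMessage = { role: "system", content: defaultSystemMessage?.[modelId] ?? "" };
}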