mishig (HF staff) committed
Commit e08d702
1 Parent(s): 05476c1

Render current messages in snippets (#19)

src/lib/components/CodeSnippets.svelte CHANGED
@@ -33,11 +33,18 @@
 
 	let selectedLanguage: Language = 'javascript';
 
+	function getMessages(){
+		const placeholder = [{ role: "user", content: "Tell me a story" }];
+		const messages = conversation.messages.length ? conversation.messages : placeholder;
+		return JSON.stringify(messages, null, 2);
+	}
+
 	function highlight(code: string, language: Language) {
 		return hljs.highlight(code, { language }).value;
 	}
 
 	function getJavascriptSnippets() {
+		const messagesStr = getMessages().replace(/"([^"]+)":/g, '$1:');
 		const snippets: Snippet[] = [];
 		snippets.push({
 			label: 'Install @huggingface/inference',
@@ -57,9 +64,7 @@ let out = "";
 
 for await (const chunk of inference.chatCompletionStream({
   model: "${conversation.model}",
-  messages: [
-    { role: "user", content: "Tell me a story" },
-  ],
+  messages: ${messagesStr},
   max_tokens: ${conversation.config.maxTokens},
   temperature: ${conversation.config.temperature},
   seed: 0,
@@ -81,9 +86,7 @@ const inference = new HfInference("your access token")
 
 const out = await inference.chatCompletion({
   model: "${conversation.model}",
-  messages: [
-    { role: "user", content: "Who are you?" }
-  ],
+  messages: ${messagesStr},
   max_tokens: ${conversation.config.maxTokens},
   temperature: ${conversation.config.temperature},
   seed: 0,
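
For context, the new getMessages() helper serializes the live conversation (falling back to a placeholder prompt when it is empty), and getJavascriptSnippets() then strips the quotes from the JSON keys so the rendered snippet reads like hand-written JavaScript rather than JSON. A minimal standalone sketch of that transform follows; the Message type and the renderMessages name are illustrative here, not part of the commit:

// Illustrative sketch of the transform applied in CodeSnippets.svelte.
type Message = { role: string; content: string };

function renderMessages(messages: Message[]): string {
	// Fall back to a placeholder prompt when the conversation is empty.
	const placeholder: Message[] = [{ role: "user", content: "Tell me a story" }];
	const source = messages.length ? messages : placeholder;
	// Pretty-print, then unquote the object keys so the snippet reads like
	// JavaScript source: '"role": "user"' becomes 'role: "user"'.
	return JSON.stringify(source, null, 2).replace(/"([^"]+)":/g, '$1:');
}

console.log(renderMessages([{ role: "user", content: "Who are you?" }]));
// [
//   {
//     role: "user",
//     content: "Who are you?"
//   }
// ]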