coyotte508 and olivierdehaene committed
Commit abaaa3e
1 Parent(s): 572bb0e

🔧 Change separator token to </s> (#127)


Co-authored-by: OlivierDehaene <Olivier.dehaene@gmail.com>

.env CHANGED
@@ -12,7 +12,7 @@ PUBLIC_MODEL_ID=OpenAssistant/oasst-sft-6-llama-30b-xor # used to link to model
 PUBLIC_DISABLE_INTRO_TILES=false
 PUBLIC_USER_MESSAGE_TOKEN=<|prompter|>
 PUBLIC_ASSISTANT_MESSAGE_TOKEN=<|assistant|>
-PUBLIC_SEP_TOKEN=<|endoftext|>
+PUBLIC_SEP_TOKEN=</s>
 PUBLIC_PREPROMPT="Below are a series of dialogues between various people and an AI assistant. The AI tries to be helpful, polite, honest, sophisticated, emotionally aware, and humble-but-knowledgeable. The assistant is happy to help with almost anything, and will do its best to understand exactly what is needed. It also tries to avoid giving false or misleading information, and it caveats when it isn't entirely sure about the right answer. That said, the assistant is practical and really does its best, and doesn't let caution get too much in the way of being useful."

 # [{"endpoint": "https://api-inference.huggingface.co/models/...", authorization: "Bearer hf_<token>", weight: 1}] to load balance
src/routes/conversation/[id]/+page.svelte CHANGED
@@ -6,7 +6,7 @@
 import { textGenerationStream } from "@huggingface/inference";
 import { invalidate } from "$app/navigation";
 import { base } from "$app/paths";
-import { PUBLIC_MAX_INPUT_TOKENS } from "$env/static/public";
+import { PUBLIC_MAX_INPUT_TOKENS, PUBLIC_SEP_TOKEN } from "$env/static/public";
 import { shareConversation } from "$lib/shareConversation";
 import { UrlDependency } from "$lib/types/UrlDependency";
 import { error } from "$lib/stores/errors";
@@ -43,7 +43,7 @@
 truncate: parseInt(PUBLIC_MAX_INPUT_TOKENS),
 watermark: false,
 max_new_tokens: 1024,
-stop: ["<|endoftext|>"],
+stop: [PUBLIC_SEP_TOKEN],
 return_full_text: false,
 },
 },
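
As a usage sketch, this is roughly how the updated parameters drive the streaming call. The textGenerationStream import and the parameters object come straight from the diff above; the streamAnswer wrapper and the token-filtering logic are assumptions for illustration, not the component's exact code.

```ts
import { textGenerationStream } from "@huggingface/inference";
import { PUBLIC_MAX_INPUT_TOKENS, PUBLIC_SEP_TOKEN } from "$env/static/public";

// Hypothetical wrapper: stream tokens until the model emits the stop
// sequence, accumulating the visible text.
async function streamAnswer(model: string, prompt: string): Promise<string> {
  let answer = "";
  for await (const output of textGenerationStream({
    model,
    inputs: prompt,
    parameters: {
      truncate: parseInt(PUBLIC_MAX_INPUT_TOKENS),
      watermark: false,
      max_new_tokens: 1024,
      // Generation now halts on </s> instead of <|endoftext|>.
      stop: [PUBLIC_SEP_TOKEN],
      return_full_text: false,
    },
  })) {
    // Stop sequences arrive flagged as special tokens; skip them.
    if (!output.token.special) {
      answer += output.token.text;
    }
  }
  return answer;
}
```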
src/routes/conversation/[id]/summarize/+server.ts CHANGED
@@ -1,4 +1,4 @@
-import { PUBLIC_MAX_INPUT_TOKENS } from "$env/static/public";
+import { PUBLIC_MAX_INPUT_TOKENS, PUBLIC_SEP_TOKEN } from "$env/static/public";
 import { buildPrompt } from "$lib/buildPrompt";
 import { collections } from "$lib/server/database.js";
 import { modelEndpoint } from "$lib/server/modelEndpoint.js";
@@ -34,7 +34,7 @@ export async function POST({ params, locals, fetch }) {
 watermark: false,
 max_new_tokens: 1024,
 truncate: parseInt(PUBLIC_MAX_INPUT_TOKENS),
-stop: ["<|endoftext|>"],
+stop: [PUBLIC_SEP_TOKEN],
 return_full_text: false,
 };
 
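
The summarize endpoint builds the same parameters object for a one-shot request. Below is a hedged sketch of how such a request could be sent to the configured model endpoint; the summarize helper, header handling, and response unpacking are assumptions (TGI-style servers respond with [{ generated_text }]), not the repo's exact code.

```ts
import { PUBLIC_MAX_INPUT_TOKENS, PUBLIC_SEP_TOKEN } from "$env/static/public";

// Same generation parameters as the chat route, reused for summarization.
const parameters = {
  watermark: false,
  max_new_tokens: 1024,
  truncate: parseInt(PUBLIC_MAX_INPUT_TOKENS),
  stop: [PUBLIC_SEP_TOKEN], // halt on the new separator
  return_full_text: false,
};

// Hypothetical one-shot request against a TGI-style endpoint.
async function summarize(
  endpoint: string,
  authorization: string,
  prompt: string
): Promise<string> {
  const resp = await fetch(endpoint, {
    method: "POST",
    headers: { "Content-Type": "application/json", Authorization: authorization },
    body: JSON.stringify({ inputs: prompt, parameters }),
  });
  // Assumed response shape: [{ generated_text: string }].
  const [{ generated_text }] = await resp.json();
  return generated_text.trim();
}
```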