Locutusque committed
Commit • fc61ac0 • 1 Parent(s): f27c287
Update app.py
app.py CHANGED
@@ -24,7 +24,7 @@ def generate(
         prompt = user_input
     else:
         prompt = f"<|im_start|>system\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.<|im_end|>\n<|im_start|>user\n{user_input}<|im_end|>\n<|im_start|>assistant\n"
-    streamer = TextIteratorStreamer(tokenizer, timeout=240.0, skip_prompt=True, skip_special_tokens=True)
+    streamer = TextIteratorStreamer(pipe.tokenizer, timeout=240.0, skip_prompt=True, skip_special_tokens=True)
     generation_kwargs = dict(text_inputs=prompt, streamer=streamer, max_new_tokens=max_new_tokens, do_sample=True, top_p=top_p, top_k=top_k,
                              temperature=temperature, num_beams=1, repetition_penalty=repetition_penalty)
     t = Thread(target=pipe.__call__, kwargs=generation_kwargs)
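
Note: the changed line builds the TextIteratorStreamer from pipe.tokenizer rather than a free-standing tokenizer, so streamed chunks are decoded with the same tokenizer the pipeline actually uses. A minimal sketch of the streaming pattern this line sits in, assuming pipe is a transformers text-generation pipeline defined earlier in app.py (the model name and prompt below are placeholders for illustration, not the Space's real values):

from threading import Thread
from transformers import pipeline, TextIteratorStreamer

# Placeholder pipeline; app.py constructs its own `pipe` elsewhere.
pipe = pipeline("text-generation", model="gpt2")

prompt = "<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n"

# Build the streamer from the pipeline's own tokenizer (the point of this commit),
# so partial output is decoded consistently with the generating model.
streamer = TextIteratorStreamer(pipe.tokenizer, timeout=240.0, skip_prompt=True, skip_special_tokens=True)

generation_kwargs = dict(text_inputs=prompt, streamer=streamer, max_new_tokens=64, do_sample=True)

# Generation runs in a background thread; the main thread consumes tokens as they arrive.
t = Thread(target=pipe.__call__, kwargs=generation_kwargs)
t.start()
for chunk in streamer:
    print(chunk, end="", flush=True)
t.join()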