not-lain committed on
Commit
873314a
1 Parent(s): 7019db3

add generation prompt

Browse files

Let's not rely on the model to handle the entire prompt on its own, including the generation prompt; some things are better set explicitly (here, `add_generation_prompt=True`) to avoid edge cases.

Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -77,7 +77,7 @@ def chat_llama3_8b(message: str,
77
  conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
78
  conversation.append({"role": "user", "content": message})
79
 
80
- input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
81
 
82
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
83
 
 
77
  conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
78
  conversation.append({"role": "user", "content": message})
79
 
80
+ input_ids = tokenizer.apply_chat_template(conversation,add_generation_prompt=True, return_tensors="pt").to(model.device)
81
 
82
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
83