Spaces: research14 · Sleeping
committed on
Commit · cfed1ed
1 Parent(s): 68440cc
test removing prompt from bot output
app.py CHANGED
@@ -75,6 +75,8 @@ def llama_respond(tab_name, message, chat_history):
     input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
     output_ids = llama_model.generate(input_ids, do_sample=True, max_length=149, num_beams=5, no_repeat_ngram_size=2)
     bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
+    # Remove formatted prompt from bot_message
+    bot_message = bot_message.replace(formatted_prompt, '')
     print(bot_message)
 
     chat_history.append((formatted_prompt, bot_message))
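For context (not part of this commit): with Hugging Face transformers, generate() on a causal LM returns the prompt tokens followed by the continuation, which is why the decoded bot_message begins with formatted_prompt. A minimal sketch of an alternative approach, assuming the same llama_tokenizer and llama_model objects from app.py, slices the prompt off at the token level instead of using str.replace:

    # Sketch (assumption, not what this commit does): decode only the
    # newly generated tokens rather than stripping the prompt afterwards.
    input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
    output_ids = llama_model.generate(
        input_ids,
        do_sample=True,
        max_length=149,
        num_beams=5,
        no_repeat_ngram_size=2,
    )
    # generate() output = prompt tokens + continuation, so skip the
    # first input_ids.shape[-1] tokens before decoding.
    new_tokens = output_ids[0][input_ids.shape[-1]:]
    bot_message = llama_tokenizer.decode(new_tokens, skip_special_tokens=True)

Token-level slicing avoids the edge case where decoding and re-encoding changes whitespace or special tokens, so the decoded text no longer matches formatted_prompt exactly and str.replace leaves the prompt in the reply.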