Update app.py
app.py CHANGED
@@ -91,7 +91,7 @@ def launch_bot():
     # if response == 'The returned results did not contain sufficient information to be summarized into a useful answer for your query. Please try a different search or restate your query differently.':
     #st.write("reroute to LLM")
     #call in Mistral
-    prompt3 = prompt2 + "context:" + response
+    prompt3 = master_prompt + prompt2 + "context:" + response
     print("Called in Mistral")
     device = "cuda" # the device to load the model onto

@@ -109,7 +109,7 @@ def launch_bot():


     generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True)
-    st.write(tokenizer.batch_decode(generated_ids)[0])
+    st.write("Mistral:" + tokenizer.batch_decode(generated_ids)[0])
     # else:
     #     st.write(response)
     message = {"role": "assistant", "content": response}
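For context, here is a minimal sketch of how the two changed lines might fit together inside launch_bot(). The lines elided by the diff (98-108) are not shown, so the model checkpoint, the tokenizer setup, and the variables master_prompt, prompt2, and response are assumptions for illustration, not taken from the repository.

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda"  # the device to load the model onto, as in the diff context
# Assumed checkpoint; the diff does not show which Mistral weights the Space loads.
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1").to(device)

# master_prompt, prompt2, and response would come from earlier in launch_bot() (not shown here).
prompt3 = master_prompt + prompt2 + "context:" + response  # updated line 94
model_inputs = tokenizer(prompt3, return_tensors="pt").to(device)  # a dict, hence ** below
generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True)
st.write("Mistral:" + tokenizer.batch_decode(generated_ids)[0])  # updated line 112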