jonathanjordan21 committed
Commit f9a4cc4 · 1 parent: 968db75
Update app.py
app.py CHANGED

@@ -9,7 +9,6 @@ from custom_llm import CustomLLM, custom_chain_with_history
 
 API_TOKEN = os.getenv('HF_INFER_API')
 
-#API_URL = "https://api-inference.huggingface.co/models/gpt2"
 
 from typing import Optional
 
@@ -48,10 +47,10 @@ if prompt := st.chat_input("Ask me anything.."):
     response = st.session_state.chain.invoke(prompt).split("\n<|")[0]
 
     # Display assistant response in chat message container
-    with st.chat_message("
+    with st.chat_message("assistant"):
         st.markdown(response)
         st.session_state.memory.save_context({"question":prompt}, {"output":prompt})
         st.session_state.memory.chat_memory.messages = st.session_state.memory.chat_memory.messages[-15:]
     # Add assistant response to chat history
-    st.session_state.messages.append({"role": "
+    st.session_state.messages.append({"role": "assistant", "content": response})
 
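For context, the sketch below shows roughly how the patched lines sit inside the Streamlit chat loop. Only the identifiers visible in the diff (CustomLLM, custom_chain_with_history, API_TOKEN, the session-state keys chain, memory, and messages, and the two corrected lines) come from app.py; the memory class, the constructor arguments, and the history re-rendering loop are assumptions added for illustration.

# Minimal sketch of the surrounding chat loop in app.py (assumed structure).
# Only the names visible in the diff are taken from the real file; the memory
# class, constructor arguments, and history-rendering loop are illustrative.
import os

import streamlit as st
from langchain.memory import ConversationBufferMemory  # assumed memory backend

from custom_llm import CustomLLM, custom_chain_with_history  # shown in the hunk header

API_TOKEN = os.getenv('HF_INFER_API')

if "memory" not in st.session_state:
    st.session_state.memory = ConversationBufferMemory(return_messages=True)
if "chain" not in st.session_state:
    # Hypothetical signature; the real wiring lives in custom_llm.py.
    st.session_state.chain = custom_chain_with_history(
        llm=CustomLLM(api_token=API_TOKEN),
        memory=st.session_state.memory,
    )
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay earlier turns on every rerun so the transcript stays visible.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Ask me anything.."):
    # Show and record the user turn.
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Invoke the chain and cut the output at the first special token.
    response = st.session_state.chain.invoke(prompt).split("\n<|")[0]

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response)
        # As in the diff, the prompt is saved as both input and output,
        # and the stored history is capped at the last 15 messages.
        st.session_state.memory.save_context({"question": prompt}, {"output": prompt})
        st.session_state.memory.chat_memory.messages = st.session_state.memory.chat_memory.messages[-15:]
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})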