lingyit1108 committed
Commit: c6352d6
1 Parent(s): c3572db

to fix minor bug

Files changed (1):
  streamlit_app.py +2 -2
streamlit_app.py CHANGED
@@ -40,7 +40,7 @@ def clear_chat_history():
     st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
 st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
 
-def generate_llm_response(prompt_input):
+def generate_llm_response(client, prompt_input):
     system_content = ("You are a helpful assistant. "
                       "You do not respond as 'User' or pretend to be 'User'. "
                       "You only respond once as 'Assistant'."
@@ -67,7 +67,7 @@ if prompt := st.chat_input(disabled=not openai_api):
     if st.session_state.messages[-1]["role"] != "assistant":
         with st.chat_message("assistant"):
             with st.spinner("Thinking..."):
-                response = generate_llm_response(prompt)
+                response = generate_llm_response(client, prompt)
                 placeholder = st.empty()
                 full_response = ''
                 for chunk in response:
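
For context, the "minor bug" is that generate_llm_response was called without the client it needs; the fix threads the OpenAI client through as an explicit argument at both the definition and the call site. The sketch below shows how the pieces are assumed to fit together around the two changed lines. Only generate_llm_response, openai_api, and the call site appear in the diff; the client construction, the model name, and the streaming/delta handling are assumptions for illustration, not the repository's exact code.

# Minimal sketch of the call chain after the fix; anything not shown in the
# diff above (OpenAI(), the model name, the delta handling) is an assumption.
import streamlit as st
from openai import OpenAI

openai_api = st.sidebar.text_input("OpenAI API key", type="password")
client = OpenAI(api_key=openai_api) if openai_api else None

if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]

def generate_llm_response(client, prompt_input):
    system_content = ("You are a helpful assistant. "
                      "You do not respond as 'User' or pretend to be 'User'. "
                      "You only respond once as 'Assistant'.")
    # Stream the completion so the UI can render it incrementally.
    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "system", "content": system_content},
                  {"role": "user", "content": prompt_input}],
        stream=True,
    )

if prompt := st.chat_input(disabled=not openai_api):
    st.session_state.messages.append({"role": "user", "content": prompt})
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                # The client is now passed explicitly instead of being
                # looked up as a global inside generate_llm_response.
                response = generate_llm_response(client, prompt)
                placeholder = st.empty()
                full_response = ''
                for chunk in response:
                    full_response += chunk.choices[0].delta.content or ""
                    placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})

Passing the client explicitly also plays well with the disabled=not openai_api guard on st.chat_input: the chat stays disabled until a key is entered, so the function is presumably never reached with client set to None.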