lingyit1108 committed on
Commit
09f2e2a
1 Parent(s): b6cdc6a

corrected code to generate openai completion delta content

Browse files
Files changed (1) hide show
  1. streamlit_app.py +3 -3
streamlit_app.py CHANGED
@@ -54,7 +54,7 @@ def generate_llm_response(prompt_input):
54
  temperature=temperature,
55
  stream=True
56
  )
57
- return completion.choices[0].message.content
58
 
59
  # User-provided prompt
60
  if prompt := st.chat_input(disabled=not openai_api):
@@ -70,8 +70,8 @@ if st.session_state.messages[-1]["role"] != "assistant":
70
  response = generate_llm_response(prompt)
71
  placeholder = st.empty()
72
  full_response = ''
73
- for item in response:
74
- full_response += item
75
  placeholder.markdown(full_response)
76
  placeholder.markdown(full_response)
77
  message = {"role": "assistant", "content": full_response}
 
54
  temperature=temperature,
55
  stream=True
56
  )
57
+ return completion
58
 
59
  # User-provided prompt
60
  if prompt := st.chat_input(disabled=not openai_api):
 
70
  response = generate_llm_response(prompt)
71
  placeholder = st.empty()
72
  full_response = ''
73
+ for chunk in response:
74
+ full_response += chunk.choices[0].delta.content
75
  placeholder.markdown(full_response)
76
  placeholder.markdown(full_response)
77
  message = {"role": "assistant", "content": full_response}