awacke1 committed
Commit 264f77f
1 Parent(s): b36a63b

Update app.py

Files changed (1)
  1. app.py +6 -3
app.py CHANGED
@@ -841,7 +841,8 @@ def read_file_content(file,max_length):
 
 # 11. Chat with GPT - Caution on quota - now favoring fastest AI pipeline STT Whisper->LLM Llama->TTS
 @st.cache_resource
-def chat_with_model(prompt, document_section='', model_choice='gpt-4-0125-preview'): # gpt-4-0125-preview gpt-3.5-turbo
+def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'): # gpt-4-0125-preview gpt-3.5-turbo
+#def chat_with_model(prompt, document_section='', model_choice='gpt-4-0125-preview'): # gpt-4-0125-preview gpt-3.5-turbo
     model = model_choice
     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
     conversation.append({'role': 'user', 'content': prompt})
@@ -852,7 +853,8 @@ def chat_with_model(prompt, document_section='', model_choice='gpt-4-0125-previe
     res_box = st.empty()
     collected_chunks = []
     collected_messages = []
-    for chunk in openai.ChatCompletion.create(model='gpt-4-0125-preview', messages=conversation, temperature=0.5, stream=True): # gpt-4-0125-preview gpt-3.5-turbo
+
+    for chunk in openai.ChatCompletion.create(model=model_choice, messages=conversation, temperature=0.5, stream=True):
         collected_chunks.append(chunk)
         chunk_message = chunk['choices'][0]['delta']
         collected_messages.append(chunk_message)
@@ -870,7 +872,8 @@ def chat_with_model(prompt, document_section='', model_choice='gpt-4-0125-previe
     return full_reply_content
 
 @st.cache_resource
-def chat_with_file_contents(prompt, file_content, model_choice='gpt-4-0125-preview'): # gpt-4-0125-preview gpt-3.5-turbo
+def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'): # gpt-4-0125-preview gpt-3.5-turbo
+#def chat_with_file_contents(prompt, file_content, model_choice='gpt-4-0125-preview'): # gpt-4-0125-preview gpt-3.5-turbo
     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
     conversation.append({'role': 'user', 'content': prompt})
     if len(file_content)>0:
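
For readers skimming the hunks above: the app streams token deltas from the legacy (pre-1.0) `openai` SDK's `openai.ChatCompletion.create(..., stream=True)` call and joins them into a full reply, and this commit swaps the hard-coded `'gpt-4-0125-preview'` in that call for the `model_choice` parameter while changing the default to `'gpt-3.5-turbo'`. Below is a minimal sketch of the pattern as it stands after the commit; the `document_section` handling and the `res_box.markdown(...)` progressive render are assumptions filled in from surrounding code, not lines shown in this diff.

```python
# Sketch of the streaming pattern after this commit (legacy openai<1.0 SDK).
# Assumes openai.api_key is configured elsewhere in app.py.
import openai
import streamlit as st

@st.cache_resource
def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    if len(document_section) > 0:  # assumption: mirrors the file_content check in chat_with_file_contents
        conversation.append({'role': 'assistant', 'content': document_section})

    res_box = st.empty()  # live placeholder, rewritten on each chunk
    collected_messages = []

    # The commit's key change: the model comes from model_choice instead of
    # being hard-coded to 'gpt-4-0125-preview' inside the call.
    for chunk in openai.ChatCompletion.create(model=model_choice,
                                              messages=conversation,
                                              temperature=0.5,
                                              stream=True):
        chunk_message = chunk['choices'][0]['delta']
        collected_messages.append(chunk_message)
        # Deltas may omit 'content' (e.g. the final role/stop chunk).
        full_reply_content = ''.join(m.get('content', '') for m in collected_messages)
        res_box.markdown(full_reply_content)  # assumption: progressive render

    return full_reply_content
```

One design note: because the function is wrapped in `@st.cache_resource`, a repeated identical prompt returns the cached reply without re-streaming, which is presumably the quota-saving intent behind the "# Caution on quota" comment.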