awacke1 committed on
Commit 3c2f20e
1 Parent(s): 83d0e33

Update app.py

Files changed (1)
  1. app.py +16 -1
app.py CHANGED
@@ -763,6 +763,9 @@ def whisper_main():
     try:
         transcript = transcription['text']
         st.write(transcript)
+
+
+        # Whisper to Llama:
         response = StreamLLMChatResponse(transcript)
         filename_txt = generate_filename(transcript, ".txt")
         create_file(filename_txt, transcript, response, should_save)
@@ -771,6 +774,17 @@ def whisper_main():
         shutil.copyfile(filename, filename_wav)
         if os.path.exists(filename):
             os.remove(filename)
+
+        # Whisper to GPT: New!! ---------------------------------------------------------------------
+        st.write('Reasoning with your inputs with GPT..')
+        response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
+        st.write('Response:')
+        st.write(response)
+        filename = generate_filename(user_prompt, choice)
+        create_file(filename, user_prompt, response, should_save)
+        # Whisper to GPT: New!! ---------------------------------------------------------------------
+
+
     except:
         st.write('Starting Whisper Model on GPU. Please retry in 30 seconds.')
 
@@ -912,6 +926,8 @@ def main():
             filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
             create_file(filename, user_prompt, response, should_save)
             st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
+
+
     if st.button('💬 Chat'):
         st.write('Reasoning with your inputs...')
         user_prompt_sections = divide_prompt(user_prompt, max_length)
@@ -924,7 +940,6 @@ def main():
             st.write(response)
             filename = generate_filename(user_prompt, choice)
             create_file(filename, user_prompt, response, should_save)
-            #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
 
     # Compose a file sidebar of markdown md files:
     all_files = glob.glob("*.md")
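
For readers skimming the diff: the whisper_main() change fans a single Whisper transcript out to two backends, the existing StreamLLMChatResponse (Llama) path and a new chat_with_model (GPT) path, saving each response with generate_filename / create_file. The sketch below is a minimal, self-contained illustration of that fan-out pattern, not the app's implementation: the two backend functions and the file helpers are hypothetical stand-ins, and the app's prompt_section, document_sections, and model_choice values (assumed to be set elsewhere in app.py) are reduced to plain arguments.

# Minimal sketch of the "one transcript, two model backends" fan-out added in this commit.
# llama_stream_response and gpt_chat_response are hypothetical stand-ins for the repo's
# StreamLLMChatResponse and chat_with_model; make_filename and save_file are simplified
# stand-ins for generate_filename and create_file.
import re
from datetime import datetime

def llama_stream_response(prompt: str) -> str:
    # Stand-in: the app streams a Llama response for the raw transcript.
    return f"[llama] {prompt[:80]}"

def gpt_chat_response(prompt: str, context: str) -> str:
    # Stand-in: the app asks GPT with the transcript plus any document context.
    return f"[gpt] {prompt[:80]} (context chars: {len(context)})"

def make_filename(text: str, ext: str) -> str:
    # Derive a timestamped filename from the prompt text.
    stem = re.sub(r"\W+", "_", text).strip("_")[:40] or "transcript"
    return f"{datetime.now():%Y%m%d_%H%M%S}_{stem}{ext}"

def save_file(name: str, prompt: str, response: str) -> None:
    # Write the prompt and the model response to disk together.
    with open(name, "w", encoding="utf-8") as f:
        f.write(f"# Prompt\n{prompt}\n\n# Response\n{response}\n")

def handle_transcript(transcript: str, context_sections: list[str]) -> None:
    # Whisper to Llama: answer the raw transcript and save prompt + response.
    llama_response = llama_stream_response(transcript)
    save_file(make_filename(transcript, ".txt"), transcript, llama_response)

    # Whisper to GPT: answer the same transcript with document context and save it.
    gpt_response = gpt_chat_response(transcript, "".join(context_sections))
    save_file(make_filename(transcript, ".md"), transcript, gpt_response)

if __name__ == "__main__":
    handle_transcript("What is mixture of experts routing?", ["section one ", "section two"])

Note that in the committed code the GPT call receives prompt_section and document_sections rather than the transcript itself, so its behavior depends on how those variables are populated earlier in app.py.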