awacke1 committed
Commit b1ea756
1 Parent(s): e07e606

Update app.py

Files changed (1)
  1. app.py +8 -13
app.py CHANGED
@@ -185,11 +185,6 @@ def display_content_or_image(query):
 
 
 
-# ------------------------------------------------------------------------- Can't Believe I'm Doing This. --------------------------------------------------------
-
-
-
-
 
 
 # Imports
@@ -228,9 +223,6 @@ from xml.etree import ElementTree as ET
 import streamlit.components.v1 as components # Import Streamlit Components for HTML5
 
 
-#st.set_page_config(page_title="🐪Llama Whisperer🦙 Voice Chat🌟", layout="wide")
-
-
 def add_Med_Licensing_Exam_Dataset():
     import streamlit as st
     from datasets import load_dataset
@@ -774,30 +766,33 @@ def whisper_main():
         response = chat_with_model(transcript)
         st.write('Response:')
         st.write(response)
-        filename = generate_filename(transcript, "txt")
+
+        filename = generate_filename(response, "txt")
         create_file(filename, transcript, response, should_save)
         # Whisper to GPT: New!! ---------------------------------------------------------------------
 
-
 
         # Whisper to Llama:
         response = StreamLLMChatResponse(transcript)
         filename_txt = generate_filename(transcript, "md")
         create_file(filename_txt, transcript, response, should_save)
+
        filename_wav = filename_txt.replace('.txt', '.wav')
         import shutil
         try:
-            shutil.copyfile(filename, filename_wav)
+            if os.path.exists(filename):
+                shutil.copyfile(filename, filename_wav)
         except:
-            st.write('fail')
+            st.write('.')
+
         if os.path.exists(filename):
             os.remove(filename)
 
+        #st.experimental_rerun()
         #except:
         #    st.write('Starting Whisper Model on GPU. Please retry in 30 seconds.')
 
 
-import streamlit as st
 
 # Sample function to demonstrate a response, replace with your own logic
 def StreamMedChatResponse(topic):
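
The substantive change is in whisper_main(): the saved filename is now derived from the model response rather than the transcript, the shutil.copyfile call is guarded by an os.path.exists check, and the temporary file is still removed afterwards. Below is a minimal standalone sketch of that guarded copy-and-cleanup pattern, assuming plain file paths; copy_then_cleanup and the sample file names are hypothetical stand-ins for the app's generate_filename/create_file helpers and are not part of this commit.

import os
import shutil

def copy_then_cleanup(src: str, dst: str) -> bool:
    """Copy src to dst only if src exists, then remove src.

    Mirrors the guarded pattern added in whisper_main(): check
    os.path.exists before shutil.copyfile, report failures quietly,
    and always attempt the cleanup step.
    """
    copied = False
    try:
        if os.path.exists(src):      # guard introduced by this commit
            shutil.copyfile(src, dst)
            copied = True
    except OSError:
        print('.')                   # the app writes a quiet placeholder via st.write('.')
    if os.path.exists(src):          # cleanup mirrors os.remove(filename)
        os.remove(src)
    return copied

# Usage (hypothetical file names):
#   copy_then_cleanup("transcript.txt", "transcript.wav")  -> True if the copy happened
#   copy_then_cleanup("missing.txt", "missing.wav")        -> False, nothing to copy or remove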