Update app.py
app.py CHANGED
@@ -775,6 +775,37 @@ def main():
         file_content_area = st.text_area("File Contents:", file_contents, height=500)
         if next_action=='md':
             st.markdown(file_contents)
+
+            buttonlabel = '🔍Run with Llama and GPT.'
+            if st.button(key='RunWithLlamaandGPT', label = buttonlabel)
+                user_prompt = file_contents
+
+                # Llama versus GPT Battle!
+                all=""
+                try:
+                    st.write('🔍Running with Llama.')
+                    response = StreamLLMChatResponse(file_contents)
+                    filename = generate_filename(user_prompt, ".md")
+                    create_file(filename, file_contents, response, should_save)
+                    all=response
+                    #SpeechSynthesis(response)
+                except:
+                    st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
+
+                # gpt
+                try:
+                    st.write('🔍Running with GPT.')
+                    response2 = chat_with_model(user_prompt, file_contents, model_choice)
+                    filename2 = generate_filename(file_contents, choice)
+                    create_file(filename2, user_prompt, response, should_save)
+                    all=all+response2
+                    #SpeechSynthesis(response2)
+                except:
+                    st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
+
+                SpeechSynthesis(all)
+
+
 
         if next_action=='search':
             file_content_area = st.text_area("File Contents:", file_contents, height=500)
@@ -790,7 +821,7 @@ def main():
             filename = generate_filename(user_prompt, ".md")
             create_file(filename, file_contents, response, should_save)
             all=response
-            SpeechSynthesis(response)
+            #SpeechSynthesis(response)
         except:
             st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
 
@@ -801,7 +832,7 @@ def main():
             filename2 = generate_filename(file_contents, choice)
             create_file(filename2, user_prompt, response, should_save)
             all=all+response2
-            SpeechSynthesis(response2)
+            #SpeechSynthesis(response2)
         except:
             st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
 
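Two details of the added handler are worth noting. First, the new line `if st.button(key='RunWithLlamaandGPT', label = buttonlabel)` is missing its trailing colon, so Python rejects it with a SyntaxError when app.py is loaded. Second, the commit moves speech output from the per-model SpeechSynthesis calls (now commented out) to a single SpeechSynthesis(all) call on the concatenated answers. Below is a minimal, hedged sketch of the same "Llama versus GPT" flow with the colon restored; it assumes the helpers already defined in app.py (StreamLLMChatResponse, chat_with_model, generate_filename, create_file, SpeechSynthesis) behave as they are called in the diff, and it passes response2 (not response, as the commit does) when saving the GPT answer, which is presumably what was intended. The function name run_llama_vs_gpt is illustrative only.

# Hedged sketch of the handler added above, not the exact code in app.py.
import streamlit as st

from app import (  # assumed helpers defined elsewhere in app.py
    StreamLLMChatResponse,   # streams a Llama completion for a prompt
    chat_with_model,         # calls the selected GPT model
    generate_filename,       # builds a filename from a prompt and a type/extension
    create_file,             # persists prompt + response when should_save is True
    SpeechSynthesis,         # renders text-to-speech for a string
)


def run_llama_vs_gpt(file_contents: str, model_choice: str, choice: str, should_save: bool) -> str:
    """Run the same prompt through Llama and GPT, then speak the combined answer once."""
    user_prompt = file_contents
    combined = ""  # the diff names this 'all', which shadows the Python builtin

    try:
        st.write('🔍Running with Llama.')
        response = StreamLLMChatResponse(file_contents)
        create_file(generate_filename(user_prompt, ".md"), file_contents, response, should_save)
        combined = response
    except Exception:
        st.markdown('Llama is sleeping. Restart ETA 30 seconds.')

    try:
        st.write('🔍Running with GPT.')
        response2 = chat_with_model(user_prompt, file_contents, model_choice)
        # The commit passes 'response' here; 'response2' saves the GPT answer instead.
        create_file(generate_filename(file_contents, choice), user_prompt, response2, should_save)
        combined += response2
    except Exception:
        st.markdown('GPT is sleeping. Restart ETA 30 seconds.')

    # Speak once at the end rather than after each model, matching the commented-out calls above.
    SpeechSynthesis(combined)
    return combined

Inside main(), the button branch from the diff would then reduce to something like `if st.button(key='RunWithLlamaandGPT', label=buttonlabel): run_llama_vs_gpt(file_contents, model_choice, choice, should_save)`, keeping the two model calls and the single speech step in one place.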