Update backupapp.py
backupapp.py CHANGED (+9 -12)
@@ -115,7 +115,7 @@ def StreamLLMChatResponse(prompt):
         res_box = st.empty()
         collected_chunks=[]
         collected_messages=[]
-
+        allresults=''
         for r in stream:
             if r.token.special:
                 continue
@@ -129,12 +129,10 @@ def StreamLLMChatResponse(prompt):
                 if len(r.token.text) > 0:
                     result="".join(report).strip()
                     res_box.markdown(f'*{result}*')
-
+
             except:
                 st.write('Stream llm issue')
-
-        #st.write(allresults)
-        #return allresults
+        return result
     except:
         st.write('DromeLlama is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
 
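Note on the two hunks above: the new `return result` runs after the streaming loop, but `result` is only bound once a non-empty token has arrived, so a stream that fails early or yields nothing leaves it undefined and the return raises `NameError` (which the outer `except` then masks with the DromeLlama message). A minimal defensive sketch, assuming the same token shape (`r.token.special`, `r.token.text`) used in this function; the `stream` parameter here is hypothetical, standing in for the client's streaming iterator:

```python
import streamlit as st

# Sketch only, not the committed code: pre-binding `result` keeps every
# exit path well-defined even when no tokens are collected.
def stream_chat_sketch(stream):
    res_box = st.empty()
    report = []
    result = ''  # pre-bind so the function can always return a string
    try:
        for r in stream:
            if r.token.special:
                continue
            if len(r.token.text) > 0:
                report.append(r.token.text)
                result = "".join(report).strip()
                res_box.markdown(f'*{result}*')
    except Exception:
        st.write('Stream llm issue')
    return result
```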
@@ -195,7 +193,7 @@ def create_file(filename, prompt, response, should_save=True):
     has_python_code = bool(re.search(r"```python([\s\S]*?)```", response))
     if ext in ['.txt', '.htm', '.md']:
         with open(f"{base_filename}-Prompt.txt", 'w') as file:
-            file.write(prompt)
+            file.write(prompt.strip())
         with open(f"{base_filename}-Response.md", 'w') as file:
             file.write(response)
         if has_python_code:
@@ -437,12 +435,12 @@ def whisper_main():
     filename = save_and_play_audio(audio_recorder)
     if filename is not None:
         transcription = transcribe_audio(filename)
+        transcription = transcription['text']
         st.write(transcription)
-        response = StreamLLMChatResponse(transcription
-        st.write(response)
-
-
-        #create_file(filename, transcription, '', True)
+        response = StreamLLMChatResponse(transcription)
+        # st.write(response) - redundant with streaming result?
+        filename = generate_filename(transcription, ".txt")
+        create_file(filename, transcription, response, should_save)
         #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
 
 def main():
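Note on this hunk: the added `transcription['text']` assumes `transcribe_audio` returns the Whisper response as a dict with a `text` key; a failed request that returns `None` or an error payload would raise here. A sketch of the same flow with that case guarded, reusing the helper names from this diff (`save_and_play_audio`, `transcribe_audio`, `generate_filename`, `create_file`, and the `should_save` flag, all assumed to be defined elsewhere in backupapp.py):

```python
# Sketch only: whisper_main after this change, with the 'text' lookup guarded.
def whisper_main():
    filename = save_and_play_audio(audio_recorder)
    if filename is None:
        return
    transcription = transcribe_audio(filename)
    # Guard: a failed API call may return None or a payload without 'text'.
    if not isinstance(transcription, dict) or 'text' not in transcription:
        st.write('Whisper transcription failed - no text returned.')
        return
    text = transcription['text']
    st.write(text)
    response = StreamLLMChatResponse(text)
    filename = generate_filename(text, ".txt")
    create_file(filename, text, response, should_save)
```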
@@ -593,4 +591,3 @@ def main():
 if __name__ == "__main__":
     whisper_main()
     main()
-