awacke1 committed on
Commit
67f9944
1 Parent(s): 8d59633

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +118 -105
app.py CHANGED
@@ -68,6 +68,122 @@ with st.expander("Help / About 📚", expanded=False):
68
  ''')
69
 
70
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  # ---- Art Card Sidebar with Random Selection of image:
72
  def get_image_as_base64(url):
73
  response = requests.get(url)
@@ -1033,6 +1149,8 @@ def add_medical_exam_buttons():
1033
  create_file(filename, input, response, should_save)
1034
 
1035
 
 
 
1036
  # 17. Main
1037
  def main():
1038
  prompt = f"Write ten funny jokes that are tweet length stories that make you laugh. Show as markdown outline with emojis for each."
@@ -1101,111 +1219,6 @@ def main():
1101
  st.write(response)
1102
  filename = generate_filename(user_prompt, choice)
1103
  create_file(filename, user_prompt, response, should_save)
1104
-
1105
- # Compose a file sidebar of markdown md files:
1106
- all_files = glob.glob("*.md")
1107
- all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10] # exclude files with short names
1108
- all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
1109
- if st.sidebar.button("🗑 Delete All Text"):
1110
- for file in all_files:
1111
- os.remove(file)
1112
- st.experimental_rerun()
1113
- if st.sidebar.button("⬇️ Download All"):
1114
- zip_file = create_zip_of_files(all_files)
1115
- st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)
1116
- file_contents=''
1117
- next_action=''
1118
- for file in all_files:
1119
- col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1]) # adjust the ratio as needed
1120
- with col1:
1121
- if st.button("🌐", key="md_"+file): # md emoji button
1122
- with open(file, 'r') as f:
1123
- file_contents = f.read()
1124
- next_action='md'
1125
- with col2:
1126
- st.markdown(get_table_download_link(file), unsafe_allow_html=True)
1127
- with col3:
1128
- if st.button("📂", key="open_"+file): # open emoji button
1129
- with open(file, 'r') as f:
1130
- file_contents = f.read()
1131
- next_action='open'
1132
- with col4:
1133
- if st.button("🔍", key="read_"+file): # search emoji button
1134
- with open(file, 'r') as f:
1135
- file_contents = f.read()
1136
- next_action='search'
1137
- with col5:
1138
- if st.button("🗑", key="delete_"+file):
1139
- os.remove(file)
1140
- st.experimental_rerun()
1141
-
1142
-
1143
- if len(file_contents) > 0:
1144
- if next_action=='open':
1145
- file_content_area = st.text_area("File Contents:", file_contents, height=500)
1146
- if next_action=='md':
1147
- st.markdown(file_contents)
1148
-
1149
- buttonlabel = '🔍Run with Llama and GPT.'
1150
- if st.button(key='RunWithLlamaandGPT', label = buttonlabel):
1151
- user_prompt = file_contents
1152
-
1153
- # Llama versus GPT Battle!
1154
- all=""
1155
- try:
1156
- st.write('🔍Running with Llama.')
1157
- response = StreamLLMChatResponse(file_contents)
1158
- filename = generate_filename(user_prompt, "md")
1159
- create_file(filename, file_contents, response, should_save)
1160
- all=response
1161
- #SpeechSynthesis(response)
1162
- except:
1163
- st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
1164
-
1165
- # gpt
1166
- try:
1167
- st.write('🔍Running with GPT.')
1168
- response2 = chat_with_model(user_prompt, file_contents, model_choice)
1169
- filename2 = generate_filename(file_contents, choice)
1170
- create_file(filename2, user_prompt, response, should_save)
1171
- all=all+response2
1172
- #SpeechSynthesis(response2)
1173
- except:
1174
- st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
1175
-
1176
- SpeechSynthesis(all)
1177
-
1178
-
1179
- if next_action=='search':
1180
- file_content_area = st.text_area("File Contents:", file_contents, height=500)
1181
- st.write('🔍Running with Llama and GPT.')
1182
-
1183
- user_prompt = file_contents
1184
-
1185
- # Llama versus GPT Battle!
1186
- all=""
1187
- try:
1188
- st.write('🔍Running with Llama.')
1189
- response = StreamLLMChatResponse(file_contents)
1190
- filename = generate_filename(user_prompt, ".md")
1191
- create_file(filename, file_contents, response, should_save)
1192
- all=response
1193
- #SpeechSynthesis(response)
1194
- except:
1195
- st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
1196
-
1197
- # gpt
1198
- try:
1199
- st.write('🔍Running with GPT.')
1200
- response2 = chat_with_model(user_prompt, file_contents, model_choice)
1201
- filename2 = generate_filename(file_contents, choice)
1202
- create_file(filename2, user_prompt, response, should_save)
1203
- all=all+response2
1204
- #SpeechSynthesis(response2)
1205
- except:
1206
- st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
1207
-
1208
- SpeechSynthesis(all)
1209
 
1210
 
1211
  # Function to encode file to base64
 
68
  ''')
69
 
70
 
71
+
72
+ def FileSidebar():
73
+ # ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------
74
+ # Compose a file sidebar of markdown md files:
75
+ all_files = glob.glob("*.md")
76
+ all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10] # exclude files with short names
77
+ all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
78
+ if st.sidebar.button("🗑 Delete All Text"):
79
+ for file in all_files:
80
+ os.remove(file)
81
+ st.experimental_rerun()
82
+ if st.sidebar.button("⬇️ Download All"):
83
+ zip_file = create_zip_of_files(all_files)
84
+ st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)
85
+ file_contents=''
86
+ next_action=''
87
+ for file in all_files:
88
+ col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1]) # adjust the ratio as needed
89
+ with col1:
90
+ if st.button("🌐", key="md_"+file): # md emoji button
91
+ with open(file, 'r') as f:
92
+ file_contents = f.read()
93
+ next_action='md'
94
+ with col2:
95
+ st.markdown(get_table_download_link(file), unsafe_allow_html=True)
96
+ with col3:
97
+ if st.button("📂", key="open_"+file): # open emoji button
98
+ with open(file, 'r') as f:
99
+ file_contents = f.read()
100
+ next_action='open'
101
+ with col4:
102
+ if st.button("🔍", key="read_"+file): # search emoji button
103
+ with open(file, 'r') as f:
104
+ file_contents = f.read()
105
+ next_action='search'
106
+ with col5:
107
+ if st.button("🗑", key="delete_"+file):
108
+ os.remove(file)
109
+ st.experimental_rerun()
110
+
111
+
112
+ if len(file_contents) > 0:
113
+ if next_action=='open':
114
+ file_content_area = st.text_area("File Contents:", file_contents, height=500)
115
+ if next_action=='md':
116
+ st.markdown(file_contents)
117
+
118
+ buttonlabel = '🔍Run with Llama and GPT.'
119
+ if st.button(key='RunWithLlamaandGPT', label = buttonlabel):
120
+ user_prompt = file_contents
121
+
122
+ # Llama versus GPT Battle!
123
+ all=""
124
+ try:
125
+ st.write('🔍Running with Llama.')
126
+ response = StreamLLMChatResponse(file_contents)
127
+ filename = generate_filename(user_prompt, "md")
128
+ create_file(filename, file_contents, response, should_save)
129
+ all=response
130
+ #SpeechSynthesis(response)
131
+ except:
132
+ st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
133
+
134
+ # gpt
135
+ try:
136
+ st.write('🔍Running with GPT.')
137
+ response2 = chat_with_model(user_prompt, file_contents, model_choice)
138
+ filename2 = generate_filename(file_contents, choice)
139
+ create_file(filename2, user_prompt, response, should_save)
140
+ all=all+response2
141
+ #SpeechSynthesis(response2)
142
+ except:
143
+ st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
144
+
145
+ SpeechSynthesis(all)
146
+
147
+
148
+ if next_action=='search':
149
+ file_content_area = st.text_area("File Contents:", file_contents, height=500)
150
+ st.write('🔍Running with Llama and GPT.')
151
+
152
+ user_prompt = file_contents
153
+
154
+ # Llama versus GPT Battle!
155
+ all=""
156
+ try:
157
+ st.write('🔍Running with Llama.')
158
+ response = StreamLLMChatResponse(file_contents)
159
+ filename = generate_filename(user_prompt, ".md")
160
+ create_file(filename, file_contents, response, should_save)
161
+ all=response
162
+ #SpeechSynthesis(response)
163
+ except:
164
+ st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
165
+
166
+ # gpt
167
+ try:
168
+ st.write('🔍Running with GPT.')
169
+ response2 = chat_with_model(user_prompt, file_contents, model_choice)
170
+ filename2 = generate_filename(file_contents, choice)
171
+ create_file(filename2, user_prompt, response, should_save)
172
+ all=all+response2
173
+ #SpeechSynthesis(response2)
174
+ except:
175
+ st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
176
+
177
+ SpeechSynthesis(all)
178
+ # ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------
179
+
180
+
181
+ FileSidebar()
182
+
183
+
184
+
185
+
186
+
187
  # ---- Art Card Sidebar with Random Selection of image:
188
  def get_image_as_base64(url):
189
  response = requests.get(url)
 
1149
  create_file(filename, input, response, should_save)
1150
 
1151
 
1152
+
1153
+
1154
  # 17. Main
1155
  def main():
1156
  prompt = f"Write ten funny jokes that are tweet length stories that make you laugh. Show as markdown outline with emojis for each."
 
1219
  st.write(response)
1220
  filename = generate_filename(user_prompt, choice)
1221
  create_file(filename, user_prompt, response, should_save)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1222
 
1223
 
1224
  # Function to encode file to base64