Update app.py
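This commit comments out the '🔍 Running…' style st.write / st.markdown status messages in FileSidebar(), search_glossary(), StreamLLMChatResponse(), and main(), and deletes the add_medical_exam_buttons() helper.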
app.py CHANGED
@@ -229,7 +229,7 @@ def FileSidebar():
 
         # gpt
         try:
-            st.write('🔍Running..')
+            #st.write('🔍Running..')
            response2 = chat_with_model(user_prompt, file_contents, model_choice)
            filename2 = generate_filename(file_contents, choice)
            create_file(filename2, user_prompt, response, should_save)
@@ -242,13 +242,13 @@ def FileSidebar():
 
     if next_action=='search':
         file_content_area = st.text_area("File Contents:", file_contents, height=500)
-        st.write('🔍Running..')
+        #st.write('🔍Running..')
 
         user_prompt = file_contents
 
         # gpt
         try:
-            st.write('🔍Running with GPT.')
+            #st.write('🔍Running with GPT.')
            response2 = chat_with_model(user_prompt, file_contents, model_choice)
            filename2 = generate_filename(file_contents, choice)
            create_file(filename2, user_prompt, response, should_save)
@@ -356,7 +356,7 @@ def search_glossary(query):
 
     query2 = PromptPrefix + query  # Add prompt preface for method step task behavior
     # st.write('## ' + query2)
-    st.write('## 🔍 Running with GPT.')  # -------------------------------------------------------------------------------------------------
+    #st.write('## 🔍 Running with GPT.')  # -------------------------------------------------------------------------------------------------
    response = chat_with_model(query2)
    filename = generate_filename(query2 + ' --- ' + response, "md")
    create_file(filename, query, response, should_save)
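Note (not part of the diff): the hunks above all follow the same prompt -> response -> save-to-file pattern. Below is a minimal illustrative sketch of what the generate_filename / create_file helpers could look like; only their call sites appear in this diff, so these bodies are hypothetical stand-ins rather than the actual app.py implementations.

# Hypothetical sketch of the helper chain used above (not the real code).
import re
import time

def generate_filename(text: str, ext: str) -> str:
    # Derive a safe, timestamped filename from the start of the text.
    stem = re.sub(r'[^\w\s-]', '', text)[:50].strip().replace(' ', '_')
    return f"{time.strftime('%Y%m%d-%H%M%S')}_{stem}.{ext}"

def create_file(filename: str, prompt: str, response: str, should_save: bool) -> None:
    # Persist the prompt and the model response together when saving is enabled.
    if not should_save:
        return
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(prompt + '\n\n' + response)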
@@ -746,7 +746,7 @@ def StreamLLMChatResponse(prompt):
    try:
        endpoint_url = API_URL
        hf_token = API_KEY
-        st.write('Running client ' + endpoint_url)
+        #st.write('Running client ' + endpoint_url)
        client = InferenceClient(endpoint_url, token=hf_token)
        gen_kwargs = dict(
            max_new_tokens=512,
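Note (not part of the diff): for context, a minimal sketch of how an InferenceClient like the one above can stream tokens into a Streamlit placeholder. The helper name stream_completion and the argument passing are illustrative assumptions, not the actual StreamLLMChatResponse body.

# Minimal streaming sketch, assuming huggingface_hub's InferenceClient.
import streamlit as st
from huggingface_hub import InferenceClient

def stream_completion(prompt: str, endpoint_url: str, hf_token: str) -> str:
    client = InferenceClient(endpoint_url, token=hf_token)
    placeholder = st.empty()  # area updated as tokens arrive
    collected = ""
    # text_generation with stream=True yields generated text piece by piece
    for token in client.text_generation(prompt, max_new_tokens=512, stream=True):
        collected += token
        placeholder.markdown(collected)
    return collected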
@@ -1140,34 +1140,6 @@ def StreamMedChatResponse(topic):
    st.write(f"Showing resources or questions related to: {topic}")
 
 
-
-def add_medical_exam_buttons():
-    # Medical exam terminology descriptions
-    descriptions = {
-        "White Blood Cells 🌊": "3 Q&A with emojis about types, facts, function, inputs and outputs of white blood cells 🎥",
-        "CT Imaging🦠": "3 Q&A with emojis on CT Imaging post surgery, how to, what to look for 💊",
-        "Hematoma 💉": "3 Q&A with emojis about hematoma and infection care and study including bacteria cultures and tests or labs💪",
-        "Post Surgery Wound Care 🍌": "3 Q&A with emojis on wound care, and good bedside manner 🩸",
-        "Healing and humor 💊": "3 Q&A with emojis on stories and humor about healing and caregiving 🚑",
-        "Psychology of bedside manner 🧬": "3 Q&A with emojis on bedside manner and how to make patients feel at ease🛠",
-        "CT scan 💊": "3 Q&A with analysis on infection using CT scan and packing for skin, cellulitus and fascia 🩺"
-    }
-
-    # Expander for medical topics
-    with st.expander("Medical Licensing Exam Topics 📚", expanded=False):
-        st.markdown("🩺 **Important**: Variety of topics for medical licensing exams.")
-
-        # Create buttons for each description with unique keys
-        for idx, (label, content) in enumerate(descriptions.items()):
-            button_key = f"button_{idx}"
-            if st.button(label, key=button_key):
-                st.write(f"Running {label}")
-                input='Create markdown outline for definition of topic ' + label + ' also short quiz with appropriate emojis and definitions for: ' + content
-                response=StreamLLMChatResponse(input)
-                filename = generate_filename(response, 'txt')
-                create_file(filename, input, response, should_save)
-
-
 
 
 # 17. Main
@@ -1318,7 +1290,7 @@ def main():
        query_params = st.query_params
        #query = (query_params.get('q') or query_params.get('query') or [''])[0]
        query = (query_params.get('q') or query_params.get('query') or [''])
-        st.markdown('# Running query: ' + query)
+        #st.markdown('# Running query: ' + query)
        if query: search_glossary(query)
    except:
        st.markdown(' ')
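Note (not part of the diff): every hunk in this commit silences a status message by commenting it out. A minimal alternative sketch, assuming a module-level DEBUG flag and a debug_write helper that are not present in the original app.py, would gate those messages instead so they can be re-enabled without editing each call site.

# Hypothetical debug gate; DEBUG and debug_write are illustrative names only.
import streamlit as st

DEBUG = False  # flip to True to restore the '🔍Running..' style messages

def debug_write(message: str) -> None:
    # No-op unless debugging is enabled, keeping the production UI clean.
    if DEBUG:
        st.write(message)

# Usage, e.g. inside FileSidebar():
# debug_write('🔍 Running with GPT.')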