awacke1 committed on
Commit ce872b6 · verified · 1 Parent(s): 3f63e75

Update app.py

Files changed (1)
  1. app.py +5 -61
app.py CHANGED
@@ -572,33 +572,6 @@ def display_videos_and_links():
             col_index += 1  # Increment column index to place the next video in the next column
 
 
-
-@st.cache_resource
-def display_videos_and_links_old():
-    video_files = [f for f in os.listdir('.') if f.endswith('.mp4')]
-    if not video_files:
-        st.write("No MP4 videos found in the current directory.")
-        return
-    video_files_sorted = sorted(video_files, key=lambda x: len(x.split('.')[0]))
-    grid_sizes = [len(f.split('.')[0]) for f in video_files_sorted]
-    col_sizes = ['small' if size <= 4 else 'medium' if size <= 8 else 'large' for size in grid_sizes]
-
-    # Create a map for number of columns to use for each size
-    num_columns_map = {"small": 4, "medium": 3, "large": 2}
-    current_grid_size = 0
-
-    for video_file, col_size in zip(video_files_sorted, col_sizes):
-        if current_grid_size != num_columns_map[col_size]:
-            cols = st.columns(num_columns_map[col_size])
-            current_grid_size = num_columns_map[col_size]
-            col_index = 0
-
-        with cols[col_index % current_grid_size]:
-            st.video(video_file, format='video/mp4', start_time=0)
-            k = video_file.split('.')[0]  # Assumes keyword is the file name without extension
-            display_glossary_entity(k)
-
-
 
 @st.cache_resource
 def display_images_and_wikipedia_summaries():
@@ -606,15 +579,9 @@ def display_images_and_wikipedia_summaries():
     if not image_files:
         st.write("No PNG images found in the current directory.")
         return
-
-    # Sort image_files based on the length of the keyword to create a visually consistent grid
     image_files_sorted = sorted(image_files, key=lambda x: len(x.split('.')[0]))
-    # Calculate the grid size based on the sorted keywords
     grid_sizes = [len(f.split('.')[0]) for f in image_files_sorted]
-    # Dynamically adjust column size based on keyword length
     col_sizes = ['small' if size <= 4 else 'medium' if size <= 8 else 'large' for size in grid_sizes]
-
-    # Create a map for number of columns to use for each size
     num_columns_map = {"small": 4, "medium": 3, "large": 2}
     current_grid_size = 0
     for image_file, col_size in zip(image_files_sorted, col_sizes):
@@ -625,10 +592,8 @@ def display_images_and_wikipedia_summaries():
         with cols[col_index % current_grid_size]:
             image = Image.open(image_file)
             st.image(image, caption=image_file, use_column_width=True)
-            # Display search links
             k = image_file.split('.')[0]  # Assumes keyword is the file name without extension
             display_glossary_entity(k)
-            #col_index += 1
 
 
 def get_all_query_params(key):
@@ -641,22 +606,17 @@ def clear_query_params():
 # Function to display content or image based on a query
 @st.cache_resource
 def display_content_or_image(query):
-    # Check if the query matches any glossary term
     for category, terms in transhuman_glossary.items():
         for term in terms:
             if query.lower() in term.lower():
                 st.subheader(f"Found in {category}:")
                 st.write(term)
                 return True  # Return after finding and displaying the first match
-
-    # Check for an image match in a predefined directory (adjust path as needed)
     image_dir = "images"  # Example directory where images are stored
     image_path = f"{image_dir}/{query}.png"  # Construct image path with query
     if os.path.exists(image_path):
         st.image(image_path, caption=f"Image for {query}")
         return True
-
-    # If no content or image is found
     st.warning("No matching content or image found.")
     return False
 
@@ -676,7 +636,7 @@ headers = {
     "Content-Type": "application/json"
 }
 key = os.getenv('OPENAI_API_KEY')
-prompt = f"Write instructions to teach discharge planning along with guidelines and patient education. List entities, features and relationships to CCDA and FHIR objects in boldface."
+prompt = f"..."
 should_save = st.sidebar.checkbox("💾 Save", value=True, help="Save your session data.")
 
 
@@ -1004,10 +964,6 @@ def transcribe_audio(filename):
     return output
 
 def whisper_main():
-    #st.title("Speech to Text")
-    #st.write("Record your speech and get the text.")
-
-    # Audio, transcribe, GPT:
     filename = save_and_play_audio(audio_recorder)
     if filename is not None:
         transcription = transcribe_audio(filename)
@@ -1019,14 +975,12 @@ def whisper_main():
            transcript=''
         st.write(transcript)
 
-        # Whisper to GPT: New!! ---------------------------------------------------------------------
-        st.write('Reasoning with your inputs with GPT..')
+        st.write('Reasoning with your inputs..')
         response = chat_with_model(transcript)
         st.write('Response:')
         st.write(response)
         filename = generate_filename(response, "txt")
         create_file(filename, transcript, response, should_save)
-        # Whisper to GPT: New!! ---------------------------------------------------------------------
 
         # Whisper to Llama:
         response = StreamLLMChatResponse(transcript)
@@ -1046,22 +1000,18 @@ def whisper_main():
 def main():
     prompt = PromptPrefix2
     with st.expander("Prompts 📚", expanded=False):
-        example_input = st.text_input("Enter your prompt text for Llama:", value=prompt, help="Enter text to get a response from DromeLlama.")
-        if st.button("Run Prompt With Llama model", help="Click to run the prompt."):
+        example_input = st.text_input("Enter your prompt text:", value=prompt, help="Enter text to get a response.")
+        if st.button("Run Prompt", help="Click to run."):
            try:
                response=StreamLLMChatResponse(example_input)
                create_file(filename, example_input, response, should_save)
            except:
-                st.write('Llama model is asleep. Starting now on A10 GPU. Please wait one minute then retry. KEDA triggered.')
-
+                st.write('model is asleep. Starting now on A10 GPU. Please wait one minute then retry. KEDA triggered.')
     openai.api_key = os.getenv('OPENAI_API_KEY')
     if openai.api_key == None: openai.api_key = st.secrets['OPENAI_API_KEY']
-
     menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
     choice = st.sidebar.selectbox("Output File Type:", menu)
-
     model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
-
     user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
     collength, colupload = st.columns([2,3])  # adjust the ratio as needed
     with collength:
@@ -1092,8 +1042,6 @@ def main():
                filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
                create_file(filename, user_prompt, response, should_save)
                st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
-
-
     if st.button('💬 Chat'):
         st.write('Reasoning with your inputs...')
         user_prompt_sections = divide_prompt(user_prompt, max_length)
@@ -1144,8 +1092,6 @@ def main():
            os.remove(file)
            st.experimental_rerun()
 
-
-
     GiveFeedback=False
     if GiveFeedback:
        with st.expander("Give your feedback 👍", expanded=False):
@@ -1177,9 +1123,7 @@ def main():
 
    try:
        query_params = st.query_params
-        #query = (query_params.get('q') or query_params.get('query') or [''])[0]
        query = (query_params.get('q') or query_params.get('query') or [''])
-        #st.markdown('# Running query: ' + query)
        if query: search_glossary(query)
    except:
        st.markdown(' ')
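
Both display functions touched by this commit share one layout pattern: sort the media files by the length of their base-name keyword, bucket each file into a small/medium/large class, and rebuild the st.columns grid whenever the class (and so the column count) changes. A minimal standalone sketch of that pattern, reusing the thresholds and column map from the diff above (the PNG file names are whatever happens to sit in the working directory; everything else is illustrative, not the app's exact code):

    import os

    import streamlit as st
    from PIL import Image

    # Size thresholds and column counts as in the diff above.
    num_columns_map = {"small": 4, "medium": 3, "large": 2}

    def size_class(keyword):
        n = len(keyword)
        return 'small' if n <= 4 else 'medium' if n <= 8 else 'large'

    # Sort by keyword length so files with similar-sized captions group together.
    image_files = sorted(
        (f for f in os.listdir('.') if f.endswith('.png')),
        key=lambda f: len(f.split('.')[0]),
    )

    cols, current_grid_size, col_index = None, 0, 0
    for image_file in image_files:
        n_cols = num_columns_map[size_class(image_file.split('.')[0])]
        if current_grid_size != n_cols:  # column count changed: lay out a fresh row
            cols = st.columns(n_cols)
            current_grid_size = n_cols
            col_index = 0
        with cols[col_index % current_grid_size]:
            st.image(Image.open(image_file), caption=image_file, use_column_width=True)
        col_index += 1  # place the next image in the next column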
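
The last hunk also drops the trailing [0] when reading query parameters: st.query_params (Streamlit 1.30+) is dict-like and its get() returns a plain string rather than a list, so no indexing is needed. A short sketch of that lookup, assuming a URL such as ?q=term (the fallback and the st.write call are illustrative):

    import streamlit as st

    # st.query_params values come back as plain strings, so '?q=term' and
    # '?query=term' can both be read with a simple get() chain.
    params = st.query_params
    query = params.get('q') or params.get('query') or ''
    if query:
        st.write(f"Query: {query}")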