awacke1 committed on
Commit
ff5e856
β€’
1 Parent(s): e0a37a4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -25
app.py CHANGED
@@ -54,6 +54,28 @@ transhuman_glossary = {
54
  }
55
 
56
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
  # Display the glossary with Streamlit components, ensuring emojis are used
59
  def display_glossary(area):
@@ -90,11 +112,9 @@ def display_buttons_with_scores():
90
  score = load_score(key)
91
  if st.button(f"{term} {score}πŸš€", key=key):
92
  update_score(key)
 
93
  st.experimental_rerun()
94
 
95
-
96
-
97
-
98
  def fetch_wikipedia_summary(keyword):
99
  # Placeholder function for fetching Wikipedia summaries
100
  # In a real app, you might use requests to fetch from the Wikipedia API
@@ -861,28 +881,6 @@ def add_medical_exam_buttons2():
861
  StreamLLMChatResponse(descriptions["Ramipril πŸ’Š"])
862
 
863
 
864
- # Function to search glossary and display results
865
- def search_glossary(query):
866
- for category, terms in transhuman_glossary.items():
867
- if query.lower() in (term.lower() for term in terms):
868
- st.markdown(f"### {category}")
869
- st.write(f"- {query}")
870
-
871
- st.write('## Processing query against GPT and Llama:')
872
- # ------------------------------------------------------------------------------------------------
873
- st.write('Reasoning with your inputs using GPT...')
874
- response = chat_with_model(transcript)
875
- st.write('Response:')
876
- st.write(response)
877
- filename = generate_filename(response, "txt")
878
- create_file(filename, transcript, response, should_save)
879
-
880
- st.write('Reasoning with your inputs using Llama...')
881
- response = StreamLLMChatResponse(transcript)
882
- filename_txt = generate_filename(transcript, "md")
883
- create_file(filename_txt, transcript, response, should_save)
884
- # ------------------------------------------------------------------------------------------------
885
-
886
 
887
  # 17. Main
888
  def main():
 
54
  }
55
 
56
 
57
+ # Function to search glossary and display results
58
+ def search_glossary(query):
59
+ for category, terms in transhuman_glossary.items():
60
+ if query.lower() in (term.lower() for term in terms):
61
+ st.markdown(f"### {category}")
62
+ st.write(f"- {query}")
63
+
64
+ st.write('## Processing query against GPT and Llama:')
65
+ # ------------------------------------------------------------------------------------------------
66
+ st.write('Reasoning with your inputs using GPT...')
67
+ response = chat_with_model(transcript)
68
+ st.write('Response:')
69
+ st.write(response)
70
+ filename = generate_filename(response, "txt")
71
+ create_file(filename, transcript, response, should_save)
72
+
73
+ st.write('Reasoning with your inputs using Llama...')
74
+ response = StreamLLMChatResponse(transcript)
75
+ filename_txt = generate_filename(transcript, "md")
76
+ create_file(filename_txt, transcript, response, should_save)
77
+ # ------------------------------------------------------------------------------------------------
78
+
79
 
80
  # Display the glossary with Streamlit components, ensuring emojis are used
81
  def display_glossary(area):
 
112
  score = load_score(key)
113
  if st.button(f"{term} {score}πŸš€", key=key):
114
  update_score(key)
115
+ search_glossary('Create a three level markdown outline with 3 subpoints each where each line defines and writes out the core technology descriptions with appropriate emojis for the glossary term: ' + term)
116
  st.experimental_rerun()
117
 
 
 
 
118
  def fetch_wikipedia_summary(keyword):
119
  # Placeholder function for fetching Wikipedia summaries
120
  # In a real app, you might use requests to fetch from the Wikipedia API
 
881
  StreamLLMChatResponse(descriptions["Ramipril πŸ’Š"])
882
 
883
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
884
 
885
  # 17. Main
886
  def main():