awacke1 commited on
Commit
d7eda6a
โ€ข
1 Parent(s): 7cf7472

Create backup011224.app.py

Browse files
Files changed (1) hide show
  1. backup011224.app.py +1188 -0
backup011224.app.py ADDED
@@ -0,0 +1,1188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import os
3
+ import json
4
+ from PIL import Image
5
+
6
# Set page configuration with a title and favicon
st.set_page_config(page_title="๐ŸŒŒ๐Ÿš€ Transhuman Space Encyclopedia", page_icon="๐ŸŒ ", layout="wide")

# Ensure the directory for storing scores exists
score_dir = "scores"  # per-button click/score JSON files live here (see update_score/load_score)
os.makedirs(score_dir, exist_ok=True)
12
+
13
# Function to generate a unique key for each button, including an emoji
def generate_key(label, header, idx):
    """Compose a deterministic Streamlit widget key from category, label and index."""
    return "_".join([header, label, str(idx), "key"])
16
+
17
# Function to increment and save score
def update_score(key, increment=1):
    """Add one click and *increment* points to the persisted score for *key*.

    Scores are stored as JSON in score_dir/<key>.json; a missing file starts
    from zero. Returns the new total score.
    """
    score_file = os.path.join(score_dir, f"{key}.json")
    try:
        with open(score_file, "r") as fh:
            score_data = json.load(fh)
    except FileNotFoundError:
        score_data = {"clicks": 0, "score": 0}

    score_data["clicks"] += 1
    score_data["score"] += increment

    with open(score_file, "w") as fh:
        json.dump(score_data, fh)

    return score_data["score"]
33
+
34
# Function to load score
def load_score(key):
    """Return the persisted score for *key*, or 0 when no score file exists yet."""
    try:
        with open(os.path.join(score_dir, f"{key}.json"), "r") as fh:
            return json.load(fh)["score"]
    except FileNotFoundError:
        return 0
42
+
43
# Transhuman Space glossary with full content.
# Nine categories, ten emoji-tagged terms each; consumed by search_glossary,
# display_glossary, display_glossary_grid and display_buttons_with_scores.
transhuman_glossary = {
    "๐Ÿš€ Core Technologies": ["Nanotechnology๐Ÿ”ฌ", "Artificial Intelligence๐Ÿค–", "Quantum Computing๐Ÿ’ป", "Spacecraft Engineering๐Ÿ›ธ", "Biotechnology๐Ÿงฌ", "Cybernetics๐Ÿฆพ", "Virtual Reality๐Ÿ•ถ๏ธ", "Energy Systemsโšก", "Material Science๐Ÿงช", "Communication Technologies๐Ÿ“ก"],
    "๐ŸŒ Nations": ["Terran Federation๐ŸŒ", "Martian Syndicate๐Ÿ”ด", "Jovian Republics๐Ÿช", "Asteroid Belt Communities๐ŸŒŒ", "Venusian Colonies๐ŸŒ‹", "Lunar States๐ŸŒ–", "Outer System Alliancesโœจ", "Digital Consciousness Collectives๐Ÿง ", "Transhumanist Enclaves๐Ÿฆฟ", "Non-Human Intelligence Tribes๐Ÿ‘ฝ"],
    "๐Ÿ’ก Memes": ["Post-Humanism๐Ÿšถโ€โ™‚๏ธโžก๏ธ๐Ÿš€", "Neo-Evolutionism๐Ÿงฌ๐Ÿ“ˆ", "Digital Ascendancy๐Ÿ’พ๐Ÿ‘‘", "Solar System Nationalism๐ŸŒž๐Ÿ›", "Space Explorationism๐Ÿš€๐Ÿ›ฐ", "Cyber Democracy๐Ÿ–ฅ๏ธ๐Ÿ—ณ๏ธ", "Interstellar Environmentalism๐ŸŒ๐Ÿ’š", "Quantum Mysticism๐Ÿ”ฎ๐Ÿ’ซ", "Techno-Anarchism๐Ÿ”Œ๐Ÿด", "Cosmic Preservationism๐ŸŒŒ๐Ÿ›ก๏ธ"],
    "๐Ÿ› Institutions": ["Interstellar Council๐Ÿช–", "Transhuman Ethical Standards Organization๐Ÿ“œ", "Galactic Trade Union๐Ÿค", "Space Habitat Authority๐Ÿ ", "Artificial Intelligence Safety Commission๐Ÿค–๐Ÿ”’", "Extraterrestrial Relations Board๐Ÿ‘ฝ๐Ÿค", "Quantum Research Institute๐Ÿ”ฌ", "Biogenetics Oversight Committee๐Ÿงซ", "Cyberspace Regulatory Agency๐Ÿ’ป", "Planetary Defense Coalition๐ŸŒ๐Ÿ›ก"],
    "๐Ÿ”— Organizations": ["Neural Network Pioneers๐Ÿง ๐ŸŒ", "Spacecraft Innovators Guild๐Ÿš€๐Ÿ› ", "Quantum Computing Consortium๐Ÿ’ป๐Ÿ”—", "Interplanetary Miners Unionโ›๏ธ๐Ÿช", "Cybernetic Augmentation Advocates๐Ÿฆพโค๏ธ", "Biotechnological Harmony Group๐Ÿงฌ๐Ÿ•Š", "Stellar Navigation Circle๐Ÿงญโœจ", "Virtual Reality Creators Syndicate๐Ÿ•ถ๏ธ๐ŸŽจ", "Renewable Energy Pioneersโšก๐ŸŒฑ", "Transhuman Rights Activists๐Ÿฆฟ๐Ÿ“ข"],
    "โš”๏ธ War": ["Space Warfare Tactics๐Ÿš€โš”๏ธ", "Cyber Warfare๐Ÿ–ฅ๏ธ๐Ÿ”’", "Biological Warfare๐Ÿงฌ๐Ÿ’ฃ", "Nanotech Warfare๐Ÿ”ฌโš”๏ธ", "Psychological Operations๐Ÿง ๐Ÿ—ฃ๏ธ", "Quantum Encryption & Decryption๐Ÿ”๐Ÿ’ป", "Kinetic Bombardment๐Ÿš€๐Ÿ’ฅ", "Energy Shield Defense๐Ÿ›ก๏ธโšก", "Stealth Spacecraft๐Ÿš€๐Ÿ”‡", "Artificial Intelligence Combat๐Ÿค–โš”๏ธ"],
    "๐ŸŽ– Military": ["Interstellar Navy๐Ÿš€๐ŸŽ–", "Planetary Guard๐ŸŒ๐Ÿ›ก", "Cybernetic Marines๐Ÿฆพ๐Ÿ”ซ", "Nanotech Soldiers๐Ÿ”ฌ๐Ÿ’‚", "Space Drone Fleet๐Ÿ›ธ๐Ÿค–", "Quantum Signal Corps๐Ÿ’ป๐Ÿ“ก", "Special Operations Forces๐Ÿ‘ฅโš”๏ธ", "Artificial Intelligence Strategists๐Ÿค–๐Ÿ—บ๏ธ", "Orbital Defense Systems๐ŸŒŒ๐Ÿ›ก๏ธ", "Exoskeleton Brigades๐Ÿฆพ๐Ÿšถโ€โ™‚๏ธ"],
    "๐Ÿฆน Outlaws": ["Pirate Fleets๐Ÿดโ€โ˜ ๏ธ๐Ÿš€", "Hacktivist Collectives๐Ÿ’ป๐Ÿšซ", "Smuggler Caravans๐Ÿ›ธ๐Ÿ’ผ", "Rebel AI Entities๐Ÿค–๐Ÿšฉ", "Black Market Biotech Dealers๐Ÿงฌ๐Ÿ’ฐ", "Quantum Thieves๐Ÿ’ป๐Ÿ•ต๏ธโ€โ™‚๏ธ", "Space Nomad Raiders๐Ÿš€๐Ÿดโ€โ˜ ๏ธ", "Cyberspace Intruders๐Ÿ’ป๐Ÿ‘พ", "Anti-Transhumanist Factions๐Ÿšซ๐Ÿฆพ", "Rogue Nanotech Swarms๐Ÿ”ฌ๐Ÿฆ "],
    "๐Ÿ‘ฝ Terrorists": ["Bioengineered Virus Spreaders๐Ÿงฌ๐Ÿ’‰", "Nanotechnology Saboteurs๐Ÿ”ฌ๐Ÿงจ", "Cyber Terrorist Networks๐Ÿ’ป๐Ÿ”ฅ", "Rogue AI Sects๐Ÿค–๐Ÿ›‘", "Space Anarchist Cells๐Ÿš€โ’ถ", "Quantum Data Hijackers๐Ÿ’ป๐Ÿ”“", "Environmental Extremists๐ŸŒ๐Ÿ’ฃ", "Technological Singularity Cults๐Ÿค–๐Ÿ™", "Interspecies Supremacists๐Ÿ‘ฝ๐Ÿ‘‘", "Orbital Bombardment Threats๐Ÿ›ฐ๏ธ๐Ÿ’ฅ"],
}
55
+
56
+
57
# Function to search glossary and display results
def search_glossary(query):
    """Show glossary categories whose terms exactly match *query* (case-insensitive),
    then run the query through both GPT and Llama, persisting each response.

    NOTE(review): relies on chat_with_model, generate_filename, create_file,
    StreamLLMChatResponse and should_save, all defined later in this file —
    safe only because Python resolves the names at call time.
    """
    for category, terms in transhuman_glossary.items():
        # `in` over a generator of lowered terms = exact match only; a long
        # sentence query will simply skip this display step.
        if query.lower() in (term.lower() for term in terms):
            st.markdown(f"### {category}")
            st.write(f"- {query}")

    st.write('## Processing query against GPT and Llama:')
    # ------------------------------------------------------------------------------------------------
    st.write('Reasoning with your inputs using GPT...')
    response = chat_with_model(query)
    st.write('Response:')
    st.write(response)
    filename = generate_filename(response, "txt")
    create_file(filename, query, response, should_save)

    st.write('Reasoning with your inputs using Llama...')
    response = StreamLLMChatResponse(query)
    filename_txt = generate_filename(query, "md")
    create_file(filename_txt, query, response, should_save)
    # ------------------------------------------------------------------------------------------------
77
+ # ------------------------------------------------------------------------------------------------
78
+
79
+
80
# Display the glossary with Streamlit components, ensuring emojis are used
def display_glossary(area):
    """Render the glossary terms for *area* as a numbered list."""
    st.subheader(f"๐Ÿ“˜ Glossary for {area}")
    for position, entry in enumerate(transhuman_glossary[area], start=1):
        st.write(f"{position}. {entry}")
86
+
87
+
88
# Function to display glossary in a 3x3 grid
def display_glossary_grid(glossary):
    """Lay out the nine glossary categories as a 3x3 grid of Streamlit columns."""
    # Rows of related categories, three per row.
    rows = [
        ["๐Ÿš€ Core Technologies", "๐ŸŒ Nations", "๐Ÿ’ก Memes"],
        ["๐Ÿ› Institutions", "๐Ÿ”— Organizations", "โš”๏ธ War"],
        ["๐ŸŽ– Military", "๐Ÿฆน Outlaws", "๐Ÿ‘ฝ Terrorists"],
    ]
    for row in rows:
        # Pair each of the three columns with its category for this row.
        for column, category in zip(st.columns(3), row):
            with column:
                st.markdown(f"### {category}")
                for term in glossary[category]:
                    st.write(f"- {term}")
105
+
106
# Streamlined UI for displaying buttons with scores, integrating emojis
def display_buttons_with_scores():
    """Render one score-tracking button per glossary term, grouped by category.

    Clicking a button bumps its persisted score, asks the LLMs to outline the
    term, and reruns the app so the refreshed score is displayed.
    """
    for header, terms in transhuman_glossary.items():
        st.markdown(f"## {header}")
        # BUGFIX: the original called terms.index(term) inside the loop, which is
        # O(n) per term and always returns the FIRST occurrence, so duplicate
        # terms collided on one widget key. enumerate gives the true position.
        for idx, term in enumerate(terms):
            key = generate_key(term, header, idx)
            score = load_score(key)
            if st.button(f"{term} {score}๐Ÿš€", key=key):
                update_score(key)
                search_glossary('Create a three level markdown outline with 3 subpoints each where each line defines and writes out the core technology descriptions with appropriate emojis for the glossary term: ' + term)
                st.experimental_rerun()
117
+
118
def fetch_wikipedia_summary(keyword):
    """Placeholder: return a canned summary line for *keyword*.

    A real implementation would query the Wikipedia API (e.g. via requests).
    """
    return "Summary for " + keyword + ". For more information, visit Wikipedia."
122
+
123
def create_search_url_youtube(keyword):
    """Build a YouTube search URL for *keyword* (spaces become '+')."""
    return "https://www.youtube.com/results?search_query=" + keyword.replace(' ', '+')
126
+
127
def create_search_url_bing(keyword):
    """Build a Bing search URL for *keyword* (spaces become '+')."""
    return "https://www.bing.com/search?q=" + keyword.replace(' ', '+')
130
+
131
def create_search_url_wikipedia(keyword):
    """Build a Wikipedia search-redirect URL for *keyword* (spaces become '+')."""
    return "https://www.wikipedia.org/search-redirect.php?family=wikipedia&language=en&search=" + keyword.replace(' ', '+')
134
+
135
def create_search_url_google(keyword):
    """Build a Google search URL for *keyword* (spaces become '+')."""
    return "https://www.google.com/search?q=" + keyword.replace(' ', '+')
138
+
139
+
140
def display_images_and_wikipedia_summaries():
    """Show every PNG in the working directory with search links for its name.

    The keyword for each image is its filename without the extension.
    """
    st.title('Gallery with Related Stories')
    image_files = [f for f in os.listdir('.') if f.endswith('.png')]
    if not image_files:
        st.write("No PNG images found in the current directory.")
        return

    for image_file in image_files:
        image = Image.open(image_file)
        st.image(image, caption=image_file, use_column_width=True)

        # BUGFIX: split('.')[0] truncated names containing dots
        # (e.g. "v1.2.final.png" -> "v1"); splitext keeps the full stem.
        keyword = os.path.splitext(image_file)[0]

        # Display Wikipedia and Google search links
        wikipedia_url = create_search_url_wikipedia(keyword)
        google_url = create_search_url_google(keyword)
        youtube_url = create_search_url_youtube(keyword)
        bing_url = create_search_url_bing(keyword)

        links_md = f"""
        [Wikipedia]({wikipedia_url}) |
        [Google]({google_url}) |
        [YouTube]({youtube_url}) |
        [Bing]({bing_url})
        """
        st.markdown(links_md)
166
+
167
+
168
def get_all_query_params(key):
    """Return every URL query-string value for *key* (empty list when absent).

    BUGFIX: st.query_params is a dict-like property, not a callable —
    the original `st.query_params()` raised TypeError. get_all() returns
    all values for a repeated key, matching this function's name.
    """
    return st.query_params.get_all(key)
170
+
171
def clear_query_params():
    """Remove all parameters from the URL query string.

    BUGFIX: the original body `st.query_params()` attempted to call the
    property (TypeError) and never cleared anything; .clear() is the
    documented way to wipe the query string.
    """
    st.query_params.clear()
173
+
174
+
175
# Function to display content or image based on a query
def display_content_or_image(query):
    """Show the first glossary term containing *query*, else a matching image,
    else a warning. Returns True when something was displayed, False otherwise.
    """
    needle = query.lower()

    # 1) Glossary lookup: case-insensitive substring match, first hit wins.
    for category, terms in transhuman_glossary.items():
        for term in terms:
            if needle in term.lower():
                st.subheader(f"Found in {category}:")
                st.write(term)
                return True

    # 2) Image lookup: images/<query>.png in the predefined directory.
    image_path = f"images/{query}.png"
    if os.path.exists(image_path):
        st.image(image_path, caption=f"Image for {query}")
        return True

    # 3) Nothing matched.
    st.warning("No matching content or image found.")
    return False
195
+
196
+
197
+
198
+
199
+
200
+
201
+
202
+ # Imports
203
+ import base64
204
+ import glob
205
+ import json
206
+ import math
207
+ import openai
208
+ import os
209
+ import pytz
210
+ import re
211
+ import requests
212
+ import streamlit as st
213
+ import textract
214
+ import time
215
+ import zipfile
216
+ import huggingface_hub
217
+ import dotenv
218
+ from audio_recorder_streamlit import audio_recorder
219
+ from bs4 import BeautifulSoup
220
+ from collections import deque
221
+ from datetime import datetime
222
+ from dotenv import load_dotenv
223
+ from huggingface_hub import InferenceClient
224
+ from io import BytesIO
225
+ from langchain.chat_models import ChatOpenAI
226
+ from langchain.chains import ConversationalRetrievalChain
227
+ from langchain.embeddings import OpenAIEmbeddings
228
+ from langchain.memory import ConversationBufferMemory
229
+ from langchain.text_splitter import CharacterTextSplitter
230
+ from langchain.vectorstores import FAISS
231
+ from openai import ChatCompletion
232
+ from PyPDF2 import PdfReader
233
+ from templates import bot_template, css, user_template
234
+ from xml.etree import ElementTree as ET
235
+ import streamlit.components.v1 as components # Import Streamlit Components for HTML5
236
+
237
+
238
def add_Med_Licensing_Exam_Dataset():
    """Load the USMLE Step 1 test split from Hugging Face and render a
    searchable, paginated question viewer."""
    import streamlit as st
    from datasets import load_dataset
    dataset = load_dataset("augtoma/usmle_step_1")['test']  # Using 'test' split
    st.title("USMLE Step 1 Dataset Viewer")
    if len(dataset) == 0:
        st.write("๐Ÿ˜ข The dataset is empty.")
    else:
        st.write("""
        ๐Ÿ” Use the search box to filter questions or use the grid to scroll through the dataset.
        """)

        # ๐Ÿ‘ฉโ€๐Ÿ”ฌ Search Box
        search_term = st.text_input("Search for a specific question:", "")

        # ๐ŸŽ› Pagination
        records_per_page = 100
        num_records = len(dataset)
        num_pages = max(int(num_records / records_per_page), 1)

        # Skip generating the slider if num_pages is 1 (i.e., all records fit in one page)
        if num_pages > 1:
            page_number = st.select_slider("Select page:", options=list(range(1, num_pages + 1)))
        else:
            page_number = 1  # Only one page

        # ๐Ÿ“Š Display Data
        start_idx = (page_number - 1) * records_per_page
        end_idx = start_idx + records_per_page

        # ๐Ÿงช Apply the Search Filter
        # NOTE(review): slicing a HF Dataset (dataset[a:b]) typically yields a
        # dict of columns rather than a list of row dicts, so the isinstance
        # check below may filter everything out — confirm against the datasets
        # version in use.
        filtered_data = []
        for record in dataset[start_idx:end_idx]:
            if isinstance(record, dict) and 'text' in record and 'id' in record:
                if search_term:
                    if search_term.lower() in record['text'].lower():
                        st.markdown(record)
                        filtered_data.append(record)
                else:
                    filtered_data.append(record)

        # ๐ŸŒ Render the Grid
        for record in filtered_data:
            st.write(f"## Question ID: {record['id']}")
            st.write(f"### Question:")
            st.write(f"{record['text']}")
            st.write(f"### Answer:")
            st.write(f"{record['answer']}")
            st.write("---")

        st.write(f"๐Ÿ˜Š Total Records: {num_records} | ๐Ÿ“„ Displaying {start_idx+1} to {min(end_idx, num_records)}")
289
+
290
# 1. Constants and Top Level UI Variables

# My Inference API Copy
API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama
# Meta's Original - Chat HF Free Version:
#API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf"
API_KEY = os.getenv('API_KEY')  # token for the private inference endpoint above
MODEL1="meta-llama/Llama-2-7b-chat-hf"
MODEL1URL="https://huggingface.co/meta-llama/Llama-2-7b-chat-hf"
HF_KEY = os.getenv('HF_KEY')  # NOTE(review): reassigned from st.secrets near the end of the file
headers = {
    "Authorization": f"Bearer {HF_KEY}",
    "Content-Type": "application/json"
}
key = os.getenv('OPENAI_API_KEY')  # OpenAI API key, consumed by vector_store()
prompt = f"Write instructions to teach discharge planning along with guidelines and patient education. List entities, features and relationships to CCDA and FHIR objects in boldface."
# Sidebar toggle controlling whether create_file() persists prompt/response pairs.
should_save = st.sidebar.checkbox("๐Ÿ’พ Save", value=True, help="Save your session data.")
307
+
308
# 2. Prompt label button demo for LLM
def add_witty_humor_buttons():
    """Render a grid of humor prompt buttons that each stream a Llama response.

    BUGFIX: several buttons looked up labels that were never added to the
    descriptions dict (e.g. "Top Ten Funniest Clean Jokes ๐Ÿ’‰"), so clicking
    them raised KeyError. Lookups now fall back to the button label itself,
    which still yields a usable prompt.
    """
    with st.expander("Wit and Humor ๐Ÿคฃ", expanded=True):
        # Tip about the Dromedary family
        st.markdown("๐Ÿ”ฌ **Fun Fact**: Dromedaries, part of the camel family, have a single hump and are adapted to arid environments. Their 'superpowers' include the ability to survive without water for up to 7 days, thanks to their specialized blood cells and water storage in their hump.")

        # Define button descriptions
        descriptions = {
            "Generate Limericks ๐Ÿ˜‚": "Write ten random adult limericks based on quotes that are tweet length and make you laugh ๐ŸŽญ",
            "Wise Quotes ๐Ÿง™": "Generate ten wise quotes that are tweet length ๐Ÿฆ‰",
            "Funny Rhymes ๐ŸŽค": "Create ten funny rhymes that are tweet length ๐ŸŽถ",
            "Medical Jokes ๐Ÿ’‰": "Create ten medical jokes that are tweet length ๐Ÿฅ",
            "Minnesota Humor โ„๏ธ": "Create ten jokes about Minnesota that are tweet length ๐ŸŒจ๏ธ",
            "Top Funny Stories ๐Ÿ“–": "Create ten funny stories that are tweet length ๐Ÿ“š",
            "More Funny Rhymes ๐ŸŽ™๏ธ": "Create ten more funny rhymes that are tweet length ๐ŸŽต"
        }

        def prompt_for(label):
            # Fall back to the label so a missing dict entry cannot crash the app.
            return descriptions.get(label, label)

        # Create columns
        col1, col2, col3 = st.columns([1, 1, 1], gap="small")

        # Add buttons to columns
        if col1.button("Wise Limericks ๐Ÿ˜‚"):
            StreamLLMChatResponse(prompt_for("Generate Limericks ๐Ÿ˜‚"))

        if col2.button("Wise Quotes ๐Ÿง™"):
            StreamLLMChatResponse(prompt_for("Wise Quotes ๐Ÿง™"))

        #if col3.button("Funny Rhymes ๐ŸŽค"):
        #    StreamLLMChatResponse(prompt_for("Funny Rhymes ๐ŸŽค"))

        col4, col5, col6 = st.columns([1, 1, 1], gap="small")

        if col4.button("Top Ten Funniest Clean Jokes ๐Ÿ’‰"):
            StreamLLMChatResponse(prompt_for("Top Ten Funniest Clean Jokes ๐Ÿ’‰"))

        if col5.button("Minnesota Humor โ„๏ธ"):
            StreamLLMChatResponse(prompt_for("Minnesota Humor โ„๏ธ"))

        if col6.button("Origins of Medical Science True Stories"):
            StreamLLMChatResponse(prompt_for("Origins of Medical Science True Stories"))

        col7 = st.columns(1, gap="small")

        if col7[0].button("Top Ten Best Write a streamlit python program prompts to build AI programs. ๐ŸŽ™๏ธ"):
            StreamLLMChatResponse(prompt_for("Top Ten Best Write a streamlit python program prompts to build AI programs. ๐ŸŽ™๏ธ"))
353
+
354
def SpeechSynthesis(result):
    """Embed an HTML5 widget that reads *result* aloud in the browser via the
    window.speechSynthesis API (no server-side TTS needed)."""
    # Static page shell: a textarea pre-filled below, plus a button wired to
    # speechSynthesis.speak().
    documentHTML5='''
    <!DOCTYPE html>
    <html>
    <head>
        <title>Read It Aloud</title>
        <script type="text/javascript">
            function readAloud() {
                const text = document.getElementById("textArea").value;
                const speech = new SpeechSynthesisUtterance(text);
                window.speechSynthesis.speak(speech);
            }
        </script>
    </head>
    <body>
        <h1>๐Ÿ”Š Read It Aloud</h1>
        <textarea id="textArea" rows="10" cols="80">
    '''
    # Splice the text to be spoken between the opening <textarea> and the
    # closing markup.
    documentHTML5 = documentHTML5 + result
    documentHTML5 = documentHTML5 + '''
        </textarea>
        <br>
        <button onclick="readAloud()">๐Ÿ”Š Read Aloud</button>
    </body>
    </html>
    '''

    components.html(documentHTML5, width=1280, height=300)
    #return result
383
+
384
+
385
# 3. Stream Llama Response
# @st.cache_resource
def StreamLLMChatResponse(prompt):
    """Stream a Llama completion for *prompt* from the HF inference endpoint.

    Tokens are rendered incrementally into a Streamlit placeholder; the final
    text is read aloud via SpeechSynthesis. Returns the full response string,
    or None when the endpoint is unreachable (matching the original contract).
    """
    try:
        endpoint_url = API_URL
        hf_token = API_KEY
        st.write('Running client ' + endpoint_url)
        client = InferenceClient(endpoint_url, token=hf_token)
        gen_kwargs = dict(
            max_new_tokens=512,
            top_k=30,
            top_p=0.9,
            temperature=0.2,
            repetition_penalty=1.02,
            stop_sequences=["\nUser:", "<|endoftext|>", "</s>"],
        )
        stream = client.text_generation(prompt, stream=True, details=True, **gen_kwargs)
        report = []
        res_box = st.empty()
        # BUGFIX: result was only assigned inside the loop, so an empty stream
        # raised UnboundLocalError at SpeechSynthesis(result) — silently
        # swallowed by the outer except.
        result = ''
        for r in stream:
            if r.token.special:
                continue  # skip control tokens (BOS/EOS etc.)
            if r.token.text in gen_kwargs["stop_sequences"]:
                break
            try:
                report.append(r.token.text)
                if len(r.token.text) > 0:
                    result = "".join(report).strip()
                    res_box.markdown(f'*{result}*')
            except Exception:  # narrowed from bare except: keep best-effort streaming
                st.write('Stream llm issue')
        SpeechSynthesis(result)
        return result
    except Exception:  # endpoint cold / scaled to zero — tell the user to retry
        st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
427
+
428
# 4. Run query with payload
def query(payload):
    """POST *payload* to the Llama inference endpoint, echo and return the JSON reply."""
    reply = requests.post(API_URL, headers=headers, json=payload).json()
    st.markdown(reply)
    return reply
433
def get_output(prompt):
    """Wrap *prompt* in the endpoint's payload shape and run it through query()."""
    payload = {"inputs": prompt}
    return query(payload)
435
+
436
# 5. Auto name generated output files from time and content
def generate_filename(prompt, file_type):
    """Build "<MMDD_HHMM>_<slug>.<file_type>" from Central time and a sanitized prompt."""
    timestamp = datetime.now(pytz.timezone('US/Central')).strftime("%m%d_%H%M")
    slug_source = prompt.replace(" ", "_").replace("\n", "_")
    # Keep only word characters; cap at 255 (linux filename limit, 260 on windows).
    slug = "".join(ch for ch in slug_source if ch.isalnum() or ch == "_")[:255]
    return f"{timestamp}_{slug}.{file_type}"
444
+
445
# 6. Speech transcription via OpenAI service
def transcribe_audio(openai_key, file_path, model):
    """Upload *file_path* to OpenAI's transcription endpoint, then feed the
    transcript to chat_with_model and persist the pair.

    Returns the transcript text on success, None on API failure.
    """
    openai.api_key = openai_key
    OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
    # Local `headers` intentionally shadows the module-level HF headers.
    headers = {
        "Authorization": f"Bearer {openai_key}",
    }
    with open(file_path, 'rb') as f:
        data = {'file': f}
        st.write('STT transcript ' + OPENAI_API_URL)
        response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
    if response.status_code == 200:
        st.write(response.json())
        # Second positional arg is an empty document_section (see chat_with_model).
        chatResponse = chat_with_model(response.json().get('text'), '')
        transcript = response.json().get('text')
        filename = generate_filename(transcript, 'txt')
        response = chatResponse  # rebinds `response` from the HTTP reply to the chat text
        user_prompt = transcript
        create_file(filename, user_prompt, response, should_save)
        return transcript
    else:
        st.write(response.json())
        st.error("Error in API call.")
        return None
469
+
470
# 7. Auto stop on silence audio control for recording WAV files
def save_and_play_audio(audio_recorder):
    """Record audio, persist it to a timestamped WAV file, and play it back.

    Returns the saved filename, or None when nothing was recorded.
    """
    audio_bytes = audio_recorder(key='audio_recorder')
    if not audio_bytes:
        return None
    filename = generate_filename("Recording", "wav")
    with open(filename, 'wb') as wav_file:
        wav_file.write(audio_bytes)
    st.audio(audio_bytes, format="audio/wav")
    return filename
480
+
481
# 8. File creator that interprets type and creates output file for text, markdown and code
def create_file(filename, prompt, response, should_save=True):
    """Persist a prompt/response pair to disk as Markdown.

    Only acts when should_save is True and the filename extension is one of
    .txt/.htm/.md; output always lands in "<base>.md" with the stripped prompt
    and the response separated by CRLF. Write errors are reported, not raised
    (best-effort, as before).
    """
    if not should_save:
        return
    base_filename, ext = os.path.splitext(filename)
    if ext in ['.txt', '.htm', '.md']:
        # Explicit encoding so output is stable across platforms.
        with open(f"{base_filename}.md", 'w', encoding='utf-8') as file:
            try:
                content = prompt.strip() + '\r\n' + response
                file.write(content)
            except Exception:  # narrowed from bare except: don't trap SystemExit/KeyboardInterrupt
                st.write('.')
503
+
504
def truncate_document(document, length):
    """Return at most the first *length* characters of *document*."""
    head = document[slice(0, length)]
    return head
506
def divide_document(document, max_length):
    """Split *document* into consecutive pieces of at most *max_length* characters."""
    pieces = []
    start = 0
    while start < len(document):
        pieces.append(document[start:start + max_length])
        start += max_length
    return pieces
508
+
509
# 9. Sidebar with UI controls to review and re-run prompts and continue responses
@st.cache_resource
def get_table_download_link(file_path):
    """Return an HTML <a> tag that downloads *file_path* as a base64 data URI."""
    # Extension -> MIME type map replaces the previous repetitive elif ladder.
    mime_types = {
        '.txt': 'text/plain',
        '.py': 'text/plain',
        '.xlsx': 'text/plain',
        '.csv': 'text/plain',
        '.htm': 'text/html',
        '.md': 'text/markdown',
        '.wav': 'audio/wav',
    }
    file_name = os.path.basename(file_path)
    ext = os.path.splitext(file_name)[1]  # includes the leading dot
    mime_type = mime_types.get(ext, 'application/octet-stream')  # general binary fallback
    # BUGFIX: binary files (.wav) crashed when read in text mode ('r');
    # reading bytes also preserves the file exactly in the download.
    with open(file_path, 'rb') as file:
        b64 = base64.b64encode(file.read()).decode()
    href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
    return href
536
+
537
+
538
def CompressXML(xml_text):
    """Parse *xml_text*, drop every element whose tag contains 'Comment',
    and return the re-serialized unicode XML string.

    BUGFIX: stdlib ElementTree elements have no .parent attribute (that is an
    lxml feature), so the original `elem.parent.remove(elem)` raised
    AttributeError whenever a match was found. A child->parent map is built
    instead so matched elements can be detached from their real parent.
    """
    root = ET.fromstring(xml_text)
    parents = {child: parent for parent in root.iter() for child in parent}
    for elem in list(root.iter()):
        if isinstance(elem.tag, str) and 'Comment' in elem.tag and elem in parents:
            parents[elem].remove(elem)
    return ET.tostring(root, encoding='unicode', method="xml")
544
+
545
# 10. Read in and provide UI for past files
@st.cache_resource
def read_file_content(file,max_length):
    """Return the textual content of an uploaded *file* based on its MIME type.

    NOTE(review): max_length is accepted but never used. The markdown branch
    references `mistune`, which is not imported anywhere visible in this file —
    it will raise NameError if reached; confirm the dependency.
    """
    if file.type == "application/json":
        content = json.load(file)
        return str(content)
    elif file.type == "text/html" or file.type == "text/htm":
        content = BeautifulSoup(file, "html.parser")
        return content.text
    elif file.type == "application/xml" or file.type == "text/xml":
        tree = ET.parse(file)
        root = tree.getroot()
        # Strip comment-like elements before returning (see CompressXML).
        xml = CompressXML(ET.tostring(root, encoding='unicode'))
        return xml
    elif file.type == "text/markdown" or file.type == "text/md":
        md = mistune.create_markdown()
        content = md(file.read().decode())
        return content
    elif file.type == "text/plain":
        return file.getvalue().decode()
    else:
        # Unknown type: silently return empty text.
        return ""
567
+
568
# 11. Chat with GPT - Caution on quota - now favoring fastest AI pipeline STT Whisper->LLM Llama->TTS
@st.cache_resource
def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
    """Stream a GPT chat completion for *prompt*, rendering tokens live.

    document_section, when non-empty, is appended as an assistant message for
    context. Returns the full concatenated reply text.
    """
    model = model_choice  # NOTE(review): unused — the API call below hardcodes 'gpt-3.5-turbo'
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    if len(document_section)>0:
        conversation.append({'role': 'assistant', 'content': document_section})
    start_time = time.time()
    report = []
    res_box = st.empty()
    collected_chunks = []
    collected_messages = []

    st.write('LLM stream ' + 'gpt-3.5-turbo')
    for chunk in openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=conversation, temperature=0.5, stream=True):
        collected_chunks.append(chunk)
        chunk_message = chunk['choices'][0]['delta']
        collected_messages.append(chunk_message)
        content=chunk["choices"][0].get("delta",{}).get("content")
        try:
            # content can be None on role/terminator deltas; the except below
            # swallows the resulting TypeError on len(None).
            report.append(content)
            if len(content) > 0:
                result = "".join(report).strip()
                res_box.markdown(f'*{result}*')
        except:
            st.write(' ')
    # Assemble the final reply from the accumulated delta messages.
    full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
    st.write("Elapsed time:")
    st.write(time.time() - start_time)
    return full_reply_content
599
+
600
# 12. Embedding VectorDB for LLM query of documents to text to compress inputs and prompt together as Chat memory using Langchain
@st.cache_resource
def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
    """One-shot (non-streaming) GPT chat over *prompt*, with *file_content*
    attached as assistant context when non-empty. Returns the reply text."""
    messages = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': prompt},
    ]
    if len(file_content) > 0:
        messages.append({'role': 'assistant', 'content': file_content})
    reply = openai.ChatCompletion.create(model=model_choice, messages=messages)
    return reply['choices'][0]['message']['content']
609
+
610
def extract_mime_type(file):
    """Return the MIME type for *file*.

    Strings are parsed for a "type='...'" fragment (the repr of an uploaded
    file); other objects expose a .type attribute.

    Raises ValueError when a string carries no type fragment, TypeError for
    unsupported inputs.

    BUGFIX: the original referenced `streamlit.UploadedFile`, but the module
    is imported as `st` and UploadedFile is not exposed at that path, so the
    isinstance check raised NameError. Duck-typing on .type is equivalent.
    """
    if isinstance(file, str):
        match = re.search(r"type='(.*?)'", file)
        if match:
            return match.group(1)
        raise ValueError(f"Unable to extract MIME type from {file}")
    if hasattr(file, "type"):
        return file.type
    raise TypeError("Input should be a string or a streamlit.UploadedFile object")
622
+
623
def extract_file_extension(file):
    """Return everything after the FIRST '.' in the uploaded file's name
    (so "a.tar.gz" -> "tar.gz"). Raises ValueError when the name has no dot."""
    # get the file name directly from the UploadedFile object
    file_name = file.name
    stem, dot, extension = file_name.partition('.')
    if not dot:
        raise ValueError(f"Unable to extract file extension from {file_name}")
    return extension
632
+
633
# Normalize input as text from PDF and other formats
@st.cache_resource
def pdf2txt(docs):
    """Concatenate the decoded text of uploaded files (code/text formats and
    PDFs) into a single string; unrecognized extensions are skipped."""
    text = ""
    for file in docs:
        file_extension = extract_file_extension(file)
        st.write(f"File type extension: {file_extension}")
        if file_extension.lower() in ['py', 'txt', 'html', 'htm', 'xml', 'json']:
            text += file.getvalue().decode('utf-8')
        elif file_extension.lower() == 'pdf':
            from PyPDF2 import PdfReader  # also imported at module level; kept for parity
            pdf = PdfReader(BytesIO(file.getvalue()))
            for page in range(len(pdf.pages)):
                text += pdf.pages[page].extract_text() # new PyPDF2 syntax
    return text
648
+
649
def txt2chunks(text):
    """Split *text* into ~1000-char newline-separated chunks with 200-char overlap."""
    splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    return splitter.split_text(text)
652
+
653
# Vector Store using FAISS
@st.cache_resource
def vector_store(text_chunks):
    """Embed *text_chunks* with OpenAI embeddings and index them in FAISS."""
    return FAISS.from_texts(
        texts=text_chunks,
        embedding=OpenAIEmbeddings(openai_api_key=key),
    )
658
+
659
# Memory and Retrieval chains
@st.cache_resource
def get_chain(vectorstore):
    """Build a conversational retrieval chain over *vectorstore* with chat memory."""
    chain = ConversationalRetrievalChain.from_llm(
        llm=ChatOpenAI(),
        retriever=vectorstore.as_retriever(),
        memory=ConversationBufferMemory(memory_key='chat_history', return_messages=True),
    )
    return chain
665
+
666
def process_user_input(user_question):
    """Run *user_question* through the session's retrieval chain, render the
    chat history, and persist the question/last-answer pair to disk."""
    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']
    for i, message in enumerate(st.session_state.chat_history):
        template = user_template if i % 2 == 0 else bot_template  # even index = user turn, odd = bot turn
        st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
    filename = generate_filename(user_question, 'txt')
    # NOTE(review): uses the loop variable after the loop — NameError if
    # chat_history is empty; confirm the chain always returns >= 1 message.
    response = message.content
    user_prompt = user_question
    create_file(filename, user_prompt, response, should_save)
676
+
677
def divide_prompt(prompt, max_length):
    """Split *prompt* into whitespace-delimited chunks whose joined length
    stays within *max_length* characters.

    Args:
        prompt: the full prompt text.
        max_length: soft per-chunk character budget; a single word longer
            than the budget still becomes its own chunk.

    Returns:
        list[str]: non-empty chunks in original word order. A blank prompt
        yields [] (previously a spurious '' chunk was returned, and an
        oversized first word also produced a leading '' chunk).
    """
    words = prompt.split()
    chunks = []
    current_chunk = []
    current_length = 0
    for word in words:
        if len(word) + current_length <= max_length:
            current_length += len(word) + 1  # +1 accounts for the joining space
            current_chunk.append(word)
        else:
            if current_chunk:  # skip empty chunk when the budget is exceeded immediately
                chunks.append(' '.join(current_chunk))
            current_chunk = [word]
            current_length = len(word)
    if current_chunk:
        chunks.append(' '.join(current_chunk))
    return chunks
692
+
693
+
694
# 13. Provide way of saving all and deleting all to give way of reviewing output and saving locally before clearing it
@st.cache_resource
def create_zip_of_files(files):
    """Bundle every path in *files* into all_files.zip and return the archive name."""
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as archive:
        for path in files:
            archive.write(path)
    return zip_name
703
+
704
@st.cache_resource
def get_zip_download_link(zip_file):
    """Return an HTML anchor that downloads *zip_file* as inline base64 data."""
    with open(zip_file, 'rb') as handle:
        payload = handle.read()
    encoded = base64.b64encode(payload).decode()
    return f'<a href="data:application/zip;base64,{encoded}" download="{zip_file}">Download All</a>'
711
+
712
# 14. Inference Endpoints for Whisper (best fastest STT) on NVIDIA T4 and Llama (best fastest AGI LLM) on NVIDIA A10
# My Inference Endpoint
API_URL_IE = f'https://tonpixzfvq3791u9.us-east-1.aws.endpoints.huggingface.cloud'
# Original
# NOTE(review): this reassignment overwrites the dedicated endpoint above, so
# every query() call actually hits the public Inference API — confirm which
# endpoint is intended before removing either line.
API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small.en"
MODEL2 = "openai/whisper-small.en"
MODEL2_URL = "https://huggingface.co/openai/whisper-small.en"
# HF_KEY = os.getenv('HF_KEY')
# Hugging Face token comes from Streamlit secrets; raises KeyError when absent.
HF_KEY = st.secrets['HF_KEY']
# Shared headers for the audio POST in query(): bearer auth + raw WAV payload.
headers = {
    "Authorization": f"Bearer {HF_KEY}",
    "Content-Type": "audio/wav"
}
729
+
730
#@st.cache_resource
def query(filename):
    """POST the audio file at *filename* to the Whisper inference endpoint.

    Args:
        filename: path to a WAV file on disk.

    Returns:
        dict: the decoded JSON response (typically {'text': ...}, or an
        error payload while the endpoint is cold-starting).

    Raises:
        requests.exceptions.RequestException: on network failure or timeout.
    """
    with open(filename, "rb") as f:
        data = f.read()
    # Explicit timeout: without one, requests can block the Streamlit script
    # forever when the inference endpoint is scaling up from zero.
    response = requests.post(API_URL_IE, headers=headers, data=data, timeout=120)
    return response.json()
736
+
737
def generate_filename(prompt, file_type):
    """Build a timestamped, filesystem-safe filename from *prompt*.

    The prompt is flattened (spaces/newlines -> underscores), reduced to
    alphanumerics and underscores, and truncated to 90 characters; the
    timestamp is US/Central time formatted as MMDD_HHMM.
    """
    central = pytz.timezone('US/Central')
    stamp = datetime.now(central).strftime("%m%d_%H%M")
    flattened = prompt.replace(" ", "_").replace("\n", "_")
    safe_prompt = "".join(ch for ch in flattened if ch.isalnum() or ch == "_")[:90]
    return f"{stamp}_{safe_prompt}.{file_type}"
743
+
744
# 15. Audio recorder to Wav file
def save_and_play_audio(audio_recorder):
    """Record audio with *audio_recorder*, persist it as a WAV file, play it back.

    Returns:
        str | None: the generated filename, or None when nothing was recorded.
    """
    audio_bytes = audio_recorder()
    if not audio_bytes:
        return None
    filename = generate_filename("Recording", "wav")
    with open(filename, 'wb') as f:
        f.write(audio_bytes)
    st.audio(audio_bytes, format="audio/wav")
    return filename
753
+
754
# 16. Speech transcription to file output
def transcribe_audio(filename):
    """Send the WAV file at *filename* to the Whisper endpoint; return its JSON."""
    return query(filename)
758
+
759
def whisper_main():
    """Record speech, transcribe it via Whisper, then reason over the
    transcript with both GPT and Llama, saving each result to disk."""
    import shutil  # hoisted from mid-function; used for the wav copy below

    # Audio, transcribe, GPT:
    filename = save_and_play_audio(audio_recorder)
    if filename is not None:
        recorded_wav = filename  # keep the recording path; `filename` is reused below
        transcription = transcribe_audio(filename)
        try:
            transcript = transcription['text']
            st.write(transcript)
        except (KeyError, TypeError):
            # Endpoint returned an error payload (e.g. model cold-start).
            transcript = ''
            st.write(transcript)

        # Whisper to GPT:
        st.write('Reasoning with your inputs with GPT..')
        response = chat_with_model(transcript)
        st.write('Response:')
        st.write(response)

        filename = generate_filename(response, "txt")
        create_file(filename, transcript, response, should_save)

        # Whisper to Llama:
        response = StreamLLMChatResponse(transcript)
        filename_txt = generate_filename(transcript, "md")
        create_file(filename_txt, transcript, response, should_save)

        # Keep the recording alongside the markdown transcript. The previous
        # code did filename_txt.replace('.txt', '.wav') — a silent no-op since
        # the name ends in '.md' — and copied from the reassigned txt filename
        # rather than the original recording.
        filename_wav = filename_txt.replace('.md', '.wav')
        try:
            if os.path.exists(recorded_wav):
                shutil.copyfile(recorded_wav, filename_wav)
        except OSError:
            st.write('.')

        # Remove the interim GPT text file (best-effort cleanup).
        if os.path.exists(filename):
            os.remove(filename)
806
+
807
+
808
+
809
# Sample function to demonstrate a response, replace with your own logic
def StreamMedChatResponse(topic):
    """Placeholder handler: report which medical *topic* was requested."""
    st.write(f"Showing resources or questions related to: {topic}")
812
+
813
+
814
+
815
def add_medical_exam_buttons():
    """Render an expander of medical-exam topic buttons; each click streams a
    Llama response for that topic and saves the exchange to a text file."""
    # Medical exam terminology descriptions
    descriptions = {
        "White Blood Cells ๐ŸŒŠ": "3 Q&A with emojis about types, facts, function, inputs and outputs of white blood cells ๐ŸŽฅ",
        "CT Imaging๐Ÿฆ ": "3 Q&A with emojis on CT Imaging post surgery, how to, what to look for ๐Ÿ’Š",
        "Hematoma ๐Ÿ’‰": "3 Q&A with emojis about hematoma and infection care and study including bacteria cultures and tests or labs๐Ÿ’ช",
        "Post Surgery Wound Care ๐ŸŒ": "3 Q&A with emojis on wound care, and good bedside manner ๐Ÿฉธ",
        "Healing and humor ๐Ÿ’Š": "3 Q&A with emojis on stories and humor about healing and caregiving ๐Ÿš‘",
        "Psychology of bedside manner ๐Ÿงฌ": "3 Q&A with emojis on bedside manner and how to make patients feel at ease๐Ÿ› ",
        "CT scan ๐Ÿ’Š": "3 Q&A with analysis on infection using CT scan and packing for skin, cellulitus and fascia ๐Ÿฉบ"
    }

    # Expander for medical topics
    with st.expander("Medical Licensing Exam Topics ๐Ÿ“š", expanded=False):
        st.markdown("๐Ÿฉบ **Important**: Variety of topics for medical licensing exams.")

        # Create buttons for each description with unique keys
        for idx, (label, content) in enumerate(descriptions.items()):
            button_key = f"button_{idx}"  # stable unique key for Streamlit state
            if st.button(label, key=button_key):
                st.write(f"Running {label}")
                # renamed from `input` to avoid shadowing the builtin
                prompt_text = ('Create markdown outline for definition of topic ' + label
                               + ' also short quiz with appropriate emojis and definitions for: ' + content)
                response = StreamLLMChatResponse(prompt_text)
                filename = generate_filename(response, 'txt')
                create_file(filename, prompt_text, response, should_save)
840
+
841
def add_medical_exam_buttons2():
    """Render medical-exam topic buttons in rows of four columns; each click
    streams a Llama response for the chosen topic."""
    with st.expander("Medical Licensing Exam Topics ๐Ÿ“š", expanded=False):
        st.markdown("๐Ÿฉบ **Important**: This section provides a variety of medical topics that are often encountered in medical licensing exams.")

        # Define medical exam terminology descriptions
        descriptions = {
            "White Blood Cells ๐ŸŒŠ": "3 Questions and Answers with emojis about white blood cells ๐ŸŽฅ",
            "CT Imaging๐Ÿฆ ": "3 Questions and Answers with emojis about CT Imaging of post surgery abscess, hematoma, and cerosanguiness fluid ๐Ÿ’Š",
            "Hematoma ๐Ÿ’‰": "3 Questions and Answers with emojis about hematoma and infection and how heat helps white blood cells ๐Ÿ’ช",
            "Post Surgery Wound Care ๐ŸŒ": "3 Questions and Answers with emojis about wound care and how to help as a caregiver๐Ÿฉธ",
            "Healing and humor ๐Ÿ’Š": "3 Questions and Answers with emojis on the use of stories and humor to help patients and family ๐Ÿš‘",
            "Psychology of bedside manner ๐Ÿงฌ": "3 Questions and Answers with emojis about good bedside manner ๐Ÿ› ",
            "CT scan ๐Ÿ’Š": "3 Questions and Answers with analysis of bacteria and understanding infection using cultures and CT scan ๐Ÿฉบ"
        }

        # Drive the buttons from the dict itself: the previous version
        # hard-coded labels ("Ultrasound with Doppler", "Oseltamivir", ...)
        # that were not keys of `descriptions`, so every click raised KeyError.
        labels = list(descriptions)
        for start in range(0, len(labels), 4):
            row = labels[start:start + 4]
            cols = st.columns([1] * len(row), gap="small")
            for col, label in zip(cols, row):
                if col.button(label, key=f"exam2_{label}"):
                    StreamLLMChatResponse(descriptions[label])
883
+
884
+
885
# 17. Main
def main():
    """Top-level Streamlit page.

    Renders, in order: a Llama prompt expander; output-type/model sidebar
    pickers; a chunked chat box with optional uploaded-file context; sidebars
    for saved markdown and wav files (view/download/delete/re-run); a
    document Q&A section backed by the FAISS index; and glossary deep links
    driven by URL query parameters.
    """
    prompt = f"Write ten funny jokes that are tweet length stories that make you laugh. Show as markdown outline with emojis for each."
    # Add Wit and Humor buttons
    # add_witty_humor_buttons()
    # add_medical_exam_buttons()

    with st.expander("Prompts ๐Ÿ“š", expanded=False):
        example_input = st.text_input("Enter your prompt text for Llama:", value=prompt, help="Enter text to get a response from DromeLlama.")
        if st.button("Run Prompt With Llama model", help="Click to run the prompt."):
            try:
                response=StreamLLMChatResponse(example_input)
                # NOTE(review): `filename` is not defined yet on this path, so
                # this raises NameError, which the bare except below reports
                # as "Llama model is asleep" — likely should be
                # generate_filename(example_input, 'txt').
                create_file(filename, example_input, response, should_save)
            except:
                st.write('Llama model is asleep. Starting now on A10 GPU. Please wait one minute then retry. KEDA triggered.')

    # API key: environment first, Streamlit secrets as fallback.
    openai.api_key = os.getenv('OPENAI_API_KEY')
    if openai.api_key == None: openai.api_key = st.secrets['OPENAI_API_KEY']

    menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
    choice = st.sidebar.selectbox("Output File Type:", menu)

    model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))

    user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
    collength, colupload = st.columns([2,3]) # adjust the ratio as needed
    with collength:
        max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
    with colupload:
        uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx", "csv", "html", "htm", "md", "txt"])
    document_sections = deque()
    document_responses = {}
    if uploaded_file is not None:
        file_content = read_file_content(uploaded_file, max_length)
        document_sections.extend(divide_document(file_content, max_length))
    if len(document_sections) > 0:
        if st.button("๐Ÿ‘๏ธ View Upload"):
            st.markdown("**Sections of the uploaded file:**")
            for i, section in enumerate(list(document_sections)):
                st.markdown(f"**Section {i+1}**\n{section}")
        st.markdown("**Chat with the model:**")
        for i, section in enumerate(list(document_sections)):
            if i in document_responses:
                st.markdown(f"**Section {i+1}**\n{document_responses[i]}")
            else:
                if st.button(f"Chat about Section {i+1}"):
                    st.write('Reasoning with your inputs...')
                    # NOTE(review): with the model call commented out,
                    # `response` is undefined here and the lines below raise
                    # NameError — restore the call or remove this branch.
                    #response = chat_with_model(user_prompt, section, model_choice)
                    st.write('Response:')
                    st.write(response)
                    document_responses[i] = response
                    filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
                    create_file(filename, user_prompt, response, should_save)
                    st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)


    if st.button('๐Ÿ’ฌ Chat'):
        st.write('Reasoning with your inputs...')
        # Long prompts are split so each section fits the model context.
        user_prompt_sections = divide_prompt(user_prompt, max_length)
        full_response = ''
        for prompt_section in user_prompt_sections:
            response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
            full_response += response + '\n' # Combine the responses
        response = full_response
        st.write('Response:')
        st.write(response)
        filename = generate_filename(user_prompt, choice)
        create_file(filename, user_prompt, response, should_save)

    # Compose a file sidebar of markdown md files:
    all_files = glob.glob("*.md")
    all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10] # exclude files with short names
    all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
    if st.sidebar.button("๐Ÿ—‘ Delete All Text"):
        for file in all_files:
            os.remove(file)
        st.experimental_rerun()
    if st.sidebar.button("โฌ‡๏ธ Download All"):
        zip_file = create_zip_of_files(all_files)
        st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)
    file_contents=''
    next_action=''
    # Per-file action row: view-as-md / download / open / search / delete.
    for file in all_files:
        col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1]) # adjust the ratio as needed
        with col1:
            if st.button("๐ŸŒ", key="md_"+file): # md emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action='md'
        with col2:
            st.markdown(get_table_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("๐Ÿ“‚", key="open_"+file): # open emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action='open'
        with col4:
            if st.button("๐Ÿ”", key="read_"+file): # search emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action='search'
        with col5:
            if st.button("๐Ÿ—‘", key="delete_"+file):
                os.remove(file)
                st.experimental_rerun()


    # Dispatch on whichever sidebar action was clicked this rerun.
    if len(file_contents) > 0:
        if next_action=='open':
            file_content_area = st.text_area("File Contents:", file_contents, height=500)
        if next_action=='md':
            st.markdown(file_contents)

            buttonlabel = '๐Ÿ”Run with Llama and GPT.'
            if st.button(key='RunWithLlamaandGPT', label = buttonlabel):
                user_prompt = file_contents

                # Llama versus GPT Battle!
                all=""
                try:
                    st.write('๐Ÿ”Running with Llama.')
                    response = StreamLLMChatResponse(file_contents)
                    filename = generate_filename(user_prompt, "md")
                    create_file(filename, file_contents, response, should_save)
                    all=response
                    #SpeechSynthesis(response)
                except:
                    st.markdown('Llama is sleeping. Restart ETA 30 seconds.')

                # gpt
                try:
                    st.write('๐Ÿ”Running with GPT.')
                    response2 = chat_with_model(user_prompt, file_contents, model_choice)
                    filename2 = generate_filename(file_contents, choice)
                    # NOTE(review): saves `response` (the Llama output) under
                    # the GPT filename — confirm whether `response2` was meant.
                    create_file(filename2, user_prompt, response, should_save)
                    all=all+response2
                    #SpeechSynthesis(response2)
                except:
                    st.markdown('GPT is sleeping. Restart ETA 30 seconds.')

                SpeechSynthesis(all)


        if next_action=='search':
            file_content_area = st.text_area("File Contents:", file_contents, height=500)
            st.write('๐Ÿ”Running with Llama and GPT.')

            user_prompt = file_contents

            # Llama versus GPT Battle!
            all=""
            try:
                st.write('๐Ÿ”Running with Llama.')
                response = StreamLLMChatResponse(file_contents)
                # NOTE(review): ".md" (with the dot) yields a "..md" suffix
                # because generate_filename appends its own dot.
                filename = generate_filename(user_prompt, ".md")
                create_file(filename, file_contents, response, should_save)
                all=response
                #SpeechSynthesis(response)
            except:
                st.markdown('Llama is sleeping. Restart ETA 30 seconds.')

            # gpt
            try:
                st.write('๐Ÿ”Running with GPT.')
                response2 = chat_with_model(user_prompt, file_contents, model_choice)
                filename2 = generate_filename(file_contents, choice)
                # NOTE(review): same response/response2 mix-up as the md branch.
                create_file(filename2, user_prompt, response, should_save)
                all=all+response2
                #SpeechSynthesis(response2)
            except:
                st.markdown('GPT is sleeping. Restart ETA 30 seconds.')

            SpeechSynthesis(all)


    # Function to encode file to base64
    def get_base64_encoded_file(file_path):
        """Return the base64-encoded contents of *file_path* as str."""
        with open(file_path, "rb") as file:
            return base64.b64encode(file.read()).decode()

    # Function to create a download link
    def get_audio_download_link(file_path):
        """Return an HTML anchor that downloads *file_path* as inline wav data."""
        base64_file = get_base64_encoded_file(file_path)
        return f'<a href="data:file/wav;base64,{base64_file}" download="{os.path.basename(file_path)}">โฌ‡๏ธ Download Audio</a>'

    # Compose a file sidebar of past encounters
    all_files = glob.glob("*.wav")
    all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10] # exclude files with short names
    all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order

    filekey = 'delall'
    if st.sidebar.button("๐Ÿ—‘ Delete All Audio", key=filekey):
        for file in all_files:
            os.remove(file)
        st.experimental_rerun()

    for file in all_files:
        col1, col2 = st.sidebar.columns([6, 1]) # adjust the ratio as needed
        with col1:
            st.markdown(file)
            if st.button("๐ŸŽต", key="play_" + file): # play emoji button
                audio_file = open(file, 'rb')
                audio_bytes = audio_file.read()
                st.audio(audio_bytes, format='audio/wav')
                #st.markdown(get_audio_download_link(file), unsafe_allow_html=True)
                #st.text_input(label="", value=file)
        with col2:
            if st.button("๐Ÿ—‘", key="delete_" + file):
                os.remove(file)
                st.experimental_rerun()



    # Feedback
    # Step: Give User a Way to Upvote or Downvote
    GiveFeedback=False  # feature flag: feedback UI currently disabled
    if GiveFeedback:
        with st.expander("Give your feedback ๐Ÿ‘", expanded=False):

            feedback = st.radio("Step 8: Give your feedback", ("๐Ÿ‘ Upvote", "๐Ÿ‘Ž Downvote"))
            if feedback == "๐Ÿ‘ Upvote":
                st.write("You upvoted ๐Ÿ‘. Thank you for your feedback!")
            else:
                st.write("You downvoted ๐Ÿ‘Ž. Thank you for your feedback!")

    # Document Q&A: upload docs, build the FAISS index, chat over it.
    load_dotenv()
    st.write(css, unsafe_allow_html=True)
    st.header("Chat with documents :books:")
    user_question = st.text_input("Ask a question about your documents:")
    if user_question:
        process_user_input(user_question)
    with st.sidebar:
        st.subheader("Your documents")
        docs = st.file_uploader("import documents", accept_multiple_files=True)
        with st.spinner("Processing"):
            raw = pdf2txt(docs)
            if len(raw) > 0:
                length = str(len(raw))
                text_chunks = txt2chunks(raw)
                vectorstore = vector_store(text_chunks)
                st.session_state.conversation = get_chain(vectorstore)
                st.markdown('# AI Search Index of Length:' + length + ' Created.') # add timing
                filename = generate_filename(raw, 'txt')
                create_file(filename, raw, '', should_save)

    # Relocated! Hope you like your new space - enjoy!
    # Display instructions and handle query parameters
    st.markdown("## Glossary Lookup\nEnter a term in the URL query, like `?q=Nanotechnology` or `?query=Martian Syndicate`.")
    try:
        query_params = st.query_params
        #query = (query_params.get('q') or query_params.get('query') or [''])[0]
        # NOTE(review): the fallback `['']` makes `query` a list when neither
        # param is set, so the string concat below raises TypeError and falls
        # into the except branch — confirm intended default is '' not [''].
        query = (query_params.get('q') or query_params.get('query') or [''])
        st.markdown('# Running query: ' + query)
        if query: search_glossary(query)
    except:
        st.markdown('No glossary lookup')

    # Display the glossary grid
    st.title("Transhuman Space Glossary ๐ŸŒŒ")
    display_glossary_grid(transhuman_glossary)

    st.title("๐ŸŒŒ๐Ÿš€ Transhuman Space Encyclopedia")
    st.markdown("## Explore the universe of Transhuman Space through interactive storytelling and encyclopedic knowledge.๐ŸŒ ")

    display_buttons_with_scores()

    display_images_and_wikipedia_summaries()

    # Assuming the transhuman_glossary and other setup code remains the same
    #st.write("Current Query Parameters:", st.query_params)
    #st.markdown("### Query Parameters - These Deep Link Map to Remixable Methods, Navigate or Trigger Functionalities")

    # Example: Using query parameters to navigate or trigger functionalities
    if 'action' in st.query_params:
        # NOTE(review): st.query_params is a property, not callable — calling
        # it here raises TypeError; should be st.query_params['action'].
        action = st.query_params()['action'][0] # Get the first (or only) 'action' parameter
        if action == 'show_message':
            st.success("Showing a message because 'action=show_message' was found in the URL.")
        elif action == 'clear':
            clear_query_params()
            st.experimental_rerun()

    # Handling repeated keys
    if 'multi' in st.query_params:
        multi_values = get_all_query_params('multi')
        st.write("Values for 'multi':", multi_values)

    # Manual entry for demonstration
    st.write("Enter query parameters in the URL like this: ?action=show_message&multi=1&multi=2")

    if 'query' in st.query_params:
        # NOTE(review): with st.query_params values being strings, `[0]`
        # takes the first character — confirm whether the whole value is meant.
        query = st.query_params['query'][0] # Get the query parameter
        # Display content or image based on the query
        display_content_or_image(query)

    # Add a clear query parameters button for convenience
    if st.button("Clear Query Parameters", key='ClearQueryParams'):
        # This will clear the browser URL's query parameters
        # NOTE(review): missing call parentheses — this line references the
        # function without invoking it, so nothing is cleared.
        st.experimental_set_query_params
        st.experimental_rerun()
1184
+
1185
# 18. Run AI Pipeline
if __name__ == "__main__":
    # Entry point: record/transcribe speech first, then render the main UI.
    whisper_main()
    main()