awacke1 committed
Commit 1032311
1 Parent(s): 1c01f9b

Update app.py

Files changed (1)
  app.py +1170 -1
app.py CHANGED
@@ -28,4 +28,1173 @@ st.markdown("""
   | 19 | 🦶 Feet | 🌡️ Skin | Temperature regulation | 🌞 Skin Cancer | 8.1 | 96910 | 96999 | 1 in 5 |
   | 20 | 🦶 Feet | 💪 Muscles | Movement and strength | 🏋️‍♂️ Musculoskeletal Disorders | 176 | 97110 | 97799 | 1 in 2 |
 
- """)
+ """)
+
+
+ import os
+ import json
+ from PIL import Image
+ from urllib.parse import quote  # Ensure this import is included
+
+ # Set page configuration with a title and favicon
+ st.set_page_config(
+     page_title="🌌🚀 Mixable AI - Voice Search",
+     page_icon="🌠",
+     layout="wide",
+     initial_sidebar_state="expanded",
+     menu_items={
+         'Get Help': 'https://huggingface.co/awacke1',
+         'Report a bug': "https://huggingface.co/spaces/awacke1/WebDataDownload",
+         'About': "# Midjourney: https://discord.com/channels/@me/997514686608191558"
+     }
+ )
+
+ # Ensure the directory for storing scores exists
+ score_dir = "scores"
+ os.makedirs(score_dir, exist_ok=True)
+
+ # Function to generate a unique key for each button, including an emoji
+ def generate_key(label, header, idx):
+     return f"{header}_{label}_{idx}_key"
+
+ # Function to increment and save score
+ def update_score(key, increment=1):
+     score_file = os.path.join(score_dir, f"{key}.json")
+     if os.path.exists(score_file):
+         with open(score_file, "r") as file:
+             score_data = json.load(file)
+     else:
+         score_data = {"clicks": 0, "score": 0}
+
+     score_data["clicks"] += 1
+     score_data["score"] += increment
+
+     with open(score_file, "w") as file:
+         json.dump(score_data, file)
+
+     return score_data["score"]
+
+ # Function to load score
+ def load_score(key):
+     score_file = os.path.join(score_dir, f"{key}.json")
+     if os.path.exists(score_file):
+         with open(score_file, "r") as file:
+             score_data = json.load(file)
+         return score_data["score"]
+     return 0
+
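+ # Persistence model: one small JSON file per button key under scores/,
+ # each holding {"clicks": <n>, "score": <n>}, so tallies survive Streamlit reruns without a database.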
+ roleplaying_glossary = {
+     "🎴 Traditional Card Games": {
+         "Bridge": ["Trick-taking", "Bidding and partnership", "Complex scoring"],
+         "Poker": ["Betting/Card ranking", "Bluffing and hand management", "Various play styles"],
+         "Hearts": ["Trick-avoidance", "Passing cards strategy", "Shooting the moon"],
+         "Spades": ["Trick-taking", "Partnership and bidding", "Blind bidding"],
+         "Rummy": ["Matching", "Set and run formation", "Point scoring"],
+     },
+     "🔮 Collectible Card Games (CCGs)": {
+         "Magic: The Gathering": ["Deck building", "Resource management", "Strategic play"],
+         "Yu-Gi-Oh!": ["Dueling", "Summoning strategies", "Trap and spell cards"],
+         "Pokémon TCG": ["Collectible", "Type advantages", "Energy management"],
+         "KeyForge": ["Unique deck", "No deck building", "Chain system"],
+         "Legend of the Five Rings": ["Living Card Game", "Honor and conflict", "Clan loyalty"],
+     },
+     "🕹️ Digital Card Games": {
+         "Hearthstone": ["Digital CCG", "Hero powers", "Expansive card sets"],
+         "Gwent": ["Strategic depth", "Row-based play", "Witcher universe"],
+         "Slay the Spire": ["Roguelike deck-builder", "Card drafting", "Relic synergies"],
+         "Eternal Card Game": ["Digital CCG", "Cross-platform", "Drafting and events"],
+     },
+     "💻 Card Battler Video Games": {
+         "Yu-Gi-Oh! Duel Links": ["Speed Duel format", "Mobile and PC", "Competitive ladder"],
+         "Magic: The Gathering Arena": ["Digital adaptation", "Regular updates", "Esports"],
+         "Monster Train": ["Roguelike", "Multi-tiered defense", "Clan synergies"],
+         "Legends of Runeterra": ["League of Legends universe", "Dynamic combat", "Champion leveling"],
+     },
+     "🧠 Game Design and Dynamics": {
+         "Deck Building Strategies": ["Card synergy", "Mana curve", "Meta considerations"],
+         "Gameplay Mechanics": ["Turn-based", "Resource management", "Combat dynamics"],
+         "Player Engagement": ["Replayability", "Strategic depth", "Social play"],
+     },
+     "📚 Lore & Background": {
+         "Magic: The Gathering": ["Rich lore", "Multiverse settings", "Planeswalker stories"],
+         "Yu-Gi-Oh!": ["Anime-based", "Duel Monsters", "Egyptian mythology"],
+         "Legends of Runeterra": ["Expansive lore", "Champion backstories", "Faction conflicts"],
+     },
+     "🛠️ Digital Tools & Platforms": {
+         "Online Play": ["Remote gameplay", "Digital tournaments", "Community events"],
+         "Deck Building Tools": ["Card database access", "Deck testing", "Community sharing"],
+         "Strategy Guides": ["Meta analysis", "Deck guides", "Tournament reports"],
+     },
+     "🎖️ Competitive Scene": {
+         "Tournaments": ["Local game stores", "Regional competitions", "World championships"],
+         "Ranking Systems": ["Elo ratings", "Ladder rankings", "Seasonal rewards"],
+         "Esports": ["Live-streamed events", "Professional teams", "Sponsorships"],
+     },
+ }
+
+
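+ # search_glossary fans one query out to both models: GPT first, then the Llama endpoint,
+ # saving each response plus a combined transcript as markdown before reading the result aloud.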
+ def search_glossary(query):
+     for category, terms in roleplaying_glossary.items():
+         # terms is a dict of game -> feature list, so this matches against game names
+         if query.lower() in (term.lower() for term in terms):
+             st.markdown(f"#### {category}")
+             st.write(f"- {query}")
+
+     st.write('## ' + query)
+
+     all = ""
+     st.write('## 🔍 Running with GPT.')
+     response = chat_with_model(query)
+     #st.write(response)
+
+     filename = generate_filename(query + ' --- ' + response, "md")
+     create_file(filename, query, response, should_save)
+
+     st.write('## 🔍 Running with Llama.')
+     response2 = StreamLLMChatResponse(query)
+     #st.write(response2)
+
+     filename_txt = generate_filename(query + ' --- ' + response2, "md")
+     create_file(filename_txt, query, response2, should_save)
+
+     all = '# Query: ' + query + '# Response: ' + response + '# Response2: ' + response2
+
+     filename_txt2 = generate_filename(query + ' --- ' + all, "md")
+     create_file(filename_txt2, query, all, should_save)
+
+     SpeechSynthesis(all)
+     return all
+
+
+ # Function to display the glossary in a structured format
+ def display_glossary(glossary, area):
+     if area in glossary:
+         st.subheader(f"📘 Glossary for {area}")
+         for game, terms in glossary[area].items():
+             st.markdown(f"### {game}")
+             for idx, term in enumerate(terms, start=1):
+                 st.write(f"{idx}. {term}")
+
+
+ # Function to display the entire glossary in a grid format with links
+ def display_glossary_grid(roleplaying_glossary):
+     search_urls = {
+         "📖": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",
+         "🔍": lambda k: f"https://www.google.com/search?q={quote(k)}",
+         "▶️": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
+         "🔎": lambda k: f"https://www.bing.com/search?q={quote(k)}",
+         "🎲": lambda k: f"https://huggingface.co/spaces/awacke1/MixableCardGameAI?q={quote(k)}",  # this url plus query!
+     }
+
+     for category, details in roleplaying_glossary.items():
+         st.write(f"### {category}")
+         cols = st.columns(len(details))  # Create dynamic columns based on the number of games
+         for idx, (game, terms) in enumerate(details.items()):
+             with cols[idx]:
+                 st.markdown(f"#### {game}")
+                 for term in terms:
+                     links_md = ' '.join([f"[{emoji}]({url(term)})" for emoji, url in search_urls.items()])
+                     st.markdown(f"{term} {links_md}", unsafe_allow_html=True)
+
+
+ game_emojis = {
+     "Dungeons and Dragons": "🐉",
+     "Call of Cthulhu": "🐙",
+     "GURPS": "🎲",
+     "Pathfinder": "🗺️",
+     "Kindred of the East": "🌅",
+     "Changeling": "🍃",
+ }
+
+ topic_emojis = {
+     "Core Rulebooks": "📚",
+     "Maps & Settings": "🗺️",
+     "Game Mechanics & Tools": "⚙️",
+     "Monsters & Adversaries": "👹",
+     "Campaigns & Adventures": "📜",
+     "Creatives & Assets": "🎨",
+     "Game Master Resources": "🛠️",
+     "Lore & Background": "📖",
+     "Character Development": "🧍",
+     "Homebrew Content": "🔧",
+     "General Topics": "🌍",
+ }
+
+ # Adjusted display_buttons_with_scores function
+ def display_buttons_with_scores():
+     for category, games in roleplaying_glossary.items():
+         category_emoji = topic_emojis.get(category, "🔍")  # Default to search icon if no match
+         st.markdown(f"## {category_emoji} {category}")
+         for game, terms in games.items():
+             game_emoji = game_emojis.get(game, "🎮")  # Default to generic game controller if no match
+             for term in terms:
+                 key = f"{category}_{game}_{term}".replace(' ', '_').lower()
+                 score = load_score(key)
+                 if st.button(f"{game_emoji} {term} {score}", key=key):
+                     update_score(key)
+                     # Create a dynamic query incorporating emojis and formatting for clarity
+                     query_prefix = f"{category_emoji} {game_emoji} **{game} - {category}:**"
+                     # query_body = f"Create a detailed outline for **{term}** with subpoints highlighting key aspects, using emojis for visual engagement. Include step-by-step rules and boldface important entities and ruleset elements."
+                     query_body = f"Create a streamlit python app.py that produces a detailed markdown outline and CSV dataset user interface with an outline for **{term}** with subpoints highlighting key aspects, using emojis for visual engagement. Include step-by-step rules and boldface important entities and ruleset elements."
+                     response = search_glossary(query_prefix + query_body)  # search_glossary takes a single query argument
+
+
+ def fetch_wikipedia_summary(keyword):
+     # Placeholder function for fetching Wikipedia summaries
+     # In a real app, you might use requests to fetch from the Wikipedia API
+     return f"Summary for {keyword}. For more information, visit Wikipedia."
+
+ def create_search_url_youtube(keyword):
+     base_url = "https://www.youtube.com/results?search_query="
+     return base_url + keyword.replace(' ', '+')
+
+ def create_search_url_bing(keyword):
+     base_url = "https://www.bing.com/search?q="
+     return base_url + keyword.replace(' ', '+')
+
+ def create_search_url_wikipedia(keyword):
+     base_url = "https://www.wikipedia.org/search-redirect.php?family=wikipedia&language=en&search="
+     return base_url + keyword.replace(' ', '+')
+
+ def create_search_url_google(keyword):
+     base_url = "https://www.google.com/search?q="
+     return base_url + keyword.replace(' ', '+')
+
+
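+ # e.g. create_search_url_google("magic the gathering") -> "https://www.google.com/search?q=magic+the+gathering"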
+ def display_images_and_wikipedia_summaries():
+     st.title('Gallery with Related Stories')
+     image_files = [f for f in os.listdir('.') if f.endswith('.png')]
+     if not image_files:
+         st.write("No PNG images found in the current directory.")
+         return
+
+     for image_file in image_files:
+         image = Image.open(image_file)
+         st.image(image, caption=image_file, use_column_width=True)
+
+         keyword = image_file.split('.')[0]  # Assumes keyword is the file name without extension
+
+         # Display Wikipedia and Google search links
+         wikipedia_url = create_search_url_wikipedia(keyword)
+         google_url = create_search_url_google(keyword)
+         youtube_url = create_search_url_youtube(keyword)
+         bing_url = create_search_url_bing(keyword)
+
+         links_md = f"""
+         [Wikipedia]({wikipedia_url}) |
+         [Google]({google_url}) |
+         [YouTube]({youtube_url}) |
+         [Bing]({bing_url})
+         """
+         st.markdown(links_md)
+
+
+ def get_all_query_params(key):
+     # st.query_params is a property in recent Streamlit; get_all returns every value for a repeated key
+     return st.query_params.get_all(key)
+
+ def clear_query_params():
+     st.query_params.clear()
+
+
+ # Function to display content or image based on a query
+ def display_content_or_image(query):
+     # Check if the query matches any glossary term (transhuman_glossary was undefined; use roleplaying_glossary)
+     for category, terms in roleplaying_glossary.items():
+         for term in terms:
+             if query.lower() in term.lower():
+                 st.subheader(f"Found in {category}:")
+                 st.write(term)
+                 return True  # Return after finding and displaying the first match
+
+     # Check for an image match in a predefined directory (adjust path as needed)
+     image_dir = "images"  # Example directory where images are stored
+     image_path = f"{image_dir}/{query}.png"  # Construct image path with query
+     if os.path.exists(image_path):
+         st.image(image_path, caption=f"Image for {query}")
+         return True
+
+     # If no content or image is found
+     st.warning("No matching content or image found.")
+     return False
+
+
+ # Imports
+ import base64
+ import glob
+ import json
+ import math
+ import mistune  # needed by read_file_content for markdown rendering
+ import openai
+ import os
+ import pytz
+ import re
+ import requests
+ import streamlit as st
+ import textract
+ import time
+ import zipfile
+ import huggingface_hub
+ import dotenv
+ from audio_recorder_streamlit import audio_recorder
+ from bs4 import BeautifulSoup
+ from collections import deque
+ from datetime import datetime
+ from dotenv import load_dotenv
+ from huggingface_hub import InferenceClient
+ from io import BytesIO
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.embeddings import OpenAIEmbeddings
+ from langchain.memory import ConversationBufferMemory
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.vectorstores import FAISS
+ from openai import ChatCompletion
+ from PyPDF2 import PdfReader
+ from templates import bot_template, css, user_template
+ from xml.etree import ElementTree as ET
+ import streamlit.components.v1 as components  # Import Streamlit Components for HTML5
+
+
+ def add_Med_Licensing_Exam_Dataset():
+     import streamlit as st
+     from datasets import load_dataset
+     dataset = load_dataset("augtoma/usmle_step_1")['test']  # Using 'test' split
+     st.title("USMLE Step 1 Dataset Viewer")
+     if len(dataset) == 0:
+         st.write("😢 The dataset is empty.")
+     else:
+         st.write("""
+         🔍 Use the search box to filter questions or use the grid to scroll through the dataset.
+         """)
+
+         # 👩‍🔬 Search Box
+         search_term = st.text_input("Search for a specific question:", "")
+
+         # 🎛 Pagination (ceiling division so a partial last page is still reachable)
+         records_per_page = 100
+         num_records = len(dataset)
+         num_pages = max((num_records + records_per_page - 1) // records_per_page, 1)
+
+         # Skip generating the slider if num_pages is 1 (i.e., all records fit in one page)
+         if num_pages > 1:
+             page_number = st.select_slider("Select page:", options=list(range(1, num_pages + 1)))
+         else:
+             page_number = 1  # Only one page
+
+         # 📊 Display Data
+         start_idx = (page_number - 1) * records_per_page
+         end_idx = min(start_idx + records_per_page, num_records)
+
+         # 🧪 Apply the Search Filter (index row by row: slicing a HF Dataset yields columns, not rows)
+         filtered_data = []
+         for i in range(start_idx, end_idx):
+             record = dataset[i]
+             if isinstance(record, dict) and 'text' in record and 'id' in record:
+                 if search_term:
+                     if search_term.lower() in record['text'].lower():
+                         st.markdown(record)
+                         filtered_data.append(record)
+                 else:
+                     filtered_data.append(record)
+
+         # 🌐 Render the Grid
+         for record in filtered_data:
+             st.write(f"## Question ID: {record['id']}")
+             st.write("### Question:")
+             st.write(f"{record['text']}")
+             st.write("### Answer:")
+             st.write(f"{record['answer']}")
+             st.write("---")
+
+         st.write(f"😊 Total Records: {num_records} | 📄 Displaying {start_idx+1} to {end_idx}")
+
+ # 1. Constants and Top Level UI Variables
+
+ # My Inference API Copy
+ API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud'  # Dr Llama
+ # Meta's Original - Chat HF Free Version:
+ #API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf"
+ API_KEY = os.getenv('API_KEY')
+ MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
+ MODEL1URL = "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf"
+ HF_KEY = os.getenv('HF_KEY')
+ headers = {
+     "Authorization": f"Bearer {HF_KEY}",
+     "Content-Type": "application/json"
+ }
+ key = os.getenv('OPENAI_API_KEY')
+ prompt = "Write instructions to teach discharge planning along with guidelines and patient education. List entities, features and relationships to CCDA and FHIR objects in boldface."
+ should_save = st.sidebar.checkbox("💾 Save", value=True, help="Save your session data.")
+
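+ # 2. Browser text-to-speech: SpeechSynthesis renders an HTML5 component whose button feeds
+ # the text to the Web Speech API (window.speechSynthesis), so playback happens client-side.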
+ def SpeechSynthesis(result):
+     documentHTML5 = '''
+     <!DOCTYPE html>
+     <html>
+     <head>
+         <title>Read It Aloud</title>
+         <script type="text/javascript">
+             function readAloud() {
+                 const text = document.getElementById("textArea").value;
+                 const speech = new SpeechSynthesisUtterance(text);
+                 window.speechSynthesis.speak(speech);
+             }
+         </script>
+     </head>
+     <body>
+         <h1>🔊 Read It Aloud</h1>
+         <textarea id="textArea" rows="10" cols="80">
+     '''
+     documentHTML5 = documentHTML5 + result
+     documentHTML5 = documentHTML5 + '''
+         </textarea>
+         <br>
+         <button onclick="readAloud()">🔊 Read Aloud</button>
+     </body>
+     </html>
+     '''
+
+     components.html(documentHTML5, width=1280, height=300)
+     #return result
+
+
+ # 3. Stream Llama Response
+ # @st.cache_resource
+ def StreamLLMChatResponse(prompt):
+     try:
+         endpoint_url = API_URL
+         hf_token = API_KEY
+         st.write('Running client ' + endpoint_url)
+         client = InferenceClient(endpoint_url, token=hf_token)
+         gen_kwargs = dict(
+             max_new_tokens=512,
+             top_k=30,
+             top_p=0.9,
+             temperature=0.2,
+             repetition_penalty=1.02,
+             stop_sequences=["\nUser:", "<|endoftext|>", "</s>"],
+         )
+         stream = client.text_generation(prompt, stream=True, details=True, **gen_kwargs)
+         report = []
+         res_box = st.empty()
+         collected_chunks = []
+         collected_messages = []
+         result = ''  # initialize so an empty stream cannot leave result unbound below
+         for r in stream:
+             if r.token.special:
+                 continue
+             if r.token.text in gen_kwargs["stop_sequences"]:
+                 break
+             collected_chunks.append(r.token.text)
+             chunk_message = r.token.text
+             collected_messages.append(chunk_message)
+             try:
+                 report.append(r.token.text)
+                 if len(r.token.text) > 0:
+                     result = "".join(report).strip()
+                     res_box.markdown(f'*{result}*')
+             except:
+                 st.write('Stream llm issue')
+         SpeechSynthesis(result)
+         return result
+     except:
+         st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
+
+ # 4. Run query with payload
+ def query(payload):
+     response = requests.post(API_URL, headers=headers, json=payload)
+     st.markdown(response.json())
+     return response.json()
+
+ def get_output(prompt):
+     return query({"inputs": prompt})
+
+ # 5. Auto name generated output files from time and content
+ def generate_filename(prompt, file_type):
+     central = pytz.timezone('US/Central')
+     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
+     replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
+     safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:255]  # 255 is the Linux max; 260 is the Windows max
+     #safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:45]
+     return f"{safe_date_time}_{safe_prompt}.{file_type}"
+
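+ # e.g. a prompt of "Write a story" saved at 2:30 PM on March 5 becomes "0305_1430_Write_a_story.md"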
+ # 6. Speech transcription via OpenAI service
+ def transcribe_audio(openai_key, file_path, model):
+     openai.api_key = openai_key
+     OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
+     headers = {
+         "Authorization": f"Bearer {openai_key}",
+     }
+     with open(file_path, 'rb') as f:
+         data = {'file': f}
+         st.write('STT transcript ' + OPENAI_API_URL)
+         response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
+     if response.status_code == 200:
+         st.write(response.json())
+         chatResponse = chat_with_model(response.json().get('text'), '')
+         transcript = response.json().get('text')
+         filename = generate_filename(transcript, 'txt')
+         response = chatResponse
+         user_prompt = transcript
+         create_file(filename, user_prompt, response, should_save)
+         return transcript
+     else:
+         st.write(response.json())
+         st.error("Error in API call.")
+         return None
+
+ # 7. Auto stop on silence audio control for recording WAV files
+ def save_and_play_audio(audio_recorder):
+     audio_bytes = audio_recorder(key='audio_recorder')
+     if audio_bytes:
+         filename = generate_filename("Recording", "wav")
+         with open(filename, 'wb') as f:
+             f.write(audio_bytes)
+         st.audio(audio_bytes, format="audio/wav")
+         return filename
+     return None
+
+ # 8. File creator that interprets type and creates output file for text, markdown and code
+ def create_file(filename, prompt, response, should_save=True):
+     if not should_save:
+         return
+     base_filename, ext = os.path.splitext(filename)
+     if ext in ['.txt', '.htm', '.md']:
+         with open(f"{base_filename}.md", 'w') as file:
+             try:
+                 content = prompt.strip() + '\r\n' + response
+                 file.write(content)
+             except:
+                 st.write('.')
+
+     #has_python_code = bool(re.search(r"```python([\s\S]*?)```", prompt.strip() + '\r\n' + response))
+     #if has_python_code:
+     #    python_code = re.findall(r"```python([\s\S]*?)```", response)[0].strip()
+     #    with open(f"{base_filename}-Code.py", 'w') as file:
+     #        file.write(python_code)
+     #    with open(f"{base_filename}.md", 'w') as file:
+     #        content = prompt.strip() + '\r\n' + response
+     #        file.write(content)
+
+ def truncate_document(document, length):
+     return document[:length]
+
+ def divide_document(document, max_length):
+     return [document[i:i+max_length] for i in range(0, len(document), max_length)]
+
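+ # e.g. divide_document("abcdef", 4) -> ["abcd", "ef"]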
+ # 9. Sidebar with UI controls to review and re-run prompts and continue responses
+ @st.cache_resource
+ def get_table_download_link(file_path):
+     with open(file_path, 'r') as file:
+         data = file.read()
+
+     b64 = base64.b64encode(data.encode()).decode()
+     file_name = os.path.basename(file_path)
+     ext = os.path.splitext(file_name)[1]  # get the file extension
+     mime_map = {
+         '.txt': 'text/plain',
+         '.py': 'text/plain',
+         '.xlsx': 'text/plain',
+         '.csv': 'text/plain',
+         '.htm': 'text/html',
+         '.md': 'text/markdown',
+         '.wav': 'audio/wav',
+     }
+     mime_type = mime_map.get(ext, 'application/octet-stream')  # general binary data type as fallback
+     href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
+     return href
+
+
+ def CompressXML(xml_text):
+     root = ET.fromstring(xml_text)
+     # ElementTree elements have no .parent attribute, so build a child -> parent map first
+     parent_map = {child: parent for parent in root.iter() for child in parent}
+     for elem in list(root.iter()):
+         if isinstance(elem.tag, str) and 'Comment' in elem.tag:
+             parent = parent_map.get(elem)
+             if parent is not None:
+                 parent.remove(elem)
+     return ET.tostring(root, encoding='unicode', method="xml")
+
+ # 10. Read in and provide UI for past files
+ @st.cache_resource
+ def read_file_content(file, max_length):
+     if file.type == "application/json":
+         content = json.load(file)
+         return str(content)
+     elif file.type == "text/html" or file.type == "text/htm":
+         content = BeautifulSoup(file, "html.parser")
+         return content.text
+     elif file.type == "application/xml" or file.type == "text/xml":
+         tree = ET.parse(file)
+         root = tree.getroot()
+         xml = CompressXML(ET.tostring(root, encoding='unicode'))
+         return xml
+     elif file.type == "text/markdown" or file.type == "text/md":
+         md = mistune.create_markdown()
+         content = md(file.read().decode())
+         return content
+     elif file.type == "text/plain":
+         return file.getvalue().decode()
+     else:
+         return ""
+
+ # 11. Chat with GPT - Caution on quota - now favoring fastest AI pipeline STT Whisper->LLM Llama->TTS
+ @st.cache_resource
+ def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
+     model = model_choice
+     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
+     conversation.append({'role': 'user', 'content': prompt})
+     if len(document_section) > 0:
+         conversation.append({'role': 'assistant', 'content': document_section})
+     start_time = time.time()
+     report = []
+     res_box = st.empty()
+     collected_chunks = []
+     collected_messages = []
+
+     st.write('LLM stream ' + model)
+     for chunk in openai.ChatCompletion.create(model=model, messages=conversation, temperature=0.5, stream=True):
+         collected_chunks.append(chunk)
+         chunk_message = chunk['choices'][0]['delta']
+         collected_messages.append(chunk_message)
+         content = chunk["choices"][0].get("delta", {}).get("content")
+         if content:  # skip role/None deltas so the running join never sees a None
+             report.append(content)
+             result = "".join(report).strip()
+             res_box.markdown(f'*{result}*')
+     full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
+     st.write("Elapsed time:")
+     st.write(time.time() - start_time)
+     return full_reply_content
+
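+ # Streaming pattern: each delta chunk appends to a running buffer that is re-rendered into the
+ # same st.empty() placeholder, so the reply appears to type itself out as tokens arrive.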
+ # 12. Embedding VectorDB for LLM query of documents to text to compress inputs and prompt together as Chat memory using Langchain
+ @st.cache_resource
+ def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
+     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
+     conversation.append({'role': 'user', 'content': prompt})
+     if len(file_content) > 0:
+         conversation.append({'role': 'assistant', 'content': file_content})
+     response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
+     return response['choices'][0]['message']['content']
+
+ def extract_mime_type(file):
+     if isinstance(file, str):
+         pattern = r"type='(.*?)'"
+         match = re.search(pattern, file)
+         if match:
+             return match.group(1)
+         else:
+             raise ValueError(f"Unable to extract MIME type from {file}")
+     elif hasattr(file, 'type'):  # a Streamlit UploadedFile exposes its MIME type directly
+         return file.type
+     else:
+         raise TypeError("Input should be a string or a Streamlit UploadedFile object")
+
+ def extract_file_extension(file):
+     # get the file name directly from the UploadedFile object
+     file_name = file.name
+     pattern = r".*?\.(.*?)$"
+     match = re.search(pattern, file_name)
+     if match:
+         return match.group(1)
+     else:
+         raise ValueError(f"Unable to extract file extension from {file_name}")
+
+ # Normalize input as text from PDF and other formats
+ @st.cache_resource
+ def pdf2txt(docs):
+     text = ""
+     for file in docs:
+         file_extension = extract_file_extension(file)
+         st.write(f"File type extension: {file_extension}")
+         if file_extension.lower() in ['py', 'txt', 'html', 'htm', 'xml', 'json']:
+             text += file.getvalue().decode('utf-8')
+         elif file_extension.lower() == 'pdf':
+             from PyPDF2 import PdfReader
+             pdf = PdfReader(BytesIO(file.getvalue()))
+             for page in range(len(pdf.pages)):
+                 text += pdf.pages[page].extract_text()  # new PyPDF2 syntax
+     return text
+
+ def txt2chunks(text):
+     # 1000-character chunks with 200 characters of overlap preserve context across chunk boundaries
+     text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len)
+     return text_splitter.split_text(text)
+
+ # Vector Store using FAISS
+ @st.cache_resource
+ def vector_store(text_chunks):
+     embeddings = OpenAIEmbeddings(openai_api_key=key)
+     return FAISS.from_texts(texts=text_chunks, embedding=embeddings)
+
+ # Memory and Retrieval chains
+ @st.cache_resource
+ def get_chain(vectorstore):
+     llm = ChatOpenAI()
+     memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
+     return ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory)
+
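+ # Retrieval flow: pdf2txt -> txt2chunks -> vector_store builds the FAISS index, and get_chain
+ # wires that retriever plus conversation memory into ChatOpenAI for follow-up questions.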
+ def process_user_input(user_question):
+     response = st.session_state.conversation({'question': user_question})
+     st.session_state.chat_history = response['chat_history']
+     for i, message in enumerate(st.session_state.chat_history):
+         template = user_template if i % 2 == 0 else bot_template
+         st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
+     filename = generate_filename(user_question, 'txt')
+     response = message.content
+     user_prompt = user_question
+     create_file(filename, user_prompt, response, should_save)
+
+ def divide_prompt(prompt, max_length):
+     # Greedy word packing: fill each chunk up to max_length characters, then start a new one
+     words = prompt.split()
+     chunks = []
+     current_chunk = []
+     current_length = 0
+     for word in words:
+         if len(word) + current_length <= max_length:
+             current_length += len(word) + 1
+             current_chunk.append(word)
+         else:
+             chunks.append(' '.join(current_chunk))
+             current_chunk = [word]
+             current_length = len(word)
+     chunks.append(' '.join(current_chunk))
+     return chunks
+
+
+ # 13. Provide way of saving all and deleting all to give way of reviewing output and saving locally before clearing it
+
+ @st.cache_resource
+ def create_zip_of_files(files):
+     zip_name = "all_files.zip"
+     with zipfile.ZipFile(zip_name, 'w') as zipf:
+         for file in files:
+             zipf.write(file)
+     return zip_name
+
+ @st.cache_resource
+ def get_zip_download_link(zip_file):
+     with open(zip_file, 'rb') as f:
+         data = f.read()
+     b64 = base64.b64encode(data).decode()
+     href = f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
+     return href
+
+ # 14. Inference Endpoints for Whisper (best fastest STT) on NVIDIA T4 and Llama (best fastest AGI LLM) on NVIDIA A10
+ # My Inference Endpoint (overridden by the hosted endpoint on the next assignment)
+ API_URL_IE = 'https://tonpixzfvq3791u9.us-east-1.aws.endpoints.huggingface.cloud'
+ # Original
+ API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small.en"
+ MODEL2 = "openai/whisper-small.en"
+ MODEL2_URL = "https://huggingface.co/openai/whisper-small.en"
+ # HF_KEY = os.getenv('HF_KEY')
+ HF_KEY = st.secrets['HF_KEY']
+ headers = {
+     "Authorization": f"Bearer {HF_KEY}",
+     "Content-Type": "audio/wav"
+ }
+
+ #@st.cache_resource
+ def query(filename):
+     # Note: redefines query() above to POST raw WAV bytes to the Whisper endpoint
+     with open(filename, "rb") as f:
+         data = f.read()
+     response = requests.post(API_URL_IE, headers=headers, data=data)
+     return response.json()
+
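+ # The hosted Whisper endpoint returns JSON shaped like {"text": "..."}; whisper_main reads that key below.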
+ def generate_filename(prompt, file_type):
+     # Redefines the earlier version with a shorter 90-character prompt slug
+     central = pytz.timezone('US/Central')
+     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
+     replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
+     safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
+     return f"{safe_date_time}_{safe_prompt}.{file_type}"
+
+ # 15. Audio recorder to Wav file
+ def save_and_play_audio(audio_recorder):
+     audio_bytes = audio_recorder()
+     if audio_bytes:
+         filename = generate_filename("Recording", "wav")
+         with open(filename, 'wb') as f:
+             f.write(audio_bytes)
+         st.audio(audio_bytes, format="audio/wav")
+         return filename
+
+ # 16. Speech transcription to file output
+ def transcribe_audio(filename):
+     output = query(filename)
+     return output
+
+ def whisper_main():
+     #st.title("Speech to Text")
+     #st.write("Record your speech and get the text.")
+
+     # Audio, transcribe, GPT:
+     filename = save_and_play_audio(audio_recorder)
+     if filename is not None:
+         transcription = transcribe_audio(filename)
+         try:
+             transcript = transcription['text']
+             st.write(transcript)
+         except:
+             transcript = ''
+             st.write(transcript)
+
+         # Whisper to GPT: New!!
+         st.write('Reasoning with your inputs with GPT..')
+         response = chat_with_model(transcript)
+         st.write('Response:')
+         st.write(response)
+
+         filename = generate_filename(response, "txt")
+         create_file(filename, transcript, response, should_save)
+
+         # Whisper to Llama:
+         response = StreamLLMChatResponse(transcript)
+         filename_txt = generate_filename(transcript, "md")
+         create_file(filename_txt, transcript, response, should_save)
+
+         filename_wav = filename_txt.replace('.md', '.wav')  # was '.txt', which never matched the .md name
+         import shutil
+         try:
+             if os.path.exists(filename):
+                 shutil.copyfile(filename, filename_wav)
+         except:
+             st.write('.')
+
+         if os.path.exists(filename):
+             os.remove(filename)
+
+         #st.experimental_rerun()
+         #except:
+         #    st.write('Starting Whisper Model on GPU. Please retry in 30 seconds.')
+
+
+ # Sample function to demonstrate a response, replace with your own logic
+ def StreamMedChatResponse(topic):
+     st.write(f"Showing resources or questions related to: {topic}")
+
+
+ # 17. Main
+ def main():
+     prompt = "Write ten funny jokes that are tweet length stories that make you laugh. Show as markdown outline with emojis for each."
+     # Add Wit and Humor buttons
+     # add_witty_humor_buttons()
+     # add_medical_exam_buttons()
+
+     with st.expander("Prompts 📚", expanded=False):
+         example_input = st.text_input("Enter your prompt text for Llama:", value=prompt, help="Enter text to get a response from DromeLlama.")
+         if st.button("Run Prompt With Llama model", help="Click to run the prompt."):
+             try:
+                 response = StreamLLMChatResponse(example_input)
+                 filename = generate_filename(example_input, "md")  # filename was previously undefined here
+                 create_file(filename, example_input, response, should_save)
+             except:
+                 st.write('Llama model is asleep. Starting now on A10 GPU. Please wait one minute then retry. KEDA triggered.')
+
+     openai.api_key = os.getenv('OPENAI_API_KEY')
+     if openai.api_key is None:
+         openai.api_key = st.secrets['OPENAI_API_KEY']
+
+     menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
+     choice = st.sidebar.selectbox("Output File Type:", menu)
+
+     model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
+
+     user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
+     collength, colupload = st.columns([2, 3])  # adjust the ratio as needed
+     with collength:
+         max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
+     with colupload:
+         uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx", "csv", "html", "htm", "md", "txt"])
+     document_sections = deque()
+     document_responses = {}
+     if uploaded_file is not None:
+         file_content = read_file_content(uploaded_file, max_length)
+         document_sections.extend(divide_document(file_content, max_length))
+     if len(document_sections) > 0:
+         if st.button("👁️ View Upload"):
+             st.markdown("**Sections of the uploaded file:**")
+             for i, section in enumerate(list(document_sections)):
+                 st.markdown(f"**Section {i+1}**\n{section}")
+         st.markdown("**Chat with the model:**")
+         for i, section in enumerate(list(document_sections)):
+             if i in document_responses:
+                 st.markdown(f"**Section {i+1}**\n{document_responses[i]}")
+             else:
+                 if st.button(f"Chat about Section {i+1}"):
+                     st.write('Reasoning with your inputs...')
+                     response = chat_with_model(user_prompt, section, model_choice)  # re-enabled; response was undefined while commented out
+                     st.write('Response:')
+                     st.write(response)
+                     document_responses[i] = response
+                     filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
+                     create_file(filename, user_prompt, response, should_save)
+                     st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
+
+     if st.button('💬 Chat'):
+         st.write('Reasoning with your inputs...')
+         user_prompt_sections = divide_prompt(user_prompt, max_length)
+         full_response = ''
+         for prompt_section in user_prompt_sections:
+             response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
+             full_response += response + '\n'  # Combine the responses
+         response = full_response
+         st.write('Response:')
+         st.write(response)
+         filename = generate_filename(user_prompt, choice)
+         create_file(filename, user_prompt, response, should_save)
+
+     # Compose a file sidebar of markdown md files:
+     all_files = glob.glob("*.md")
+     all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
+     all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order
+     if st.sidebar.button("🗑 Delete All Text"):
+         for file in all_files:
+             os.remove(file)
+         st.experimental_rerun()
+     if st.sidebar.button("⬇️ Download All"):
+         zip_file = create_zip_of_files(all_files)
+         st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)
+     file_contents = ''
+     next_action = ''
+     for file in all_files:
+         col1, col2, col3, col4, col5 = st.sidebar.columns([1, 6, 1, 1, 1])  # adjust the ratio as needed
+         with col1:
+             if st.button("🌐", key="md_" + file):  # md emoji button
+                 with open(file, 'r') as f:
+                     file_contents = f.read()
+                 next_action = 'md'
+         with col2:
+             st.markdown(get_table_download_link(file), unsafe_allow_html=True)
+         with col3:
+             if st.button("📂", key="open_" + file):  # open emoji button
+                 with open(file, 'r') as f:
+                     file_contents = f.read()
+                 next_action = 'open'
+         with col4:
+             if st.button("🔍", key="read_" + file):  # search emoji button
+                 with open(file, 'r') as f:
+                     file_contents = f.read()
+                 next_action = 'search'
+         with col5:
+             if st.button("🗑", key="delete_" + file):
+                 os.remove(file)
+                 st.experimental_rerun()
+
+     if len(file_contents) > 0:
+         if next_action == 'open':
+             file_content_area = st.text_area("File Contents:", file_contents, height=500)
+         if next_action == 'md':
+             st.markdown(file_contents)
+
+             buttonlabel = '🔍 Run with Llama and GPT.'
+             if st.button(key='RunWithLlamaandGPT', label=buttonlabel):
+                 user_prompt = file_contents
+
+                 # Llama versus GPT Battle!
+                 all = ""
+                 try:
+                     st.write('🔍 Running with Llama.')
+                     response = StreamLLMChatResponse(file_contents)
+                     filename = generate_filename(user_prompt, "md")
+                     create_file(filename, file_contents, response, should_save)
+                     all = response
+                     #SpeechSynthesis(response)
+                 except:
+                     st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
+
+                 # gpt
+                 try:
+                     st.write('🔍 Running with GPT.')
+                     response2 = chat_with_model(user_prompt, file_contents, model_choice)
+                     filename2 = generate_filename(file_contents, choice)
+                     create_file(filename2, user_prompt, response2, should_save)  # was saving response, not response2
+                     all = all + response2
+                     #SpeechSynthesis(response2)
+                 except:
+                     st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
+
+                 SpeechSynthesis(all)
+
+         if next_action == 'search':
+             file_content_area = st.text_area("File Contents:", file_contents, height=500)
+             st.write('🔍 Running with Llama and GPT.')
+
+             user_prompt = file_contents
+
+             # Llama versus GPT Battle!
+             all = ""
+             try:
+                 st.write('🔍 Running with Llama.')
+                 response = StreamLLMChatResponse(file_contents)
+                 filename = generate_filename(user_prompt, "md")  # was ".md", which produced a doubled dot
+                 create_file(filename, file_contents, response, should_save)
+                 all = response
+                 #SpeechSynthesis(response)
+             except:
+                 st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
+
+             # gpt
+             try:
+                 st.write('🔍 Running with GPT.')
+                 response2 = chat_with_model(user_prompt, file_contents, model_choice)
+                 filename2 = generate_filename(file_contents, choice)
+                 create_file(filename2, user_prompt, response2, should_save)  # was saving response, not response2
+                 all = all + response2
+                 #SpeechSynthesis(response2)
+             except:
+                 st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
+
+             SpeechSynthesis(all)
+
+
+     # Function to encode file to base64
+     def get_base64_encoded_file(file_path):
+         with open(file_path, "rb") as file:
+             return base64.b64encode(file.read()).decode()
+
+     # Function to create a download link
+     def get_audio_download_link(file_path):
+         base64_file = get_base64_encoded_file(file_path)
+         return f'<a href="data:file/wav;base64,{base64_file}" download="{os.path.basename(file_path)}">⬇️ Download Audio</a>'
+
+     # Compose a file sidebar of past encounters
+     all_files = glob.glob("*.wav")
+     all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
+     all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order
+
+     filekey = 'delall'
+     if st.sidebar.button("🗑 Delete All Audio", key=filekey):
+         for file in all_files:
+             os.remove(file)
+         st.experimental_rerun()
+
+     for file in all_files:
+         col1, col2 = st.sidebar.columns([6, 1])  # adjust the ratio as needed
+         with col1:
+             st.markdown(file)
+             if st.button("🎵", key="play_" + file):  # play emoji button
+                 audio_file = open(file, 'rb')
+                 audio_bytes = audio_file.read()
+                 st.audio(audio_bytes, format='audio/wav')
+                 #st.markdown(get_audio_download_link(file), unsafe_allow_html=True)
+                 #st.text_input(label="", value=file)
+         with col2:
+             if st.button("🗑", key="delete_" + file):
+                 os.remove(file)
+                 st.experimental_rerun()
+
+     # Feedback
+     # Step: Give User a Way to Upvote or Downvote
+     GiveFeedback = False
+     if GiveFeedback:
+         with st.expander("Give your feedback 👍", expanded=False):
+             feedback = st.radio("Step 8: Give your feedback", ("👍 Upvote", "👎 Downvote"))
+             if feedback == "👍 Upvote":
+                 st.write("You upvoted 👍. Thank you for your feedback!")
+             else:
+                 st.write("You downvoted 👎. Thank you for your feedback!")
+
+     load_dotenv()
+     st.write(css, unsafe_allow_html=True)
+     st.header("Chat with documents :books:")
+     user_question = st.text_input("Ask a question about your documents:")
+     if user_question:
+         process_user_input(user_question)
+     with st.sidebar:
+         st.subheader("Your documents")
+         docs = st.file_uploader("import documents", accept_multiple_files=True)
+         with st.spinner("Processing"):
+             raw = pdf2txt(docs) if docs else ''  # guard against an empty uploader
+             if len(raw) > 0:
+                 length = str(len(raw))
+                 text_chunks = txt2chunks(raw)
+                 vectorstore = vector_store(text_chunks)
+                 st.session_state.conversation = get_chain(vectorstore)
+                 st.markdown('# AI Search Index of Length:' + length + ' Created.')  # add timing
+                 filename = generate_filename(raw, 'txt')
+                 create_file(filename, raw, '', should_save)
+
+     # Relocated! Hope you like your new space - enjoy!
+     # Display instructions and handle query parameters
+     #st.markdown("## Glossary Lookup\nEnter a term in the URL query, like `?q=Nanotechnology` or `?query=Martian Syndicate`.")
+
+     st.markdown('''
+     ### Mixable AI 🃏🚀📚
+     ''')
+
+     try:
+         query_params = st.query_params
+         query = query_params.get('q') or query_params.get('query') or ''  # .get returns a string in recent Streamlit, not a list
+         if query:
+             st.markdown('# Running query: ' + query)
+             search_glossary(query)
+     except:
+         st.markdown('No glossary lookup')
+
+     # Display the glossary grid
+     st.title("Card Games Glossary 🎲")
+     display_glossary_grid(roleplaying_glossary)
+     st.title("🎲🗺️ Card Game Universe")
+     st.markdown("## Explore the vast universes of Dungeons and Dragons, Call of Cthulhu, GURPS, and more through interactive storytelling and encyclopedic knowledge.🌠")
+
+     display_buttons_with_scores()
+
+     display_images_and_wikipedia_summaries()
+
+     #st.write("Current Query Parameters:", st.query_params)
+     #st.markdown("### Query Parameters - These Deep Link Map to Remixable Methods, Navigate or Trigger Functionalities")
+
+     # Example: Using query parameters to navigate or trigger functionalities
+     if 'action' in st.query_params:
+         action = st.query_params['action']  # indexing st.query_params returns the (single) value as a string
+         if action == 'show_message':
+             st.success("Showing a message because 'action=show_message' was found in the URL.")
+         elif action == 'clear':
+             clear_query_params()
+             st.experimental_rerun()
+
+     # Handling repeated keys
+     if 'multi' in st.query_params:
+         multi_values = get_all_query_params('multi')
+         st.write("Values for 'multi':", multi_values)
+
+     # Manual entry for demonstration
+     st.write("Enter query parameters in the URL like this: ?action=show_message&multi=1&multi=2")
+
+     if 'query' in st.query_params:
+         query = st.query_params['query']  # Get the query parameter
+         # Display content or image based on the query
+         display_content_or_image(query)
+
+     # Add a clear query parameters button for convenience
+     if st.button("Clear Query Parameters", key='ClearQueryParams'):
+         # This will clear the browser URL's query parameters
+         st.query_params.clear()  # the bare st.experimental_set_query_params reference was a no-op
+         st.experimental_rerun()
+
+ # 18. Run AI Pipeline
+ if __name__ == "__main__":
+     whisper_main()
+     main()
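+ # Deep-link examples wired above (illustrative values): ?q=Bridge runs a glossary search,
+ # ?action=show_message shows a banner, and ?multi=1&multi=2 demonstrates repeated query keys.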