awacke1 committed on
Commit
263e41a
•
1 Parent(s): a9b8361

Update app.py

Browse files
Files changed (1)
  1. app.py +922 -0
app.py CHANGED
@@ -237,3 +237,925 @@ if st.button("Clear Query Parameters", key='ClearQueryParams'):
     st.experimental_set_query_params
     st.experimental_rerun()
 
+
+
+ # ------------------------------ Can't Believe I'm Doing This. ------------------------------
+
+ # Imports
+ import base64
+ import glob
+ import json
+ import math
+ import mistune  # markdown renderer used by read_file_content below
+ import openai
+ import os
+ import pytz
+ import re
+ import requests
+ import streamlit as st
+ import textract
+ import time
+ import zipfile
+ import huggingface_hub
+ import dotenv
+ from audio_recorder_streamlit import audio_recorder
+ from bs4 import BeautifulSoup
+ from collections import deque
+ from datetime import datetime
+ from dotenv import load_dotenv
+ from huggingface_hub import InferenceClient
+ from io import BytesIO
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.embeddings import OpenAIEmbeddings
+ from langchain.memory import ConversationBufferMemory
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.vectorstores import FAISS
+ from openai import ChatCompletion
+ from PyPDF2 import PdfReader
+ from templates import bot_template, css, user_template
+ from xml.etree import ElementTree as ET
+ import streamlit.components.v1 as components  # Streamlit Components for HTML5
+
+
+ st.set_page_config(page_title="🪐Llama Whisperer🦙 Voice Chat🌟", layout="wide")
+
+ def add_Med_Licensing_Exam_Dataset():
+     from datasets import load_dataset
+     dataset = load_dataset("augtoma/usmle_step_1")['test']  # Using 'test' split
+     st.title("USMLE Step 1 Dataset Viewer")
+     if len(dataset) == 0:
+         st.write("😢 The dataset is empty.")
+     else:
+         st.write("""
+         🔍 Use the search box to filter questions or use the grid to scroll through the dataset.
+         """)
+
+         # 👩‍🔬 Search Box
+         search_term = st.text_input("Search for a specific question:", "")
+
+         # 🎛 Pagination
+         records_per_page = 100
+         num_records = len(dataset)
+         num_pages = max(math.ceil(num_records / records_per_page), 1)
+
+         # Skip generating the slider if num_pages is 1 (i.e., all records fit in one page)
+         if num_pages > 1:
+             page_number = st.select_slider("Select page:", options=list(range(1, num_pages + 1)))
+         else:
+             page_number = 1  # Only one page
+
+         # 📊 Display Data
+         start_idx = (page_number - 1) * records_per_page
+         end_idx = min(start_idx + records_per_page, num_records)
+
+         # 🧪 Apply the Search Filter
+         # Slicing a Hugging Face Dataset returns columns, so select() row indices instead.
+         filtered_data = []
+         for record in dataset.select(range(start_idx, end_idx)):
+             if isinstance(record, dict) and 'text' in record and 'id' in record:
+                 if search_term:
+                     if search_term.lower() in record['text'].lower():
+                         st.markdown(record)
+                         filtered_data.append(record)
+                 else:
+                     filtered_data.append(record)
+
+         # 🌐 Render the Grid
+         for record in filtered_data:
+             st.write(f"## Question ID: {record['id']}")
+             st.write("### Question:")
+             st.write(f"{record['text']}")
+             st.write("### Answer:")
+             st.write(f"{record['answer']}")
+             st.write("---")
+
+         st.write(f"😊 Total Records: {num_records} | 📄 Displaying {start_idx+1} to {min(end_idx, num_records)}")
+
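+ # Pagination sketch (hypothetical numbers): with 250 records and 100 per page,
+ # math.ceil gives 3 pages; selecting page 3 yields start_idx=200 and end_idx=250,
+ # so rows 201-250 are rendered.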
+ # 1. Constants and Top Level UI Variables
+
+ # My Inference API Copy
+ # API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud'  # Dr Llama
+ # Meta's Original - Chat HF Free Version:
+ API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf"
+ API_KEY = os.getenv('API_KEY')
+ MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
+ MODEL1URL = "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf"
+ HF_KEY = os.getenv('HF_KEY')
+ headers = {
+     "Authorization": f"Bearer {HF_KEY}",
+     "Content-Type": "application/json"
+ }
+ key = os.getenv('OPENAI_API_KEY')
+ prompt = "Write instructions to teach discharge planning along with guidelines and patient education. List entities, features and relationships to CCDA and FHIR objects in boldface."
+ should_save = st.sidebar.checkbox("💾 Save", value=True, help="Save your session data.")
+
+ # 2. Prompt label button demo for LLM
+ def add_witty_humor_buttons():
+     with st.expander("Wit and Humor 🤣", expanded=True):
+         # Tip about the Dromedary family
+         st.markdown("🔬 **Fun Fact**: Dromedaries, part of the camel family, have a single hump and are adapted to arid environments. Their 'superpowers' include the ability to survive without water for up to 7 days, thanks to their specialized blood cells and water storage in their hump.")
+
+         # Define button descriptions
+         descriptions = {
+             "Generate Limericks 😂": "Write ten random adult limericks based on quotes that are tweet length and make you laugh 🎭",
+             "Wise Quotes 🧙": "Generate ten wise quotes that are tweet length 🦉",
+             "Funny Rhymes 🎤": "Create ten funny rhymes that are tweet length 🎶",
+             "Medical Jokes 💉": "Create ten medical jokes that are tweet length 🏥",
+             "Minnesota Humor ❄️": "Create ten jokes about Minnesota that are tweet length 🌨️",
+             "Top Funny Stories 📖": "Create ten funny stories that are tweet length 📚",
+             "More Funny Rhymes 🎙️": "Create ten more funny rhymes that are tweet length 🎵"
+         }
+
+         # Create columns
+         col1, col2, col3 = st.columns([1, 1, 1], gap="small")
+
+         # Add buttons to columns (each button looks up its prompt by an existing dictionary key)
+         if col1.button("Wise Limericks 😂"):
+             StreamLLMChatResponse(descriptions["Generate Limericks 😂"])
+
+         if col2.button("Wise Quotes 🧙"):
+             StreamLLMChatResponse(descriptions["Wise Quotes 🧙"])
+
+         #if col3.button("Funny Rhymes 🎤"):
+         #    StreamLLMChatResponse(descriptions["Funny Rhymes 🎤"])
+
+         col4, col5, col6 = st.columns([1, 1, 1], gap="small")
+
+         if col4.button("Top Ten Funniest Clean Jokes 💉"):
+             StreamLLMChatResponse(descriptions["Medical Jokes 💉"])
+
+         if col5.button("Minnesota Humor ❄️"):
+             StreamLLMChatResponse(descriptions["Minnesota Humor ❄️"])
+
+         if col6.button("Origins of Medical Science True Stories"):
+             StreamLLMChatResponse(descriptions["Top Funny Stories 📖"])
+
+         col7 = st.columns(1, gap="small")
+
+         if col7[0].button("Top Ten Best Write a streamlit python program prompts to build AI programs. 🎙️"):
+             StreamLLMChatResponse(descriptions["More Funny Rhymes 🎙️"])
+
+ def SpeechSynthesis(result):
+     documentHTML5 = '''
+     <!DOCTYPE html>
+     <html>
+     <head>
+         <title>Read It Aloud</title>
+         <script type="text/javascript">
+             function readAloud() {
+                 const text = document.getElementById("textArea").value;
+                 const speech = new SpeechSynthesisUtterance(text);
+                 window.speechSynthesis.speak(speech);
+             }
+         </script>
+     </head>
+     <body>
+         <h1>🔊 Read It Aloud</h1>
+         <textarea id="textArea" rows="10" cols="80">
+     '''
+     documentHTML5 = documentHTML5 + result
+     documentHTML5 = documentHTML5 + '''
+         </textarea>
+         <br>
+         <button onclick="readAloud()">🔊 Read Aloud</button>
+     </body>
+     </html>
+     '''
+
+     components.html(documentHTML5, width=1280, height=300)
+     #return result
+
+
+ # 3. Stream Llama Response
+ # @st.cache_resource
+ def StreamLLMChatResponse(prompt):
+     try:
+         endpoint_url = API_URL
+         hf_token = API_KEY
+         st.write('Running client ' + endpoint_url)
+         client = InferenceClient(endpoint_url, token=hf_token)
+         gen_kwargs = dict(
+             max_new_tokens=512,
+             top_k=30,
+             top_p=0.9,
+             temperature=0.2,
+             repetition_penalty=1.02,
+             stop_sequences=["\nUser:", "<|endoftext|>", "</s>"],
+         )
+         stream = client.text_generation(prompt, stream=True, details=True, **gen_kwargs)
+         report = []
+         res_box = st.empty()
+         collected_chunks = []
+         collected_messages = []
+         result = ''  # keep defined even if the stream yields nothing
+         for r in stream:
+             if r.token.special:
+                 continue
+             if r.token.text in gen_kwargs["stop_sequences"]:
+                 break
+             collected_chunks.append(r.token.text)
+             chunk_message = r.token.text
+             collected_messages.append(chunk_message)
+             try:
+                 report.append(r.token.text)
+                 if len(r.token.text) > 0:
+                     result = "".join(report).strip()
+                     res_box.markdown(f'*{result}*')
+             except:
+                 st.write('Stream llm issue')
+         SpeechSynthesis(result)
+         return result
+     except:
+         st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
+
+ # 4. Run query with payload
+ def query(payload):
+     response = requests.post(API_URL, headers=headers, json=payload)
+     st.markdown(response.json())
+     return response.json()
+
+ def get_output(prompt):
+     return query({"inputs": prompt})
+
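+ # Hypothetical call: get_output("Tell me a joke") posts {"inputs": "Tell me a joke"} to the
+ # hosted Llama endpoint; the HF Inference API typically replies with JSON such as
+ # [{"generated_text": "..."}], which query() both renders and returns.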
+ # 5. Auto name generated output files from time and content
+ def generate_filename(prompt, file_type):
+     central = pytz.timezone('US/Central')
+     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
+     replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
+     safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:255]  # 255 is the Linux max; 260 is the Windows max
+     #safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:45]
+     return f"{safe_date_time}_{safe_prompt}.{file_type}"
+
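+ # Worked example (assuming a Central-time clock of Oct 31, 2:05 PM):
+ # generate_filename("Hello world!", "md") -> "1031_1405_Hello_world.md"
+ # (the "!" is dropped because only alphanumerics and underscores survive the filter)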
+ # 6. Speech transcription via OpenAI service
+ def transcribe_audio(openai_key, file_path, model):
+     openai.api_key = openai_key
+     OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
+     headers = {
+         "Authorization": f"Bearer {openai_key}",
+     }
+     with open(file_path, 'rb') as f:
+         data = {'file': f}
+         st.write('STT transcript ' + OPENAI_API_URL)
+         response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
+     if response.status_code == 200:
+         st.write(response.json())
+         chatResponse = chat_with_model(response.json().get('text'), '')
+         transcript = response.json().get('text')
+         filename = generate_filename(transcript, 'txt')
+         response = chatResponse
+         user_prompt = transcript
+         create_file(filename, user_prompt, response, should_save)
+         return transcript
+     else:
+         st.write(response.json())
+         st.error("Error in API call.")
+         return None
+
+ # 7. Auto stop on silence audio control for recording WAV files
+ def save_and_play_audio(audio_recorder):
+     audio_bytes = audio_recorder(key='audio_recorder')
+     if audio_bytes:
+         filename = generate_filename("Recording", "wav")
+         with open(filename, 'wb') as f:
+             f.write(audio_bytes)
+         st.audio(audio_bytes, format="audio/wav")
+         return filename
+     return None
+
+ # 8. File creator that interprets type and creates output file for text, markdown and code
+ def create_file(filename, prompt, response, should_save=True):
+     if not should_save:
+         return
+     base_filename, ext = os.path.splitext(filename)
+     if ext in ['.txt', '.htm', '.md']:
+         with open(f"{base_filename}.md", 'w') as file:
+             try:
+                 content = prompt.strip() + '\r\n' + response
+                 file.write(content)
+             except:
+                 st.write('.')
+
+     #has_python_code = re.search(r"```python([\s\S]*?)```", prompt.strip() + '\r\n' + response)
+     #has_python_code = bool(re.search(r"```python([\s\S]*?)```", prompt.strip() + '\r\n' + response))
+     #if has_python_code:
+     #    python_code = re.findall(r"```python([\s\S]*?)```", response)[0].strip()
+     #    with open(f"{base_filename}-Code.py", 'w') as file:
+     #        file.write(python_code)
+     #    with open(f"{base_filename}.md", 'w') as file:
+     #        content = prompt.strip() + '\r\n' + response
+     #        file.write(content)
+
+ def truncate_document(document, length):
+     return document[:length]
+
+ def divide_document(document, max_length):
+     return [document[i:i+max_length] for i in range(0, len(document), max_length)]
+
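+ # Worked example: divide_document("abcdefgh", 3) -> ["abc", "def", "gh"],
+ # i.e. fixed-size slices with a shorter final remainder.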
+ # 9. Sidebar with UI controls to review and re-run prompts and continue responses
+ @st.cache_resource
+ def get_table_download_link(file_path):
+     with open(file_path, 'r') as file:
+         data = file.read()
+
+     b64 = base64.b64encode(data.encode()).decode()
+     file_name = os.path.basename(file_path)
+     ext = os.path.splitext(file_name)[1]  # get the file extension
+     if ext == '.txt':
+         mime_type = 'text/plain'
+     elif ext == '.py':
+         mime_type = 'text/plain'
+     elif ext == '.xlsx':
+         mime_type = 'text/plain'
+     elif ext == '.csv':
+         mime_type = 'text/plain'
+     elif ext == '.htm':
+         mime_type = 'text/html'
+     elif ext == '.md':
+         mime_type = 'text/markdown'
+     elif ext == '.wav':
+         mime_type = 'audio/wav'
+     else:
+         mime_type = 'application/octet-stream'  # general binary data type
+     href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
+     return href
+
+ def CompressXML(xml_text):
+     root = ET.fromstring(xml_text)
+     # ElementTree elements carry no parent pointer, so build a child -> parent map
+     # before removing comment-like elements.
+     parent_map = {child: parent for parent in root.iter() for child in parent}
+     for elem in list(root.iter()):
+         if isinstance(elem.tag, str) and 'Comment' in elem.tag and elem in parent_map:
+             parent_map[elem].remove(elem)
+     return ET.tostring(root, encoding='unicode', method="xml")
+
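+ # Hypothetical example: CompressXML("<root><CommentNote>x</CommentNote><a/></root>")
+ # drops the element whose tag contains 'Comment' and returns "<root><a /></root>".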
+ # 10. Read in and provide UI for past files
+ @st.cache_resource
+ def read_file_content(file, max_length):
+     if file.type == "application/json":
+         content = json.load(file)
+         return str(content)
+     elif file.type == "text/html" or file.type == "text/htm":
+         content = BeautifulSoup(file, "html.parser")
+         return content.text
+     elif file.type == "application/xml" or file.type == "text/xml":
+         tree = ET.parse(file)
+         root = tree.getroot()
+         xml = CompressXML(ET.tostring(root, encoding='unicode'))
+         return xml
+     elif file.type == "text/markdown" or file.type == "text/md":
+         md = mistune.create_markdown()
+         content = md(file.read().decode())
+         return content
+     elif file.type == "text/plain":
+         return file.getvalue().decode()
+     else:
+         return ""
+
+ # 11. Chat with GPT - Caution on quota - now favoring fastest AI pipeline STT Whisper -> LLM Llama -> TTS
+ @st.cache_resource
+ def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
+     model = model_choice
+     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
+     conversation.append({'role': 'user', 'content': prompt})
+     if len(document_section) > 0:
+         conversation.append({'role': 'assistant', 'content': document_section})
+     start_time = time.time()
+     report = []
+     res_box = st.empty()
+     collected_chunks = []
+     collected_messages = []
+
+     st.write('LLM stream ' + model)
+     for chunk in openai.ChatCompletion.create(model=model, messages=conversation, temperature=0.5, stream=True):
+         collected_chunks.append(chunk)
+         chunk_message = chunk['choices'][0]['delta']
+         collected_messages.append(chunk_message)
+         content = chunk["choices"][0].get("delta", {}).get("content")
+         try:
+             report.append(content)
+             if len(content) > 0:
+                 result = "".join(report).strip()
+                 res_box.markdown(f'*{result}*')
+         except:
+             st.write(' ')
+     full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
+     st.write("Elapsed time:")
+     st.write(time.time() - start_time)
+     return full_reply_content
+
+ # 12. Embedding VectorDB for LLM query of documents to text to compress inputs and prompt together as Chat memory using Langchain
+ @st.cache_resource
+ def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
+     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
+     conversation.append({'role': 'user', 'content': prompt})
+     if len(file_content) > 0:
+         conversation.append({'role': 'assistant', 'content': file_content})
+     response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
+     return response['choices'][0]['message']['content']
+
+ def extract_mime_type(file):
+     if isinstance(file, str):
+         pattern = r"type='(.*?)'"
+         match = re.search(pattern, file)
+         if match:
+             return match.group(1)
+         else:
+             raise ValueError(f"Unable to extract MIME type from {file}")
+     elif hasattr(file, 'type'):  # duck-type Streamlit's UploadedFile, which exposes .type
+         return file.type
+     else:
+         raise TypeError("Input should be a string or a Streamlit UploadedFile object")
+
+ def extract_file_extension(file):
+     # get the file name directly from the UploadedFile object
+     file_name = file.name
+     pattern = r".*?\.(.*?)$"
+     match = re.search(pattern, file_name)
+     if match:
+         return match.group(1)
+     else:
+         raise ValueError(f"Unable to extract file extension from {file_name}")
+
+ # Normalize input as text from PDF and other formats
+ @st.cache_resource
+ def pdf2txt(docs):
+     text = ""
+     for file in docs:
+         file_extension = extract_file_extension(file)
+         st.write(f"File type extension: {file_extension}")
+         if file_extension.lower() in ['py', 'txt', 'html', 'htm', 'xml', 'json']:
+             text += file.getvalue().decode('utf-8')
+         elif file_extension.lower() == 'pdf':
+             pdf = PdfReader(BytesIO(file.getvalue()))
+             for page in range(len(pdf.pages)):
+                 text += pdf.pages[page].extract_text()  # new PyPDF2 syntax
+     return text
+
+ def txt2chunks(text):
+     text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len)
+     return text_splitter.split_text(text)
+
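+ # Note: with separator="\n", CharacterTextSplitter packs newline-delimited pieces into
+ # chunks of up to ~1000 characters, overlapping neighbors by ~200 characters so text
+ # near a boundary stays visible in both adjacent chunks.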
+ # Vector Store using FAISS
+ @st.cache_resource
+ def vector_store(text_chunks):
+     embeddings = OpenAIEmbeddings(openai_api_key=key)
+     return FAISS.from_texts(texts=text_chunks, embedding=embeddings)
+
+ # Memory and Retrieval chains
+ @st.cache_resource
+ def get_chain(vectorstore):
+     llm = ChatOpenAI()
+     memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
+     return ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory)
+
+ def process_user_input(user_question):
+     response = st.session_state.conversation({'question': user_question})
+     st.session_state.chat_history = response['chat_history']
+     for i, message in enumerate(st.session_state.chat_history):
+         template = user_template if i % 2 == 0 else bot_template
+         st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
+     filename = generate_filename(user_question, 'txt')
+     response = message.content
+     user_prompt = user_question
+     create_file(filename, user_prompt, response, should_save)
+
+ def divide_prompt(prompt, max_length):
+     words = prompt.split()
+     chunks = []
+     current_chunk = []
+     current_length = 0
+     for word in words:
+         if len(word) + current_length <= max_length:
+             current_length += len(word) + 1
+             current_chunk.append(word)
+         else:
+             chunks.append(' '.join(current_chunk))
+             current_chunk = [word]
+             current_length = len(word)
+     chunks.append(' '.join(current_chunk))
+     return chunks
+
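+ # Worked example: divide_prompt("aa bb cc dd", 5) -> ["aa bb", "cc dd"]
+ # (words are greedily packed, counting one separator space per word).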
+
+ # 13. Provide way of saving all and deleting all to give way of reviewing output and saving locally before clearing it
+
+ @st.cache_resource
+ def create_zip_of_files(files):
+     zip_name = "all_files.zip"
+     with zipfile.ZipFile(zip_name, 'w') as zipf:
+         for file in files:
+             zipf.write(file)
+     return zip_name
+
+ @st.cache_resource
+ def get_zip_download_link(zip_file):
+     with open(zip_file, 'rb') as f:
+         data = f.read()
+     b64 = base64.b64encode(data).decode()
+     href = f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
+     return href
+
+ # 14. Inference Endpoints for Whisper (best fastest STT) on NVIDIA T4 and Llama (best fastest AGI LLM) on NVIDIA A10
+ # My Inference Endpoint
+ API_URL_IE = 'https://tonpixzfvq3791u9.us-east-1.aws.endpoints.huggingface.cloud'
+ # Original
+ API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small.en"
+ MODEL2 = "openai/whisper-small.en"
+ MODEL2_URL = "https://huggingface.co/openai/whisper-small.en"
+ #headers = {
+ #    "Authorization": "Bearer XXXXX",
+ #    "Content-Type": "audio/wav"
+ #}
+ # HF_KEY = os.getenv('HF_KEY')
+ HF_KEY = st.secrets['HF_KEY']
+ headers = {
+     "Authorization": f"Bearer {HF_KEY}",
+     "Content-Type": "audio/wav"
+ }
+
+ # Note: this redefines the earlier JSON query(); audio requests post raw WAV bytes instead.
+ #@st.cache_resource
+ def query(filename):
+     with open(filename, "rb") as f:
+         data = f.read()
+     response = requests.post(API_URL_IE, headers=headers, data=data)
+     return response.json()
+
+ # Redefines generate_filename above with a shorter 90-character prompt slug.
+ def generate_filename(prompt, file_type):
+     central = pytz.timezone('US/Central')
+     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
+     replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
+     safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
+     return f"{safe_date_time}_{safe_prompt}.{file_type}"
+
+ # 15. Audio recorder to Wav file
+ def save_and_play_audio(audio_recorder):
+     audio_bytes = audio_recorder()
+     if audio_bytes:
+         filename = generate_filename("Recording", "wav")
+         with open(filename, 'wb') as f:
+             f.write(audio_bytes)
+         st.audio(audio_bytes, format="audio/wav")
+         return filename
+
+ # 16. Speech transcription to file output
+ def transcribe_audio(filename):
+     output = query(filename)
+     return output
+
+ def whisper_main():
+     #st.title("Speech to Text")
+     #st.write("Record your speech and get the text.")
+
+     # Audio, transcribe, GPT:
+     filename = save_and_play_audio(audio_recorder)
+     if filename is not None:
+         transcription = transcribe_audio(filename)
+         try:
+             transcript = transcription['text']
+             st.write(transcript)
+             response = StreamLLMChatResponse(transcript)
+             filename_txt = generate_filename(transcript, "txt")
+             create_file(filename_txt, transcript, response, should_save)
+             filename_wav = filename_txt.replace('.txt', '.wav')
+             import shutil
+             shutil.copyfile(filename, filename_wav)
+             if os.path.exists(filename):
+                 os.remove(filename)
+         except:
+             st.write('Starting Whisper Model on GPU. Please retry in 30 seconds.')
+
+
+ # Sample function to demonstrate a response, replace with your own logic
+ def StreamMedChatResponse(topic):
+     st.write(f"Showing resources or questions related to: {topic}")
+
+ def add_medical_exam_buttons():
+     # Medical exam terminology descriptions
+     descriptions = {
+         "White Blood Cells 🌊": "3 Q&A with emojis about types, facts, function, inputs and outputs of white blood cells 🎥",
+         "CT Imaging🦠": "3 Q&A with emojis on CT Imaging post surgery, how to, what to look for 💊",
+         "Hematoma 💉": "3 Q&A with emojis about hematoma and infection care and study including bacteria cultures and tests or labs 💪",
+         "Post Surgery Wound Care 🍌": "3 Q&A with emojis on wound care, and good bedside manner 🩸",
+         "Healing and humor 💊": "3 Q&A with emojis on stories and humor about healing and caregiving 🚑",
+         "Psychology of bedside manner 🧬": "3 Q&A with emojis on bedside manner and how to make patients feel at ease 🛠",
+         "CT scan 💊": "3 Q&A with analysis on infection using CT scan and packing for skin, cellulitis and fascia 🩺"
+     }
+
+     # Expander for medical topics
+     with st.expander("Medical Licensing Exam Topics 📚", expanded=False):
+         st.markdown("🩺 **Important**: Variety of topics for medical licensing exams.")
+
+         # Create buttons for each description with unique keys
+         for idx, (label, content) in enumerate(descriptions.items()):
+             button_key = f"button_{idx}"
+             if st.button(label, key=button_key):
+                 st.write(f"Running {label}")
+                 input = 'Create markdown outline for definition of topic ' + label + ' also short quiz with appropriate emojis and definitions for: ' + content
+                 response = StreamLLMChatResponse(input)
+                 filename = generate_filename(response, 'txt')
+                 create_file(filename, input, response, should_save)
+
+ def add_medical_exam_buttons2():
+     with st.expander("Medical Licensing Exam Topics 📚", expanded=False):
+         st.markdown("🩺 **Important**: This section provides a variety of medical topics that are often encountered in medical licensing exams.")
+
+         # Define medical exam terminology descriptions
+         descriptions = {
+             "White Blood Cells 🌊": "3 Questions and Answers with emojis about white blood cells 🎥",
+             "CT Imaging🦠": "3 Questions and Answers with emojis about CT Imaging of post surgery abscess, hematoma, and serosanguineous fluid 💊",
+             "Hematoma 💉": "3 Questions and Answers with emojis about hematoma and infection and how heat helps white blood cells 💪",
+             "Post Surgery Wound Care 🍌": "3 Questions and Answers with emojis about wound care and how to help as a caregiver 🩸",
+             "Healing and humor 💊": "3 Questions and Answers with emojis on the use of stories and humor to help patients and family 🚑",
+             "Psychology of bedside manner 🧬": "3 Questions and Answers with emojis about good bedside manner 🛠",
+             "CT scan 💊": "3 Questions and Answers with analysis of bacteria and understanding infection using cultures and CT scan 🩺"
+         }
+
+         # Create columns
+         col1, col2, col3, col4 = st.columns([1, 1, 1, 1], gap="small")
+
+         # Add buttons to columns (each label maps to the matching-emoji description key)
+         if col1.button("Ultrasound with Doppler 🌊"):
+             StreamLLMChatResponse(descriptions["White Blood Cells 🌊"])
+
+         if col2.button("Oseltamivir 🦠"):
+             StreamLLMChatResponse(descriptions["CT Imaging🦠"])
+
+         if col3.button("IM Epinephrine 💉"):
+             StreamLLMChatResponse(descriptions["Hematoma 💉"])
+
+         if col4.button("Hypokalemia 🍌"):
+             StreamLLMChatResponse(descriptions["Post Surgery Wound Care 🍌"])
+
+         col5, col6, col7, col8 = st.columns([1, 1, 1, 1], gap="small")
+
+         if col5.button("Succinylcholine 💊"):
+             StreamLLMChatResponse(descriptions["Healing and humor 💊"])
+
+         if col6.button("Phosphoinositol System 🧬"):
+             StreamLLMChatResponse(descriptions["Psychology of bedside manner 🧬"])
+
+         if col7.button("Ramipril 💊"):
+             StreamLLMChatResponse(descriptions["CT scan 💊"])
+
+ # 17. Main
+ def main():
+
+     #st.title("GAIA - Medical License Exam Testing")
+     prompt = "Write ten funny jokes that are tweet length stories that make you laugh. Show as markdown outline with emojis for each."
+
+     # Add Wit and Humor buttons
+     # add_witty_humor_buttons()
+     add_medical_exam_buttons()
+
+     with st.expander("Prompts 📚", expanded=False):
+         example_input = st.text_input("Enter your prompt text for Llama:", value=prompt, help="Enter text to get a response from DromeLlama.")
+         if st.button("Run Prompt With Llama model", help="Click to run the prompt."):
+             try:
+                 response = StreamLLMChatResponse(example_input)
+                 filename = generate_filename(example_input, 'txt')
+                 create_file(filename, example_input, response, should_save)
+             except:
+                 st.write('Llama model is asleep. Starting now on A10 GPU. Please wait one minute then retry. KEDA triggered.')
+
+     openai.api_key = os.getenv('OPENAI_API_KEY')
+     if openai.api_key is None:
+         openai.api_key = st.secrets['OPENAI_API_KEY']
+
+     menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
+     choice = st.sidebar.selectbox("Output File Type:", menu)
+
+     model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
+
+     user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
+     collength, colupload = st.columns([2, 3])  # adjust the ratio as needed
+     with collength:
+         max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
+     with colupload:
+         uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx", "csv", "html", "htm", "md", "txt"])
+     document_sections = deque()
+     document_responses = {}
+     if uploaded_file is not None:
+         file_content = read_file_content(uploaded_file, max_length)
+         document_sections.extend(divide_document(file_content, max_length))
+     if len(document_sections) > 0:
+         if st.button("👁️ View Upload"):
+             st.markdown("**Sections of the uploaded file:**")
+             for i, section in enumerate(list(document_sections)):
+                 st.markdown(f"**Section {i+1}**\n{section}")
+         st.markdown("**Chat with the model:**")
+         for i, section in enumerate(list(document_sections)):
+             if i in document_responses:
+                 st.markdown(f"**Section {i+1}**\n{document_responses[i]}")
+             else:
+                 if st.button(f"Chat about Section {i+1}"):
+                     st.write('Reasoning with your inputs...')
+                     response = chat_with_model(user_prompt, section, model_choice)
+                     st.write('Response:')
+                     st.write(response)
+                     document_responses[i] = response
+                     filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
+                     create_file(filename, user_prompt, response, should_save)
+                     st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
+     if st.button('💬 Chat'):
+         st.write('Reasoning with your inputs...')
+         user_prompt_sections = divide_prompt(user_prompt, max_length)
+         full_response = ''
+         for prompt_section in user_prompt_sections:
+             response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
+             full_response += response + '\n'  # Combine the responses
+         response = full_response
+         st.write('Response:')
+         st.write(response)
+         filename = generate_filename(user_prompt, choice)
+         create_file(filename, user_prompt, response, should_save)
+         #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
+
+     # Compose a file sidebar of markdown md files:
+     all_files = glob.glob("*.md")
+     all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
+     all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order
+     if st.sidebar.button("🗑 Delete All Text"):
+         for file in all_files:
+             os.remove(file)
+         st.experimental_rerun()
+     if st.sidebar.button("⬇️ Download All"):
+         zip_file = create_zip_of_files(all_files)
+         st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)
+     file_contents = ''
+     next_action = ''
+     for file in all_files:
+         col1, col2, col3, col4, col5 = st.sidebar.columns([1, 6, 1, 1, 1])  # adjust the ratio as needed
+         with col1:
+             if st.button("🌐", key="md_" + file):  # md emoji button
+                 with open(file, 'r') as f:
+                     file_contents = f.read()
+                 next_action = 'md'
+         with col2:
+             st.markdown(get_table_download_link(file), unsafe_allow_html=True)
+         with col3:
+             if st.button("📂", key="open_" + file):  # open emoji button
+                 with open(file, 'r') as f:
+                     file_contents = f.read()
+                 next_action = 'open'
+         with col4:
+             if st.button("🔍", key="read_" + file):  # search emoji button
+                 with open(file, 'r') as f:
+                     file_contents = f.read()
+                 next_action = 'search'
+         with col5:
+             if st.button("🗑", key="delete_" + file):
+                 os.remove(file)
+                 st.experimental_rerun()
+
+     if len(file_contents) > 0:
+         if next_action == 'open':
+             file_content_area = st.text_area("File Contents:", file_contents, height=500)
+         if next_action == 'md':
+             st.markdown(file_contents)
+
+             buttonlabel = '🔍Run with Llama and GPT.'
+             if st.button(key='RunWithLlamaandGPT', label=buttonlabel):
+                 user_prompt = file_contents
+
+                 # Llama versus GPT Battle!
+                 all = ""
+                 try:
+                     st.write('🔍Running with Llama.')
+                     response = StreamLLMChatResponse(file_contents)
+                     filename = generate_filename(user_prompt, "md")
+                     create_file(filename, file_contents, response, should_save)
+                     all = response
+                     #SpeechSynthesis(response)
+                 except:
+                     st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
+
+                 # gpt
+                 try:
+                     st.write('🔍Running with GPT.')
+                     response2 = chat_with_model(user_prompt, file_contents, model_choice)
+                     filename2 = generate_filename(file_contents, choice)
+                     create_file(filename2, user_prompt, response2, should_save)
+                     all = all + response2
+                     #SpeechSynthesis(response2)
+                 except:
+                     st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
+
+                 SpeechSynthesis(all)
+
+         if next_action == 'search':
+             file_content_area = st.text_area("File Contents:", file_contents, height=500)
+             st.write('🔍Running with Llama and GPT.')
+
+             user_prompt = file_contents
+
+             # Llama versus GPT Battle!
+             all = ""
+             try:
+                 st.write('🔍Running with Llama.')
+                 response = StreamLLMChatResponse(file_contents)
+                 filename = generate_filename(user_prompt, "md")
+                 create_file(filename, file_contents, response, should_save)
+                 all = response
+                 #SpeechSynthesis(response)
+             except:
+                 st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
+
+             # gpt
+             try:
+                 st.write('🔍Running with GPT.')
+                 response2 = chat_with_model(user_prompt, file_contents, model_choice)
+                 filename2 = generate_filename(file_contents, choice)
+                 create_file(filename2, user_prompt, response2, should_save)
+                 all = all + response2
+                 #SpeechSynthesis(response2)
+             except:
+                 st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
+
+             SpeechSynthesis(all)
+
+     # Function to encode file to base64
+     def get_base64_encoded_file(file_path):
+         with open(file_path, "rb") as file:
+             return base64.b64encode(file.read()).decode()
+
+     # Function to create a download link
+     def get_audio_download_link(file_path):
+         base64_file = get_base64_encoded_file(file_path)
+         return f'<a href="data:file/wav;base64,{base64_file}" download="{os.path.basename(file_path)}">⬇️ Download Audio</a>'
+
+     # Compose a file sidebar of past encounters
+     all_files = glob.glob("*.wav")
+     all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
+     all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order
+
+     filekey = 'delall'
+     if st.sidebar.button("🗑 Delete All Audio", key=filekey):
+         for file in all_files:
+             os.remove(file)
+         st.experimental_rerun()
+
+     for file in all_files:
+         col1, col2 = st.sidebar.columns([6, 1])  # adjust the ratio as needed
+         with col1:
+             st.markdown(file)
+             if st.button("🎵", key="play_" + file):  # play emoji button
+                 audio_file = open(file, 'rb')
+                 audio_bytes = audio_file.read()
+                 st.audio(audio_bytes, format='audio/wav')
+                 #st.markdown(get_audio_download_link(file), unsafe_allow_html=True)
+                 #st.text_input(label="", value=file)
+         with col2:
+             if st.button("🗑", key="delete_" + file):
+                 os.remove(file)
+                 st.experimental_rerun()
+
+     # Feedback
+     # Step: Give User a Way to Upvote or Downvote
+     with st.expander("Give your feedback 👍", expanded=False):
+         feedback = st.radio("Step 8: Give your feedback", ("👍 Upvote", "👎 Downvote"))
+         if feedback == "👍 Upvote":
+             st.write("You upvoted 👍. Thank you for your feedback!")
+         else:
+             st.write("You downvoted 👎. Thank you for your feedback!")
+
+     load_dotenv()
+     st.write(css, unsafe_allow_html=True)
+     st.header("Chat with documents :books:")
+     user_question = st.text_input("Ask a question about your documents:")
+     if user_question:
+         process_user_input(user_question)
+     with st.sidebar:
+         st.subheader("Your documents")
+         docs = st.file_uploader("import documents", accept_multiple_files=True)
+         with st.spinner("Processing"):
+             raw = pdf2txt(docs) if docs else ''
+             if len(raw) > 0:
+                 length = str(len(raw))
+                 text_chunks = txt2chunks(raw)
+                 vectorstore = vector_store(text_chunks)
+                 st.session_state.conversation = get_chain(vectorstore)
+                 st.markdown('# AI Search Index of Length:' + length + ' Created.')  # add timing
+                 filename = generate_filename(raw, 'txt')
+                 create_file(filename, raw, '', should_save)
+
+ # 18. Run AI Pipeline
+ if __name__ == "__main__":
+     whisper_main()
+     main()