awacke1 committed on
Commit
4b5bb0f
1 Parent(s): f568a9a

Update app.py

Files changed (1)
  1. app.py +16 -10
app.py CHANGED
@@ -96,6 +96,7 @@ def add_witty_humor_buttons():
 
 
 # Function to Stream Inference Client for Inference Endpoint Responses
+@st.cache_resource
 def StreamLLMChatResponse(prompt):
 
     try:
@@ -174,8 +175,7 @@ def StreamLLMChatResponse(prompt):
     except:
         st.write('DromeLlama is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
 
-
-
+@st.cache_resource
 def query(payload):
     response = requests.post(API_URL, headers=headers, json=payload)
     st.markdown(response.json())
@@ -191,6 +191,7 @@ def generate_filename(prompt, file_type):
     safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
     return f"{safe_date_time}_{safe_prompt}.{file_type}"
 
+@st.cache_resource
 def transcribe_audio(openai_key, file_path, model):
     openai.api_key = openai_key
     OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
@@ -224,6 +225,7 @@ def save_and_play_audio(audio_recorder):
         return filename
     return None
 
+@st.cache_resource
 def create_file(filename, prompt, response, should_save=True):
     if not should_save:
         return
@@ -248,6 +250,7 @@ def truncate_document(document, length):
 def divide_document(document, max_length):
     return [document[i:i+max_length] for i in range(0, len(document), max_length)]
 
+@st.cache_resource
 def get_table_download_link(file_path):
     with open(file_path, 'r') as file:
         try:
@@ -282,6 +285,7 @@ def CompressXML(xml_text):
             elem.parent.remove(elem)
     return ET.tostring(root, encoding='unicode', method="xml")
 
+@st.cache_resource
 def read_file_content(file,max_length):
     if file.type == "application/json":
         content = json.load(file)
@@ -303,6 +307,7 @@ def read_file_content(file,max_length):
     else:
         return ""
 
+@st.cache_resource
 def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
     model = model_choice
     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
@@ -331,6 +336,7 @@ def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
     st.write(time.time() - start_time)
     return full_reply_content
 
+@st.cache_resource
 def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
     conversation.append({'role': 'user', 'content': prompt})
@@ -362,6 +368,7 @@ def extract_file_extension(file):
     else:
         raise ValueError(f"Unable to extract file extension from {file_name}")
 
+@st.cache_resource
 def pdf2txt(docs):
     text = ""
     for file in docs:
@@ -383,10 +390,12 @@ def txt2chunks(text):
     text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len)
     return text_splitter.split_text(text)
 
+@st.cache_resource
 def vector_store(text_chunks):
     embeddings = OpenAIEmbeddings(openai_api_key=key)
     return FAISS.from_texts(texts=text_chunks, embedding=embeddings)
 
+@st.cache_resource
 def get_chain(vectorstore):
     llm = ChatOpenAI()
     memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
@@ -419,6 +428,7 @@ def divide_prompt(prompt, max_length):
     chunks.append(' '.join(current_chunk))
     return chunks
 
+@st.cache_resource
 def create_zip_of_files(files):
     zip_name = "all_files.zip"
     with zipfile.ZipFile(zip_name, 'w') as zipf:
@@ -426,6 +436,7 @@ def create_zip_of_files(files):
             zipf.write(file)
     return zip_name
 
+@st.cache_resource
 def get_zip_download_link(zip_file):
     with open(zip_file, 'rb') as f:
         data = f.read()
@@ -440,6 +451,7 @@ headers = {
     "Content-Type": "audio/wav"
 }
 
+@st.cache_resource
 def query(filename):
     with open(filename, "rb") as f:
         data = f.read()
@@ -454,6 +466,7 @@ def generate_filename(prompt, file_type):
     return f"{safe_date_time}_{safe_prompt}.{file_type}"
 
 # 10. Audio recorder to Wav file:
+@st.cache_resource
 def save_and_play_audio(audio_recorder):
     audio_bytes = audio_recorder()
     if audio_bytes:
@@ -506,14 +519,7 @@ def main():
     openai.api_key = os.getenv('OPENAI_KEY')
     menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
     choice = st.sidebar.selectbox("Output File Type:", menu)
-    model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
-
-    #filename = save_and_play_audio(audio_recorder)
-    #if filename is not None:
-    #    transcription = transcribe_audio(key, filename, "whisper-1")
-    #    st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
-    #    filename = None
-
+    model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
     user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
     collength, colupload = st.columns([2,3]) # adjust the ratio as needed
     with collength:
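
Note on the change: @st.cache_resource is Streamlit's decorator for memoizing a function's return value across reruns and sessions, keyed on the function's arguments, so expensive setup runs only once. A minimal sketch of the behavior (hypothetical example, not taken from app.py):

    import time
    import streamlit as st

    @st.cache_resource
    def load_resource(name: str):
        # Body executes once per unique `name`; later reruns reuse the cached object.
        time.sleep(2)  # stand-in for expensive setup (model load, API client, ...)
        return {"name": name, "loaded_at": time.time()}

    st.write(load_resource("demo"))  # fast on every rerun after the first call

Streamlit's docs recommend st.cache_resource for shared, unserializable objects (clients, models, vector stores) and st.cache_data for serializable results, which may be worth weighing for the decorated helpers here that return strings or file paths.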