asitts committed on
Commit
6b87a1d
·
1 Parent(s): dd7204b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -28
app.py CHANGED
@@ -1,24 +1,19 @@
1
  import streamlit as st
2
  from llama_index import VectorStoreIndex, ServiceContext, Document
3
  from llama_index.llms import OpenAI
4
- from langchain.llms import OpenAI
5
  import openai
6
  from llama_index import SimpleDirectoryReader
7
  import os
8
 
9
  st.set_page_config(page_title="HUD Audit Guide", page_icon="πŸ‚", layout="centered", initial_sidebar_state="auto", menu_items=None)
10
- st.title("Ask the HUD Audit Guide 💬🤖")
11
- st.info("Check out more info on the complete HUD Audit Guide at the official [website](https://www.hudoig.gov/library/single-audit-guidance/hud-consolidated-audit-guide)", icon="📃")
12
-
13
-
14
- #openai_api_key = st.sidebar.text_input('OpenAI API Key', type='password')
15
- #openai_api_key = os.environ['OPENAI_KEY']
16
- openai_api_key = 'sk-[REDACTED-LEAKED-KEY]'
17
 
18
- def generate_response(input_text):
19
- llm = OpenAI(temperature=0.7, openai_api_key=openai_api_key)
20
- st.info(llm(input_text))
21
 
 
 
 
 
22
  if "messages" not in st.session_state.keys(): # Initialize the chat messages history
23
  st.session_state.messages = [
24
  {"role": "assistant", "content": "Ask me a question about the HUD Audit Guide - Chapter 6 - Ginnie Mae Issuers of Mortgage-Backed Securities Audit Guidance!"}
@@ -29,19 +24,15 @@ def load_data():
29
  with st.spinner(text="Loading and indexing the HUD Audit Guide – hang tight! This should take 1-2 minutes."):
30
  reader = SimpleDirectoryReader(input_dir="./data", recursive=True)
31
  docs = reader.load_data()
32
- service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-3.5-turbo", temperature=0.5, system_prompt="You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts – do not hallucinate features."))
33
  index = VectorStoreIndex.from_documents(docs, service_context=service_context)
34
  return index
35
 
36
  index = load_data()
37
- #chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True, system_prompt="You are an expert on the HUD Audit Guide and the Ginnie Mae MBS Guide. Your job is to answer technical questions. Assume that all questions are related to the HUD Audit Guide's impact on Ginnie Mae Issuers. Keep your answers technical and based on facts – do not hallucinate features.")
38
  chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
39
 
40
  if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
41
- #if not openai_api_key.startswith('sk-'):
42
- # st.warning('Please enter your OpenAI API key!', icon='⚠')
43
- #else: # Only allow the user to proceed if it appears they've entered a valid OpenAI API key
44
- # st.session_state.messages.append({"role": "user", "content": prompt})
45
  st.session_state.messages.append({"role": "user", "content": prompt})
46
 
47
  for message in st.session_state.messages: # Display the prior chat messages
@@ -55,14 +46,4 @@ if st.session_state.messages[-1]["role"] != "assistant":
55
  response = chat_engine.chat(prompt)
56
  st.write(response.response)
57
  message = {"role": "assistant", "content": response.response}
58
- st.session_state.messages.append(message) # Add response to message history
59
-
60
- #try:
61
- # if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
62
- # if not openai_api_key.startswith('sk-'):
63
- # st.warning('Please enter your OpenAI API key!', icon='⚠')
64
- # else: # Only allow the user to proceed if it appears they've entered a valid OpenAI API key
65
- # st.session_state.messages.append({"role": "user", "content": prompt})
66
- # # ... (rest of the code)
67
- #except Exception as e:
68
- # st.error(f"An error occurred: {e}")
 
1
  import streamlit as st
2
  from llama_index import VectorStoreIndex, ServiceContext, Document
3
  from llama_index.llms import OpenAI
 
4
  import openai
5
  from llama_index import SimpleDirectoryReader
6
  import os
7
 
8
  st.set_page_config(page_title="HUD Audit Guide", page_icon="πŸ‚", layout="centered", initial_sidebar_state="auto", menu_items=None)
 
 
 
 
 
 
 
9
 
10
+ test_key_print = os.environ['OPENAI_KEY']
11
+ st.write(test_key_print)
 
12
 
13
+ openai.api_key = "sk-[REDACTED-LEAKED-KEY]"
14
+ st.title("Ask the HUD Audit Guide 💬🤖")
15
+ st.info("Check out more info on the complete HUD Audit Guide at the official [website](https://www.hudoig.gov/library/single-audit-guidance/hud-consolidated-audit-guide)", icon="📃")
16
+
17
  if "messages" not in st.session_state.keys(): # Initialize the chat messages history
18
  st.session_state.messages = [
19
  {"role": "assistant", "content": "Ask me a question about the HUD Audit Guide - Chapter 6 - Ginnie Mae Issuers of Mortgage-Backed Securities Audit Guidance!"}
 
24
  with st.spinner(text="Loading and indexing the HUD Audit Guide – hang tight! This should take 1-2 minutes."):
25
  reader = SimpleDirectoryReader(input_dir="./data", recursive=True)
26
  docs = reader.load_data()
27
+ service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-3.5-turbo", temperature=0.5, system_prompt="You are an expert on the HUD Audit Guide and your job is to answer technical questions. Assume that all questions are related to the HUD Audit Guide and Ginnie Mae Issuers. Keep your answers technical and based on facts – do not hallucinate features."))
28
  index = VectorStoreIndex.from_documents(docs, service_context=service_context)
29
  return index
30
 
31
  index = load_data()
32
+ #chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True, system_prompt="You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts – do not hallucinate features.")
33
  chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
34
 
35
  if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
 
 
 
 
36
  st.session_state.messages.append({"role": "user", "content": prompt})
37
 
38
  for message in st.session_state.messages: # Display the prior chat messages
 
46
  response = chat_engine.chat(prompt)
47
  st.write(response.response)
48
  message = {"role": "assistant", "content": response.response}
49
+ st.session_state.messages.append(message) # Add response to message history