kdevoe committed on
Commit
8f8323f
·
verified ·
1 Parent(s): 430000e

Reverting to version before hyperparameter changes

Browse files
Files changed (1) hide show
  1. app.py +13 -15
app.py CHANGED
@@ -14,27 +14,26 @@ from langchain.vectorstores import Chroma
14
  openai_api_key = os.getenv("openai_token")
15
  embedding = OpenAIEmbeddings(openai_api_key=openai_api_key)
16
 
17
- @st.cache_resource
18
- def get_vectordb():
19
- embedding = OpenAIEmbeddings(openai_api_key=os.getenv("openai_token"))
20
- return Chroma(persist_directory="./chroma_db", embedding_function=embedding)
21
-
22
- vectordb = get_vectordb()
23
-
24
- # # Setup vector database
25
- # persist_directory = './chroma_db'
26
- # vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
27
 
28
  llm_name = "gpt-3.5-turbo"
29
 
30
- llm = ChatOpenAI(model_name=llm_name, temperature=0.7,
31
  openai_api_key=openai_api_key)
32
 
33
  qa_chain = RetrievalQA.from_chain_type(
34
  llm,
35
- retriever=vectordb.as_retriever(search_kwargs={"k": 5})
36
  )
37
 
 
 
 
 
 
 
38
 
39
  # Streamed response emulator
40
  def response_generator(prompt):
@@ -45,7 +44,7 @@ def response_generator(prompt):
45
  time.sleep(0.05)
46
 
47
 
48
- st.title("Technical Support Chatbot")
49
 
50
  # Initialize chat history
51
  if "messages" not in st.session_state:
@@ -57,7 +56,7 @@ for message in st.session_state.messages:
57
  st.markdown(message["content"])
58
 
59
  # Accept user input
60
- if prompt := st.chat_input("Enter your question here"):
61
  # Add user message to chat history
62
  st.session_state.messages.append({"role": "user", "content": prompt})
63
  # Display user message in chat message container
@@ -69,4 +68,3 @@ if prompt := st.chat_input("Enter your question here"):
69
  response = st.write_stream(response_generator(prompt))
70
  # Add assistant response to chat history
71
  st.session_state.messages.append({"role": "assistant", "content": response})
72
-
 
14
  openai_api_key = os.getenv("openai_token")
15
  embedding = OpenAIEmbeddings(openai_api_key=openai_api_key)
16
 
17
+ # Setup vector database
18
+ persist_directory = './chroma_db'
19
+ vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
 
 
 
 
 
 
 
20
 
21
  llm_name = "gpt-3.5-turbo"
22
 
23
+ llm = ChatOpenAI(model_name=llm_name, temperature=0,
24
  openai_api_key=openai_api_key)
25
 
26
  qa_chain = RetrievalQA.from_chain_type(
27
  llm,
28
+ retriever=vectordb.as_retriever()
29
  )
30
 
31
+ question = "production is broken how do I fix it?"
32
+
33
+ result = qa_chain({"query": question})
34
+
35
+ print(result['result'])
36
+
37
 
38
  # Streamed response emulator
39
  def response_generator(prompt):
 
44
  time.sleep(0.05)
45
 
46
 
47
+ st.title("Simple chat")
48
 
49
  # Initialize chat history
50
  if "messages" not in st.session_state:
 
56
  st.markdown(message["content"])
57
 
58
  # Accept user input
59
+ if prompt := st.chat_input("What is up?"):
60
  # Add user message to chat history
61
  st.session_state.messages.append({"role": "user", "content": prompt})
62
  # Display user message in chat message container
 
68
  response = st.write_stream(response_generator(prompt))
69
  # Add assistant response to chat history
70
  st.session_state.messages.append({"role": "assistant", "content": response})