Mishab committed on
Commit
5b4b6c8
1 Parent(s): 9b9bf46

Update utils and app files

Replaced the conversational retrieval agent with a conversational retrieval chain

Files changed (2)
  1. app.py +10 -12
  2. utils.py +20 -19
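
At its core the commit is an interface swap: the agent took an "input" key and returned both an "output" and the "intermediate_steps" that app.py used to decide whether to display source documents, while ConversationalRetrievalChain takes a "question" key and returns only an "answer". A minimal sketch of the contrast, assuming agent_executor and conversation_chain are built as in the diffs below:

    # Before: the agent's result exposed its tool calls.
    result = agent_executor({"input": "What does OPM do?"})
    answer = result["output"]
    used_knowledge_base = len(result["intermediate_steps"]) > 0

    # After: the chain returns only the answer, so app.py re-queries the
    # ensemble retriever whenever it wants source documents to display.
    output = conversation_chain({"question": "What does OPM do?"})
    answer = output["answer"]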
app.py CHANGED
@@ -30,7 +30,7 @@ from langchain.agents.agent_toolkits import create_conversational_retrieval_agent
 from langchain.utilities import SerpAPIWrapper
 
 from utils import build_embedding_model, build_llm
-from utils import load_ensemble_retriver, load_text_chunks, load_vectorstore, load_conversational_retrievel_agent
+from utils import load_ensemble_retriver, load_text_chunks, load_vectorstore, load_conversational_retrievel_chain
 
 load_dotenv()
 # Getting current timestamp to keep track of historical conversations
@@ -57,8 +57,8 @@ if "text_chunks" not in st.session_state:
 if "ensemble_retriver" not in st.session_state:
     st.session_state["ensemble_retriver"] = load_ensemble_retriver(text_chunks=st.session_state["text_chunks"], embeddings=st.session_state["embeddings"], chroma_vectorstore=st.session_state["vector_db"])
 
-if "agent_executor" not in st.session_state:
-    st.session_state["agent_executor"] = load_conversational_retrievel_agent(retriever=st.session_state["ensemble_retriver"], llm=st.session_state["llm"])
+if "conversation_chain" not in st.session_state:
+    st.session_state["conversation_chain"] = load_conversational_retrievel_chain(retriever=st.session_state["ensemble_retriver"], llm=st.session_state["llm"])
 
 
 
@@ -192,9 +192,9 @@ if st.session_state["vector_db"] and st.session_state["llm"]:
             st.write(message)
 
 
-    def generate_llm_response(agent_executor, prompt_input):
-        result = agent_executor({"input": prompt_input})
-        return [result['output'], result['intermediate_steps']]
+    def generate_llm_response(conversation_chain, prompt_input):
+        output = conversation_chain({'question': prompt_input})
+        return output['answer']
 
 
     # User-provided prompt
@@ -208,10 +208,10 @@ if st.session_state["vector_db"] and st.session_state["llm"]:
     with st.chat_message("assistant"):
         with st.spinner("Searching..."):
             start = timeit.default_timer()
-            response = generate_llm_response(agent_executor=st.session_state["agent_executor"], prompt_input=prompt)
+            response = generate_llm_response(conversation_chain=st.session_state["conversation_chain"], prompt_input=prompt)
             placeholder = st.empty()
             full_response = ''
-            for item in response[0]:
+            for item in response:
                 full_response += item
                 placeholder.markdown(full_response)
             # The following logic works as described below.
@@ -219,7 +219,7 @@ if st.session_state["vector_db"] and st.session_state["llm"]:
            # -- If not, we can conclude that the agent has used internet search as a tool.
            # -- Check whether intermediate steps are present in the output for the prompt.
            # -- If intermediate steps are present, the agent has used the existing custom knowledge base for information retrieval, so we give the source docs as output along with the LLM's response.
-            if len(response[1]) > 0:
+            if response:
                 st.text("-------------------------------------")
                 docs = st.session_state["ensemble_retriver"].get_relevant_documents(prompt)
                 source_doc_list = []
@@ -237,8 +237,6 @@ if st.session_state["vector_db"] and st.session_state["llm"]:
                     st.write("---")  # Add a separator between entries
                 message = {"role": "assistant", "content": full_response, "Source": merged_source_doc}
                 st.session_state.messages.append(message)
-            else:
-                message = {"role": "assistant", "content": full_response, "Source": ""}
-                st.session_state.messages.append(message)
+
             end = timeit.default_timer()
             print(f"Time to retrieve response: {end - start}")
utils.py CHANGED
@@ -257,23 +257,24 @@ def load_ensemble_retriver(text_chunks, embeddings, chroma_vectorstore):
     return ensemble_retriever
 
 
-def load_conversational_retrievel_agent(retriever, llm):
-    '''Load a conversational retrieval agent with the following tasks as tools:
-    1) OPM knowledge base query
-    2) Internet search with SerpAPI
-    This agent combines RAG, chat interfaces, and agents.
-    '''
-    retriever_tool = create_retriever_tool(
-        retriever,
-        "Search_US_Office_of_Personnel_Management_Document",
-        "Searches and returns documents regarding the U.S. Office of Personnel Management (OPM).")
-    search_api = SerpAPIWrapper()
-    search_api_tool = Tool(
-        name="Current_Search",
-        func=search_api.run,
-        description="useful for when you need to answer questions about current events or the current state of the world"
-    )
-    tools = [retriever_tool]
-    agent_executor = create_conversational_retrieval_agent(llm, tools, verbose=True, max_token_limit=512)
-    return agent_executor
+def load_conversational_retrievel_chain(retriever, llm):
+    '''Load a conversational retrieval chain.'''
+    _template = """
+    You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'.
+    Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
+    Chat History:
+    {chat_history}
+    Follow Up Input: {question}
+    Standalone question:"""
+
+    CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
+    memory = ConversationBufferMemory(return_messages=True, memory_key="chat_history")
+    conversation_chain = ConversationalRetrievalChain.from_llm(
+        llm=llm,
+        retriever=retriever,
+        condense_question_prompt=CONDENSE_QUESTION_PROMPT,
+        memory=memory,
+        verbose=True,
+    )
+    return conversation_chain
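
A hypothetical usage sketch of the new helper, assuming PromptTemplate, ConversationBufferMemory, and ConversationalRetrievalChain are imported elsewhere in utils.py and that an llm and retriever have already been built:

    chain = load_conversational_retrievel_chain(retriever=retriever, llm=llm)

    # First turn: the question reaches the retriever essentially unchanged.
    print(chain({"question": "What benefits does OPM administer?"})["answer"])

    # Follow-up turn: ConversationBufferMemory fills {chat_history}, and
    # CONDENSE_QUESTION_PROMPT rewrites the follow-up into a standalone
    # question before retrieval.
    print(chain({"question": "Who is eligible for them?"})["answer"])

One design note: the condense prompt here doubles as a system-style instruction ("You are a helpful assistant..."), but in ConversationalRetrievalChain it only shapes the question-rewriting step; the final answer is produced by the chain's separate question-answering prompt.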