Update utils and app files
Removed agent from the chain
app.py
CHANGED
@@ -30,7 +30,7 @@ from langchain.agents.agent_toolkits import create_conversational_retrieval_agent
 from langchain.utilities import SerpAPIWrapper
 
 from utils import build_embedding_model, build_llm
-from utils import load_ensemble_retriver, load_text_chunks, load_vectorstore, …
+from utils import load_ensemble_retriver, load_text_chunks, load_vectorstore, load_conversational_retrievel_chain
 
 load_dotenv()
 # Getting current timestamp to keep track of historical conversations
@@ -57,8 +57,8 @@ if "text_chunks" not in st.session_state:
 if "ensemble_retriver" not in st.session_state:
     st.session_state["ensemble_retriver"] = load_ensemble_retriver(text_chunks=st.session_state["text_chunks"], embeddings=st.session_state["embeddings"], chroma_vectorstore=st.session_state["vector_db"])
 
-if "…
-    st.session_state["…
+if "conversation_chain" not in st.session_state:
+    st.session_state["conversation_chain"] = load_conversational_retrievel_chain(retriever=st.session_state["ensemble_retriver"], llm=st.session_state["llm"])
 
 
 
@@ -192,9 +192,9 @@ if st.session_state["vector_db"] and st.session_state["llm"]:
            st.write(message)
 
 
-    def generate_llm_response(…
-        …
-        return …
+    def generate_llm_response(conversation_chain, prompt_input):
+        output = conversation_chain({'question': prompt_input})
+        return output['answer']
 
 
    # User-provided prompt
@@ -208,10 +208,10 @@ if st.session_state["vector_db"] and st.session_state["llm"]:
        with st.chat_message("assistant"):
            with st.spinner("Searching..."):
                start = timeit.default_timer()
-                response = generate_llm_response(…
+                response = generate_llm_response(conversation_chain=st.session_state["conversation_chain"], prompt_input=prompt)
                placeholder = st.empty()
                full_response = ''
-                for item in response…
+                for item in response:
                    full_response += item
                    placeholder.markdown(full_response)
                # The following logic will work in the way given below.
@@ -219,7 +219,7 @@ if st.session_state["vector_db"] and st.session_state["llm"]:
                # -- If not, we can conclude that the agent has used internet search as a tool.
                # -- Check if intermediary steps are present in the output of the prompt.
                # -- If intermediary steps are present, it means the agent has used the existing custom knowledge base for information retrieval, and therefore we need to give source docs as output along with the LLM's response.
-                if …
+                if response:
                    st.text("-------------------------------------")
                    docs = st.session_state["ensemble_retriver"].get_relevant_documents(prompt)
                    source_doc_list = []
@@ -237,8 +237,6 @@ if st.session_state["vector_db"] and st.session_state["llm"]:
                        st.write("---")  # Add a separator between entries
                    message = {"role": "assistant", "content": full_response, "Source": merged_source_doc}
                    st.session_state.messages.append(message)
-
-                    message = {"role": "assistant", "content": full_response, "Source": ""}
-                    st.session_state.messages.append(message)
+
                end = timeit.default_timer()
                print(f"Time to retrieve response: {end - start}")

(… marks text that is truncated in the original diff view.)
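Taken together, the app.py changes swap the retrieval agent out for a plain conversational retrieval chain kept in st.session_state. A minimal sketch of the new call path outside Streamlit (assuming the legacy LangChain dict-in/dict-out convention the diff uses; `retriever` and `llm` are hypothetical stand-ins for whatever load_ensemble_retriver and build_llm return):

    from utils import load_conversational_retrievel_chain

    # retriever and llm are placeholders here for the objects that
    # app.py keeps in st.session_state.
    chain = load_conversational_retrievel_chain(retriever=retriever, llm=llm)

    # Built with ConversationBufferMemory, the chain takes a dict with a
    # 'question' key and returns a dict whose 'answer' key holds the reply.
    output = chain({"question": "What does the knowledge base say about pricing?"})
    print(output["answer"])

Note that `for item in response:` iterates over the answer string character by character, so the st.empty() placeholder only simulates token streaming; the full answer is already computed when the loop starts.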
utils.py
CHANGED
@@ -257,23 +257,24 @@ def load_ensemble_retriver(text_chunks, embeddings, chroma_vectorstore):
     return ensemble_retriever
 
 
-def …
-    '''Load Conversational Retrievel …
-    … (the remaining removed lines are truncated in the original diff view)
+def load_conversational_retrievel_chain(retriever, llm):
+    '''Build a ConversationalRetrievalChain with buffer memory over the given retriever and LLM.'''
+    _template = """
+    You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'.
+    Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
+    Chat History:
+    {chat_history}
+    Follow Up Input: {question}
+    Standalone question:"""
+
+    CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
+    memory = ConversationBufferMemory(return_messages=True, memory_key="chat_history")
+    conversation_chain = ConversationalRetrievalChain.from_llm(
+        llm=llm,                # build from the arguments, keeping utils.py free of Streamlit state
+        retriever=retriever,
+        condense_question_prompt=CONDENSE_QUESTION_PROMPT,
+        memory=memory,
+        verbose=True,
+    )
+    return conversation_chain
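For load_conversational_retrievel_chain to run, utils.py must already import the three LangChain names it uses; the diff does not show the top of the file, but the standard legacy-LangChain paths for them are:

    from langchain.prompts import PromptTemplate
    from langchain.memory import ConversationBufferMemory
    from langchain.chains import ConversationalRetrievalChain

The custom CONDENSE_QUESTION_PROMPT only changes how a follow-up question is rewritten into a standalone one; answer generation itself still uses the chain's default question-answering prompt.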