Rauhan committed
Commit d912ba1
1 Parent(s): 6e09a79

UPDATE: chat history retention

Files changed (1):
  functions.py +3 -2
functions.py CHANGED
@@ -180,6 +180,7 @@ def answerQuery(query: str, vectorstore: str, llmModel: str = "llama3-70b-8192")
     global prompt
     global client
     global embeddings
+    vectorStoreName = vectorstore
     vectorstore = QdrantVectorStore.from_existing_collection(
         embedding = embeddings,
         collection_name=vectorstore,
@@ -197,7 +198,7 @@ def answerQuery(query: str, vectorstore: str, llmModel: str = "llama3-70b-8192")
         base_compressor=compressor, base_retriever=retriever
     )
     baseChain = (
-        {"context": retriever | RunnableLambda(format_docs), "question": RunnablePassthrough(), "chatHistory": RunnablePassthrough()}
+        {"context": RunnableLambda(lambda x: x["question"]) | retriever | RunnableLambda(format_docs), "question": RunnablePassthrough(), "chatHistory": RunnablePassthrough()}
         | prompt
         | ChatGroq(model = llmModel, temperature = 0.75, max_tokens = 512)
         | StrOutputParser()
@@ -212,7 +213,7 @@ def answerQuery(query: str, vectorstore: str, llmModel: str = "llama3-70b-8192")
     return {
         "output": chain.invoke(
             {"question": query},
-            {"configurable": {"session_id": vectorstore}}
+            {"configurable": {"session_id": vectorStoreName}}
         )
     }
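For context, a minimal sketch of how the pieces touched by this commit would fit together. It assumes `chain` is `baseChain` wrapped in `RunnableWithMessageHistory` (the wrapping itself is outside these hunks), and the prompt text, `buildChatChain`, `getSessionHistory`, and `sessionStore` below are illustrative stand-ins rather than the repository's actual code; only the `session_id` plumbing and the question-routing into the retriever mirror the diff.

```python
# Hedged sketch, not the repository's code: the retriever passed in would be the
# Qdrant retriever built inside answerQuery, and GROQ_API_KEY must be set.
from operator import itemgetter

from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_groq import ChatGroq

sessionStore = {}  # session_id -> ChatMessageHistory


def getSessionHistory(session_id: str) -> ChatMessageHistory:
    # One running history per session_id; the commit keys this by the original
    # collection name (vectorStoreName), saved before `vectorstore` is rebound
    # to the QdrantVectorStore object.
    return sessionStore.setdefault(session_id, ChatMessageHistory())


def format_docs(docs) -> str:
    return "\n\n".join(doc.page_content for doc in docs)


def buildChatChain(retriever, llmModel: str = "llama3-70b-8192"):
    # Illustrative prompt; the repo's actual prompt is a module-level global.
    prompt = ChatPromptTemplate.from_messages([
        ("system", "Answer the question using only this context:\n\n{context}"),
        MessagesPlaceholder("chatHistory"),
        ("human", "{question}"),
    ])

    baseChain = (
        {
            # The chain input is a dict, so only the question string is routed
            # into the retriever -- the change made in the second hunk above.
            "context": itemgetter("question") | retriever | RunnableLambda(format_docs),
            "question": itemgetter("question"),
            "chatHistory": itemgetter("chatHistory"),
        }
        | prompt
        | ChatGroq(model=llmModel, temperature=0.75, max_tokens=512)
        | StrOutputParser()
    )

    # Wrapping baseChain like this is what makes the "configurable" session_id
    # in chain.invoke(...) meaningful: prior messages are loaded before the call
    # and the new exchange is appended to the same history afterwards.
    return RunnableWithMessageHistory(
        baseChain,
        getSessionHistory,
        input_messages_key="question",
        history_messages_key="chatHistory",
    )


# Usage mirroring the patched return statement:
# chain = buildChatChain(retriever)
# chain.invoke({"question": query}, {"configurable": {"session_id": vectorStoreName}})
```

Keying the session by the collection name means every query against the same vector store shares one running conversation, which is what the `vectorStoreName` fix restores after `vectorstore` is rebound to the store object.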