UPDATE: chat history retention
functions.py  CHANGED  (+8 -5)
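The change appears to split what was previously a single reassigned chain variable into baseChain (retrieval, prompt, ChatGroq, output parsing) and messageChain (the RunnableWithMessageHistory wrapper), and breaks the final chain.invoke call across lines. Chat history is retained per session, with the vectorstore name doubling as the session_id.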
@@ -196,21 +196,24 @@ def answerQuery(query: str, vectorstore: str, llmModel: str = "llama3-70b-8192")
     retriever = ContextualCompressionRetriever(
         base_compressor=compressor, base_retriever=retriever
     )
-    chain = (
+    baseChain = (
         {"context": retriever | RunnableLambda(format_docs), "question": RunnablePassthrough(), "chatHistory": RunnablePassthrough()}
         | prompt
         | ChatGroq(model = llmModel, temperature = 0.75, max_tokens = 512)
         | StrOutputParser()
     )
-    chain = RunnableWithMessageHistory(
-        chain,
+    messageChain = RunnableWithMessageHistory(
+        baseChain,
         get_session_history,
         input_messages_key = "question",
         history_messages_key = "chatHistory"
     )
-    chain = RunnablePassthrough.assign(messages_trimmed = trimMessages) | chain
+    chain = RunnablePassthrough.assign(messages_trimmed = trimMessages) | messageChain
     return {
-        "output": chain.invoke({"question": query}, {"configurable": {"session_id": vectorstore}})
+        "output": chain.invoke(
+            {"question": query},
+            {"configurable": {"session_id": vectorstore}}
+        )
     }
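The hunk references two helpers that live elsewhere in functions.py and are not shown here: get_session_history and trimMessages. As a rough sketch of how a get_session_history compatible with this wiring might look (the in-memory store below is an assumption, not the repo's implementation):

# Hypothetical sketch: the real get_session_history in functions.py is
# not shown in this diff and may be backed by something other than memory.
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory

store: dict[str, ChatMessageHistory] = {}  # session_id -> history

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    # RunnableWithMessageHistory resolves the configurable "session_id"
    # (here, the vectorstore name) to a history object, so each
    # vectorstore keeps its own running conversation.
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]

With something along these lines in place, chain.invoke({"question": query}, {"configurable": {"session_id": vectorstore}}) records the question and the model's answer in that session's history, and history_messages_key = "chatHistory" feeds the earlier turns back into the prompt on the next call.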
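trimMessages enters the pipeline through RunnablePassthrough.assign(messages_trimmed = trimMessages), which calls it with the full input dict before messageChain runs and stores its result under an otherwise unused key. One plausible shape for it, following the trimming-hook pattern from the LangChain chatbot docs (the four-message cap and the store from the sketch above are assumptions):

def trimMessages(chain_input: dict) -> bool:
    # Assumed trimming hook: cap each tracked session at its last four
    # messages so the prompt stays inside the model's context window.
    # The boolean lands in "messages_trimmed" and is otherwise ignored.
    trimmed = False
    for history in store.values():
        messages = history.messages
        if len(messages) > 4:
            history.clear()
            for message in messages[-4:]:
                history.add_message(message)
            trimmed = True
    return trimmed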