changed chain and system prompt
- app.py +3 -1
- utils/chain.py +10 -5
app.py CHANGED

@@ -122,7 +122,7 @@ async def main(message: cl.Message):
     memory: ConversationBufferMemory = tools['memory']
 
     # using query search for ArXiv documents and index files(on message)
-    await cl.make_async(search_and_index)(message=message, quantity=
+    await cl.make_async(search_and_index)(message=message, quantity=10, embedder=embedder, index=index)
 
     text_field = "source_document"
     index = pinecone.Index(INDEX_NAME)
@@ -145,6 +145,8 @@ async def main(message: cl.Message):
     async for chunk in retrieval_augmented_qa_chain.astream({"question": f"{message.content}", "chat_history": memory.buffer_as_messages}):
         if res:= chunk.get('response'):
             await sys_message.stream_token(res.content)
+        if chunk.get("context"):
+            pprint(chunk.get("context"))
     await sys_message.send()
 
     memory.chat_memory.add_user_message(message.content)
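The new `context` branch surfaces the retrieved documents alongside the streamed answer tokens: the chain's astream output is a dict whose "response" and "context" keys match what app.py reads here. A minimal sketch (not part of the commit) of consuming that stream outside Chainlit; the `demo` helper and the sample question are illustrative only.

# Sketch under assumptions: `chain` is the runnable built by create_chain in
# utils/chain.py, and its streamed chunks carry "response"/"context" keys as
# app.py's loop above expects.
import asyncio
from pprint import pprint

async def demo(chain, question: str) -> None:
    async for chunk in chain.astream({"question": question, "chat_history": []}):
        if res := chunk.get("response"):   # incremental AIMessage chunks
            print(res.content, end="", flush=True)
        if ctx := chunk.get("context"):    # retrieved context documents
            pprint(ctx)

# asyncio.run(demo(retrieval_augmented_qa_chain, "Summarize the indexed paper."))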
utils/chain.py CHANGED

@@ -1,7 +1,7 @@
 from operator import itemgetter
 from langchain_core.vectorstores import VectorStoreRetriever
 from langchain.schema.runnable import RunnableLambda, RunnableParallel, RunnableSequence
-from langchain.chat_models import ChatOpenAI
+from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
 from langchain.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.documents import Document
 from langchain_core.messages.ai import AIMessage
@@ -22,13 +22,17 @@ User question: {question}
 prompt = PromptTemplate.from_template(template=template)
 chat_prompt = ChatPromptTemplate.from_messages([
     ("system", """
-    You are a helpful assistant, your job is to answer the user's question using the relevant context
+    You are a helpful assistant, your job is to answer the user's question using the relevant context in the context section and in the conversation history.
+    Make sure to relate the question to the conversation history and the context in the context section. If the question, the context and the conversation history
+    does not align please let the user know about this and ask for further clarification.
     =========
     CONTEXT:
     {context}
     =========
+    PREVIOUS CONVERSATION HISTORY:
+    {chat_history}
     """),
-
+    # MessagesPlaceholder(variable_name="chat_history"),
     ("human", "{question}")
 ])
 
@@ -52,18 +56,19 @@ def create_chain(**kwargs) -> RunnableSequence:
     """
 
     retriever: VectorStoreRetriever = kwargs["retriever"]
-    llm: ChatOpenAI = kwargs.get("llm", None)
+    llm: AzureChatOpenAI = kwargs.get("llm", None)
 
 
     if not isinstance(retriever, VectorStoreRetriever):
         raise ValueError
-    if not isinstance(llm, ChatOpenAI):
+    if not isinstance(llm, AzureChatOpenAI):
         raise ValueError
 
     docs_chain = (itemgetter("question") | retriever).with_config(config={"run_name": "docs"})
     self_knowledge_chain = (itemgetter("question") | llm | to_doc).with_config(config={"run_name": "self knowledge"})
     response_chain = (chat_prompt | llm).with_config(config={"run_name": "response"})
     merge_docs_link = RunnableLambda(merge_docs).with_config(config={"run_name": "merge docs"})
+
     context_chain = (
         RunnableParallel(
             {
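Two consequences of this commit for callers: create_chain now rejects anything that is not an AzureChatOpenAI instance, and since {chat_history} is interpolated directly into the system message (the MessagesPlaceholder is commented out), every invocation must supply a chat_history value, as app.py does with memory.buffer_as_messages. A hedged wiring sketch; the deployment name, API version, and retriever setup are assumptions, not taken from the commit.

# Illustrative only: Azure settings are placeholders, and `retriever` is
# assumed to be the Pinecone-backed VectorStoreRetriever set up in app.py.
from langchain.chat_models import AzureChatOpenAI
from utils.chain import create_chain

llm = AzureChatOpenAI(
    deployment_name="my-gpt-deployment",   # assumed deployment name
    openai_api_version="2023-05-15",       # assumed API version
)

chain = create_chain(retriever=retriever, llm=llm)

# chat_history is now a required prompt variable, so every call must pass it:
# chain.astream({"question": "...", "chat_history": memory.buffer_as_messages})

A plain ChatOpenAI client will no longer pass the isinstance check, so any existing callers of create_chain have to migrate to the Azure client along with this commit.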