Update utils.py
utils.py
CHANGED
@@ -93,6 +93,64 @@ def get_qachain(llm_name = "gpt-3.5-turbo-0125", chain_type = "stuff", retriever
                               retriever=retriever,
                               return_source_documents=return_source_documents)

+
+def summarize_messages(demo_ephemeral_chat_history, llm):
+    stored_messages = demo_ephemeral_chat_history.messages
+    if len(stored_messages) == 0:
+        return False
+    summarization_prompt = ChatPromptTemplate.from_messages(
+        [
+            MessagesPlaceholder(variable_name="chat_history"),
+            (
+                "user", os.environ['SUMARY_MESSAGE_PROMPT'],
+            ),
+        ]
+    )
+    summarization_chain = summarization_prompt | llm
+
+    summary_message = summarization_chain.invoke({"chat_history": stored_messages})
+
+    demo_ephemeral_chat_history.clear()
+
+    demo_ephemeral_chat_history.add_message(summary_message)
+
+    return demo_ephemeral_chat_history
+
+def get_question_from_summarize(summary, question, llm):
+    new_qa_prompt = ChatPromptTemplate.from_messages([
+        ("system", os.environ['NEW_QUESTION_PROMPT']),
+        ("human",
+         '''
+         Sumary: {summary}
+         Question: {question}
+         Output:
+         '''
+        )
+    ])
+
+    new_qa_chain = new_qa_prompt | llm
+    return new_qa_chain.invoke({'summary': summary, 'question': question}).content
+
+def get_final_answer(question, context, chat_history, prompt, llm):
+    qa_prompt = ChatPromptTemplate.from_messages(
+        [
+            MessagesPlaceholder("chat_history"),
+            ("system", prompt),
+            ("human", '''
+            Context: {context}
+            Question: {question}
+            Output:
+            '''),
+        ]
+    )
+
+    answer_chain = qa_prompt | llm
+
+    answer = answer_chain.invoke({'question': question, 'context': context, 'chat_history': chat_history})
+
+    return answer.content
+
def process_llm_response(llm_response):
    print(llm_response['result'])
    print('\n\nSources:')
@@ -105,3 +163,4 @@ def process_llm_response(llm_response):
+
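The new helpers reference os, ChatPromptTemplate, and MessagesPlaceholder, none of which appear in this hunk. If they are not already imported near the top of utils.py, something like the following would be needed; this is a sketch assuming the prompt classes come from langchain_core (older LangChain versions expose them from langchain.prompts instead):

# Presumed imports for the new helpers (not visible in this diff hunk).
import os
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder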
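For context, here is one way the three helpers could be wired together with a retriever into a single conversational turn. This is only an illustrative sketch, not part of the commit: answer_turn, answer_prompt, and the retriever wiring are hypothetical names introduced for the example, and it assumes SUMARY_MESSAGE_PROMPT and NEW_QUESTION_PROMPT are set in the environment and that langchain-openai and langchain-community are installed.

# Illustrative only -- not part of this commit. answer_turn, answer_prompt and the
# retriever argument are hypothetical; the env-var prompts are read inside utils.py.
from langchain_openai import ChatOpenAI
from langchain_community.chat_message_histories import ChatMessageHistory

from utils import summarize_messages, get_question_from_summarize, get_final_answer

llm = ChatOpenAI(model="gpt-3.5-turbo-0125")   # same model name used by get_qachain
chat_history = ChatMessageHistory()            # ephemeral in-memory history

def answer_turn(question, retriever, answer_prompt):
    """One conversational turn: summarize history, rewrite the question, answer it."""
    # Collapse the stored history into a single summary message (returns False when empty).
    summarized = summarize_messages(chat_history, llm)
    summary_text = summarized.messages[0].content if summarized else ""

    # Rewrite the user question so it stands alone given the summary.
    standalone_question = get_question_from_summarize(summary_text, question, llm)

    # Retrieve supporting context and produce the final answer.
    docs = retriever.invoke(standalone_question)
    context = "\n\n".join(doc.page_content for doc in docs)
    answer = get_final_answer(standalone_question, context,
                              chat_history.messages, answer_prompt, llm)

    # Record the turn so the next call can summarize it.
    chat_history.add_user_message(question)
    chat_history.add_ai_message(answer)
    return answer

The summarize-then-rewrite step keeps the prompt size bounded while still letting follow-up questions refer to earlier turns.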