Update tailor_chain.py
tailor_chain.py (+13 -12)
```diff
@@ -1,20 +1,21 @@
-# tailor_chain.py
-import os
 from langchain.chains import LLMChain
-from langchain_groq import ChatGroq
-
 from prompts import tailor_prompt
 
 def get_tailor_chain() -> LLMChain:
     """
-
+    Creates the tailor chain to simplify and personalize the assistant's responses.
     """
-
-    chat_groq_model = ChatGroq(
-        groq_api_key=os.environ["GROQ_API_KEY"]
-    )
-    chain = LLMChain(
-        llm=chat_groq_model,
+    tailor_chain = LLMChain(
+        llm=your_llm,  # Update this with your actual LLM model
         prompt=tailor_prompt
     )
-    return
+    return tailor_chain
+
+def tailor_with_history(response: str, chat_history: list) -> str:
+    """
+    Tailors the assistant's response based on the history context.
+    """
+    context = "\n".join([f"User: {msg['content']}" for msg in chat_history]) + "\nAssistant: " + response
+    # Use the context along with the response for tailoring
+    tailored_response = get_tailor_chain().run({"response": context})
+    return tailored_response
```
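As committed, the new `get_tailor_chain` still references an undefined `your_llm` placeholder, so the module does not run as-is. Below is a minimal sketch of what the resulting `tailor_chain.py` could look like, under a few assumptions: it restores the Groq-backed model the old version constructed (`ChatGroq` reading `GROQ_API_KEY`; the model name here is a hypothetical choice), and it assumes `tailor_prompt` declares a single `response` input variable. The sketch also labels history messages by their `role` key instead of hard-coding `User:`, so assistant turns in the history are not mislabeled.

```python
# tailor_chain.py (illustrative sketch, not the committed code)
import os

from langchain.chains import LLMChain
from langchain_groq import ChatGroq  # assumption: reuse the Groq model the old version had

from prompts import tailor_prompt


def get_tailor_chain() -> LLMChain:
    """
    Creates the tailor chain to simplify and personalize the assistant's responses.
    """
    chat_groq_model = ChatGroq(
        model="llama3-8b-8192",                   # hypothetical model name; swap in your own
        groq_api_key=os.environ["GROQ_API_KEY"],  # assumes the key is exported
    )
    return LLMChain(llm=chat_groq_model, prompt=tailor_prompt)


def tailor_with_history(response: str, chat_history: list) -> str:
    """
    Tailors the assistant's response based on the history context.
    """
    # Label each prior turn with its role so assistant messages in the
    # history are not all rendered as "User:".
    turns = [f"{msg.get('role', 'user').capitalize()}: {msg['content']}" for msg in chat_history]
    context = "\n".join(turns) + "\nAssistant: " + response
    # The dict key must match the input variable declared in tailor_prompt.
    return get_tailor_chain().run({"response": context})
```

With those assumptions, a call such as `tailor_with_history("Our refund window is 30 days.", [{"role": "user", "content": "What is your refund policy?"}])` would prepend the labeled history to the draft answer and pass the combined text through the tailor chain.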