# Chatbot workflow module.
# Commit e861aef — Kelechi Osuji:
# "Define the prompt template for both general conversation and weather retrieval"
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.schema import BaseMemory
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory

from config import get_chat_model
def get_session_history(memory: "BaseMemory"):
    """Return the messages stored on *memory*'s chat history.

    Args:
        memory: A LangChain memory object. If it exposes a ``chat_memory``
            attribute, that attribute's ``messages`` are returned.

    Returns:
        list: The stored messages, or an empty list when *memory* has no
        ``chat_memory`` attribute.
    """
    # Guard with hasattr so non-buffer memory objects degrade to "no history"
    # instead of raising AttributeError.
    return memory.chat_memory.messages if hasattr(memory, "chat_memory") else []
def get_workflow():
    """Set up the chatbot workflow with memory and prompt template.

    Builds a prompt that covers both general conversation and weather
    questions, pipes it into the chat model from ``config.get_chat_model``,
    and wraps the chain with per-session message history.

    Returns:
        RunnableWithMessageHistory: Invoke with ``{"input": ...}`` and
        ``config={"configurable": {"session_id": ...}}``; the history for
        that session is injected as ``previous_conversation``.
    """
    # Define the prompt template for both general conversation and weather retrieval.
    prompt_template = PromptTemplate(
        input_variables=["input", "previous_conversation"],
        template="""
You are a helpful assistant. You should answer the user's question or have a normal conversation. If the user asks about the weather,
please respond with the current weather information based on their input location. Otherwise, answer to the best of your ability.
If the user's input is about the weather, you should respond with details about the weather.
For example:
- "What is the weather in Paris?"
- "How's the weather in New York?"
Example conversation flow:
User: What's the weather like today in London?
Assistant: Let me check the weather for you. The current weather in London is [weather details].
If the input is not weather-related, just respond with a conversational response.
The user said: {input}
Previous conversation: {previous_conversation}
""",
    )

    # Fetch the chat model and actually wire the prompt into it; wrapping the
    # bare model would leave the template unused.
    chat_model = get_chat_model()
    chain = prompt_template | chat_model

    # RunnableWithMessageHistory calls its history getter with a session_id
    # and expects a BaseChatMessageHistory back, so a shared
    # ConversationBufferMemory (or a plain message list) cannot be passed.
    # Keep one in-memory history per session instead.
    session_histories = {}

    def _history_for(session_id: str):
        # Lazily create one chat history per session id.
        if session_id not in session_histories:
            session_histories[session_id] = InMemoryChatMessageHistory()
        return session_histories[session_id]

    conversation_chain = RunnableWithMessageHistory(
        chain,
        _history_for,
        input_messages_key="input",
        history_messages_key="previous_conversation",
    )
    return conversation_chain