# receptionist-agent/src/receptionist_agent.py
from langchain_core.prompts import ChatPromptTemplate
from langgraph.graph import StateGraph, END, MessagesState
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import ToolNode
from langchain_core.messages import HumanMessage
from dotenv import load_dotenv
from tools import transfer_call, leave_message, go_to
from prompts import system_prompt, system_prompt_original
load_dotenv()
llm = ChatOpenAI(
    model="gpt-4.1",
    temperature=0.3,
)
# 1. Define the tools
tools = [transfer_call, leave_message, go_to]
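# ToolNode executes any tool calls found on the latest AI message and appends
# the resulting tool messages to the conversation state.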
tool_node = ToolNode(tools)
# 2. Create the graph
graph = StateGraph(MessagesState)
# 3. Define a new prompt
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt_original),
        ("placeholder", "{messages}"),
    ]
)
# 4. Define the agent
agent = prompt | llm.bind_tools(tools)
# 5. Define the nodes
def agent_node(state: MessagesState):
    """Run the prompted LLM on the current messages and append its reply."""
    response = agent.invoke(state)
    return {"messages": [response]}
# 6. Define the edges
def should_continue(state: MessagesState):
    """Route to the tool node if the last AI message requested tool calls."""
    last_message = state["messages"][-1]
    if last_message.tool_calls:
        return "continue"
    return "end"
# 7. Build the graph
graph.add_node("agent", agent_node)
graph.add_node("action", tool_node)
graph.set_entry_point("agent")
graph.add_conditional_edges(
    "agent",
    should_continue,
    {
        "continue": "action",
        "end": END,
    },
)
graph.add_edge("action", "agent")
# 8. Compile the graph
app = graph.compile()
# Simplified single-turn helper for demonstration: invoke the graph once and
# return only the messages generated after the supplied conversation.
def get_agent_response(conversation: list):
    response = app.invoke({"messages": conversation})
    return response["messages"][len(conversation):]
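

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original agent logic. It assumes
    # OPENAI_API_KEY is provided via the .env file loaded above; the question
    # below is purely illustrative.
    demo_conversation = [HumanMessage(content="Hi, can I speak to someone in billing?")]
    for message in get_agent_response(demo_conversation):
        print(f"{message.type}: {message.content}")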