from langdev_helper.llm.lcex import llm_lcex as model

from langgraph.graph import MessagesState, START
from langgraph.graph import END, StateGraph
from langgraph.checkpoint.memory import MemorySaver

from tool import tools, tool_node

# Attach the tool schemas so the LLM can emit structured tool calls.
# NOTE: deliberately rebinds the imported `model` name to the tool-aware runnable.
model = model.bind_tools(tools)

def should_continue(state):
    """Route the graph after the agent node.

    Inspects the most recent message in the conversation state and
    returns "continue" when it carries tool calls (so the graph runs the
    "action" node next), otherwise "end" to finish the run.
    """
    latest = state["messages"][-1]
    return "continue" if latest.tool_calls else "end"

def call_model(state):
    """Run the tool-bound model over the conversation history.

    Returns a partial state update: MessagesState appends the returned
    list onto the existing message history rather than replacing it.
    """
    reply = model.invoke(state["messages"])
    return {"messages": [reply]}

# Assemble the two-node agent loop over the built-in MessagesState
# (a state schema whose "messages" key accumulates chat messages).
workflow = StateGraph(MessagesState)

# "agent" calls the LLM; "action" executes any tool calls it emitted.
workflow.add_node("agent", call_model)
workflow.add_node("action", tool_node)

# Every run enters at the agent node.
workflow.add_edge(START, "agent")

# After each model turn, branch on should_continue's string verdict:
# "continue" -> execute the requested tools, "end" -> finish the graph.
workflow.add_conditional_edges(
    "agent",
    should_continue,
    {
        "continue": "action",
        "end": END,
    },
)

# Tool results loop back into the model for the next reasoning turn.
workflow.add_edge("action", "agent")

# In-memory checkpointer enables per-thread persistence and resuming;
# interrupt_before pauses execution just before the tool node runs —
# presumably a human-in-the-loop approval point (confirm with callers).
memory = MemorySaver()
app = workflow.compile(checkpointer=memory, interrupt_before=["action"])
# NOTE(review): prints the Mermaid diagram whenever this module is imported,
# not just when run as a script — consider an `if __name__ == "__main__":` guard.
print(app.get_graph().draw_mermaid())
