from langchain_core.messages import AIMessage
from langgraph.prebuilt import ToolNode
from langchain_core.tools import tool
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph, MessagesState, START, END

from langchain_ollama import ChatOllama

# Local chat model served by Ollama. temperature=0.5 gives mild variability;
# reasoning=False turns off the model's thinking output so replies are direct.
llm = ChatOllama(model="qwen3:8b", temperature=0.5, reasoning=False)

# Define tools
@tool
def get_weather(location: str):
    """Call to get the current weather."""
    # Demo stub: San Francisco is the only location with distinct weather.
    sf_aliases = {"sf", "san francisco"}
    if location.lower() in sf_aliases:
        return "It's 60 degrees and foggy."
    return "It's 90 degrees and sunny."

@tool
def get_coolest_cities():
    """Get a list of coolest cities"""
    # Decorated with @tool for consistency with get_weather/get_weather1:
    # it is used as a tool in ToolNode (commented-out case 2), and decorating
    # it lets it also be bound to a model via bind_tools like the others.
    return "nyc, sf"

@tool
def get_weather1(location: str, config: RunnableConfig):
    """Call to get the current weather."""
    # The RunnableConfig argument is supplied by the runtime at invocation;
    # read the caller-provided user_id from its "configurable" section.
    user_id = config["configurable"].get("user_id")
    print(f"User ID: {user_id}")
    normalized = location.lower()
    return (
        "It's 60 degrees and foggy."
        if normalized in {"sf", "san francisco"}
        else "It's 90 degrees and sunny."
    )

# case 1: Single tool call example

# tool_node = ToolNode([get_weather])
# message_with_single_tool_call = AIMessage(
#     content="",
#     tool_calls=[
#         {
#             "name": "get_weather",
#             "args": {"location": "sf"},
#             "id": "tool_call_id",
#             "type": "tool_call",
#         }
#     ],
# )
# result = tool_node.invoke({"messages": [message_with_single_tool_call]})
# print(result) 

# case 2: Multiple tool calls example
# tool_node = ToolNode([get_weather, get_coolest_cities])
# message_with_multiple_tool_calls = AIMessage(
#     content="",
#     tool_calls=[
#         {
#             "name": "get_coolest_cities",
#             "args": {},
#             "id": "tool_call_id_1",
#             "type": "tool_call",
#         },
#         {
#             "name": "get_weather",
#             "args": {"location": "sf"},
#             "id": "tool_call_id_2",
#             "type": "tool_call",
#         },
#     ],
# )
# result = tool_node.invoke({"messages": [message_with_multiple_tool_calls]})
# print(result)

# case 3: Use toolnode with a chat model
# tool_node = ToolNode([get_weather])
# model_with_tools = llm.bind_tools([get_weather])  
# response_message = model_with_tools.invoke("what's the weather in sf?")
# result = tool_node.invoke({"messages": [response_message]})
# print(result)

# case 4: Use toolnode in a tool-calling agent and with static config
# Executor node that runs any tool calls found on the latest AIMessage.
tool_node = ToolNode([get_weather1])
# Same tool is advertised to the model so it can emit matching tool calls.
model_with_tools = llm.bind_tools([get_weather1])

def should_continue(state: MessagesState):
    """Route to the tool node when the newest model reply requests tool calls.

    Returns "tools" if the last message carries tool calls, otherwise END.
    """
    # The last element of the messages list is the model's most recent reply.
    latest = state["messages"][-1]
    return "tools" if latest.tool_calls else END

def call_model(state: MessagesState):
    """Invoke the tool-bound model on the conversation and append its reply."""
    reply = model_with_tools.invoke(state["messages"])
    return {"messages": [reply]}

# Standard tool-calling agent loop: model -> (tools -> model)* -> END.
builder = StateGraph(MessagesState)

# Define the two nodes we will cycle between
builder.add_node("call_model", call_model)
builder.add_node("tools", tool_node)
builder.add_edge(START, "call_model")
# After each model turn, should_continue picks "tools" or terminates at END.
builder.add_conditional_edges("call_model", should_continue, ["tools", END])
# Tool results always flow back to the model for the next turn.
builder.add_edge("tools", "call_model")
graph = builder.compile()
# The "configurable" dict is propagated to tools that declare a RunnableConfig
# parameter (get_weather1 reads user_id from it).
result = graph.invoke(
    input={"messages": [{"role": "user", "content": "what's the weather in sf?"}]},
    config={"configurable": {"user_id": "user_7798"}}
)
print(result)
