import os

from langchain_core.messages import AIMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import MessagesState, StateGraph, START, END
from langgraph.prebuilt import ToolNode


@tool
def get_weather(location: str) -> str:
    """Get the current weather for the given city name."""
    # Case-insensitive match so "SF", "San Francisco", etc. all work.
    if location.lower() in ("sf", "san francisco"):
        return "It's 16 degrees and foggy"  # fixed typo: "degress" -> "degrees"
    return "It's not 32 degrees"

@tool
def get_coolest_cities() -> str:
    """Get the name of the coldest city."""
    coldest_city = "哈尔滨"  # Harbin
    return coldest_city

# Hand-built AI message carrying two parallel tool-call requests.
# NOTE(review): this message is constructed but never passed to the graph
# below — it only demonstrates the tool_calls payload shape.
_tool_call_requests = [
    {
        "name": "get_weather",
        "args": {"location": "sf"},
        "id": "4343",
        "type": "tool_call",
    },
    {
        "name": "get_coolest_cities",
        "args": {},
        "id": "43431",
        "type": "tool_call",
    },
]
message_with_multiple_tool_calls = AIMessage(
    content="",
    tool_calls=_tool_call_requests,
)

# Register both tools and wrap them in a ToolNode for the graph.
tools = [get_weather, get_coolest_cities]
tool_node = ToolNode(tools)

# Chat model: Alibaba Cloud Qwen-Plus via DashScope's OpenAI-compatible API,
# with the tool schemas bound so the model can emit tool_calls.
# SECURITY FIX: the API key was hard-coded in source (a leaked credential);
# it is now read from the DASHSCOPE_API_KEY environment variable.
model_with_tools = ChatOpenAI(
        model="qwen-plus",  # Alibaba Cloud Qwen-Plus model
        openai_api_key=os.environ.get("DASHSCOPE_API_KEY", ""),
        openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
        temperature=0.7,  # sampling randomness
        max_tokens=2048,  # maximum generated tokens
    ).bind_tools(tools)

def should_continue(state: MessagesState):
    """Conditional router: send the flow to the "tools" node when the last
    (model-produced) message requests tool calls, otherwise finish at END."""
    latest = state["messages"][-1]
    return "tools" if latest.tool_calls else END

def call_model(state: MessagesState):
    """Agent node: invoke the tool-bound LLM on the conversation so far and
    append its reply (which may contain tool_calls) to the state."""
    ai_reply = model_with_tools.invoke(state["messages"])
    return {"messages": [ai_reply]}

workflow = StateGraph(MessagesState) # Build the graph over the MessagesState schema

workflow.add_node("agent", call_model) # LLM node, named "agent"
workflow.add_node("tools", tool_node) # ToolNode that executes requested tool calls

workflow.add_edge(START, "agent") # Entry point: START -> "agent"
workflow.add_conditional_edges("agent", should_continue, ["tools", END]) # Route from "agent" to "tools" or END based on should_continue
workflow.add_edge("tools", "agent") # Feed tool results back to the model (ReAct loop)

app = workflow.compile() # Compile the LangGraph graph into a runnable

# Kick off the loop with a user question; invoke() performs a network call to the LLM.
output = app.invoke({"messages": [{"role": "user", "content": "what is the weather in sf"}]})
print(output)