import os
import operator
import asyncio
from dotenv import load_dotenv
from typing import Annotated, Any, TypedDict

from langchain_core.messages import AnyMessage, HumanMessage, AIMessage
from langchain_core.tools import tool
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_openai import ChatOpenAI

from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode
# 1. 核心修改：导入 AsyncSqliteSaver
from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver

# --- Environment, tools, state, and node definitions (identical to tutorial_8) ---
load_dotenv()
try:
    # Preferred search tool; requires the tavily-python package and an API key.
    risky_search_tool = TavilySearchResults(max_results=2)
except ImportError:
    # Fallback when tavily is unavailable.
    # NOTE(review): only ImportError is caught here — a missing/invalid API key
    # likely raises a different exception at construction time; confirm.
    from langchain_community.tools import DuckDuckGoSearchRun
    risky_search_tool = DuckDuckGoSearchRun()
@tool
def python_calculator(expression: str) -> str:
    # Docstring is kept verbatim: @tool sends it to the LLM as the tool
    # description, so its exact text is runtime-significant.
    """一个可以执行 Python 数学表达式的计算器。"""
    # NOTE(review): eval with builtins stripped is NOT a real sandbox; this is
    # acceptable only because the input comes from the model, not end users.
    try:
        value = eval(expression, {"__builtins__": None}, {})
    except Exception as exc:
        return f"执行表达式 '{expression}' 时出错: {exc}"
    return f"表达式 '{expression}' 的计算结果是: {value}"
# "Safe" tools run automatically; "risky" tools sit behind the graph's
# interrupt_before gate and require a human resume.
safe_tools = [python_calculator]
risky_tools = [risky_search_tool]
all_tools = safe_tools + risky_tools
class AgentState(TypedDict):
    """Graph state: the running message history plus a loop counter."""
    # operator.add tells LangGraph to append node output to the existing
    # list instead of overwriting it.
    messages: Annotated[list[AnyMessage], operator.add]
    # Incremented once per agent invocation; used to cap the tool loop.
    iteration_count: int
# Qwen served through an OpenAI-compatible endpoint; bind_tools attaches the
# full tool schema so the model can emit tool calls for either category.
llm = ChatOpenAI(model="qwen-plus-latest", base_url=os.getenv("OPENAI_BASE_URL"))
model_with_tools = llm.bind_tools(all_tools)
async def agent_node(state: AgentState):
    """Invoke the tool-bound LLM on the history and bump the loop counter."""
    print("\n---AGENT: 思考中...---")
    ai_reply = await model_with_tools.ainvoke(state["messages"])
    next_count = state.get("iteration_count", 0) + 1
    return {"messages": [ai_reply], "iteration_count": next_count}
# Separate ToolNodes so the graph can interrupt before the risky one only.
safe_tool_node = ToolNode(safe_tools)
risky_tool_node = ToolNode(risky_tools)
# Hard cap on agent<->tool round trips to prevent infinite loops.
MAX_ITERATIONS = 5
async def router_node(state: AgentState) -> str:
    """Decide the next hop after the agent node.

    Returns END when the iteration budget is exhausted or the last AI message
    carries no tool calls; otherwise "safe_tools" or "risky_tools".
    """
    print("---ROUTER: 决策中...---")
    # Fix: use .get (as agent_node does) so a thread whose checkpointed state
    # predates iteration_count does not raise KeyError here.
    if state.get("iteration_count", 0) > MAX_ITERATIONS:
        print(f"已达到最大循环次数 {MAX_ITERATIONS}，流程终止。")
        return END
    last_message = state["messages"][-1]
    # No tool calls (or not an AI turn) means the model produced a final answer.
    if not isinstance(last_message, AIMessage) or not last_message.tool_calls:
        return END
    tool_name = last_message.tool_calls[0]["name"]
    print(f"ROUTER: 计划调用工具 '{tool_name}'")
    # Fix: inspect EVERY tool call, not just the first. With parallel tool
    # calls, a risky call in position >= 1 would previously bypass the
    # interrupt_before=["risky_tools"] approval gate; any risky call now
    # routes the batch to the interruptible node.
    safe_names = {t.name for t in safe_tools}
    requested = {call["name"] for call in last_message.tool_calls}
    if requested <= safe_names:
        return "safe_tools"
    return "risky_tools"

# --- Build the graph (identical to tutorial_8) ---
workflow = StateGraph(AgentState)
workflow.add_node("agent", agent_node)
workflow.add_node("safe_tools", safe_tool_node)
workflow.add_node("risky_tools", risky_tool_node)
workflow.set_entry_point("agent")
# router_node picks the branch; both tool nodes loop back to the agent.
workflow.add_conditional_edges("agent", router_node, {"safe_tools": "safe_tools", "risky_tools": "risky_tools", END: END})
workflow.add_edge("safe_tools", "agent")
workflow.add_edge("risky_tools", "agent")

# --- 2. 编译图的步骤将被移至 main 函数中 ---

# --- 3. 持久化交互演示 ---
async def run_agent(app, thread_id: str, question: str):
    """Run one turn for the given thread and report whether it paused or finished."""
    # thread_id selects which persisted conversation this call belongs to:
    # every invocation sharing a thread_id shares the same checkpointed state.
    config = {"configurable": {"thread_id": thread_id}}
    inputs = {"messages": [HumanMessage(content=question)], "iteration_count": 0}

    print(f"\n--- 开始新一轮执行 (Thread ID: {thread_id}) ---")

    # Stream LLM tokens to the console as they arrive.
    async for event in app.astream_events(inputs, config=config, version="v2"):
        if event["event"] == "on_chat_model_stream":
            token = event["data"]["chunk"].content
            if token:
                print(token, end="", flush=True)

    # Inspect the checkpoint: a non-empty `next` means the graph is paused
    # (e.g. at the interrupt before risky_tools).
    snapshot = await app.aget_state(config)
    if not snapshot.next:
        print("\n\n--- 流程已完成！---")
        print("最终答案:")
        snapshot.values["messages"][-1].pretty_print()
    else:
        print(f"\n\n--- 流程暂停 (Thread ID: {thread_id}) ---")
        print(f"下一步计划: {snapshot.next[0]}")

async def resume_agent(app, thread_id: str):
    """Resume a previously interrupted run identified by thread_id."""
    config = {"configurable": {"thread_id": thread_id}}

    # Nothing to do unless the checkpoint shows a pending next node.
    paused = await app.aget_state(config)
    if not paused.next:
        print("--- 该对话已完成，无需恢复 ---")
        return

    print(f"\n--- 恢复执行 (Thread ID: {thread_id}) ---")

    # A None input tells LangGraph to continue from the saved state
    # rather than start a new run.
    async for event in app.astream_events(None, config=config, version="v2"):
        if event["event"] == "on_chat_model_stream":
            token = event["data"]["chunk"].content
            if token:
                print(token, end="", flush=True)

    final = await app.aget_state(config)
    if not final.next:
        print("\n\n--- 流程已完成！---")
        print("最终答案:")
        final.values["messages"][-1].pretty_print()

async def main():
    """Demo: two users on separate threads, with one interrupted run resumed."""
    # async with manages the async sqlite connection's open/close correctly.
    async with AsyncSqliteSaver.from_conn_string("langgraph_tutorial.db") as checkpointer:
        # Compile inside the context so the checkpointer is live while the
        # app runs; pause for approval before any risky tool executes.
        app = workflow.compile(checkpointer=checkpointer, interrupt_before=["risky_tools"])

        alice_thread_id = "alice_session_14"
        bob_thread_id = "bob_session_14"

        # Scenario 1: Alice starts a task that pauses at the risky-tool gate.
        print("===== 场景 1: Alice 开始任务并暂停 =====")
        await run_agent(app, alice_thread_id, "你好，请帮我搜索 LangGraph 的 GitHub 星标数。")

        # Scenario 2: Bob runs an unrelated task to completion on his own thread.
        print("\n\n===== 场景 2: Bob 开始并完成他的任务 =====")
        await run_agent(app, bob_thread_id, "你好，请用计算器计算 1024 * 3 等于多少。")

        # Scenario 3: resume Alice's paused thread. No question is needed --
        # her state is already persisted in langgraph_tutorial.db.
        print("\n\n===== 场景 3: Alice 回来，我们恢复并完成她的任务 =====")
        await resume_agent(app, alice_thread_id)

if __name__ == "__main__":
    # Tip: delete langgraph_tutorial.db before running to watch it be recreated.
    asyncio.run(main()) 