import os
import asyncio
import uuid
from contextlib import asynccontextmanager
from typing import Annotated, Dict, List, Sequence, TypedDict, Any, Optional, Union

from langchain_core.messages import BaseMessage, AIMessage, HumanMessage, ToolMessage,SystemMessage
from langchain_core.tools import BaseTool
from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.checkpoint.memory import MemorySaver, InMemorySaver
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from langgraph.types import interrupt, Command
from langchain_openai import ChatOpenAI

# Chat model used by the agent node; tools are bound to it in make_graph().
model = ChatOpenAI(model="gpt-4o-mini")

class State(TypedDict):
    # Shared graph state for the human-in-the-loop tool agent.
    # Conversation history; the add_messages reducer appends new messages
    # returned by nodes instead of overwriting the list.
    messages: Annotated[Sequence[BaseMessage], add_messages]
    # Pending tool-call descriptors (set in the initial state; informational).
    tool_calls: Optional[List[Dict[str, Any]]]
    # Outcome of the most recent human review; None until a review happens.
    human_approved: Optional[bool]
    # Number of times a human has been asked to approve a tool call.
    approval_count: int

# Build the agent graph inside an MCP client context.
@asynccontextmanager
async def make_graph():
    """Async context manager that builds and yields a compiled LangGraph agent.

    The graph wires an LLM agent to MCP-provided tools (tavily-mcp over stdio)
    with a human-approval gate: high-risk tool calls pause the graph via
    ``interrupt`` until a human resumes it with 'ok' or 'reject'.

    Yields:
        The compiled graph (with an in-memory checkpointer, required for
        interrupt/resume to work).
    """
    mcp_client = MultiServerMCPClient(
        {
            "tavily-mcp": {
                "command": "npx",
                "args": ["-y", "tavily-mcp"],
                "transport": "stdio",
                # Forward the full environment so the MCP server can read
                # credentials such as TAVILY_API_KEY.
                "env": {**os.environ},
            }
        }
    )

    # Tools that must never run without explicit human sign-off.
    HIGH_RISK_TOOLS = ["tavily-search", "python-repl", "shell", "file_write"]

    def agent(state: State):
        """LLM agent node - reads the history and emits a reply or tool calls."""
        response = llm_with_tool.invoke(state["messages"])
        return {"messages": [response]}

    def human_approval_node(state: State):
        """Human review node - inspects pending tool calls and, for high-risk
        ones, interrupts the graph to wait for human confirmation.

        Returns a state update; on rejection the rejection ToolMessages are
        returned through the update dict so the add_messages reducer records
        them (in-place mutation of state["messages"] would bypass the reducer
        and the checkpointer).
        """
        messages = state["messages"]
        last_message = messages[-1] if messages else None

        print(f"消息历史：{[msg.content for msg in messages]}")
        tool_calls = []
        if last_message and isinstance(last_message, AIMessage) and getattr(last_message, "tool_calls", None):
            tool_calls = last_message.tool_calls

        # No tool calls -> nothing to review, approve automatically.
        if not tool_calls:
            return {"human_approved": True}

        tool_calls_info = [
            f"{tc.get('name', '未知工具')}({tc.get('args', {})})" for tc in tool_calls
        ]
        print(f"工具调用信息: {tool_calls_info}")

        # Auto-approve only when NO requested tool is high-risk.
        # Fix: the original inspected only tool_calls[0], silently approving
        # any high-risk tool requested in a later position.
        if not any(tc.get("name") in HIGH_RISK_TOOLS for tc in tool_calls):
            return {"human_approved": True}

        tool_calls_str = "\n - ".join(tool_calls_info)
        print("\n*********进入人工审核节点*********")
        print(f"AI想要使用以下工具:\n - {tool_calls_str}")
        print(f"审核次数: {state.get('approval_count', 0)}")

        # Pause the graph and wait for a human decision; on resume, this node
        # re-executes from the top and `value` holds the Command(resume=...) payload.
        value = interrupt({
            "tool_calls": tool_calls_str,
            "message": "请输入 'ok' 批准工具使用，或输入 'reject' 拒绝"
        })

        approval_count = state.get('approval_count', 0) + 1
        # str() + strip() makes the check robust to stray whitespace / non-str resume values.
        human_approved = str(value).strip().lower() == 'ok'

        update: Dict[str, Any] = {
            "human_approved": human_approved,
            "approval_count": approval_count,
        }
        if not human_approved:
            # Fix: return the rejection messages via the reducer instead of
            # appending to state["messages"] in place, and answer EVERY
            # tool_call id — chat APIs reject a transcript in which an
            # assistant tool call has no corresponding tool message.
            update["messages"] = [
                ToolMessage(
                    content="工具请求被拒绝，请直接回答问题或尝试其他方法，或者拒绝回答问题。",
                    tool_call_id=tc.get("id", "未知工具ID"),
                )
                for tc in tool_calls
            ]
        return update

    def approval_router(state: State):
        """Route to the tool node on approval, back to the agent on rejection."""
        return "tools" if state.get("human_approved") else "agent"

    mcp_tools = await mcp_client.get_tools()
    print(f"可用工具: {[tool.name for tool in mcp_tools]}")
    llm_with_tool = model.bind_tools(mcp_tools)

    # Assemble and compile the graph.
    graph_builder = StateGraph(State)
    graph_builder.add_node("agent", agent)
    graph_builder.add_node("tool", ToolNode(mcp_tools))
    graph_builder.add_node("human_approval", human_approval_node)

    graph_builder.add_edge(START, "agent")

    # If the agent requested tools, route through human review first.
    graph_builder.add_conditional_edges(
        "agent",
        tools_condition,
        {
            "tools": "human_approval",
            END: END,
        },
    )

    # After review: run the tools, or bounce back to the agent.
    graph_builder.add_conditional_edges(
        "human_approval",
        approval_router,
        {
            "tools": "tool",
            "agent": "agent"
        }
    )

    graph_builder.add_edge("tool", "agent")

    # A checkpointer is mandatory for interrupt()/Command(resume=...) support.
    checkpointer = InMemorySaver()
    graph = graph_builder.compile(checkpointer=checkpointer)
    graph.name = "工具代理"

    yield graph

# Drive one human-in-the-loop conversation end-to-end.
async def run_with_hitl(query: str):
    """Run the tool agent on *query*, resuming through any human-approval
    interrupts, and return the final graph state."""
    config = {"configurable": {"thread_id": str(uuid.uuid4())}}

    # Seed state: system instruction + the user's question.
    initial_state = {
        "messages": [
            SystemMessage(content="你是一个使用工具回答问题的助手，如果工具被拒绝，请不要使用。"),
            HumanMessage(content=query),
        ],
        "tool_calls": None,
        "human_approved": None,
        "approval_count": 0,
    }

    print("=== MCP工具使用人类参与演示 ===")

    async with make_graph() as graph:
        state = await graph.ainvoke(initial_state, config=config)

        # While the graph is paused on an interrupt, prompt the human and resume.
        while "__interrupt__" in state:
            pending = state["__interrupt__"][0].value
            print(f"\n{pending.get('message')}: ", end="")
            answer = input()
            state = await graph.ainvoke(Command(resume=answer), config=config)

        # Show the final assistant reply, if any.
        if state.get("messages"):
            print("\n=== 最终回答 ===")
            print(state["messages"][-1].content)

        return state

async def main():
    """Entry point: run the HITL demo with a sample search query."""
    await run_with_hitl("搜索下黑神话悟空的最新消息")


if __name__ == "__main__":
    asyncio.run(main())