import asyncio
import json
import os
import time
from typing import List, Dict, Any
from mcp import ClientSession
from mcp.client.stdio import stdio_client, StdioServerParameters
from openai import AsyncOpenAI
import tkinter as tk
from tkinter import messagebox
import asyncio, json, time, random
from typing import List, Dict, Any
from openai import AsyncOpenAI, APIStatusError



# ---------- Configuration ----------

CONFIG_PATH = "mcp.json"
# Load API credentials, model name, endpoint and the system-rule prompts
# from Config.json at import time (raises if the file or a key is missing).
with open("Config.json", "r", encoding="utf-8") as f:
    config = json.load(f)
    MOONSHOT_API_KEY = config["api_key"]
    MODEL_NAME = config["model"]
    BASEURL = config["base_url"]
    Rules = config["Rules"]  # list of seed messages used as conversation history

MAX_RETRIES = 5         # maximum number of retries on rate-limit / 5xx errors
BASE_DELAY   = 20        # base backoff delay in seconds (doubled per retry)

# Module-level async client used by chat_turn()/chat_loop().
client = AsyncOpenAI(
    api_key=MOONSHOT_API_KEY,
    base_url=BASEURL
)

# ---------- MCP server helpers ----------
def load_mcp_config() -> Dict[str, StdioServerParameters]:
    """Read CONFIG_PATH and map each server name to its stdio launch parameters."""
    with open(CONFIG_PATH, "r", encoding="utf-8") as f:
        cfg = json.load(f)
    # One StdioServerParameters entry per configured MCP server;
    # "args" and "env" are optional in the config file.
    return {
        name: StdioServerParameters(
            command=info["command"],
            args=info.get("args", []),
            env=info.get("env"),
        )
        for name, info in cfg["mcpServers"].items()
    }

def ask_permission(tool_name: str, tool_args: dict) -> bool:
    """Show a yes/no dialog asking the user to authorize a tool call.

    Returns True when the user grants permission.
    """
    window = tk.Tk()
    window.withdraw()                      # hide the empty root window
    window.lift()
    window.attributes("-topmost", True)    # keep the dialog above other windows
    granted = messagebox.askyesno(
        title="AI 工具调用请求",
        message=f"AI 即将调用工具：{tool_name}\n\n参数：{json.dumps(tool_args, ensure_ascii=False, indent=2)}\n\n是否允许？",
        default=messagebox.YES,
    )
    window.destroy()
    return granted

async def invoke_tool_with_confirm(tool_name: str, arguments: dict) -> str:
    """Ask the user for permission via a dialog, then run the tool if granted."""
    # The Tk dialog is blocking, so run it in a worker thread to keep the
    # event loop responsive.
    allowed = await asyncio.get_running_loop().run_in_executor(
        None, ask_permission, tool_name, arguments
    )
    if allowed:
        return await invoke_tool(tool_name, arguments)
    return "用户拒绝执行该工具"

async def collect_tools() -> List[Dict[str, Any]]:
    """Connect to every configured MCP server and gather its tool definitions.

    Returns the tools in the OpenAI "function calling" schema so the list can
    be passed directly to chat.completions.create(tools=...).
    """
    tools: List[Dict[str, Any]] = []
    for _, params in load_mcp_config().items():
        async with stdio_client(params) as (read, write):
            async with ClientSession(read, write) as session:
                await session.initialize()
                # Access the result's .tools attribute directly instead of
                # iterating the result object's (field, value) pairs —
                # same access pattern as build_tool_route().
                for tool in (await session.list_tools()).tools:
                    tools.append({
                        "type": "function",
                        "function": {
                            "name": tool.name,
                            "description": tool.description or "",
                            "parameters": tool.inputSchema or {}
                        }
                    })
    return tools

async def invoke_tool(tool_name: str, arguments: dict) -> str:
    """Route a tool call to the MCP server that owns it and return its text output.

    Raises RuntimeError when the tool is unknown or the server reports an error.
    """
    params = TOOL_ROUTE.get(tool_name)
    if params is None:
        raise RuntimeError("工具未找到：" + tool_name)

    # Spawn a fresh stdio session for each invocation.
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool(tool_name, arguments)
            text = "".join(chunk.text for chunk in result.content)
            if result.isError:
                raise RuntimeError(text)
            return text

# --------------------------------------------------
# Dialogue turn (model call + tool execution loop)
# --------------------------------------------------
async def chat_turn(
    messages: List[Dict[str, Any]],
    tools: List[Dict[str, Any]],
    retries: int = 0
) -> List[Dict[str, Any]]:
    """Run one dialogue round, executing tool calls until the model answers
    in plain text.

    Appends the assistant replies, tool results and final answer to *messages*
    and returns the updated list.  *retries* is the retry counter to start
    from for the first API call (kept for backward compatibility).

    Implemented iteratively — the original recursed once per tool round, so a
    long chain of tool calls could exhaust Python's recursion limit.
    """
    while True:
        # --- call the model, exponential backoff on rate limits / 5xx ---
        attempt = retries
        while True:
            try:
                response = await client.chat.completions.create(
                    model=MODEL_NAME,
                    messages=messages,
                    tools=tools,
                    tool_choice="auto"
                )
                break
            except APIStatusError as e:
                if e.status_code in (429, 500, 502, 503, 504) and attempt < MAX_RETRIES:
                    delay = BASE_DELAY * (2 ** attempt) + random.uniform(0, 1)
                    print(f"[限流/错误] 等待 {delay:.1f}s 后重试({attempt+1}/{MAX_RETRIES})")
                    await asyncio.sleep(delay)
                    attempt += 1
                    continue
                raise
        retries = 0  # later rounds start with a fresh retry budget

        msg = response.choices[0].message
        messages.append(msg)  # record the assistant reply / tool request first

        # No tool call -> the model produced its final natural-language answer.
        if not msg.tool_calls:
            print("📢 AI：", msg.content)
            return messages

        # Execute every requested tool (after user confirmation), feed the
        # results back, then loop so the model can continue reasoning.
        for call in msg.tool_calls:
            print(f"🛠️ 调用工具 {call.function.name} 参数：{call.function.arguments}")
            result = await invoke_tool_with_confirm(
                call.function.name,
                json.loads(call.function.arguments)
            )
            messages.append({
                "role": "tool",
                "content": result,
                "tool_call_id": call.id
            })
            print(f"📦 工具返回：{result}")

# --------------------------------------------------
# Main interactive loop
# --------------------------------------------------
async def chat_loop():
    """Interactive REPL: read user input and run chat turns until 'exit'/'quit'."""
    tools = await collect_tools()
    print(Rules)
    # Copy the rule prompts so appending to the history never mutates the
    # global Rules list (previously `messages = Rules` aliased it directly).
    messages: List[Dict[str, Any]] = list(Rules)
    print("实际发现工具：", [t["function"]["name"] for t in tools])
    print("🤖 已加载 MCP 工具，输入 exit 退出对话\n")

    while True:
        user = input("👤 你：").strip()
        if user.lower() in {"exit", "quit"}:
            break
        messages.append({"role": "user", "content": user})

        # One full round of dialogue until the AI gives a final reply.
        messages = await chat_turn(messages, tools)

# Module-level conversation state shared across chat() calls so that the
# tool schemas, API client and message history survive between invocations.
chat_state = {
    "tools": None,              # cached OpenAI-format tool schemas from collect_tools()
    "client": None,             # lazily created AsyncOpenAI client
    "messages": None,           # running conversation history (list of messages)
    "tool_route_built": False   # whether build_tool_route() has already run
}


async def chat(user_input: str) -> str:
    """Single-shot chat entry point that keeps state across calls.

    Lazily initializes the tool route, tool schemas, API client and message
    history in the module-level chat_state, appends *user_input*, runs the
    tool-calling loop (max depth 10) and returns the AI's final
    natural-language reply (or a tool result / fallback message).
    """
    global chat_state

    # One-time initialization: map each tool name to its owning MCP server.
    if not chat_state["tool_route_built"]:
        print("首次调用，构建工具路由...")
        await build_tool_route()
        print("工具路由构建完成")
        chat_state["tool_route_built"] = True

    if chat_state["tools"] is None:
        print("收集工具...")
        chat_state["tools"] = await collect_tools()
        print(f"工具收集完成，共 {len(chat_state['tools'])} 个工具")

    if chat_state["client"] is None:
        print("初始化 AsyncOpenAI 客户端...")
        chat_state["client"] = AsyncOpenAI(
            api_key=MOONSHOT_API_KEY,
            base_url=BASEURL
        )
        print("AsyncOpenAI 客户端初始化完成")

    if chat_state["messages"] is None:
        print("初始化消息列表...")
        # Copy so appending to the history never mutates the global Rules
        # list (previously the list was aliased and Rules got polluted).
        chat_state["messages"] = list(Rules)
        print("消息列表初始化完成")

    tools = chat_state["tools"]
    client = chat_state["client"]  # intentionally shadows the module-level client
    messages = chat_state["messages"]

    # Append the user's input to the shared history.
    print(f"添加用户输入: {user_input}")
    messages.append({"role": "user", "content": user_input})

    async def recursive_turn(messages: list, tools: list, depth: int = 0) -> list:
        """Recursively run model rounds, executing tool calls, up to depth 10."""
        if depth > 10:
            messages.append({
                "role": "system",
                "content": "警告：达到最大递归深度，停止继续处理工具调用"
            })
            return messages

        try:
            # Call the model with exponential backoff on rate limits / 5xx.
            for retry in range(MAX_RETRIES + 1):
                try:
                    response = await client.chat.completions.create(
                        model=MODEL_NAME,
                        messages=messages,
                        tools=tools,
                        tool_choice="auto"
                    )
                    break
                except APIStatusError as e:
                    if retry < MAX_RETRIES and e.status_code in (429, 500, 502, 503, 504):
                        delay = BASE_DELAY * (2 ** retry) + random.uniform(0, 1)
                        print(f"[限流/错误] 等待 {delay:.1f}s 后重试({retry + 1}/{MAX_RETRIES})")
                        await asyncio.sleep(delay)
                    else:
                        raise
        except Exception as e:
            # Surface the failure in the history instead of crashing the caller.
            error_msg = f"模型调用失败: {str(e)}"
            messages.append({"role": "system", "content": error_msg})
            return messages

        msg = response.choices[0].message
        # Store a plain dict so the extraction passes below can treat
        # history entries uniformly.
        message_dict = {
            "role": msg.role,
            "content": msg.content,
            "tool_calls": msg.tool_calls
        }
        messages.append(message_dict)

        # Execute any requested tool calls and feed the results back.
        if message_dict.get("tool_calls"):
            tool_calls = message_dict["tool_calls"]
            for call in tool_calls:
                print(f"调用工具: {call.function.name}")
                try:
                    result = await invoke_tool_with_confirm(
                        call.function.name,
                        json.loads(call.function.arguments)
                    )
                    print(f"工具返回结果，长度: {len(str(result))}")
                    messages.append({
                        "role": "tool",
                        "content": result,
                        "tool_call_id": call.id
                    })
                except Exception as e:
                    error_msg = f"工具调用失败: {str(e)}"
                    messages.append({"role": "system", "content": error_msg})

            # Let the model continue reasoning over the tool results.
            return await recursive_turn(messages, tools, depth + 1)

        return messages

    messages = await recursive_turn(messages, tools)

    # Extract the AI's last natural-language reply from the history.
    ai_reply = ""
    for msg in reversed(messages):
        if isinstance(msg, dict) and msg.get("role") == "assistant" and "content" in msg and msg["content"]:
            ai_reply = msg["content"]
            break
        elif hasattr(msg, "role") and msg.role == "assistant" and hasattr(msg, "content") and msg.content:
            # Raw SDK message objects (e.g. appended by chat_turn) also count.
            ai_reply = msg.content
            break

    # Fall back to the most recent tool output if no assistant text was found.
    if not ai_reply:
        for msg in reversed(messages):
            if isinstance(msg, dict) and msg.get("role") == "tool" and "content" in msg:
                ai_reply = f"工具执行结果:\n{msg['content']}"
                break
            elif hasattr(msg, "role") and msg.role == "tool" and hasattr(msg, "content"):
                ai_reply = f"工具执行结果:\n{msg.content}"
                break

    return ai_reply or "无法生成回复，请检查工具状态"

# Cache mapping tool_name -> StdioServerParameters of the MCP server that
# owns it; filled once by build_tool_route() and read by invoke_tool().
TOOL_ROUTE: Dict[str, Any] = {}

async def build_tool_route() -> None:
    """Scan every configured MCP server once and fill the TOOL_ROUTE cache."""
    for name, server_params in load_mcp_config().items():
        try:
            async with stdio_client(server_params) as (read, write):
                async with ClientSession(read, write) as session:
                    await session.initialize()
                    # Remember which server provides each advertised tool.
                    for tool in (await session.list_tools()).tools:
                        TOOL_ROUTE[tool.name] = server_params
        except Exception as e:
            # One broken server must not stop the scan of the others.
            print(f"扫描 server {name} 失败: {e}")
    print(TOOL_ROUTE)

async def main():
    """Entry point: build the tool -> server route, then start the chat REPL."""
    await build_tool_route()
    await chat_loop()

if __name__ == "__main__":
    asyncio.run(main())
