import asyncio
import os
import json
import traceback
from typing import Optional, Dict
from contextlib import AsyncExitStack

from langchain import hub
from langchain.agents import create_openai_tools_agent, AgentExecutor
from langchain.chat_models import init_chat_model
from langchain_core.tools import BaseTool
from openai import OpenAI
from dotenv import load_dotenv
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_mcp_adapters.tools import load_mcp_tools

# Load variables from a local .env file, overriding any values already set
# in the process environment (so .env always wins).
load_dotenv(override=True)

class MCPClient:
    """Client that manages connections to multiple MCP servers and drives a
    tool-calling chat loop on top of the tools those servers expose.

    Two call paths coexist in this class:
      * a LangChain agent path (``chat_base``), which is the one actually used
        by ``chat_loop``; and
      * a raw OpenAI function-calling path
        (``create_function_response_messages`` / ``_call_mcp_tool``), kept for
        direct chat-completions use.
    """

    def __init__(self):
        """Read configuration from the environment and initialize shared state.

        Raises:
            ValueError: if OPENAI_API_KEY is not set in the environment/.env.
        """
        self.exit_stack = AsyncExitStack()
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.base_url = os.getenv("BASE_URL")
        self.model = os.getenv("MODEL")
        if not self.openai_api_key:
            raise ValueError("❌ 未找到 OPENAI_API_KEY,请在 .env 文件中配置")
        # Direct OpenAI client, used by the raw function-calling path.
        self.client = OpenAI(api_key=self.openai_api_key, base_url=self.base_url)
        # server_name -> live MCP ClientSession.
        self.sessions: Dict[str, ClientSession] = {}
        # server_name -> list of tools exposed by that session.
        self.tools_by_session: Dict[str, list] = {}
        # Flattened list of every tool across all sessions.
        self.all_tools = []
        # BUG FIX: the original line was a bare annotation
        # (`self.servers: Dict[str, dict]`) which never created the attribute,
        # so any later read would raise AttributeError. Initialize it.
        self.servers: Dict[str, dict] = {}

    async def connect_to_servers(self, servers: dict) -> list[BaseTool]:
        """Connect to all configured MCP servers and return their tools.

        Args:
            servers: mapping of server name -> MultiServerMCPClient config
                (command/args/transport).

        Returns:
            A flat list of LangChain ``BaseTool`` objects from all servers.
        """
        client = MultiServerMCPClient(servers)
        tools = await client.get_tools()
        # Print the discovered tool names so the user can see what is available.
        for tool in tools:
            print(f" - {tool.name}")
        return tools

    async def transform_json(self, json2_data):
        """Convert MCP-style tool descriptors to OpenAI function-calling format.

        Renames ``function.input_schema`` to ``function.parameters`` and drops
        every field other than type/name/description/parameters. Items missing
        the required keys are skipped silently.

        Args:
            json2_data: an iterable of tool-descriptor dicts (parsed JSON).

        Returns:
            A new list of descriptors in OpenAI ``tools`` format.
        """
        result = []
        for item in json2_data:
            # Both "type" and "function" must be present for a valid descriptor.
            if not isinstance(item, dict) or "type" not in item or "function" not in item:
                continue
            old_func = item["function"]
            # The function block must carry at least a name and a description.
            if not isinstance(old_func, dict) or "name" not in old_func or "description" not in old_func:
                continue
            new_func = {
                "name": old_func["name"],
                "description": old_func["description"],
                "parameters": {},
            }
            # Map input_schema -> parameters, keeping only the three fields the
            # OpenAI API understands: type, properties, required.
            if "input_schema" in old_func and isinstance(old_func["input_schema"], dict):
                old_schema = old_func["input_schema"]
                new_func["parameters"]["type"] = old_schema.get("type", "object")
                new_func["parameters"]["properties"] = old_schema.get("properties", {})
                new_func["parameters"]["required"] = old_schema.get("required", [])
            result.append({
                "type": item["type"],
                "function": new_func,
            })
        return result

    async def _start_one_server(self, script_path: str) -> ClientSession:
        """Spawn a single MCP server subprocess and return an initialized session.

        Args:
            script_path: path to a ``.py`` (run with python) or ``.js``
                (run with node) server script.

        Raises:
            ValueError: if the script is neither a .py nor a .js file.
        """
        is_python = script_path.endswith(".py")
        is_js = script_path.endswith(".js")
        if not (is_python or is_js):
            raise ValueError("服务器脚本必须是 .py 或 .js 文件")

        command = "python" if is_python else "node"
        server_params = StdioServerParameters(
            command=command,
            args=[script_path],
            env=None,
        )
        # Register transport and session on the exit stack so cleanup() can
        # tear everything down in reverse order.
        stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
        read_stream, write_stream = stdio_transport
        session = await self.exit_stack.enter_async_context(ClientSession(read_stream, write_stream))
        await session.initialize()
        return session

    async def chat_base(self, query, tools: list[BaseTool], messages) -> str:
        """Answer *query* using a LangChain OpenAI-tools agent over *tools*.

        NOTE(review): ``messages`` is accepted for conversation history but the
        agent is currently invoked with the bare query only — confirm whether
        history should be threaded into the agent input.

        Args:
            query: the user's question.
            tools: LangChain tools the agent may call.
            messages: rolling conversation history (currently unused here).

        Returns:
            The agent's final text answer.
        """
        model = init_chat_model(model="deepseek-chat", model_provider="deepseek")
        # Generic OpenAI-tools agent prompt from LangChain Hub.
        prompt = hub.pull("hwchase17/openai-tools-agent")
        agent = create_openai_tools_agent(model, tools, prompt)
        agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

        response = await agent_executor.ainvoke({"input": query})
        return response["output"]

    async def create_function_response_messages(self, messages, response):
        """Execute every tool call in *response* and append results to *messages*.

        Appends the assistant message (with its tool_calls) followed by one
        ``role="tool"`` message per executed call, then returns the extended
        message list, ready for the next chat-completions round trip.
        """
        function_call_messages = response.choices[0].message.tool_calls
        messages.append(response.choices[0].message.model_dump())
        for function_call_message in function_call_messages:
            tool_name = function_call_message.function.name
            tool_args = json.loads(function_call_message.function.arguments)

            print(f"\n\n[Calling tool {tool_name} with args {tool_args}]\n\n")

            # Run the tool on the owning MCP server.
            function_response = await self._call_mcp_tool(tool_name, tool_args)

            # Normalize the various MCP result shapes down to plain text.
            if isinstance(function_response, str):
                content = function_response
            elif isinstance(function_response, list):
                # List of content parts; take the text of the first part.
                content = function_response[0].text
            else:
                content = function_response.text if function_response.text else function_response

            messages.append(
                {
                    "role": "tool",
                    "content": content,
                    "tool_call_id": function_call_message.id,
                }
            )
        return messages

    async def _call_mcp_tool(self, tool_full_name: str, tool_args: dict) -> str:
        """Dispatch a ``serverName_toolName`` call to the matching server.

        Splits on the FIRST underscore only, e.g. ``"weather_query_weather"``
        -> server ``"weather"``, tool ``"query_weather"``. Lookup failures are
        reported as error strings rather than raised, so they flow back to the
        model as tool output.
        """
        parts = tool_full_name.split("_", 1)
        if len(parts) != 2:
            return f"无效的工具名称: {tool_full_name}"
        server_name, tool_name = parts
        session = self.sessions.get(server_name)
        if not session:
            return f"找不到服务器: {server_name}"
        resp = await session.call_tool(tool_name, tool_args)
        return resp.content if resp.content else "工具执行无输出"

    async def chat_loop(self, tools: list[BaseTool]):
        """Interactive REPL: read user queries until 'quit', answering each
        with ``chat_base`` while keeping a rolling 20-message history."""
        print("\n🤖 多服务器 MCP + 最新 Function Calling 客户端已启动!输入 'quit' 退出｡")
        messages = []
        while True:
            query = input("\n你: ").strip()
            if query.lower() == "quit":
                break
            try:
                messages.append({"role": "user", "content": query})
                # Keep only the most recent turns to bound the history size.
                messages = messages[-20:]
                result = await self.chat_base(query, tools, messages)
                # BUG FIX: record the assistant reply too — the original only
                # stored user turns, so the "history" never held any answers.
                messages.append({"role": "assistant", "content": result})
                print(f"\nAI: {result}")
            except Exception as e:
                # Show the stack trace for debugging, then keep the loop alive.
                traceback.print_exc()
                print(f"\n⚠️ 调用过程出错: {e}")

    async def cleanup(self):
        """Close every transport/session registered on the exit stack."""
        await self.exit_stack.aclose()
async def main():
    """Entry point: declare the MCP server scripts, connect to them all,
    run the interactive chat loop, and always release resources on exit."""
    # Every server is launched the same way (python over stdio), so only the
    # script paths differ per server name.
    script_paths = {
        "write": "D:/sources/AI Program/MCPProjects/JiuTian/FileMCPServer/src/FileServer.py",
        "weather": "D:/sources/AI Program/MCPProjects/JiuTian/WeatherInfo/weather_server/src/weahter_server.py",
        "SQLServer": "D:/sources/AI Program/MCPProjects/JiuTian/NL2SQL/nl2sql_server/src/MCPServer.py",
        "PythonServer": "D:/sources/AI Program/MCPProjects/JiuTian/PythonInterpreter/pythoninterpreter-mcpserver/src/MCPServer.py",
        "GraphRAGServer": "D:/sources/AI Program/MCPProjects/JiuTian/GraphRAG/graphrag_server/src/MCPServer.py",
    }
    servers = {
        name: {"command": "python", "args": [path], "transport": "stdio"}
        for name, path in script_paths.items()
    }

    client = MCPClient()
    try:
        tools = await client.connect_to_servers(servers)
        await client.chat_loop(tools)
    finally:
        # Tear down all server sessions/transports even if the loop errored.
        await client.cleanup()
if __name__ == "__main__":
    asyncio.run(main())

    # Example queries to try in the chat loop:
    # Which city is hotter today, Beijing or Tianjin?
    # Use the local search method to look up knowledge about the ID3 algorithm.
    # Check the status of local search.
    # Query all data in the students_scores table.
    # Export the data in the students_scores table to d:\students_scores_2025-04-25_1.csv.
    # Please compute the average score of course1, course2 and course3 for all students in the students_scores table.