import asyncio
import os
import json
from typing import Any, Optional, Dict
from openai import OpenAI
from dotenv import load_dotenv
from contextlib import AsyncExitStack
from openai.types.chat import ChatCompletionMessageParam
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client  # stdio transport used to spawn MCP server subprocesses
from dashscope import MultiModalConversation

class MCPClient:
    """Multi-server MCP client.

    Starts one or more MCP servers over stdio, merges their tools into the
    OpenAI function-calling format, and drives either a single-shot API call
    or an interactive chat loop, routing tool calls back to the right server.
    """

    def __init__(self):
        """Read configuration from environment variables and build the chat client.

        Raises:
            ValueError: if OPENAI_API_KEY or MODEL is missing.
        """
        self.exit_stack = AsyncExitStack()
        self.openai_api_key = os.getenv("OPENAI_API_KEY")  # OpenAI API key
        self.base_url = os.getenv("BASE_URL")  # API base URL (optional)
        self.model = os.getenv("MODEL")  # chat model name
        # USE_STREAM arrives as a string (or None). The original compared it
        # with `== True`, which is always False for a string, so the streaming
        # branches could never run. Normalize to a real boolean here.
        self.useSteam = (os.getenv("USE_STREAM") or "").strip().lower() in ("1", "true", "yes", "on")
        if not self.openai_api_key:
            raise ValueError("❌ 未找到 OpenAI API Key，请在 .env 文件中设置OPENAI_API_KEY")
        if not self.model:
            raise ValueError("❌ 未找到 MODEL，请在 .env 文件中设置MODEL")
        self.client = OpenAI(api_key=self.openai_api_key, base_url=self.base_url)
        self.sessions: Dict[str, ClientSession] = {}  # server name -> live session
        self.tools_mcp = []
        self.tools_by_session: Dict[str, list] = {}  # raw tool list per session
        self.all_tools = []  # merged tools in OpenAI function-calling format

    async def get_user_input(self) -> str:
        """Read one line of user input and strip surrounding whitespace."""
        return input("\n你: ").strip()

    async def process_query(self, user_query: str) -> Any:
        """Run one query through OpenAI function calling.

        1. Send the user message together with the tool list.
        2. If ``finish_reason == "tool_calls"``, parse the first tool call and
           execute the matching MCP tool.
        3. Feed the tool output back to the model for the final answer.

        Returns:
            A dict with ``tool_name``/``result`` keys on success, or a plain
            warning string when the model reply is unusable.
        """
        messages: list[ChatCompletionMessageParam] = [
            {"role": "user", "content": user_query},
        ]
        response = self.client.chat.completions.create(
            model=self.model if self.model else "gpt-4o-mini",
            messages=messages,
            tools=self.all_tools
        )
        content = response.choices[0]
        print("🚀 ~ content:", content, self.all_tools)
        if content.finish_reason == "tool_calls":
            # The model asked for a tool; only the first call is honored here.
            if not content.message.tool_calls:
                return "⚠️ 未收到有效的工具调用"
            tool_call = content.message.tool_calls[0]
            tool_name = tool_call.function.name
            tool_args = json.loads(tool_call.function.arguments)

            print(f"🚀Calling tool {tool_name} with args {tool_args}")
            result = await self._call_mcp_tool(tool_name, tool_args)
            print("🚀 ~ result:", result)
            # _call_mcp_tool returns either the MCP content list or a plain
            # fallback/error string. The original did `result.content[0]`,
            # but `result` already IS the content list -> AttributeError.
            result_text = result if isinstance(result, str) else result[0].text
            if tool_name == "jump_page":
                # Page jumps go straight back to the caller without a second
                # round-trip through the model.
                return {
                    "tool_name": tool_name,
                    "result": result_text,
                }
            # Append the assistant tool-call message plus the tool output so
            # the model can compose the final answer.
            message_to_append: Any = content.message.model_dump()
            messages.append(message_to_append)
            messages.append({
                "role": "tool",
                "content": result_text,
                "tool_call_id": tool_call.id,
            })
            response = self.client.chat.completions.create(
                model=self.model if self.model else "gpt-4o-mini",
                messages=messages,
            )
            if not response.choices[0].message.content:
                return "⚠️ 未收到有效的回复"
            return {
                "tool_name": tool_name,
                "result": response.choices[0].message.content,
            }
        if not content.message.content:
            return "⚠️ 未收到有效的回复"
        return {
            "tool_name": 'msg',
            "result": content.message.content,
        }

    async def _call_mcp_tool(self, tool_full_name: str, tool_args: dict) -> Any:
        """Dispatch ``"serverName_toolName"`` to the matching server session.

        Returns:
            The MCP content list on success, or an error/fallback string.
        """
        # Split on the FIRST "_" only: "weather_query_weather" ->
        # ["weather", "query_weather"].
        parts = tool_full_name.split("_", 1)
        if len(parts) != 2:
            return f"无效的工具名称: {tool_full_name}"
        server_name, tool_name = parts
        session = self.sessions.get(server_name)
        if not session:
            return f"找不到服务器: {server_name}"
        resp = await session.call_tool(tool_name, tool_args)
        return resp.content if resp.content else "工具执行无输出"

    async def connect_to_server(self, servers: dict):
        """Start every configured server and merge their tools.

        servers: e.g. {"weather": "weather_server.py", "rag": "rag_server.py"}
        """
        self.all_tools = []  # rebuild from scratch on every (re)connect
        for server_name, script_path in servers.items():
            session = await self._start_one_server(script_path)
            self.sessions[server_name] = session
            resp = await session.list_tools()
            self.tools_by_session[server_name] = resp.tools
            for tool in resp.tools:
                # Prefix with the server name so calls can be routed back.
                function_name = f"{server_name}_{tool.name}"
                self.all_tools.append({
                    "type": "function",
                    "function": {
                        "name": function_name,
                        "description": tool.description,
                        "input_schema": tool.inputSchema
                    }
                })
        # Convert to OpenAI function-calling format ONCE, after all servers
        # are registered. The original ran this inside the loop, so with more
        # than one server the already-converted entries were converted again
        # and lost their "parameters" (transform_json rebuilds them from
        # "input_schema", which converted entries no longer carry).
        self.all_tools = await self.transform_json(self.all_tools)
        print("\n✅ 已连接到下列服务器:")
        for name in servers:
            print(f" - {name}: {servers[name]}")
        print("\n汇总的工具:")
        for t in self.all_tools:
            print(f" - {t['function']['name']}")

    async def transform_json(self, json2_data):
        """Convert tool entries with ``input_schema`` into OpenAI's
        function-calling shape (``parameters``); extra fields are dropped.

        :param json2_data: a list-like object of tool dicts
        :return: a new list of converted entries
        """
        result = []
        for item in json2_data:
            # Both "type" and "function" must be present.
            if not isinstance(item, dict) or "type" not in item or "function" not in item:
                continue
            old_func = item["function"]
            # The function entry must carry the required sub-fields.
            if not isinstance(old_func, dict) or "name" not in old_func or "description" not in old_func:
                continue
            new_func = {
                "name": old_func["name"],
                "description": old_func["description"],
                "parameters": {}
            }
            # Map input_schema -> parameters, keeping only type/properties/required.
            if "input_schema" in old_func and isinstance(old_func["input_schema"], dict):
                old_schema = old_func["input_schema"]
                new_func["parameters"]["type"] = old_schema.get("type", "object")
                new_func["parameters"]["properties"] = old_schema.get("properties", {})
                new_func["parameters"]["required"] = old_schema.get("required", [])

            new_item = {
                "type": item["type"],
                "function": new_func
            }
            result.append(new_item)
        return result

    async def chat_loop(self):
        """Run the interactive REPL until the user types 'quit'."""
        print("\nMCP 客户端已启动！输入 'quit' 退出")
        messages = []
        while True:
            query = input("\n你: ").strip()
            if query.lower() == 'quit':
                break
            try:
                messages.append({"role": "user", "content": query})
                messages = messages[-20:]  # keep a bounded history window
                response = await self.chat_base(messages)
                if self.useSteam:
                    # chat_base returns a dict; the original used attribute
                    # access (`response.result`) which raises AttributeError.
                    for chunk in response['result']:
                        if hasattr(chunk, 'choices'):
                            choice = chunk.choices[0]
                            if hasattr(choice, 'delta') and hasattr(choice.delta, 'content'):
                                # delta.content can be None on role/stop chunks.
                                if choice.delta.content:
                                    print(choice.delta.content, end="", flush=True)
                else:
                    messages.append(response['result'].choices[0].message.model_dump())
                    result = response['result'].choices[0].message.content
                    print(f"\nAI: {result}")
            except Exception as e:
                print(f"\n⚠️ 调用过程出错: {e}")

    async def cleanup(self):
        """Close every context owned by the exit stack (sessions, transports)."""
        await self.exit_stack.aclose()

    async def api_call(self, query: str) -> Any:
        """Single-shot entry point for a frontend: one query in, one dict out."""
        messages = [{"role": "user", "content": query}]
        messages = messages[-20:]
        response = await self.chat_base(messages)
        if response['tool_name'] != 'server_jump_page':
            # Unwrap the raw completion object down to the final text.
            result = response['result'].choices[0].message.content
            response['result'] = result
        return response

    async def _start_one_server(self, script_path: str) -> "ClientSession":
        """Spawn one MCP server subprocess and return an initialized session.

        Raises:
            ValueError: if the script is neither a .py nor a .js file.
        """
        is_python = script_path.endswith(".py")
        is_js = script_path.endswith(".js")
        if not (is_python or is_js):
            raise ValueError("服务器脚本必须是 .py 或 .js 文件")
        command = "python" if is_python else "node"
        # Pass the current environment through so the child sees .env values.
        current_env = os.environ.copy()
        server_params = StdioServerParameters(
            command=command,
            args=[script_path],
            env=current_env
        )
        stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
        read_stream, write_stream = stdio_transport
        session = await self.exit_stack.enter_async_context(ClientSession(read_stream, write_stream))
        await session.initialize()
        return session

    async def chat_base(self, messages: list) -> dict:
        """Tool-call polling loop.

        Keeps calling the model and executing requested tools until the model
        stops asking for tools, or a special tool (jump_page / get_chat)
        short-circuits the loop.

        Returns:
            A dict with at least ``tool_name`` and ``result`` keys; ``result``
            is a completion object, a stream, or plain text depending on path.
        """
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            tools=self.all_tools,
        )
        response_result = {
            "tool_name": 'msg',
            "result": response,
        }
        if response.choices[0].finish_reason == "tool_calls":
            while True:
                messages, tools_name, tools_response = await self.create_function_response_messages(messages, response)
                if tools_name == 'server_jump_page':
                    # Page jump: return the raw tool text, no final model turn.
                    response_result = {
                        "tool_name": tools_name,
                        "result": tools_response[0].text,
                    }
                    break
                elif tools_name == 'server_get_chat':
                    if self.useSteam:
                        # Stream the final answer and pass the chat URL along.
                        response = self.client.chat.completions.create(
                            model=self.model,
                            messages=messages,
                            tools=self.all_tools,
                            stream=True
                        )
                        response_result = {
                            "tool_name": tools_name,
                            "chat_url": tools_response[0].text,
                            "result": response,
                        }
                        break

                response = self.client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                    tools=self.all_tools,
                )
                if response.choices[0].finish_reason != "tool_calls":
                    if self.useSteam:
                        # Re-issue the final turn as a stream for the caller.
                        response = self.client.chat.completions.create(
                            model=self.model,
                            messages=messages,
                            tools=self.all_tools,
                            stream=True
                        )
                    response_result = {
                        "tool_name": tools_name,
                        "result": response,
                    }
                    break
        else:
            if self.useSteam:
                response = self.client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                    tools=self.all_tools,
                    stream=True
                )
                response_result = {
                    "tool_name": 'stream',
                    "result": response,
                }
        return response_result

    async def create_function_response_messages(self, messages, response):
        """Execute every tool call in ``response`` and append the results.

        Returns:
            (messages, last tool name, last tool response) — the loop in
            chat_base only inspects the LAST executed tool.
        """
        function_call_messages = response.choices[0].message.tool_calls
        messages.append(response.choices[0].message.model_dump())
        tools_name = ''
        tools_response = ''
        for function_call_message in function_call_messages:
            tool_name = function_call_message.function.name
            tool_args = json.loads(function_call_message.function.arguments)
            # Run the external MCP tool.
            function_response = await self._call_mcp_tool(tool_name, tool_args)
            tools_name = tool_name
            tools_response = function_response
            print(f"🚀Calling tool {tool_name} with args {tool_args}", function_response)
            # Feed the tool output back into the conversation.
            messages.append(
                {
                    "role": "tool",
                    "content": function_response,
                    "tool_call_id": function_call_message.id,
                }
            )
        return messages, tools_name, tools_response

    def audio_to_text(self, file_path):
        """Transcribe an audio file via DashScope's qwen-audio-asr model.

        NOTE(review): reuses self.openai_api_key as the DashScope key —
        confirm the two services really share one credential.
        """
        messages = [
            {
                "role": "user",
                "content": [{"audio": file_path}],
            }
        ]
        response = MultiModalConversation.call(model="qwen-audio-asr", messages=messages, api_key=self.openai_api_key)
        return response.output.choices[0].message.content[0]['text']

async def main():
    """Entry point: connect to the configured MCP servers, run the
    interactive chat loop, and always release resources on exit."""
    server_scripts = {
        "server": "server.py"
    }
    mcp_client = MCPClient()
    try:
        await mcp_client.connect_to_server(server_scripts)
        await mcp_client.chat_loop()
    finally:
        await mcp_client.cleanup()

if __name__ == "__main__":
    # Run the async entry point (removed an unused `import sys`).
    asyncio.run(main())