#1.读取本客户端支持的所有Client
#2.连接所有的MCPServer，获取所有MCPServer的Tool、Prompt、Resource
# 3.调用大模型，让大模型自由选择哪些函数（Tool/Prompt/Resource）
#4.其他的：读取用户的输入、MCP与大模型交互等
import asyncio
import json
import os
from contextlib import AsyncExitStack
from typing import Any

from dotenv import load_dotenv
from mcp import McpError
from openai import OpenAI

from server import MCPServerManager

load_dotenv()  # 会寻找当前项目的.env环境变量文件


class MCPMain:
    """MCP client orchestrator.

    Loads the MCP server configuration from ``mcp.json``, connects to every
    configured server, exposes their functions to an OpenAI-compatible LLM
    as tool definitions, and runs the interactive chat loop.
    """

    def __init__(self):
        # Lazily created in the `llm` property so constructing MCPMain does
        # not require LLM credentials to be present yet.
        self._llm: OpenAI | None = None
        # Owns the lifetime of the server manager (closed in aclose()).
        self._exit_stack = AsyncExitStack()
        self.server_manager: MCPServerManager | None = None
        # Tool definitions in the OpenAI "tools" format, filled by initialize().
        self._functions: list[dict[str, Any]] = []

    async def initialize(self):
        """Read ``mcp.json``, start all configured MCP servers, and collect
        their functions as OpenAI tool definitions.
        """
        with open("mcp.json", mode="r", encoding="utf-8") as fp:
            config = json.load(fp)
        mcp_dicts = config["mcpServers"]
        self.server_manager = await self._exit_stack.enter_async_context(
            MCPServerManager(mcp_dicts)
        )
        for name, function in self.server_manager.all_functions.items():
            self._functions.append({
                "type": "function",
                "function": {
                    "name": name,
                    "description": function.description,
                    # BUG FIX: the OpenAI tools schema calls this field
                    # "parameters"; "input_schema" is the MCP/Anthropic name
                    # and would make the API reject or ignore the tool.
                    "parameters": function.input_schema,
                },
            })

    @property
    def llm(self) -> OpenAI:
        """Lazily constructed OpenAI client (credentials read from env)."""
        if self._llm is None:
            self._llm = OpenAI(
                api_key=os.getenv("LLM_API_KEY"),
                base_url=os.getenv("LLM_BASE_URL"),
            )
        return self._llm

    async def run(self):
        """Interactive chat loop.

        Reads user input, sends it to the LLM, executes any tool calls the
        model requests via the MCP servers, and repeats until the model
        produces a final text answer. Type ``exit`` to quit.
        """
        assert self.server_manager is not None
        messages = []
        while True:
            # NOTE(review): input() blocks the event loop; acceptable here
            # because nothing else runs concurrently in this CLI client.
            query = input("请输入:")
            if query == "exit":
                break
            messages.append({"role": "user", "content": query})
            # A single user query may require several LLM round-trips (tool
            # calls), so keep calling the model until it finishes with text.
            while True:
                response = self.llm.chat.completions.create(
                    messages=messages,
                    model=os.getenv("LLM_MODEL"),
                    tools=self._functions,
                )
                choice = response.choices[0]
                if choice.finish_reason == "stop":
                    message = choice.message
                    print(f"AI回复: {message.content}")
                    break
                elif choice.finish_reason == "tool_calls":
                    message = choice.message
                    # Record the assistant message (with its tool_calls) so
                    # the follow-up request carries the full context.
                    messages.append(message.model_dump())
                    # Execute every tool the model selected.
                    for tool_call in message.tool_calls:
                        tool_call_id = tool_call.id
                        function = tool_call.function
                        function_arguments = json.loads(function.arguments)
                        function_name = function.name

                        try:
                            result = await self.server_manager.call_function(
                                name=function_name,
                                arguments=function_arguments
                            )
                        except McpError as e:
                            # Feed the failure back to the model instead of
                            # crashing; it can retry or answer differently.
                            messages.append({
                                "role": "tool",
                                "content": f"执行异常{str(e)}",
                                "tool_call_id": tool_call_id
                            })
                        else:
                            messages.append({
                                "role": "tool",
                                "content": result,
                                "tool_call_id": tool_call_id
                            })
                        print(f"AI执行了{function_name}")
                else:
                    # BUG FIX: any other finish_reason ("length",
                    # "content_filter", ...) previously re-sent the same
                    # request forever. Surface it and end this turn instead.
                    print(f"AI回复异常结束: {choice.finish_reason}")
                    break

    async def aclose(self):
        """Shut down all MCP servers owned by the exit stack."""
        await self._exit_stack.aclose()

    async def __aenter__(self):
        await self.initialize()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.aclose()

async def main():
    """Entry coroutine: run the MCP client until the user exits.

    A CancelledError raised during shutdown is reported instead of
    propagating out of the coroutine.
    """
    try:
        app = MCPMain()
        async with app:
            await app.run()
    except asyncio.CancelledError:
        print("捕获到了CancelledError")


if __name__ == '__main__':
    # asyncio.run(main()) closes the loop (and its transports) on exit,
    # which can raise "I/O operation on closed pipe" errors from subprocess
    # pipes on some platforms; running on a loop we never close avoids that.
    # BUG FIX: asyncio.get_event_loop() with no running loop is deprecated
    # since Python 3.10 — create the loop explicitly instead.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(main())
    # To test your own python script, set "command" to python in mcp.json and
    # put the script file in "args" (effectively running `python server.py`).