import asyncio
import os
from contextlib import AsyncExitStack

from mcp import ClientSession
from mcp.client.sse import sse_client
# pip install openai
from openai import OpenAI


class MCPClient:
    """Bridge an MCP SSE server's resources to DeepSeek function calling.

    Each MCP resource is advertised to the model as a zero-argument tool;
    when the model selects one, the matching resource is read from the MCP
    server and its text content is fed back so the model can compose a
    final answer.
    """

    def __init__(self, server_url: str):
        """Create a client.

        Args:
            server_url: URL of the MCP server's SSE endpoint,
                e.g. "http://127.0.0.1:8000/sse".
        """
        # DeepSeek exposes an OpenAI-compatible API, so the OpenAI SDK is reused.
        self.deepseek = OpenAI(
            api_key=os.getenv("DEEPSEEK_API_KEY"),
            base_url="https://api.deepseek.com"
        )
        self.server_url = server_url
        # Maps resource name -> {"uri", "name", "description", "mime_type"}
        # so a tool call (identified by name) can be resolved to its URI.
        self.resources = {}
        # Holds the SSE transport and MCP session open across run();
        # released in aclose().
        self.exit_stack = AsyncExitStack()

    async def run(self, query):
        """Answer *query*, letting the model pick an MCP resource as a tool.

        Connects to the MCP server, advertises every resource as a tool,
        and prints the model's final answer (or a notice when the model
        selects no tool).
        """
        # Open the SSE transport and the MCP session on the exit stack so
        # they stay alive for the whole exchange and are closed in aclose().
        read_stream, write_stream = await self.exit_stack.enter_async_context(
            sse_client(self.server_url))
        session = await self.exit_stack.enter_async_context(
            ClientSession(read_stream=read_stream, write_stream=write_stream))
        # Perform the MCP initialization handshake.
        await session.initialize()

        # Discover the resources the server offers.
        resources = (await session.list_resources()).resources

        functions = []
        for resource in resources:
            self.resources[resource.name] = {
                "uri": resource.uri,
                "name": resource.name,
                "description": resource.description,
                "mime_type": resource.mimeType,
            }
            # OpenAI-style tool definition. BUG FIX: the OpenAI-compatible
            # function-calling format expects a "parameters" JSON schema;
            # "input_schema" is Anthropic's field name and None is not a
            # valid schema. Resources take no arguments, so advertise an
            # empty object schema.
            functions.append({
                "type": "function",
                "function": {
                    "name": resource.name,
                    "description": resource.description,
                    "parameters": {"type": "object", "properties": {}},
                }
            })

        # Build the conversation and let the model choose a tool on its own.
        messages = [{
            'role': 'user',
            'content': query,
        }]

        deepseek_response = self.deepseek.chat.completions.create(
            model="deepseek-chat",
            messages=messages,
            tools=functions
        )
        model_choice = deepseek_response.choices[0]
        # If the model replied with tool_calls, execute the chosen tool.
        if model_choice.finish_reason == 'tool_calls':
            # Echo the assistant message back into the history so the
            # follow-up request carries the full tool-call context.
            model_message = model_choice.message
            messages.append(model_message.model_dump())

            # Only the first tool call is honored: resources take no input,
            # so a single read is enough to ground the final answer.
            tool_call = model_message.tool_calls[0]
            function_name = tool_call.function.name
            uri = self.resources[function_name]["uri"]
            # Read the selected resource from the MCP server.
            response = await session.read_resource(uri)
            result = response.contents[0].text
            # Feed the tool result back so the model can produce the answer.
            messages.append({
                "role": "tool",
                "content": result,
                "tool_call_id": tool_call.id
            })
            # Second round-trip: generate the final response.
            model_response = self.deepseek.chat.completions.create(
                model="deepseek-chat",
                messages=messages
            )
            print(f"AI回答:{model_response.choices[0].message.content}")
        else:
            print('工具执行失败，未选择工具执行！')

    async def aclose(self):
        """Release the SSE connection and MCP session opened by run()."""
        await self.exit_stack.aclose()

async def main():
    """Connect to the local MCP SSE server and run one sample query."""
    mcp_client = MCPClient(server_url="http://127.0.0.1:8000/sse")
    try:
        await mcp_client.run("帮我查找阳神大帝的信息")
    finally:
        # Always release the SSE connection and session, even on failure.
        await mcp_client.aclose()

# Script entry point: drive the async client demo with asyncio.run.
if __name__ == '__main__':
    asyncio.run(main())