from mcp.client.sse import sse_client
from mcp import ClientSession
from openai import OpenAI
import asyncio
import json
import os
from contextlib import AsyncExitStack

class MCPClient:
    """MCP client that exposes server-side prompts to DeepSeek as tools.

    Connects to an MCP server over SSE, lists its prompts, advertises each
    prompt as an OpenAI-style function tool, and lets the model decide which
    prompt to invoke for a given user query.
    """

    def __init__(self, server_url: str):
        # OpenAI-compatible client pointed at the DeepSeek endpoint;
        # the API key is read from the environment.
        self.deepseek = OpenAI(
            api_key=os.getenv("DEEPSEEK_API_KEY"),
            base_url="https://api.deepseek.com"
        )
        self.server_url = server_url
        # Owns the SSE connection and session lifetimes; closed in aclose().
        self.exit_stack = AsyncExitStack()
        # name -> {name, description, arguments} for every server prompt.
        self.prompts: dict = {}

    @staticmethod
    def _prompt_to_tool(prompt) -> dict:
        """Convert one MCP prompt definition into an OpenAI function tool.

        The JSON schema is built from the prompt's declared arguments, so the
        model is explicitly told which parameters to extract from the user
        input (instead of a hard-coded single "policy" parameter).
        """
        properties = {}
        required = []
        for argument in prompt.arguments or []:
            # MCP prompt arguments carry no type information; "string" is
            # the safe default for prompt-template values.
            properties[argument.name] = {
                "type": "string",
                "description": argument.description or "",
            }
            if argument.required:
                required.append(argument.name)
        return {
            "type": "function",
            "function": {
                "name": prompt.name,
                "description": prompt.description,
                # BUGFIX: the OpenAI/DeepSeek tools format requires this key
                # to be "parameters", not "input_schema"; with the wrong key
                # the model never sees the argument schema.
                "parameters": {
                    "type": "object",
                    "properties": properties,
                    "required": required,
                },
            },
        }

    async def run(self, query: str):
        """Answer *query* by letting DeepSeek pick and execute a server prompt.

        Opens the SSE session, advertises every server prompt as a tool, and
        if the model requests a tool call, fetches the rendered prompt from
        the MCP server and feeds it back to the model for the final answer.
        """
        read_stream, write_stream = await self.exit_stack.enter_async_context(
            sse_client(self.server_url)
        )
        session: ClientSession = await self.exit_stack.enter_async_context(
            ClientSession(read_stream, write_stream)
        )
        # Complete the MCP handshake before issuing any requests.
        await session.initialize()

        # Fetch every prompt the server exposes and turn each one into a
        # function-calling tool definition.
        prompts = (await session.list_prompts()).prompts
        functions = []
        for prompt in prompts:
            functions.append(self._prompt_to_tool(prompt))
            # Keep the raw prompt metadata around for later inspection.
            self.prompts[prompt.name] = {
                "name": prompt.name,
                "description": prompt.description,
                "arguments": [argument.model_dump() for argument in prompt.arguments],
            }

        messages = [{
            "role": "user",
            "content": query
        }]
        # First round: let the model decide whether (and how) to call a tool.
        deepseek_response = self.deepseek.chat.completions.create(
            model="deepseek-chat",
            messages=messages,
            tools=functions,
        )
        choice = deepseek_response.choices[0]
        if choice.finish_reason == 'tool_calls':
            # Echo the assistant turn back into the history so the
            # conversation stays coherent for any follow-up calls.
            model_message = choice.message
            messages.append(model_message.model_dump())
            tool_call = model_message.tool_calls[0]
            function = tool_call.function
            function_name = function.name
            function_arguments = json.loads(function.arguments)

            # Ask the MCP server to render the chosen prompt with the
            # arguments the model extracted from the user input.
            res = await session.get_prompt(name=function_name, arguments=function_arguments)
            prompt_messages = [
                {"role": message.role, "content": message.content.text}
                for message in res.messages
            ]
            # Second round: run the rendered prompt to produce the answer.
            deepseek_response = self.deepseek.chat.completions.create(
                model="deepseek-chat",
                messages=prompt_messages,
            )
            choice = deepseek_response.choices[0]
            if choice.finish_reason == 'stop':
                content = choice.message.content
                print(f"最终响应：{content}")
        else:
            print(f'工具执行失败，未选择工具执行,\n大模型回答:{choice.message.content}')

    async def aclose(self):
        """Release the SSE connection and the MCP session."""
        await self.exit_stack.aclose()

async def main():
    """Entry point: load the policy text and run one summarization query."""
    client = MCPClient(server_url="http://127.0.0.1:8000/sse")
    try:
        # Load the policy document that will be embedded in the user query.
        with open("../data/policy.txt", mode="r", encoding="utf-8") as policy_file:
            policy_text = policy_file.read()
        await client.run(f"总结这个政策：{policy_text}")
    finally:
        # Always tear down the SSE session, even if the query fails.
        await client.aclose()


if __name__ == '__main__':
    # Run the one-shot client when executed as a script.
    asyncio.run(main())