from mcp.client.stdio import StdioServerParameters, stdio_client
from mcp import ClientSession
# pip install openai
from openai import OpenAI
import asyncio
import json
# from dotenv import load_dotenv
import os
from contextlib import AsyncExitStack

# load_dotenv()

# stdio transport: the client spawns a subprocess that runs the server-side script,
# talking to it over the child's stdin/stdout.

class MCPClient:
    """Minimal MCP client over the stdio transport, driven by DeepSeek.

    Flow: launch the MCP server script as a subprocess, list its tools,
    offer them to DeepSeek via OpenAI-style Function Calling, execute the
    tool the model picks through the MCP session, then feed the tool
    result back to the model for the final answer.
    """

    def __init__(self, server_path: str):
        # OpenAI-compatible client pointed at the DeepSeek endpoint.
        # Reads the DEEPSEEK_API_KEY environment variable.
        self.deepseek = OpenAI(
            api_key=os.getenv("DEEPSEEK_API_KEY"),
            base_url="https://api.deepseek.com"
        )
        # Path of the server script; executed as "python <server_path>".
        self.server_path = server_path
        # Keeps the stdio transport / session contexts open across run();
        # released in aclose().
        self.exit_stack = AsyncExitStack()

    @staticmethod
    def _format_tools(list_tools_response) -> list:
        """Convert MCP tool descriptors into OpenAI Function Calling specs.

        Bug fix: the OpenAI-compatible API requires the JSON schema under
        the key 'parameters'. The previous key, 'input_schema' (Anthropic's
        naming), meant the model never saw each tool's argument schema.
        """
        return [{
            'type': 'function',
            'function': {
                'name': tool.name,
                'description': tool.description,
                'parameters': tool.inputSchema,
            },
        } for tool in list_tools_response.tools]

    async def _chat_with_tools(self, session, query):
        """Run the shared tool-calling pipeline on an open MCP session.

        Used by both run() and _run(); prints intermediate results and the
        final model answer (returns None).
        """
        # Handshake with the server before issuing any requests.
        await session.initialize()

        # Discover which tools the server exposes.
        response = await session.list_tools()
        print('session.list_tools():', response)

        tools = self._format_tools(response)

        # Message roles: user (to the model), assistant (from the model),
        # system (prompt), tool (a function result fed back to the model).
        messages = [{
            'role': 'user',
            'content': query,
        }]

        # Round 1: let the model decide whether / which tool to call.
        # NOTE(review): this SDK call blocks inside an async method; fine
        # for a demo, but it stalls the event loop while waiting.
        deepseek_response = self.deepseek.chat.completions.create(
            model="deepseek-chat",
            messages=messages,
            tools=tools,
        )
        print("deepseek_response:", deepseek_response)

        choice = deepseek_response.choices[0]
        print("choice:", choice)

        # finish_reason == 'tool_calls' means the model picked a tool.
        if choice.finish_reason != 'tool_calls':
            print('工具执行失败，未选择工具执行！')
            return

        # Keep the assistant's tool-call message in the history so the
        # follow-up request has full context.
        messages.append(choice.message.model_dump())

        # Only the first requested tool call is executed here.
        tool_call = choice.message.tool_calls[0]
        function_name = tool_call.function.name
        # The SDK delivers the arguments as a JSON string.
        function_args = json.loads(tool_call.function.arguments)

        # Execute the tool on the MCP server.
        result = await session.call_tool(name=function_name, arguments=function_args)
        print("result:", result)

        # Feed the tool output back under role 'tool', linked by call id.
        messages.append({
            'role': 'tool',
            'content': result.content[0].text,
            "tool_call_id": tool_call.id,
        })

        # Round 2: the model composes the final answer from the tool data.
        final_response = self.deepseek.chat.completions.create(
            model="deepseek-chat",
            messages=messages,
        )
        print(f"AI回答:{final_response.choices[0].message.content}")

    async def _run(self, query):
        """Answer one query using nested `async with` for transport/session."""
        server_params = StdioServerParameters(
            command="python",
            args=[self.server_path],
        )
        # Open the read/write streams, then the client session, on top of them.
        async with stdio_client(server_params) as (read_stream, write_stream):
            async with ClientSession(read_stream=read_stream, write_stream=write_stream) as session:
                await self._chat_with_tools(session, query)

    async def run(self, query):
        """Answer one query, managing contexts via the AsyncExitStack.

        Contexts stay open until aclose() is called, so the client could be
        reused for further queries before shutdown.
        """
        server_params = StdioServerParameters(
            command="python",
            args=[self.server_path],
        )
        read_stream, write_stream = await self.exit_stack.enter_async_context(
            stdio_client(server=server_params)
        )
        session = await self.exit_stack.enter_async_context(
            ClientSession(read_stream=read_stream, write_stream=write_stream)
        )
        await self._chat_with_tools(session, query)

    async def aclose(self):
        """Release the transport and session contexts held by run()."""
        await self.exit_stack.aclose()

async def main():
    """Demo entry point: ask the stdio MCP server one arithmetic question."""
    mcp_client = MCPClient(server_path="server_stdio.py")
    try:
        await mcp_client.run("2+3等于多少？")
    finally:
        # Always tear down the stdio transport and session, even on error.
        await mcp_client.aclose()

if __name__ == '__main__':
    # Run the async demo; removed the commented-out code that duplicated main().
    asyncio.run(main())