import os
from typing import Any

from mcp import ClientSession
from mcp.client.sse import sse_client
import asyncio

from mcp.shared.context import RequestContext
from mcp.shared.session import RequestResponder
from mcp.types import LoggingMessageNotificationParams, RequestParams, ServerRequest, ClientRequest, ServerNotification, \
    CreateMessageRequestParams, CreateMessageResult, TextContent
from openai import OpenAI


# Logging callback: receives log notifications pushed by the MCP server.
async def logging_handler(params: LoggingMessageNotificationParams):
    """Print a server log notification, framed by banner lines."""
    banner = "=" * 15
    print(banner + "日志输出开始" + banner)
    print(params)
    print(banner + "日志输出结束" + banner)


# Message-handling callback: while the server runs a long operation it can push
# progress/notification messages to the client in real time; they land here.
async def message_handler(message: RequestResponder[ServerRequest,ClientRequest] | ServerNotification | Exception):
    """Print an incoming server message (request responder, notification, or error),
    framed by begin/end banner lines.

    NOTE(review): the SDK's message_handler is usually annotated with
    ``RequestResponder[ServerRequest, ClientResult]`` — confirm ``ClientRequest``
    here is intentional; it only affects the annotation, not runtime behavior.
    """
    print("=" * 15 + "进度汇报开始" + "=" * 15)
    print(message)
    # Bug fix: the closing banner previously repeated "进度汇报开始" (begin)
    # instead of "进度汇报结束" (end), a copy-paste error from the line above.
    print("=" * 15 + "进度汇报结束" + "=" * 15)

# Sampling callback: invoked when the MCP server asks the client to run a model
# completion ("sampling") on its behalf.
async def handle_sampling_message(
    context: RequestContext["ClientSession", Any],
    params: CreateMessageRequestParams
) -> CreateMessageResult:
    """Fulfil a server-side sampling request by calling the DeepSeek chat API.

    Args:
        context: Request context supplied by the MCP session.
        params: The sampling request; its ``messages`` are forwarded to the model.

    Returns:
        A ``CreateMessageResult`` wrapping the model's text reply.
    """
    # Single source of truth for the model id (was previously hard-coded in
    # two separate places, which could silently drift apart).
    model_name = "deepseek-chat"

    # Reads DEEPSEEK_API_KEY from the environment; unset yields api_key=None
    # and the API call will fail with an auth error.
    deepseek = OpenAI(
        api_key=os.getenv("DEEPSEEK_API_KEY"),
        base_url="https://api.deepseek.com",
    )

    print(f"context: {context}")
    print(f"params: {params}")
    # NOTE(review): assumes every incoming message content is text-like (has a
    # ``.text`` attribute); image/audio content would raise AttributeError here —
    # confirm the server only sends text content.
    messages = [{"role": message.role, "content": message.content.text} for message in params.messages]
    response = deepseek.chat.completions.create(
        messages=messages,
        model=model_name,
    )
    message = response.choices[0].message
    return CreateMessageResult(
        role="assistant",  # the reply always comes from the model
        content=TextContent(
            type="text",
            text=message.content,
        ),
        model=model_name,
        stopReason="stop",
    )

async def main():
    """Connect to the local MCP SSE server, register callbacks, and invoke its first tool."""
    async with sse_client("http://127.0.0.1:8000/sse") as (read_stream, write_stream):
        async with ClientSession(
                read_stream,
                write_stream,
                # 1. Logging callback: server log output lands in logging_handler.
                logging_callback=logging_handler,
                # 2. Progress/message reporting from long-running server operations.
                message_handler=message_handler,
                # 3. Sampling (model-invocation) requests from the server.
                sampling_callback=handle_sampling_message
        ) as session:
            # Perform the MCP initialization handshake before any other call.
            await session.initialize()

            # Fetch every tool the server exposes.
            tools = (await session.list_tools()).tools
            # Robustness fix: previously tools[0] raised a bare IndexError when
            # the server exposed no tools; fail with a descriptive error instead.
            if not tools:
                raise RuntimeError("MCP server exposes no tools; nothing to call")
            tool = tools[0]
            response = await session.call_tool(name=tool.name, arguments={"files": ['a.txt', 'b.txt']})
            print(response)


if __name__ == '__main__':
    # Entry point: run the async client to completion on a fresh event loop.
    asyncio.run(main())
