import asyncio
from openai import AsyncOpenAI
from fastmcp import Client
import json
from datetime import datetime

async def query_mcp_tool(tool_name: str, params: dict,
                         server_url: str = "http://localhost:4200/demo"):
    """
    Unified entry point for invoking a single MCP tool.

    :param tool_name: name of the MCP tool to invoke
    :param params: arguments passed to the tool (already-parsed dict)
    :param server_url: MCP server endpoint; defaults to the local demo
        server so existing callers are unaffected
    :return: the tool's execution result as returned by fastmcp
    """
    # A fresh client per call keeps lifecycle handling simple; the async
    # context manager guarantees the connection is closed even on error.
    async with Client(server_url) as client:
        return await client.call_tool(tool_name, params)


async def chat_with_tools(user_query):
    """
    Tool-calling chat, implemented as an async generator.

    Flow:
      1. Connect to the local vLLM service (OpenAI-compatible API).
      2. Fetch the MCP server's tool list and convert it to the
         OpenAI function-calling schema.
      3. Ask the model once; if it requests tool calls, execute each
         via MCP and yield a summary list of the calls made.
      4. Stream the final answer (with tool results in context),
         yielding content chunks as they arrive.

    :param user_query: the user's question (plain string)

    Yields:
        When tools are used: first a list of dicts
        ``{"action_name": <tool name>, "params": <raw JSON arg string>}``,
        then ``str`` chunks of the assistant's streamed reply.
        When no tools are needed: a single ``str`` with the full reply.
    """
    llm_client = AsyncOpenAI(
        base_url="http://10.1.161.53:8005/v1",
        api_key="EMPTY"
    )

    # Dynamically discover the tools exposed by the MCP service.
    async with Client("http://localhost:4200/demo") as mcp_client:
        # 1. Liveness check against the MCP server.
        await mcp_client.ping()
        print("服务心跳检测成功")

        # 2. List every tool registered on the server.
        tools = await mcp_client.list_tools()
        tool_names = [tool.name for tool in tools]
        print(f"可用工具列表: {', '.join(tool_names)}")

        # Convert MCP tool metadata into the OpenAI function-calling
        # schema. .get(...) defaults tolerate tools that declare no
        # parameters (the original indexing raised KeyError there).
        tool_schemas = [{
            "type": "function",
            "function": {
                "name": tool.name,
                "description": tool.description,
                "parameters": {
                    "type": tool.inputSchema.get("type", "object"),
                    "properties": tool.inputSchema.get("properties", {}),
                    "required": tool.inputSchema.get("required", [])
                }
            }
        } for tool in tools]

    # First model call: let the model decide whether tools are needed.
    response = await llm_client.chat.completions.create(
        model="qwen3-1.7b",
        messages=[{"role": "user", "content": user_query}],
        tools=tool_schemas,
        tool_choice="auto"
    )

    message = response.choices[0].message

    if message.tool_calls:
        # Execute every tool call the model requested.
        tool_results = []
        calls_data = []
        for call in message.tool_calls:
            mcp_name = call.function.name
            mcp_arguments = call.function.arguments

            # SECURITY: the arguments arrive as a JSON string produced
            # by the model. Parse with json.loads — never eval, which
            # would execute arbitrary model-generated code.
            result = await query_mcp_tool(
                mcp_name,
                json.loads(mcp_arguments)
            )

            # Keep call.id so each tool result can be paired with the
            # assistant's corresponding tool call in the follow-up turn.
            tool_results.append({
                "tool_call_id": call.id,
                "name": mcp_name,
                "content": str(result)
            })

            calls_data.append({
                "action_name": mcp_name,
                "params": mcp_arguments
            })

        # First yield: the summary of tool calls that were executed.
        yield calls_data

        # Conversation history for the final answer. Each "tool"
        # message carries tool_call_id, which the OpenAI chat API
        # requires to match results to the assistant's tool_calls.
        messages = [
            {"role": "user", "content": user_query},
            message,
            *[{
                "role": "tool",
                "tool_call_id": r["tool_call_id"],
                "name": r["name"],
                "content": r["content"]
            } for r in tool_results]
        ]

        # Stream the final reply that incorporates the tool results.
        stream = await llm_client.chat.completions.create(
            model="qwen3-1.7b",
            messages=messages,
            stream=True
        )

        print("\n最终回复:")
        async for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta is not None:
                yield delta  # stream content piece by piece

    else:
        # No tool call needed: yield the direct reply in one piece.
        yield message.content


async def main():
    """Demo driver: run a single query and print the streamed reply."""
    # Alternative sample queries kept for quick manual testing:
    # user_query = "请查询徐家汇街道2025年5月1日0：00到5月31日23：59的垃圾清运统计报告"
    # user_query = "查询指2025年5月25日到5月31日全区清运干湿垃圾小区相关情况"
    user_query = "请查询徐家汇街道2025年5月30日到5月31日的垃圾清运统计报告"

    # Consume the async generator, echoing each piece as it arrives.
    async for piece in chat_with_tools(user_query):
        print(piece, end="", flush=True)
    print()  # terminating newline after the streamed output


if __name__ == "__main__":
    asyncio.run(main())