import asyncio
import json
from fastmcp import Client
from openai import AsyncOpenAI

# 1. Initialize the LLM client pointed at Ollama's OpenAI-compatible endpoint.
ollama_client = AsyncOpenAI(
    base_url="http://localhost:11434/v1",  # Ollama's default OpenAI-compatible API
    api_key="ollama"  # Ollama does not validate the key locally; any value works
)

# 2. Initialize the MCP client (SSE transport to a local MCP server).
mcp_client = Client("http://localhost:8090/sse")

async def llm_call_with_mcp(user_query: str) -> None:
    """Answer *user_query* with the LLM, letting it invoke MCP tools.

    Flow:
      1. List the tools exposed by the MCP server and advertise them to the LLM
         as OpenAI function-calling definitions.
      2. First completion: let the model decide whether to call a tool.
      3. Execute every requested tool via MCP, feed each result back using the
         OpenAI tool-message protocol (``role="tool"`` + ``tool_call_id``),
         then stream one final answer grounded in all tool results.

    Side effects: prints progress and the streamed answer to stdout.
    """
    async with mcp_client:
        # Discover which tools the MCP server exposes.
        tools = await mcp_client.list_tools()

        # Convert MCP tool metadata into OpenAI function-calling definitions.
        tool_defs = [
            {
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.inputSchema
                }
            }
            for tool in tools
        ]

        messages = [{"role": "user", "content": user_query}]

        # First LLM call: the model decides whether it needs a tool.
        response = await ollama_client.chat.completions.create(
            model="qwen3:8b",
            messages=messages,
            tools=tool_defs,
            extra_body={  # disable <think> output
                "disable_thoughts": True
            }
        )

        message = response.choices[0].message

        if not message.tool_calls:
            print("⚠️ LLM 没有请求调用任何工具。")
            return

        # Echo the assistant turn (including its tool_calls) back so the model
        # can correlate each tool result with the call that produced it.
        messages.append(message)

        for call in message.tool_calls:
            tool_name = call.function.name
            args = call.function.arguments
            # The API delivers arguments as a JSON string; decode before use.
            if isinstance(args, str):
                args = json.loads(args)

            print(f"👉 LLM 想调用工具: {tool_name}, 参数: {args}")
            # Execute the tool via MCP.
            result = await mcp_client.call_tool(tool_name, args)
            print(f"✅ 工具 {tool_name} 执行结果: {result}")

            # Feed the result back with the proper "tool" role (not a plain
            # assistant message) so the model treats it as a tool response.
            messages.append({
                "role": "tool",
                "tool_call_id": call.id,
                "content": str(result)
            })

        # Second LLM call — exactly one, even when several tools ran —
        # streaming the final answer grounded in all tool results.
        print("💬 LLM 最终回复: ", end="", flush=True)
        stream = await ollama_client.chat.completions.create(
            model="qwen3:8b",
            messages=messages,
            stream=True,
            extra_body={  # disable <think> output
                "disable_thoughts": True
            }
        )

        async for chunk in stream:
            if chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="", flush=True)

        print("\n--- 流式输出结束 ---")

async def main() -> None:
    """Entry coroutine: run one demo stock-lookup query through the MCP flow."""
    query = "请帮我查一下股票代码09988的详细信息"
    await llm_call_with_mcp(query)

if __name__ == "__main__":
    # Run the demo only when executed as a script, not when imported.
    asyncio.run(main())
