import asyncio
import os
import shutil
import subprocess
import time
from typing import Any

from agents import Agent, Runner, RunConfig, set_tracing_disabled
from agents.mcp import MCPServer, MCPServerSse
from agents.model_settings import ModelSettings
from dotenv import load_dotenv

from model_providers.deepseek import DeepSeekModelProvider

# 1 - Load environment configuration (supports a .env file via python-dotenv).
load_dotenv()
BASE_URL = os.getenv("BASE_URL") or ""
API_KEY = os.getenv("API_KEY") or ""
MODEL_NAME = os.getenv("MODEL_NAME") or ""
# Fail fast, naming the variables this script actually reads.
# (The original message referenced EXAMPLE_-prefixed names that are never read.)
if not BASE_URL or not API_KEY or not MODEL_NAME:
    raise ValueError(
        "请通过环境变量或代码设置BASE_URL、API_KEY、MODEL_NAME。"
    )

# 2 - Disable LLM call tracing for this demo.
set_tracing_disabled(disabled=True)

"""
本例使用自定义提供程序调用Runner.run()的部分，并直接调用OpenAI进行其他操作。
步骤：
1. 【实例化LLM】ModelProvider对象-并构建RunConfig
2. 【实例化Agent】创建一个Agent。
3. 在调用Runner.run()结合【LLM】+【Agent】进行问答
- 1）直接和大模型对话
- 2）调用MCPServer_SSE模式-[MCP工具1-数字加和]
- 3）调用MCPServer_SSE模式-[MCP工具2-随机选定的字符]
"""


async def run_mcp_server(mcp_server: MCPServer):
    """Run three sample queries through an Agent wired to *mcp_server*.

    Note: ``ModelSettings(tool_choice="required")`` forces a tool call on
    every turn, so even the "plain chat" joke request below is routed
    through an MCP tool.

    Args:
        mcp_server: An already-connected MCP server exposing the demo tools.
    """
    # 1 - Build the RunConfig with a custom DeepSeek model provider.
    run_config = RunConfig(model_provider=DeepSeekModelProvider(BASE_URL, API_KEY, MODEL_NAME))

    # 2 - Create the Agent that exposes the MCP server's tools.
    agent = Agent(
        name="Assistant",
        instructions="使用工具回答大模型的问题",
        mcp_servers=[mcp_server],
        model_settings=ModelSettings(tool_choice="required"),
    )

    async def _ask(message: str) -> None:
        # Send one message through the Runner and print the final answer.
        print(f"\n\n【大模型请求案例】-> {message}")
        result = await Runner.run(starting_agent=agent, input=message, run_config=run_config)
        print(result.final_output)

    # 3.1 - Plain chat request (still tool-routed — see docstring note).
    await _ask("给我讲一个笑话吧！")

    # 3.2 - Weather tool query. (The original comment mislabeled this as the
    # number-sum tool; the message actually asks about Tokyo's weather.)
    await _ask("What's the weather in Tokyo?")

    # 3.3 - Secret-word tool query.
    await _ask("What's the secret word?")


async def init_mcp_server() -> MCPServerSse:
    """Create an SSE MCP server handle, connect it, and print its tool list.

    Returns:
        The connected ``MCPServerSse`` instance, ready for use by an Agent.
    """
    # Build the connection object first — nothing is connected yet.
    # cache_tools_list=True keeps the tool list cached so repeated lookups
    # skip a round trip to the server.
    server = MCPServerSse(
        name="SSE Python Server",
        params={"url": "http://localhost:8000/sse"},
        cache_tools_list=True,
    )

    # Establish the connection explicitly (instead of using a context manager).
    print("正在连接到MCP服务器...")
    await server.connect()
    print("MCP服务器连接成功！")

    # Fetch and display everything the server offers.
    available = await server.list_tools()
    print("\n可用工具列表: ")
    for t in available:
        print(f" - {t.name}: {t.description}")

    return server


if __name__ == "__main__":
    # 1-让我们确保用户已安装 uv
    if not shutil.which("uv"):
        raise RuntimeError(
            "uv is not installed. Please install it: https://docs.astral.sh/uv/getting-started/installation/"
        )

    # 2-我们将以子进程运行SSE服务器。通常这会是远程服务器，但在这个演示中，我们将在本地运行，地址为http://localhost:8000/sse
    process: subprocess.Popen[Any] | None = None
    try:
        # 2.1-运行 `uv run server.py` 以启动 SSE 服务器
        print("Starting SSE server at http://localhost:8000/sse ...")
        this_dir = os.path.dirname(os.path.abspath(__file__))
        mcp_server_file = os.path.join(this_dir, "mcp02_sse.py")
        process = subprocess.Popen(["uv", "run", mcp_server_file])

        # 2.2-给它3秒钟启动
        time.sleep(3)
        print("SSE server started. Running example...\n\n")
    except Exception as e:
        print(f"Error starting SSE server: {e}")
        exit(1)

    # 3-运行示例
    try:
        # 1-获取McpServer
        server = asyncio.run(init_mcp_server())
        # 2-运行McpServer
        asyncio.run(run_mcp_server(server))
    finally:
        if process:
            process.terminate()
