import asyncio
from contextlib import asynccontextmanager

from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.prebuilt import create_react_agent

from mcp_agent.my_llm import llm

# mcp_server_config = {
#     "url": "http://localhost:8000/sse",
#     "transport": "sse"
# }

mcp_server_config = {  # Connection settings for the MCP server
    "url": "http://127.0.0.1:8000/streamable",
    "transport": "streamable_http"  # MCP transport mechanism (Streamable HTTP)
}


@asynccontextmanager
async def make_agent():
    """Yield a LangGraph ReAct agent wired to the MCP server's tools.

    Creates a MultiServerMCPClient, fetches the available tools from the
    configured MCP server, and yields a ReAct agent built around them.
    The client connection is closed on exit when the adapter supports it.
    """
    # Instantiate the client directly (no `async with` on the client itself).
    mcp_client = MultiServerMCPClient({'lx_mcp': mcp_server_config})

    try:
        # Fetch the tool list from the MCP server (recommended async API).
        available_tools = await mcp_client.get_tools()
        print("获取到的工具:", available_tools)

        # Build and hand out the agent.
        yield create_react_agent(llm, tools=available_tools)
    finally:
        # Close the client connection if this adapter version exposes aclose().
        closer = getattr(mcp_client, 'aclose', None)
        if closer is not None:
            await closer()


async def main():
    """Create the agent in an async context and stream its answer.

    Streams token-by-token model output and reports tool invocations
    and their results as they happen.
    """
    async with make_agent() as agent:
        # Stream fine-grained events from the agent run.
        async for event in agent.astream_events(
                {'messages': '帮我查一下最近股票怎么样'},
                version="v1"
        ):
            kind = event["event"]
            # BUG FIX: chat models emit "on_chat_model_stream" under the
            # v1 events API; only plain completion LLMs emit "on_llm_stream".
            # Accept both so tokens are not silently dropped when `llm`
            # is a chat model (the usual case for create_react_agent).
            if kind in ("on_chat_model_stream", "on_llm_stream"):
                # Print each token as it arrives, without a newline.
                token = event["data"]["chunk"].content
                if token:
                    print(token, end="", flush=True)
            elif kind == "on_tool_start":
                # Report which tool is being invoked and with what input.
                print(f"\n\n调用工具: {event['name']} 参数: {event['data'].get('input')}")
            elif kind == "on_tool_end":
                # Report the tool's result.
                print(f"\n工具结果: {event['data'].get('output')}")

        print("\n\n计算完成")  # Final newline after streaming completes


if __name__ == '__main__':
    # Script entry point: run the async main() under a fresh event loop.
    asyncio.run(main())