"""
pip install langchain-mcp-adapters -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
"""
import asyncio
from mcp import StdioServerParameters, stdio_client, ClientSession
from langchain_openai import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
from langchain_mcp_adapters.tools import load_mcp_tools

from config.load_key import load_key

# Chat model used by the agent; credentials and endpoint are read from the
# project's key store (SiliconFlow-hosted Qwen model behind an
# OpenAI-compatible API).
llm = ChatOpenAI(
    model="Qwen/Qwen2.5-7B-Instruct",
    api_key=load_key("siliconflow_api_key"),
    base_url=load_key("siliconflow_base_url"),
)

# Launch parameters for the MCP server subprocess: it is started with a
# specific conda interpreter and communicates over stdio.
# NOTE(review): both paths are machine-specific absolute paths — consider
# making them configurable before sharing/deploying this script.
server_params = StdioServerParameters(
    command="/Users/zixiu/.conda/envs/commonenv/bin/python",
    args=["/Users/zixiu/Desktop/develop/project/学习时项目/python/LangChainDemo/8_1MCP_Server.py"],
    env=None,
)


async def run():
    """Connect to the MCP server over stdio and run an interactive agent loop.

    Spawns the server subprocess, performs the MCP handshake, wraps the
    server's tools as LangChain tools, and answers user questions with a
    structured-chat ReAct agent until the user types "exit"/"quit"
    (case-insensitive) or stdin is closed.
    """
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            # MCP handshake must complete before tools can be listed/called.
            await session.initialize()
            # Adapt the server's MCP tools into LangChain Tool objects.
            tools = await load_mcp_tools(session)
            agent = initialize_agent(
                tools=tools,
                llm=llm,
                agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
                verbose=True
            )

            print("AI 助手已启动（输入 exit 退出）")
            while True:
                try:
                    # input() blocks the calling thread; run it in a worker
                    # thread so the event loop — and with it the MCP stdio
                    # session — stays responsive while waiting for the user.
                    q = await asyncio.to_thread(input, "你：")
                except EOFError:
                    # stdin closed (e.g. piped input exhausted): exit cleanly
                    # instead of crashing with a traceback.
                    break
                if q.strip().lower() in ("exit", "quit"):
                    break
                a = await agent.ainvoke({"input": q})
                print(f"助手：{a['output']}")


if __name__ == "__main__":
    asyncio.run(run())
