"""
LangGraph Agent With Multi MCP Servers
"""

import asyncio
import json
import logging
import os
from typing import Any, Dict, List

from dotenv import load_dotenv
from langgraph.prebuilt import create_react_agent
from langchain_deepseek import ChatDeepSeek
from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.checkpoint.memory import InMemorySaver

# In-memory checkpointer: create_react_agent uses it to persist per-thread
# conversation history across turns (lost when the process exits).
checkpointer = InMemorySaver()

# System prompt for the agent, read once at import time.
# NOTE(review): this raises FileNotFoundError if agent_prompts.txt is absent
# from the working directory — confirm deployment always ships it.
with open("agent_prompts.txt", "r", encoding="utf-8") as f:
    prompt = f.read()

# Invocation config: the fixed thread_id is the key under which the
# checkpointer stores this conversation's message history.
config = {
    "configurable": {
        "thread_id": "1"
    }
}

class Configuration:
    """Runtime configuration loaded from ``.env`` and ``servers_config.json``."""

    def __init__(self) -> None:
        """Populate LLM settings from environment variables (after ``.env``).

        Raises:
            ValueError: if ``LLM_API_KEY`` is missing or empty.
        """
        load_dotenv()
        # Fail fast on a missing key so later model construction can't
        # silently run unauthenticated.
        key = os.getenv("LLM_API_KEY")
        if not key:
            raise ValueError("❌ 未找到 LLM_API_KEY，请在 .env 中配置")
        self.api_key: str = key
        self.base_url: str | None = os.getenv("BASE_URL")
        self.model: str = os.getenv("MODEL") or "deepseek-chat"

    @staticmethod
    def load_servers(file_path: str = "servers_config.json") -> Dict[str, Any]:
        """Return the ``"mcpServers"`` mapping from *file_path* (``{}`` if absent)."""
        with open(file_path, "r", encoding="utf-8") as fh:
            parsed = json.load(fh)
        return parsed.get("mcpServers", {})


async def run_chat_loop() -> None:
    """Run the interactive CLI chat loop backed by MCP tools.

    Loads configuration, connects to every configured MCP server, builds a
    LangGraph ReAct agent with conversation memory, then reads user turns
    until the user types ``quit``.

    Raises:
        ValueError: propagated from ``Configuration`` when LLM_API_KEY is unset.
    """
    cfg = Configuration()
    if cfg.base_url:
        # ChatDeepSeek picks up a custom API endpoint from this env var.
        os.environ["DEEPSEEK_API_BASE"] = cfg.base_url
    servers_cfg = Configuration.load_servers()

    # Connect to all configured MCP servers and gather their tool objects.
    mcp_client = MultiServerMCPClient(servers_cfg)
    tools = await mcp_client.get_tools()
    # Lazy %-formatting: args are only rendered if INFO is enabled.
    logging.info("✅ 已加载 %d 个 MCP 工具： %s", len(tools), [t.name for t in tools])

    # Bug fix: the model name and API key from Configuration were previously
    # ignored ("deepseek-chat" was hard-coded), making the MODEL and
    # LLM_API_KEY env vars dead settings.
    model = ChatDeepSeek(model=cfg.model, api_key=cfg.api_key)

    # Build the LangGraph ReAct agent; the checkpointer gives it per-thread
    # memory keyed by config["configurable"]["thread_id"].
    agent = create_react_agent(
        model=model,
        tools=tools,
        prompt=prompt,
        checkpointer=checkpointer,
    )

    # CLI multi-turn conversation loop.
    print("\n🤖 MCP Agent 已启动，输入 'quit' 退出")
    while True:
        # input() is blocking; run it in a worker thread so the event loop
        # (and any background MCP connections) stays responsive.
        user_input = (await asyncio.to_thread(input, "\n你: ")).strip()
        if user_input.lower() == "quit":
            break
        try:
            # Non-streaming invocation; the reply is the last message.
            result = await agent.ainvoke(
                {"messages": [{"role": "user", "content": user_input}]},
                config,
            )
            print(f"\nAI: {result['messages'][-1].content}")
        except Exception as exc:
            # Keep the loop alive on per-turn failures (tool/network errors);
            # surface the error to the user instead of crashing.
            print(f"\n⚠️  出错: {exc}")

    print("🧹 资源已清理，Bye!")


if __name__ == "__main__":
    # Configure the root logger before launching so the MCP tool-loading
    # info line emitted inside run_chat_loop is visible.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
    )
    asyncio.run(run_chat_loop())