# 基于langchain的MCP多服务器
import asyncio
import json
import logging
from typing import Any, Dict, List
import os
from dotenv import load_dotenv
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain.chat_models import init_chat_model
from langchain_mcp_adapters.tools import load_mcp_tools
from langchain_mcp_adapters.client import MultiServerMCPClient

# Configure root-logger output: timestamp, level, then the message text.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s"
)


# Environment configuration
class Configuration:
    """Read LLM settings from .env and MCP servers from servers_config JSON."""

    def __init__(self) -> None:
        """Load .env and read API key / base URL / model name.

        Raises:
            ValueError: if DEEPSEEK_API_KEY is missing or empty.
        """
        load_dotenv()
        # BUG FIX: os.getenv() returns None when the variable is absent, so
        # calling .strip() on the result directly raised AttributeError
        # before the intended ValueError below could ever fire.
        self.api_key: str = (os.getenv("DEEPSEEK_API_KEY") or "").strip()
        self.base_url: str | None = os.getenv("BASE_URL")
        # Fall back to the default model when MODEL is unset or empty.
        self.model: str = os.getenv("MODEL") or "deepseek-chat"
        if not self.api_key:
            raise ValueError("未找到DEEPSEEK_API_KEY")

    @staticmethod
    def load_servers(file_path: str = "servers_config2.json") -> Dict[str, Any]:
        """Return the "mcpServers" mapping from *file_path* ({} when absent).

        Raises:
            FileNotFoundError: if *file_path* does not exist.
            json.JSONDecodeError: if the file is not valid JSON.
        """
        with open(file_path, "r", encoding="utf-8") as f:
            return json.load(f).get("mcpServers", {})

async def run_chat_loop() -> None:
    """Start the MCP-agent CLI chat loop.

    Loads configuration, connects to every configured MCP server, builds an
    OpenAI-tools agent over the loaded tools, then answers user input read
    from stdin until the user types "quit" (or stdin reaches EOF).

    Raises:
        ValueError: propagated from Configuration when the API key is missing.
    """
    cfg = Configuration()

    # Inject the key into the environment so LangChain's DeepSeek / OpenAI
    # integrations pick it up automatically.
    # BUG FIX: this was previously set from LLM_API_KEY (usually unset),
    # which blanked the key that Configuration had just read and validated.
    os.environ["DEEPSEEK_API_KEY"] = cfg.api_key
    os.environ["OPENAI_API_KEY"] = cfg.api_key
    if cfg.base_url:  # useful for custom endpoints such as DeepSeek's
        os.environ["DEEPSEEK_API_BASE"] = cfg.base_url
        os.environ["OPENAI_BASE_URL"] = cfg.base_url

    # Connect to all configured MCP servers and collect their tools.
    servers_cfg = Configuration.load_servers()
    mcp_client = MultiServerMCPClient(servers_cfg)
    tools = await mcp_client.get_tools()
    logging.info(f"✅ 已加载 {len(tools)} 个 MCP 工具")
    for t in tools:
        logging.info(f" - {t.name}")

    # Initialise the chat model (DeepSeek / OpenAI / any OpenAI-compatible
    # provider; chosen by whether the model name mentions "deepseek").
    provider = "deepseek" if "deepseek" in cfg.model else "openai"
    llm_model = init_chat_model(model=cfg.model, model_provider=provider)

    prompt = hub.pull("hwchase17/openai-tools-agent")
    agent = create_openai_tools_agent(llm_model, tools, prompt)
    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

    # CLI chat loop.
    print("MCP agent已启动，输入quit退出")
    try:
        while True:
            # BUG FIX: run the blocking input() in a worker thread so the
            # event loop (and any background MCP sessions) keeps running.
            try:
                user_input = (await asyncio.to_thread(input, "\n你：")).strip()
            except EOFError:  # stdin closed (e.g. piped input exhausted)
                break
            if user_input.lower() == "quit":
                break
            if not user_input:  # ignore empty lines instead of invoking the agent
                continue
            try:
                result = await agent_executor.ainvoke({"input": user_input})
                print(f"\nAI: {result['output']}")
            except Exception as e:
                print(f"\n 出错啦: {e}")
    finally:
        # BUG FIX: newer langchain-mcp-adapters clients manage sessions per
        # call and expose no cleanup(); guard so both API generations work,
        # and run this in `finally` so cleanup also happens on errors.
        cleanup = getattr(mcp_client, "cleanup", None)
        if callable(cleanup):
            maybe_coro = cleanup()
            if asyncio.iscoroutine(maybe_coro):
                await maybe_coro
        print("资源已经清理，拜拜！")

if __name__ == '__main__':
    # Script entry point: drive the async MCP-agent chat loop to completion.
    asyncio.run(run_chat_loop())