"""
LLM + MCP Facade 设计
将 LLM 和 MCP 工具封装成与 ChatDeepSeek 相同接口的组件
"""


from typing import List, Dict, Any, Optional, Union

from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.tools import BaseTool
from langchain_mcp_adapters.client import MultiServerMCPClient
from pydantic import Field, PrivateAttr


class LLMMCPFacade(BaseChatModel):
    """
    Facade that combines an LLM with MCP tools while exposing the same
    interface as ChatDeepSeek (i.e. ``BaseChatModel``).

    Usage: construct the facade, then ``await facade._initialize_tools()``
    once to load MCP tools and bind them to the underlying LLM. Until
    initialization completes, all calls fall back to the bare base LLM
    (no tools bound) instead of raising ``AttributeError``.
    """

    # Public pydantic fields (constructor arguments).
    base_llm: BaseChatModel = Field(...)
    mcp_servers_config: Dict[str, Dict[str, Any]] = Field(...)
    additional_tools: List[BaseTool] = Field(default_factory=list)

    # Runtime state. Must be declared as PrivateAttr: assigning undeclared
    # underscore attributes on a pydantic model raises at runtime.
    _mcp_tools: List[BaseTool] = PrivateAttr(default_factory=list)
    _llm_with_tools: Optional[Any] = PrivateAttr(default=None)
    _initialized: bool = PrivateAttr(default=False)

    @property
    def _llm_type(self) -> str:
        """Model identifier required by BaseChatModel's abstract interface.

        Without this property the class cannot be instantiated at all
        (``_llm_type`` is abstract on ``BaseChatModel``).
        """
        return "llm_mcp_facade"

    @property
    def _delegate(self) -> Any:
        """The tool-bound runnable once initialized, else the bare base LLM."""
        return self._llm_with_tools if self._llm_with_tools is not None else self.base_llm

    async def _initialize_tools(self) -> None:
        """Load MCP tools from the configured servers and bind all tools to the LLM.

        On failure, falls back to binding only ``additional_tools`` so the
        facade stays usable. Always marks the facade as initialized
        (the original only did so on the failure path).
        """
        try:
            # Initialize the MCP client against the configured servers.
            mcp_client = MultiServerMCPClient(self.mcp_servers_config)

            # Fetch the MCP tools.
            self._mcp_tools = await mcp_client.get_tools()
            print(f"✅ 成功加载 {len(self._mcp_tools)} 个 MCP 工具")

            # Merge MCP tools with any caller-supplied extra tools.
            all_tools = self._mcp_tools + self.additional_tools

            # Bind the combined tool set to the LLM.
            self._llm_with_tools = self.base_llm.bind_tools(all_tools)
        except Exception as e:
            print(f"❌ MCP 工具初始化失败: {e}")
            # MCP failed: fall back to the base LLM plus extra tools only.
            self._mcp_tools = []
            self._llm_with_tools = self.base_llm.bind_tools(self.additional_tools)
        finally:
            self._initialized = True

    # --- BaseChatModel abstract methods -----------------------------------
    # NOTE: bind_tools() returns a RunnableBinding, which has no _generate /
    # _agenerate, so delegation must go through invoke()/ainvoke() and the
    # resulting message be wrapped in a ChatResult.

    def _generate(self, messages, stop=None, run_manager=None, **kwargs):
        """Synchronous generation, delegated to the tool-bound runnable."""
        message = self._delegate.invoke(messages, stop=stop, **kwargs)
        return ChatResult(generations=[ChatGeneration(message=message)])

    async def _agenerate(self, messages, stop=None, run_manager=None, **kwargs):
        """Asynchronous generation, delegated to the tool-bound runnable."""
        message = await self._delegate.ainvoke(messages, stop=stop, **kwargs)
        return ChatResult(generations=[ChatGeneration(message=message)])

    def invoke(self, input, config=None, **kwargs):
        """Synchronous call, same contract as ChatDeepSeek.invoke."""
        return self._delegate.invoke(input, config, **kwargs)

    async def ainvoke(self, input, config=None, **kwargs):
        """Asynchronous call, same contract as ChatDeepSeek.ainvoke."""
        return await self._delegate.ainvoke(input, config, **kwargs)

    def stream(self, input, config=None, **kwargs):
        """Synchronous streaming, same contract as ChatDeepSeek.stream."""
        return self._delegate.stream(input, config, **kwargs)

    async def astream(self, input, config=None, **kwargs):
        """Asynchronous streaming, same contract as ChatDeepSeek.astream."""
        return self._delegate.astream(input, config, **kwargs)

    def get_tools_info(self) -> Dict[str, Any]:
        """Return a summary of the loaded tools and configured MCP servers."""
        mcp_count = len(self._mcp_tools) if self._mcp_tools else 0
        return {
            "mcp_tools_count": mcp_count,
            "additional_tools_count": len(self.additional_tools),
            "total_tools_count": mcp_count + len(self.additional_tools),
            "mcp_servers": list(self.mcp_servers_config.keys()),
        }


def create_deepseek_mcp_facade(
    api_key: str,
    mcp_servers_config: Dict[str, Dict[str, Any]],
    additional_tools: Optional[List[BaseTool]] = None,
    api_base: Optional[str] = None,
    model: str = "deepseek-chat",
    **kwargs
) -> LLMMCPFacade:
    """
    Create a DeepSeek + MCP facade.

    Args:
        api_key: DeepSeek API key.
        mcp_servers_config: MCP server configuration, keyed by server name.
        additional_tools: Extra (non-MCP) tools to bind; may be None.
        api_base: Optional API base URL override.
        model: Model name (defaults to "deepseek-chat").
        **kwargs: Extra keyword arguments forwarded to ChatDeepSeek.

    Returns:
        An LLMMCPFacade instance (tools not yet initialized — the caller
        must still await ``_initialize_tools()``).
    """
    # Imported lazily so the module loads without langchain_deepseek installed.
    from langchain_deepseek import ChatDeepSeek

    # Build the underlying LLM.
    llm_kwargs = {
        "model": model,
        "api_key": api_key,
        **kwargs,
    }
    if api_base:
        llm_kwargs["api_base"] = api_base

    base_llm = ChatDeepSeek(**llm_kwargs)

    # BUG FIX: the default additional_tools=None was previously passed
    # straight into the facade, bypassing the field's default_factory and
    # failing pydantic validation of List[BaseTool]. Normalize to [].
    return LLMMCPFacade(
        base_llm=base_llm,
        mcp_servers_config=mcp_servers_config,
        additional_tools=additional_tools or [],
    )
