import asyncio
# json: used to parse JSON decision responses from the LLM
import json
from abc import ABC, abstractmethod
from datetime import datetime
from typing import List, Dict, Any, Optional, Union

from langchain.tools import BaseTool
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.messages import SystemMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langgraph.graph import StateGraph
from langgraph.graph.state import CompiledStateGraph

from config.logging_config import get_logger
# Global tool-dictionary management
from constant.tool_dict_config import toolDictConfig
from emb import BaseEmb
from emb.EmbFactory import EmbFactory
from llm.LLMFactory import LLMFactory
from mcp_tools.mcpFactory import MCPFactory
from memory.MemoryFactory import MemoryFactory
from modulerag.ragFactory import RAGFactory, ModularRAG
# Prompt management classes
from prompt.sys_prompt import SystemPrompts
from tools.ToolFactory import ToolFactory
from utils import JsonUtils
# MCP decorator
from .decorators.mcpDecorator import mcp_enabled

logger = get_logger(__name__)


@mcp_enabled
class BaseAgent(ABC):
    """Agent基类，定义所有Agent必须实现的接口"""

    def __init__(
            self,
            agent_name: str,
            llm_code: str,
            embedding_code: str,
            agent_id: Optional[str] = None,
            scene_id: Optional[str] = None,
            memory_config: Optional[Dict[str, Any]] = None,
            tools_list: Optional[List[Union[str, Dict[str, Any]]]] = None,
            mcp_list: Optional[List[Union[str, Dict[str, Any]]]] = None,
            rag_list: Optional[List[Dict[str, Any]]] = None,
            sys_prompt: str = "",
            llm_params: Optional[Dict[str, Any]] = None,
            embedding_params: Optional[Dict[str, Any]] = None
    ):
        """Initialize the agent and create all of its components via factories.

        Args:
            agent_name: human-readable agent name.
            llm_code: factory code selecting which LLM to build.
            embedding_code: factory code selecting which embedding model to build.
            agent_id: optional agent identifier (consumed by the memory system).
            scene_id: optional scene identifier (consumed by the memory system).
            memory_config: memory-system configuration; may override scene/agent ids.
            tools_list: tool names (server codes) or descriptor dicts.
            mcp_list: MCP server names or descriptor dicts to connect.
            rag_list: RAG module descriptor dicts.
            sys_prompt: system prompt text used when building prompts.
            llm_params: extra keyword arguments forwarded to the LLM factory.
            embedding_params: extra keyword arguments forwarded to the embedding factory.
        """
        self.agent_name = agent_name
        self.agent_id = agent_id
        self.scene_id = scene_id
        self.sys_prompt = sys_prompt

        # Create core components via the factory classes.
        # NOTE: _create_memory_system reads self.scene_id / self.agent_id,
        # so the plain attribute assignments above must happen first.
        self.llm = self._create_llm(llm_code, llm_params or {})
        self.embedding = self._create_embedding(embedding_code, embedding_params or {})
        self.memory_manager = self._create_memory_system(memory_config or {})

        # Create tools, MCP connections and RAG modules.
        self.tools = self._create_tools_from_global_dict(tools_list or [])
        self.mcps = self._create_mcps(mcp_list or [])
        self.rags = self._create_rags(rag_list or [])
        
        # MCP state tracking: servers added later through add_mcp() are
        # recorded here so del_add_mcp() can remove exactly that batch.
        self.add_mcp_list = []

        # Concrete subclasses build their LangGraph workflow here.
        self.graph = self._build_graph()

    def _create_tools_from_global_dict(self, tools_list: List[Union[str, Dict[str, Any]]]) -> List[BaseTool]:
        """Create tool instances, preferring the global tool dictionary.

        Falls back to ``ToolFactory.create_tools_from_list`` when the global
        dictionary is not initialized or when anything goes wrong.

        Consistency fix: this method used bare ``print()`` calls while the
        rest of the module reports through ``logger``; prints bypassed the
        project logging configuration, so they are now logger calls.

        Args:
            tools_list: tool names (server codes) or descriptor dicts.

        Returns:
            The list of successfully created BaseTool instances.
        """
        try:
            logger.info(f"🔧 创建工具列表，输入工具数量: {len(tools_list)}")

            # Legacy path when the global dict is unavailable.
            if not toolDictConfig.is_initialized():
                logger.warning("⚠️ 全局工具字典未初始化，使用传统ToolFactory方法")
                return ToolFactory.create_tools_from_list(tools_list)

            # Use the information carried by the global tool dictionary.
            base_tools_dict = toolDictConfig.get_base_tools_dict()
            logger.info(f"📦 可用基础工具数量: {len(base_tools_dict)}")

            created_tools = []

            for tool_item in tools_list:
                # A string item is the server code itself; a dict item carries
                # it under 'tool_name' (presumably a server code — TODO confirm
                # the key naming against the callers).
                category_code = tool_item if isinstance(tool_item, str) else tool_item.get('tool_name', '')

                # Every base-tool entry whose server_code matches yields one
                # tool, so a single code may create several tools.
                for tool_id, tool_info in base_tools_dict.items():
                    if tool_info.get('server_code') == category_code:
                        tool_name = tool_info.get('tool_name')
                        tool = ToolFactory.create_single_tool(category_code, tool_name)
                        if tool:
                            created_tools.append(tool)
                            logger.info(f"   ✅ 工具创建成功: {tool.name}")
                        else:
                            logger.warning(f"   ❌ 工具创建失败: {category_code}")

            logger.info(f"🔧 工具创建完成，成功创建 {len(created_tools)} 个工具")
            return created_tools

        except Exception as e:
            logger.error(f"❌ 从全局字典创建工具失败: {e}")
            logger.info("🔄 回退到传统ToolFactory方法")
            return ToolFactory.create_tools_from_list(tools_list)

    def get_tools_info_from_global_dict(self) -> Dict[str, Any]:
        """Summarize this agent's tool usage against the global tool dictionary.

        Returns a status dict; when the global dictionary is not initialized
        (or anything raises), the dict carries ``initialized: False`` plus an
        ``error`` message.
        """
        try:
            if not toolDictConfig.is_initialized():
                return {"initialized": False, "error": "全局工具字典未初始化"}

            base_tools_dict = toolDictConfig.get_base_tools_dict()
            mcp_tools_dict = toolDictConfig.get_mcp_tools_dict()

            # Tools held by this agent that also appear in the global dict.
            used_base_tools = {}
            for tool in (self.tools or []):
                if tool.name in base_tools_dict:
                    used_base_tools[tool.name] = base_tools_dict[tool.name]

            # MCPs exposing a global_dict_tools attribute.
            used_mcp_tools = {}
            for mcp in (self.mcps or []):
                if hasattr(mcp, 'global_dict_tools'):
                    used_mcp_tools[getattr(mcp, 'mcp_name', '')] = getattr(mcp, 'global_dict_tools')

            return {
                "initialized": True,
                "stats": toolDictConfig.get_stats(),
                "used_base_tools": used_base_tools,
                "used_mcp_tools": used_mcp_tools,
                "available_base_tools_count": len(base_tools_dict),
                "available_mcp_tools_count": len(mcp_tools_dict)
            }
        except Exception as e:
            return {"initialized": False, "error": str(e)}

    def _create_llm(self, llm_code: str, params: Dict[str, Any]) -> Optional[BaseChatModel]:
        """Build a chat-model instance through LLMFactory; None when the factory fails."""
        wrapper = LLMFactory.create_llm(llm_code, **params)
        if not wrapper:
            return None
        return wrapper.get_llm_instance()

    def _create_embedding(self, embedding_code: str, params: Dict[str, Any]) -> Optional[BaseEmb]:
        """Build an embedding-model instance through EmbFactory."""
        model = EmbFactory.create_embedding_model(embedding_code, **params)
        return model

    def setSceneId(self, new_scene_id: str):
        """Update this agent's scene id and propagate it to the memory manager.

        Returns:
            True on success, False when anything raises.
        """
        try:
            previous_scene_id = self.scene_id
            self.scene_id = new_scene_id
            logger.info(f"🔄 Agent场景ID已更新: {previous_scene_id} -> {new_scene_id}")

            # Keep the memory manager in sync when it supports scene updates.
            manager = self.memory_manager
            if manager and hasattr(manager, 'update_scene_id'):
                try:
                    manager.update_scene_id(new_scene_id)
                    logger.info(f"✅ 记忆管理器场景ID已同步更新")
                except Exception as e:
                    logger.warning(f"⚠️ 更新记忆管理器场景ID失败: {e}")

            return True
        except Exception as e:
            logger.error(f"❌ 设置场景ID失败: {e}")
            return False

    def _create_mcps(self, mcp_list: List[Union[str, Dict[str, Any]]]) -> List[str]:
        """创建MCP连接列表，返回连接成功的服务器名称"""
        if not mcp_list:
            return []

        # 如果装饰器提供了方法，使用装饰器的方法
        if hasattr(self, '_create_mcps_from_global_dict'):
            return self._create_mcps_from_global_dict(mcp_list)
        else:
            return []

    def _create_rags(self, rag_list: List[Dict[str, Any]]) -> List[ModularRAG]:
        """创建RAG列表"""
        return RAGFactory.create_rags_from_list(rag_list, self.embedding) if self.embedding else []

    def set_memory_manager(self, memory_manager):
        """Attach (or replace) this agent's memory manager."""
        self.memory_manager = memory_manager

    def _create_memory_system(self, memory_config: Dict[str, Any]):
        """Create the memory system (simplified version).

        Resolves scene/agent/session/user ids from *memory_config* (falling
        back to instance attributes), then builds a full memory system via
        MemoryFactory with a short-lived DB session. Degrades to a plain
        LangChain buffer memory when the required ids are missing, and
        returns None on any failure.

        Args:
            memory_config: memory configuration; may carry scene_id,
                agent_id, session_id and user_id overrides.

        Returns:
            A memory manager or LangChain memory object, or None.
        """
        try:
            # Config values win over the instance attributes.
            scene_id = memory_config.get("scene_id") or self.scene_id
            agent_id = memory_config.get("agent_id") or self.agent_id
            session_id = memory_config.get("session_id")
            user_id = memory_config.get("user_id", "1")  # NOTE(review): default user "1" — confirm intended

            logger.info(f"🧠 初始化记忆系统: scene_id={scene_id}, agent_id={agent_id}")

            # Without both ids the full memory system cannot be keyed.
            if not scene_id or not agent_id:
                logger.warning(f"⚠️ 记忆系统参数不完整: scene_id={scene_id}, agent_id={agent_id}")
                logger.info("🔄 将创建简单LangChain记忆作为备用")

                # Best-effort fallback: a simple LangChain buffer memory.
                # NOTE(review): reaches into a private MemoryFactory method.
                try:
                    simple_memory = MemoryFactory._create_langchain_memory("buffer")
                    if simple_memory:
                        logger.info("✅ 创建简单记忆成功")
                        return simple_memory
                except Exception as simple_error:
                    logger.warning(f"⚠️ 创建简单记忆也失败: {simple_error}")

                return None

            # Borrow a DB session only for the duration of the factory call.
            db_session = None
            try:
                from config.database import get_db_session
                db_session = next(get_db_session())

                # Build the full memory system through the factory.
                memory_result = MemoryFactory.create_from_config(
                    config=memory_config,
                    db_session=db_session,
                    scene_id=scene_id,
                    agent_id=agent_id,
                    session_id=session_id,
                    user_id=user_id,
                    agent=self  # pass the agent so the factory can attach the manager itself
                )

                if memory_result:
                    # A MemoryManager exposes get_memory_stats; a plain
                    # LangChain memory object does not.
                    if hasattr(memory_result, 'get_memory_stats'):
                        try:
                            stats = memory_result.get_memory_stats()
                            memory_types = stats.get('memory_types', [])
                            logger.info(f"✅ 记忆系统初始化成功: {memory_types}")

                            # Surface Mem0 availability to the operator.
                            if 'mem0' not in memory_types:
                                logger.warning("⚠️ Mem0记忆不可用，仅使用数据库和LangChain记忆")
                        except Exception as stats_error:
                            logger.warning(f"⚠️ 获取记忆统计失败: {stats_error}")
                    else:
                        logger.info("✅ LangChain记忆初始化成功")

                    return memory_result
                else:
                    logger.warning("⚠️ 记忆系统创建返回None")
                    return None

            except Exception as e:
                logger.error(f"❌ 创建记忆系统失败: {e}")
                return None
            finally:
                # Always release the borrowed DB session.
                if db_session:
                    try:
                        db_session.close()
                    except Exception as close_error:
                        logger.warning(f"⚠️ 关闭数据库会话失败: {close_error}")

        except Exception as e:
            logger.error(f"❌ 记忆系统初始化失败: {e}")
            return None

    def process_intelligent_memory_workflow(
            self,
            current_input: str,
            custom_config: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Memory-processing workflow: gather conversation history and build the
        prompt / chain-input pair for the next LLM call.

        Args:
            current_input: the current user input.
            custom_config: optional overrides for the defaults
                (``max_history_messages``, ``include_history``).

        Returns:
            Dict with keys: success, history, prompt, chain_input, error.
        """
        # Bug fix: initialize ``result`` BEFORE the try block. The original
        # referenced ``result`` inside the except handler, which raised
        # NameError whenever the failure happened before result was assigned.
        result = {
            "success": False,
            "history": [],
            "prompt": None,
            "chain_input": {},
            "error": None
        }

        try:
            # Default configuration, overridable by the caller.
            config = {
                "max_history_messages": 10,
                "include_history": True
            }
            if custom_config:
                config.update(custom_config)

            # Fetch conversation history when enabled.
            history = []
            if config.get("include_history", True):
                history = self._get_conversation_history(config["max_history_messages"])

            result["history"] = history

            if history and config.get("include_history", True):
                # Convert history entries into LangChain message objects.
                history_messages = self._convert_history_to_messages(history)

                # Bug fix: use a ("human", "{input}") template tuple. A
                # HumanMessage *instance* is inserted verbatim by
                # ChatPromptTemplate, so the literal text "{input}" would
                # have been sent to the model unformatted.
                prompt = ChatPromptTemplate.from_messages([
                    SystemMessage(content=self.sys_prompt or "你是一个乐于助人的AI助手。"),
                    MessagesPlaceholder(variable_name="chat_history"),
                    ("human", "{input}")
                ])

                chain_input = {
                    "chat_history": history_messages,
                    "input": current_input
                }

                logger.info(f"✅ 创建包含 {len(history_messages)} 条历史消息的提示词")
            else:
                # Simple prompt without history.
                prompt = ChatPromptTemplate.from_messages([
                    SystemMessage(content=self.sys_prompt or "你是一个乐于助人的AI助手。"),
                    ("human", "{input}")
                ])

                chain_input = {"input": current_input}
                logger.info("✅ 创建简单提示词（无历史消息）")

            result["prompt"] = prompt
            result["chain_input"] = chain_input
            result["success"] = True

            return result

        except Exception as e:
            logger.error(f"❌ 记忆处理工作流失败: {e}")
            result["error"] = str(e)
            result["success"] = False

            # Fall back to the simplest possible prompt so the caller still
            # gets something usable.
            try:
                result["prompt"] = ChatPromptTemplate.from_messages([
                    SystemMessage(content=self.sys_prompt or "你是一个乐于助人的AI助手。"),
                    ("human", "{input}")
                ])
                result["chain_input"] = {"input": current_input}
                logger.info("🔄 使用备用简单提示词")
            except Exception as final_error:
                logger.error(f"❌ 创建备用提示词也失败: {final_error}")

            return result

    def _get_conversation_history(self, limit: int = 10) -> List[Dict[str, Any]]:
        """获取对话历史"""
        history = []

        # 尝试从记忆管理器获取
        if self.memory_manager and hasattr(self.memory_manager, 'get_conversation_history'):
            try:
                history = self.memory_manager.get_conversation_history(limit=limit)
                logger.info(f"✅ 从记忆管理器获取了 {len(history)} 条历史消息")
                return history
            except Exception as e:
                logger.warning(f"⚠️ 从记忆管理器获取历史失败: {e}")

        # 回退到LangChain记忆
        if hasattr(self, 'memory') and self.memory:
            try:
                from langchain_core.messages import HumanMessage, AIMessage
                memory_vars = self.memory.load_memory_variables({})
                if 'chat_history' in memory_vars:
                    langchain_messages = memory_vars['chat_history']
                    if isinstance(langchain_messages, list):
                        for msg in langchain_messages:
                            if isinstance(msg, HumanMessage):
                                history.append({"role": "user", "content": msg.content})
                            elif isinstance(msg, AIMessage):
                                history.append({"role": "assistant", "content": msg.content})
                logger.info(f"✅ 从LangChain记忆获取了 {len(history)} 条历史消息")
            except Exception as e:
                logger.warning(f"⚠️ 从LangChain记忆获取历史失败: {e}")

        return history[-limit:] if history else []

    def _convert_history_to_messages(self, history: List[Dict[str, Any]]) -> List:
        """转换历史消息为LangChain消息格式"""

        messages = []
        for msg in history:
            role = msg.get("role", "")
            content = msg.get("content", "")

            if role == "user" and content:
                messages.append(HumanMessage(content=content))
            elif role == "assistant" and content:
                messages.append(AIMessage(content=content))

        return messages

    def _add_conversation_to_memory(self, user_message: str, ai_message: str) -> bool:
        """Persist one user/AI exchange into the memory system.

        Tries the memory manager's ``add_message`` first, then its embedded
        LangChain memory component.

        Args:
            user_message: the user's message text.
            ai_message: the assistant's reply text.

        Returns:
            True when either backend accepted the pair, False otherwise.
        """
        try:
            # 1. Preferred path: the memory manager's own add_message.
            if self.memory_manager and hasattr(self.memory_manager, 'add_message'):
                try:
                    success = self.memory_manager.add_message(user_message, ai_message)
                    if success:
                        logger.info("✅ 记忆更新成功: 使用记忆管理器")
                        return True
                except Exception as e:
                    logger.warning(f"⚠️ 记忆管理器添加失败: {e}")

            # 2. Fallback: the manager's embedded LangChain memory component.
            if (self.memory_manager and
                    hasattr(self.memory_manager, 'get_langchain_memory')):
                try:
                    langchain_memory = self.memory_manager.get_langchain_memory()
                    if langchain_memory:
                        langchain_memory.save_context({"input": user_message}, {"output": ai_message})
                        logger.info("✅ 记忆更新成功: 使用记忆管理器的LangChain组件")
                        return True
                except Exception as e:
                    logger.warning(f"⚠️ 记忆管理器LangChain组件添加失败: {e}")

            logger.warning("⚠️ 没有可用的记忆系统")
            return False

        except Exception as e:
            logger.error(f"❌ 记忆添加失败: {e}")
            return False

    @abstractmethod
    def _build_graph(self) -> CompiledStateGraph:
        """Build and compile this agent's LangGraph workflow graph."""
        pass

    @abstractmethod
    def _define_nodes(self, workflow: StateGraph) -> Dict[str, Any]:
        """Define the nodes of the workflow graph."""
        pass

    @abstractmethod
    def _define_edges(self, workflow: StateGraph) -> List[tuple]:
        """Define the edges (connections) between workflow nodes."""
        pass

    @abstractmethod
    def process(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """Process the input data and return the result."""
        pass

    def add_tool(self, tool: BaseTool):
        """Register an additional tool on this agent."""
        self.tools += [tool]

    def add_mcp(self, mcp_list: List[Union[str, Dict[str, Any]]]):
        """Connect the given MCP servers, replacing any previously added batch.

        Servers added through this method are tracked in ``add_mcp_list`` so
        that ``del_add_mcp`` can later remove exactly this batch.
        """
        # Remove whatever a previous add_mcp call registered.
        self.del_add_mcp()

        creator = getattr(self, '_create_mcps_from_global_dict', None)
        if creator is None:
            return

        connected_servers = creator(mcp_list)
        # Register newly connected servers, skipping duplicates.
        for server_name in connected_servers:
            if server_name not in self.mcps:
                self.mcps.append(server_name)

        # Remember this batch for later cleanup.
        self.add_mcp_list = list(connected_servers)
        logger.info(f"✅ 已记录本次添加的MCP列表: {connected_servers}")

    def del_add_mcp(self):
        """Remove the MCP servers registered by the previous add_mcp() call."""
        try:
            if not self.add_mcp_list:
                logger.info("ℹ️ 没有需要清理的之前添加的MCP服务器")
                return

            # Drop the tracked servers from the live mcps list.
            for server_name in self.add_mcp_list:
                if server_name in self.mcps:
                    self.mcps.remove(server_name)
                    logger.info(f"🗑️ 已从mcps中移除: {server_name}")

            logger.info(f"✅ 已清理之前添加的{len(self.add_mcp_list)}个MCP服务器")
            self.add_mcp_list = []

        except Exception as e:
            logger.error(f"❌ 清理之前添加的MCP失败: {e}")
            # Reset the bookkeeping even on failure to avoid inconsistent state.
            self.add_mcp_list = []

    def get_add_mcp_list(self) -> List[str]:
        """Return a defensive copy of the servers registered via add_mcp()."""
        return list(self.add_mcp_list)
    
    def has_add_mcp(self) -> bool:
        """True when add_mcp() has registered at least one server."""
        return bool(self.add_mcp_list)
    
    def clear_add_mcp(self):
        """Forget the add_mcp() bookkeeping without touching live MCP connections."""
        self.add_mcp_list = list()
        logger.info("🧹 已清空add_mcp记录")

    def add_rag(self, rag: ModularRAG):
        """Register an additional RAG module on this agent."""
        self.rags += [rag]

    def get_config(self) -> Dict[str, Any]:
        """Return a snapshot of this agent's configuration and component status."""
        config = dict(
            agent_name=self.agent_name,
            agent_id=self.agent_id,
            scene_id=self.scene_id,
            tools_count=len(self.tools),
            mcp_count=len(self.mcps),
            add_mcp_count=len(self.add_mcp_list),
            add_mcp_list=list(self.add_mcp_list),
            sys_prompt=self.sys_prompt,
            llm_available=self.llm is not None,
            embedding_available=self.embedding is not None,
            memory_available=self.memory_manager is not None,
            global_tools_dict=self.get_tools_info_from_global_dict(),
        )
        return config

    def get_available_components(self) -> Dict[str, Dict[str, str]]:
        """Enumerate every component type each factory can currently provide."""
        providers = (
            ("memory_types", MemoryFactory.get_available_types),
            ("embedding_types", EmbFactory.get_supported_models),
            ("llm_types", LLMFactory.get_available_strategies),
            ("tool_types", ToolFactory.get_available_types),
            ("mcp_types", MCPFactory.get_available_types),
            ("rag_types", RAGFactory.get_available_types),
        )
        return {key: provider() for key, provider in providers}

    def log_component_status(self):
        """Log a human-readable status report for every agent component.

        Purely informational: writes availability and counts for the LLM,
        embedding model, tools, RAGs, MCPs, memory, workflow graph, global
        tool dictionary and system prompt to the logger. No state changes.
        """
        logger.info(f"📊 {self.__class__.__name__}组件状态检查:")
        logger.info(f"   - Agent名称: {self.agent_name}")
        logger.info(f"   - Agent ID: {self.agent_id}")
        logger.info(f"   - Scene ID: {self.scene_id}")

        # LLM availability
        if self.llm:
            logger.info(f"   ✅ LLM: 已配置 ({type(self.llm).__name__})")
        else:
            logger.warning("   ❌ LLM: 未配置")

        # Embedding model availability
        if self.embedding:
            logger.info(f"   ✅ 嵌入模型: 已配置 ({type(self.embedding).__name__})")
        else:
            logger.warning("   ❌ 嵌入模型: 未配置")

        # Tools
        if self.tools:
            logger.info(f"   ✅ 工具: {len(self.tools)} 个")
            for i, tool in enumerate(self.tools[:3]):  # show at most the first 3
                logger.info(f"      {i + 1}. {tool.name}: {getattr(tool, 'description', '无描述')[:50]}...")
            if len(self.tools) > 3:
                logger.info(f"      ... 还有 {len(self.tools) - 3} 个工具")
        else:
            logger.warning("   ❌ 工具: 无可用工具")

        # RAG modules
        if self.rags:
            logger.info(f"   ✅ RAG: {len(self.rags)} 个")
            for i, rag in enumerate(self.rags[:2]):  # show at most the first 2
                logger.info(f"      {i + 1}. RAG模块已配置")
            if len(self.rags) > 2:
                logger.info(f"      ... 还有 {len(self.rags) - 2} 个RAG模块")
        else:
            logger.warning("   ❌ RAG: 无可用RAG")

        # MCP servers
        if self.mcps:
            logger.info(f"   ✅ MCP: {len(self.mcps)} 个")
            for i, mcp in enumerate(self.mcps[:2]):  # show at most the first 2
                mcp_name = getattr(mcp, 'mcp_name', f'MCP_{i + 1}')
                logger.info(f"      {i + 1}. {mcp_name}")
            if len(self.mcps) > 2:
                logger.info(f"      ... 还有 {len(self.mcps) - 2} 个MCP")
        else:
            logger.warning("   ❌ MCP: 无可用MCP")

        # Memory system
        if self.memory_manager:
            logger.info(f"   ✅ 记忆: 已配置 ({type(self.memory_manager).__name__})")
            # Best-effort memory stats
            try:
                stats = self.memory_manager.get_memory_stats()
                logger.info(f"      - 记忆类型: {stats.get('memory_types', [])}")
            except Exception:
                pass
        else:
            logger.warning("   ❌ 记忆: 未配置")

        # Workflow graph (for agents that build one)
        if hasattr(self, 'graph') and self.graph:
            logger.info("   ✅ 工作流图: 已构建")
        elif hasattr(self, 'graph'):
            logger.warning("   ❌ 工作流图: 未构建")

        # Global tool-dictionary status
        global_tools_info = self.get_tools_info_from_global_dict()
        if global_tools_info.get("initialized"):
            stats = global_tools_info.get("stats", {})
            logger.info("   📚 全局工具字典: 已初始化")
            logger.info(f"      - 基础工具: {stats.get('base_tools_count', 0)} 个")
            logger.info(f"      - MCP工具: {stats.get('mcp_tools_count', 0)} 个")
        else:
            logger.warning("   ❌ 全局工具字典: 未初始化")

        # System prompt status
        if self.sys_prompt:
            prompt_length = len(self.sys_prompt)
            logger.info(f"   ✅ 系统提示词: 已配置 ({prompt_length} 字符)")
        else:
            logger.warning("   ❌ 系统提示词: 未配置")

    def _invoke_llm_with_tools(self, prompt_text: str, custom_system_prompt: str = None,
                               context: Dict[str, Any] = None) -> Any:
        """
        Invoke the LLM with the agent's tools bound, then parse its decision
        (whether to call a tool/MCP/RAG or answer directly).

        Args:
            prompt_text: the user prompt text.
            custom_system_prompt: optional extra system prompt appended AFTER
                the enhanced base prompt (it does not replace it).
            context: optional context forwarded to prompt building and
                response parsing.

        Returns:
            The parsed decision dict from _process_intelligent_response.

        Raises:
            ValueError: when no LLM is configured.
            Exception: re-raises any failure from the chain invocation.
        """
        if not self.llm:
            raise ValueError("LLM未初始化，无法调用")

        try:
            # Build the enhanced system prompt; a custom prompt is appended.
            if custom_system_prompt:
                base_sys = self._create_enhanced_system_prompt(context)
                enhanced_system_prompt = f"{base_sys}\n{custom_system_prompt}"
            else:
                enhanced_system_prompt = self._create_enhanced_system_prompt(context)

            # Both messages are concrete instances, so the template contains
            # no variables — hence the empty dict passed to invoke() below.
            prompt = ChatPromptTemplate.from_messages([
                SystemMessage(content=enhanced_system_prompt),
                HumanMessage(content=prompt_text)
            ])

            # Bind the agent's tools so the model can emit tool calls.
            target_llm = self.llm.bind_tools(self.tools)
            chain = prompt | target_llm

            result = chain.invoke({})

            # Parse the model's decision; nothing is executed here.
            processed_result = self._process_intelligent_response(result, context)

            logger.debug(f"🤖 智能LLM调用完成，响应类型: {type(processed_result)}")

            return processed_result

        except Exception as e:
            logger.error(f"❌ 智能LLM调用失败: {e}")
            raise

    def _process_intelligent_response(self, llm_response, context: Dict[str, Any] = None) -> Dict[str, Any]:
        """
        Parse the LLM's JSON decision response WITHOUT executing it; the
        implementing class's workflow performs the actual execution.

        Args:
            llm_response: the raw LLM response (message object or string).
            context: optional context (unused in parsing).

        Returns:
            Dict with "action", "success", "parse_success" plus the
            action-specific fields; on JSON parse failure, the raw text under
            "content" with action "direct_answer" and parse_success False.
        """
        try:
            # Pull the text out of a message object when present.
            if hasattr(llm_response, 'content'):
                response_text = llm_response.content
            else:
                response_text = str(llm_response)

            # Strip markdown fences etc. before JSON parsing.
            content_cleaned = JsonUtils.clean_json_content(response_text)
            parsed_response = json.loads(content_cleaned)

            action = parsed_response.get("action", "direct_answer")

            logger.info(f"🎯 LLM决策解析: {action}")

            # Only return the parsed decision; no side effects here.
            result = {
                "action": action,
                "success": True,
                "parse_success": True
            }

            # Substring matching improves recognition of action variants.
            # NOTE: branch order matters — "tool" is tested before "mcp",
            # so an action containing both (e.g. "mcp_tool") takes the tool
            # branch.
            if "tool" in action.lower():
                result.update({
                    "tool_name": parsed_response.get("tool_name", ""),
                    "tool_params": parsed_response.get("tool_params", {}),
                    "reasoning": parsed_response.get("reasoning", "")
                })
            elif "mcp" in action.lower():
                result.update({
                    "server_name": parsed_response.get("server_name", ""),
                    "tool_name": parsed_response.get("tool_name", ""),
                    "mcp_params": parsed_response.get("mcp_params", {}),
                    "reasoning": parsed_response.get("reasoning", "")
                })
            elif "rag" in action.lower():
                result.update({
                    "rag_name": parsed_response.get("rag_name", ""),
                    "rag_params": parsed_response.get("rag_params", {}),
                    "query": parsed_response.get("query", ""),
                    "reasoning": parsed_response.get("reasoning", "")
                })
            elif "answer" in action.lower():
                result.update({
                    "content": parsed_response.get("content", ""),
                    "reasoning": parsed_response.get("reasoning", "")
                })
            else:
                # Unknown action: still surface content/reasoning when present.
                result.update({
                    "content": parsed_response.get("content", ""),
                    "reasoning": parsed_response.get("reasoning", "")
                })

            return result

        except Exception as e:

            logger.warning(f"⚠️ 解析智能响应失败: {e}，返回原始响应")

            # Parsing failed: hand back the raw text as a direct answer.
            response_text = llm_response.content if hasattr(llm_response, 'content') else str(llm_response)
            return {
                "action": "direct_answer",
                "content": response_text,
                "reasoning": "响应解析失败，返回原始内容",
                "success": True,
                "parse_success": False,
                "parse_error": str(e)
            }

    def _execute_tool_action(self, parsed_response: Dict[str, Any], context: Dict[str, Any] = None) -> Dict[str, Any]:
        """执行工具调用操作"""
        try:
            tool_name = parsed_response.get("tool_name")
            tool_params = parsed_response.get("tool_params", {})
            reasoning = parsed_response.get("reasoning", "")

            if not tool_name:
                return {
                    "action": "use_tool",
                    "error": "未指定工具名称",
                    "success": False
                }

            # 查找工具
            target_tool = None
            for tool in self.tools:
                if tool.name == tool_name:
                    target_tool = tool
                    break

            if not target_tool:
                return {
                    "action": "use_tool",
                    "error": f"未找到工具: {tool_name}",
                    "success": False
                }

            # 执行工具
            logger.info(f"🔧 执行工具: {tool_name}，参数: {tool_params}")
            result = self._safe_invoke_tool(target_tool, tool_params)

            return {
                "action": "use_tool",
                "tool_name": tool_name,
                "tool_params": tool_params,
                "tool_result": result,
                "reasoning": reasoning,
                "success": True
            }

        except Exception as e:

            logger.error(f"❌ 工具执行失败: {e}")

            return {
                "action": "use_tool",
                "tool_name": parsed_response.get("tool_name", "unknown"),
                "error": str(e),
                "success": False
            }

    def _execute_mcp_action(self, parsed_response: Dict[str, Any], context: Dict[str, Any] = None) -> Dict[str, Any]:
        """Execute an MCP tool call described by the parsed LLM response.

        Args:
            parsed_response: Parsed LLM output; expected keys are
                ``server_name``, ``tool_name``, ``mcp_params`` (dict) and
                ``reasoning``.
            context: Optional extra context (not used by this method).

        Returns:
            A dict with ``action="use_mcp"`` and a ``success`` flag, carrying
            either the MCP call result or an ``error`` message.
        """
        try:
            server_name = parsed_response.get("server_name")
            tool_name = parsed_response.get("tool_name")
            mcp_params = parsed_response.get("mcp_params", {})
            reasoning = parsed_response.get("reasoning", "")

            if not server_name:
                return {
                    "action": "use_mcp",
                    "error": "未指定MCP服务名称",
                    "success": False
                }

            logger.info(f"🔌 执行MCP服务: {server_name}:{tool_name}，参数: {mcp_params}")

            # MCPFactory.call_mcp_tool expects the parameters as a JSON string.
            mcp_params_json = json.dumps(mcp_params) if mcp_params else "{}"

            # Bridge the async MCP call into this synchronous method.
            # asyncio.get_running_loop() raises RuntimeError when no loop is
            # running in this thread; asyncio.get_event_loop() is deprecated
            # for this kind of probe since Python 3.10.
            try:
                asyncio.get_running_loop()
            except RuntimeError:
                # No running event loop: drive the coroutine directly.
                result = asyncio.run(MCPFactory.call_mcp_tool(server_name, tool_name, mcp_params_json))
            else:
                # Already inside a running loop: asyncio.run() would raise
                # here, so run the coroutine on a fresh loop in a worker
                # thread. The coroutine is created inside the lambda so it is
                # built in the worker thread, not in this one.
                import concurrent.futures
                with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
                    future = executor.submit(
                        lambda: asyncio.run(MCPFactory.call_mcp_tool(server_name, tool_name, mcp_params_json))
                    )
                    result = future.result(timeout=30)

            return {
                "action": "use_mcp",
                "server_name": server_name,
                "tool_name": tool_name,
                "mcp_params": mcp_params,
                "mcp_result": result,
                "reasoning": reasoning,
                "success": True
            }

        except Exception as e:
            logger.error(f"❌ MCP执行失败: {e}")

            return {
                "action": "use_mcp",
                "server_name": parsed_response.get("server_name", "unknown"),
                "tool_name": parsed_response.get("tool_name", "unknown"),
                "error": str(e),
                "success": False
            }

    def _safe_invoke_tool(self, tool, tool_params: Dict[str, Any]):
        """安全地调用工具，支持同步和异步调用"""
        try:
            # 首先尝试异步调用
            if hasattr(tool, 'ainvoke'):
                try:
                    import asyncio
                    return asyncio.run(tool.ainvoke(tool_params))
                except Exception as async_error:

                    logger.warning(f"⚠️ 异步调用失败: {async_error}, 尝试同步调用")

            # 回退到同步调用
            if hasattr(tool, 'invoke'):
                return tool.invoke(tool_params)
            elif hasattr(tool, 'run'):
                # 某些工具可能使用run方法
                return tool.run(tool_params)
            else:
                raise Exception("工具不支持invoke、ainvoke或run方法")

        except Exception as e:
            logger.error(f"❌ 工具调用失败: {str(e)}")
            raise

    def _create_enhanced_system_prompt(self, context: Dict[str, Any] = None) -> str:
        """
        Build the enhanced system prompt that lets the LLM decide on its own
        whether a tool/MCP call is needed and emit the call parameters directly.

        Args:
            context: Contextual information (user input, state, etc.).
        """
        # Fall back to the default assistant prompt when none was configured.
        prompt_root = self.sys_prompt if self.sys_prompt else SystemPrompts.base_assistant()
        # Delegate assembly to SystemPrompts with the agent's context attached.
        return SystemPrompts.build_system_prompt(prompt_root, self._build_agent_context(context))

    def _build_agent_context(self, context: Dict[str, Any] = None) -> Dict[str, Any]:
        """构建Agent上下文信息，用于提示词生成"""
        agent_context = context.copy() if context else {}

        # 获取MCP指导信息
        if hasattr(self, 'get_mcp_tools_prompt_text'):
            try:
                mcp_guidance = self.get_mcp_tools_prompt_text()
                if mcp_guidance:
                    agent_context['mcp_guidance'] = mcp_guidance
            except Exception:
                pass

        # 添加工具信息
        agent_context['tools'] = self.tools or []

        return agent_context

    def _create_base_initial_state(self, input_text: str) -> Dict[str, Any]:
        """创建基础的初始状态 - 通用部分，子类可以扩展"""
        return {
            "input": input_text,
            "original_input": input_text,
            "should_continue": True,
            "iteration_count": 0,
            "max_iterations": 5,
            "final_response": "",
            "success": False,
            "error": ""
        }

    def _create_thread_config(self, prefix: str = "thread") -> Dict[str, Any]:
        """创建线程配置 - 通用方法"""
        return {"configurable": {"thread_id": f"{prefix}_{int(datetime.now().timestamp())}"}}

    def _prepare_input_with_memory_base(self, input_text: str) -> Dict[str, Any]:
        """Prepare a memory context for *input_text* via the base memory workflow.

        Always returns a dict with the keys ``recent_history``,
        ``total_history_count``, ``memory_summary`` and ``relevant_context``;
        falls back to an empty context whenever the workflow fails or raises.
        """
        try:
            outcome = self.process_intelligent_memory_workflow(input_text)

            if outcome.get("success", False):
                history = outcome.get("history", [])

                memory_context = {
                    "recent_history": history[-5:],  # keep only the 5 most recent entries
                    "total_history_count": len(history),
                    "memory_summary": outcome.get("summary", ""),
                    "relevant_context": outcome.get("relevant_context", [])
                }

                logger.info(f"🧠 使用记忆增强输入，历史记录数: {len(history)}")
                return memory_context

            logger.warning(f"记忆工作流失败，使用基础输入: {outcome.get('error')}")

        except Exception as e:
            logger.warning(f"准备记忆输入失败: {e}")

        # Fallback: empty memory context with the same shape.
        return {
            "recent_history": [],
            "total_history_count": 0,
            "memory_summary": "",
            "relevant_context": []
        }
