"""
“思考-行动-观察”模式的React智能体
"""
import asyncio
import json
import re
from typing import Any, Dict, List
import uuid
import time

from langchain.schema import HumanMessage, AIMessage
from langgraph.graph import StateGraph, END
from langgraph.graph.state import CompiledStateGraph
from langchain_huggingface import HuggingFaceEmbeddings
from langgraph.store.memory import InMemoryStore
from langgraph.types import Command, interrupt
from langgraph.checkpoint.memory import MemorySaver
from langgraph.store.postgres import PostgresStore
from psycopg_pool import ConnectionPool
from psycopg import Connection
from langchain_core.runnables.graph import MermaidDrawMethod
from flashtext import KeywordProcessor
from loguru import logger
from json_repair import repair_json
from threading import RLock
import traceback

from agent.tool_manager import ToolManager
from .agent_state import AgentState, create_initial_state
from .planner import Planner
from .executor2 import PlanExecutor, ExecutionStatus
from .adaptive_replanner import AdaptiveReplanner, AdaptationContext
from .reflection_module import ReflectionModule
from config import Config
from llm.llm_manager import get_llm_manager
from model.episodic_model import Episode
from model.context_model import ReasoningStep
from model.memory_model import MemoryType
from model.planer_model import Plan
from memory import ContextManager
from memory.context_manager import ToolContext
from agent.agent_prompts import (
    decision_prompt_template,
    think_prompt_template,
    compress_prompt_template,
    refine_websearch_prompt_template,
    final_prompt_template,
    semantic_prompt_template,
    zip_prompt_template,
)
from tools import ToolResult


class ReactAgent:
    """Thread-safe singleton ReAct agent ("think-act-observe" loop)."""

    _lock = RLock()
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Serialise creation so two threads racing on the first call
        # cannot end up with distinct instances.
        with cls._lock:
            if cls._instance is None:
                cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, verbose: bool = True, mode: str = "hybrid",
                 enable_reflection: bool = True,
                 reflection_quality_threshold: float = 0.7):
        """Initialise the agent's memory, tools, planner and workflow graph.

        Args:
            verbose: print step-by-step progress to stdout.
            mode: one of "react", "plan_execute" or "hybrid".
            enable_reflection: add a self-critique/refine node to the graph.
            reflection_quality_threshold: minimum quality score the reflection
                module accepts before it stops refining.
        """
        # This class is a singleton (see __new__), but Python still invokes
        # __init__ on every ReactAgent() call. Re-running it would rebuild the
        # in-memory store and silently drop all stored memories, so bail out
        # if this (shared) instance was already initialised.
        if getattr(self, "_initialized", False):
            return

        self.verbose = verbose
        self.mode = mode  # "react", "plan_execute" or "hybrid"
        self.enable_reflection = enable_reflection

        # Embedding model backing the vector-search memory store.
        embedding_model = HuggingFaceEmbeddings(
            model_name=Config.EMBEDDING_MODEL,
            model_kwargs={'device': 'cpu'},
            encode_kwargs={'normalize_embeddings': False}
        )
        # In-process memory store with semantic (vector) indexing.
        self.memory_store = InMemoryStore(index={"embed": embedding_model, "dims": Config.EMBEDDING_DIM})

        # # Single-connection Postgres alternative:
        # conn = Connection.connect("postgresql://postgres:postgres@127.0.0.1:5432/agent?sslmode=disable",
        #                           autocommit=True, prepare_threshold=0)
        # self.memory_store = PostgresStore(conn=conn, index={"embed": embedding_model, "dims": Config.EMBEDDING_DIM})
        # self.memory_store.setup()

        # # Connection-pool Postgres alternative:
        # pool = ConnectionPool(
        #     conninfo="postgresql://postgres:postgres@127.0.0.1:5432/agent?sslmode=disable",
        #     max_size=20,
        #     kwargs={"autocommit": True, "prepare_threshold": 0},
        # )
        # self.memory_store = PostgresStore(conn=pool, index={"embed": embedding_model, "dims": Config.EMBEDDING_DIM})
        # self.memory_store.setup()

        # Context-memory manager for tool invocations.
        self.context_manager = ContextManager(self.memory_store)

        self.tool_manager = ToolManager()

        # Planner / executor pair used by the plan-execute path.
        self.planner = Planner(self.memory_store)
        self.executor = PlanExecutor(self.memory_store, self.tool_manager)

        # Adaptive replanner that augments the hybrid mode.
        self.adaptive_replanner = AdaptiveReplanner(self.planner, self.tool_manager)

        # Optional self-reflection module.
        self.reflection_module = ReflectionModule(
            quality_threshold=reflection_quality_threshold,
            max_refinement_iterations=3,
            verbose=self.verbose
        ) if enable_reflection else None

        # Shared LLM manager.
        self.llm_manager = get_llm_manager()

        # Compiled LangGraph workflow.
        self.graph = self._create_graph()
        self._initialized = True


    def workflow_image(self):
        """显示流程图"""
        # return self.graph.get_graph().draw_mermaid_png(draw_method=MermaidDrawMethod.PYPPETEER)
        return self.graph.get_graph().draw_mermaid_png()

    def _create_graph(self) -> CompiledStateGraph:
        """Assemble and compile the LangGraph workflow for the current mode.

        Three topologies share the same building blocks:
          - "react" (default): the think -> act -> observe loop only.
          - "plan_execute": a decision node routes to either the react loop or
            a plan -> execute -> evaluate pipeline.
          - "hybrid": plan_execute plus an adaptive replanning node that can
            re-enter planning after evaluating an execution.
        All modes terminate through finish (optionally followed by reflect).
        """
        workflow = StateGraph(AgentState)

        if self.mode == "plan_execute":
            self._add_react_nodes(workflow)
            self._add_plan_nodes(workflow)
            workflow.add_node("decide_approach", self._decide_approach_node)

            workflow.set_entry_point("decide_approach")
            workflow.add_conditional_edges(
                "decide_approach",
                self._route_after_decision,
                {
                    "finish": "finish",
                    "think": "think",
                    "plan": "plan"
                }
            )
            self._add_react_edges(workflow)
            self._add_plan_edges(workflow)
            # Without adaptive replanning, an evaluated execution either falls
            # back to the react loop or terminates.
            workflow.add_conditional_edges(
                "evaluate_execution",
                self._should_finish_after_evaluation,
                {
                    "think": "think",
                    "finish": "finish"
                }
            )

        elif self.mode == "hybrid":
            self._add_react_nodes(workflow)
            self._add_plan_nodes(workflow)
            workflow.add_node("decide_approach", self._decide_approach_node)
            workflow.add_node("adaptive_replan", self._adaptive_replan_node)

            workflow.set_entry_point("decide_approach")
            workflow.add_conditional_edges(
                "decide_approach",
                self._route_after_decision,
                {
                    "finish": "finish",
                    "plan": "plan",
                    "think": "think"
                }
            )
            self._add_react_edges(workflow)
            self._add_plan_edges(workflow)
            # Hybrid mode may re-plan after evaluating an execution.
            workflow.add_conditional_edges(
                "evaluate_execution",
                self._should_replan_after_evaluation,
                {
                    "adaptive_replan": "adaptive_replan",
                    "think": "think",
                    "finish": "finish"
                }
            )
            workflow.add_conditional_edges(
                "adaptive_replan",
                self._route_after_replan,
                {
                    "plan": "plan",
                    "think": "think",
                    "finish": "finish"
                }
            )

        else:
            # Plain ReAct loop.
            self._add_react_nodes(workflow)
            workflow.set_entry_point("think")
            self._add_react_edges(workflow)

        self._add_terminal_edges(workflow)

        # A MemorySaver checkpointer could be attached here instead:
        # return workflow.compile(checkpointer=MemorySaver())
        return workflow.compile(store=self.memory_store)

    def _add_react_nodes(self, workflow: StateGraph) -> None:
        """Register the think/act/observe/finish (and optional reflect) nodes."""
        workflow.add_node("think", self._think_node)
        workflow.add_node("act", self._act_node)
        workflow.add_node("observe", self._observe_node)
        workflow.add_node("finish", self._finish_node)
        if self.enable_reflection:
            workflow.add_node("reflect", self._reflect_node)

    def _add_plan_nodes(self, workflow: StateGraph) -> None:
        """Register the plan/execute/evaluate nodes shared by planning modes."""
        workflow.add_node("plan", self._plan_node)
        workflow.add_node("execute", self._execute_node)
        workflow.add_node("evaluate_execution", self._evaluate_execution_node)

    def _add_react_edges(self, workflow: StateGraph) -> None:
        """Wire the think -> act -> observe -> think cycle."""
        workflow.add_conditional_edges(
            "think",
            self._should_continue_after_think,
            {
                "act": "act",
                "finish": "finish"
            }
        )
        workflow.add_edge("act", "observe")
        workflow.add_edge("observe", "think")

    def _add_plan_edges(self, workflow: StateGraph) -> None:
        """Wire the linear plan -> execute -> evaluate pipeline."""
        workflow.add_edge("plan", "execute")
        workflow.add_edge("execute", "evaluate_execution")

    def _add_terminal_edges(self, workflow: StateGraph) -> None:
        """Route finish to reflect (when enabled) and then to END."""
        if self.enable_reflection:
            workflow.add_edge("finish", "reflect")
            workflow.add_edge("reflect", END)
        else:
            workflow.add_edge("finish", END)

    async def run(
            self, session_id: str = None, query: str=None,
            interrupt_feedback: str = None, history: List[Any] = None, max_steps: int = None
    ):
        """Execute one agent turn for ``query`` and return a response dict.

        Args:
            session_id: conversation id; a fresh UUID is generated when empty.
            query: the user's question.
            interrupt_feedback: when set, resumes a previously interrupted
                graph run instead of starting from a fresh initial state.
            history: prior messages — currently unused in this method;
                TODO confirm whether it should be threaded into the state.
            max_steps: cap on think/act iterations; defaults to
                Config.MAX_ITERATIONS.

        Returns:
            Dict with input/output/steps/success/error/metadata keys; on
            failure ``success`` is False and ``error`` holds the reason.
        """
        if max_steps is None:
            max_steps = Config.MAX_ITERATIONS
        if session_id is None or session_id == "":
            session_id = str(uuid.uuid4())
        self.context_manager.start_session(session_id, query)

        # Lazily load per-user tools the first time a session is seen.
        if session_id not in self.tool_manager.user_tools:
            await self.tool_manager.load_session_tools(session_id)

        initial_state = create_initial_state(query, max_steps)
        initial_state["session_id"] = session_id
        initial_state["mode"] = self.mode

        # Resuming from a human interrupt: replace the initial state with a
        # Command so LangGraph continues the interrupted run.
        if interrupt_feedback:
            initial_state = Command(resume=interrupt_feedback)

        config = {
            "configurable": {"thread_id": f"react_agent_{session_id}"},
            "recursion_limit": 100
        }
        try:
            start_time = time.time()
            final_state = await self.graph.ainvoke(initial_state, config)
            execution_time = time.time() - start_time

            if self.verbose:
                print(f"Debug: 最终状态：{final_state}")

            # Plans are pydantic models; serialise for the JSON-able response.
            current_plan = final_state.get("current_plan") if isinstance(final_state, dict) else None
            if isinstance(current_plan, Plan):
                current_plan = current_plan.model_dump_json()

            # The isinstance guards below defend against the graph returning
            # something other than a state dict (e.g. an interrupt payload).
            response = {
                "input": query,
                "output": final_state.get("output", "No output generated") \
                    if isinstance(final_state, dict) else "No output generated",
                "steps": self._format_steps(final_state),
                "success": not final_state.get("has_error", False) \
                    if isinstance(final_state, dict) else False,
                "error": final_state.get("error_message") \
                    if isinstance(final_state, dict) else f"Invalid state type: {type(final_state)}",
                "metadata": {
                    **(final_state.get("metadata", {}) if isinstance(final_state, dict) else {}),
                    "mode": final_state.get("mode") if isinstance(final_state, dict) else self.mode,
                    "session_id": final_state.get("session_id") \
                        if isinstance(final_state, dict) else session_id,
                    "chosen_approach": final_state.get("chosen_approach") \
                        if isinstance(final_state, dict) else None,
                    "current_plan": current_plan,
                    "execution_time": f'{execution_time:.2f}s'
                }
            }

        except Exception as e:
            logger.error(traceback.format_exc())
            response = {
                "input": query,
                "output": None,
                "steps": [],
                "success": False,
                "error": f"Agent execution failed: {str(e)}",
                "metadata": {"mode": self.mode, "session_id": session_id}
            }

        # Always tear the session down, even on failure.
        await self.context_manager.end_session()
        self.llm_manager.cleanup_session(session_id)
        return response

    async def _think_node(self, state: AgentState) -> AgentState:
        """Think node — ask the LLM for the next thought and optional action.

        The LLM reply is expected to be (repairable) JSON; an "action" key
        names a tool and "action_input" carries its arguments. The thought is
        appended to state["thoughts"], recorded in the context manager, and
        any chosen action is queued in state["actions"] for the act node.
        """
        if self.verbose:
            print(f"\n🤔 Step {state['current_step'] + 1}: 思考中...")

        try:
            prompt = await self._create_thinking_prompt(state)
            messages = [
                HumanMessage(content=prompt)
            ]

            response = await self.llm_manager.ainvoke(messages, state.get('session_id'))
            thought_content = response.content

            if self.verbose:
                print("=" * 80)
                print(f"🔍 想法: ", thought_content)

            # Parse the thought and extract a planned action, if any.
            # repair_json tolerates malformed LLM output; a non-dict result
            # means "no action".
            thought_dict = repair_json(thought_content, return_objects=True)
            action, action_input = None, {}
            if isinstance(thought_dict, dict):
                action = thought_dict.get("action", None)
                if action:
                   # Keep only the first tool when the LLM listed several.
                   action = re.split("[,;]", str(action).strip())[0]
                action_input = thought_dict.get('action_input', None)
                if not isinstance(action_input, dict):
                    action_input = {}

            # Re-serialise the (repaired) thought for storage.
            thought_content = json.dumps(thought_dict, ensure_ascii=False)

            state["thoughts"].append(thought_content)
            state["current_step"] += 1

            # Record the reasoning step in the context manager.
            reasoning_step = ReasoningStep(
                step_number=state["current_step"],
                thought=thought_content,
                planned_action=action,
                action_input=action_input,
                confidence=0.7  # fixed placeholder confidence — TODO confirm
            )
            await self.context_manager.add_reasoning_step(reasoning_step)

            # If an action was chosen, queue it for the act node.
            if action:
                state["actions"].append({
                    "name": action,
                    "input": action_input,
                    "step": state["current_step"]
                })

            return state

        except Exception as e:
            logger.error(traceback.format_exc())
            state["has_error"] = True
            state["error_message"] = f"思考失败: {str(e)}"
            return state

    async def _human_feedback_node(self, state: AgentState) -> AgentState:
        """Pause the graph and wait for a human reply (human-in-the-loop).

        NOTE(review): ``interrupt`` suspends execution until the graph is
        resumed; the resume payload is expected to be a dict carrying the new
        state under "data" — confirm against the caller supplying
        ``Command(resume=...)``.
        """
        resume_payload = interrupt({"output": state["output"]})
        return resume_payload["data"]

    async def _act_node(self, state: AgentState) -> AgentState:
        """Act node — execute the most recently queued tool action.

        Runs the tool, post-processes web-search output, compresses overly
        long results, appends the outcome to state["tool_results"], and stores
        both the result and the invocation context for later reuse.
        """
        if not state["actions"]:
            return state

        current_action = state["actions"][-1]
        action_name = current_action["name"]
        action_input = current_action["input"]
        session_id = state.get("session_id")
        try:
            # TODO: If a tool's output were deterministic for a given input we
            #  could answer from remembered results; tools such as user lookup
            #  or web search return live data, so result recall is disabled.
            if self.verbose:
                print(f"🔧 执行工具: {action_name}，工具输入: {action_input}")

            result = await self.tool_manager.execute_tool(action_name, action_input)

            # TODO: introduce RAG-based content retrieval.

            # Web-search output gets an extra LLM refinement pass.
            if action_name == "web_search":
                result = await self._refine_websearch_result(session_id, action_input, result)

            # Compress overly long tool output before it enters the state.
            if len(str(result.data)) > 1024:
                result = await self._compress_result(session_id, action_input, result)

            if self.verbose:
                print(f"📤 工具调用结果: {result.model_dump_json()[:200]} ...")

            state["tool_results"].append({
                "success": True,
                "tool": action_name,
                "input": action_input,
                "result": result.model_dump(mode='json'),
                "step": state["current_step"]
            })

            # Persist important results for cross-call reuse in this session.
            await self._store_result_in_context(action_name, action_input, result, state)

            # Record the tool invocation context.
            # NOTE(review): execution_time is hard-coded to 0.0 — confirm
            # whether real timing should be captured here.
            tool_context = ToolContext(
                tool_name=action_name,
                input_data=action_input,
                output_data=result.data if result.success else None,
                success=result.success,
                error_message=result.error if not result.success else None,
                execution_time=0.0,
                metadata=result.metadata if hasattr(result, 'metadata') else {}
            )
            await self.context_manager.add_tool_context(tool_context)

            return state

        except Exception as e:
            logger.error(traceback.format_exc())
            state["has_error"] = True
            state["error_message"] = f"动作执行失败: {str(e)}"
            return state

    async def _compress_result(self, session_id: str, query: str, result: ToolResult) -> ToolResult:
        """Summarise an over-long tool result with the LLM.

        Args:
            session_id: LLM session used for the summarisation call.
            query: the tool input that produced the result (used as context;
                callers currently pass the action-input dict — TODO confirm
                whether the annotation should be Dict rather than str).
            result: the tool result whose ``data`` field is too long.

        Returns:
            The same ``result`` object with ``data`` replaced by the summary.
        """
        if isinstance(result.data, list):
            contents = [str(item) for item in result.data]
        else:
            contents = str(result.data)
        compress_prompt = compress_prompt_template.format(
            inputs={
                'query': query,
                'contents': contents,
                'results_limit': 1024,
            }
        )
        messages = [
            HumanMessage(content=compress_prompt)
        ]
        response = await self.llm_manager.ainvoke(messages, session_id=session_id)
        # Bug fix: store the message text, not the AIMessage object itself —
        # every other call site uses ``response.content``, and downstream code
        # str()'s and serialises result.data when building observations.
        result.data = response.content
        return result

    async def _observe_node(self, state: AgentState) -> AgentState:
        """观察节点进程工具的结果，并创建观察记录。"""

        if not state["tool_results"]:
            return state

        current_result = state["tool_results"][-1]
        tool_result = current_result["result"]

        if tool_result["success"]:
            observation = f"工具 {current_result['tool']} 调用成功. Result:{tool_result['data']}"
        else:
            observation = f"工具 {current_result['tool']} 调用失败. 错误: {tool_result['error']}"

        state["observations"].append(observation)

        if self.verbose:
            print(f"👁️ 观察结果: {str(observation)[:100]}...")

        return state

    async def _finish_node(self, state: AgentState) -> AgentState:
        """Finish node — generate the final answer and persist memories.

        Generates the answer from the accumulated trajectory, stores a success
        episode, extracts session facts, then compresses the recent
        conversation history. Memory failures are logged but never fail the
        node; only answer generation errors mark the state as errored.
        """
        try:
            # # Early-exit variant (disabled): reuse an answer already set
            # # during the think phase.
            # if state.get("output") and state.get("is_complete"):
            #     if self.verbose:
            #         print(f"🎯 最终回答: {state['output']}")
            #     return state

            # Prompt for final-answer generation.
            prompt = await self._create_final_answer_prompt(state)
            messages = [
                HumanMessage(content=prompt)
            ]
            response = await self.llm_manager.ainvoke(messages, state.get('session_id'))
            final_answer = response.content

            # Strip an optional "Final Answer:" prefix emitted by the LLM.
            answer_match = re.search(r'Final Answer:\s*(.+)', final_answer, re.IGNORECASE | re.DOTALL)
            if answer_match:
                final_answer = answer_match.group(1).strip()

            state["output"] = final_answer
            state["is_complete"] = True

            if self.verbose:
                print(f"🎯 最终回答: {final_answer}")

            # Store the episode for episodic memory (successful runs only).
            if not state.get("has_error", False):
                await self._store_success_episode(state)

            # Extract and remember session-level facts from the exchange.
            messages.append(
                AIMessage(content=final_answer)
            )
            try:
                await self._store_fact_memory(state['session_id'], state['input'], messages)
            except Exception as e:
                logger.error(traceback.format_exc())
                if self.verbose:
                    print(f"存储事实记忆失败: {e}")

        except Exception as e:
            logger.error(traceback.format_exc())
            state["has_error"] = True
            state["error_message"] = f"最终回答生成失败: {str(e)}"

        # TODO: compression of earlier Q&A conversation history.
        try:
            await self._zip_conversation(state)
        except Exception as e:
            logger.error(traceback.format_exc())
            if self.verbose:
                print(f"对话信息压缩失败: {e}")
        return state

    async def _zip_conversation(self, state: AgentState):
        """Persist the latest exchange and refresh the session summary.

        Keeps the 20 most recent exchanges (newest first) in short-term
        memory, then asks the LLM to summarise roughly the first 5000
        characters of them and stores the summary in the session's SUMMARY
        namespace.
        """

        # Last 20 exchanges, stored newest-first.
        conversation_memory = await self.memory_store.aget(
            (MemoryType.SHORT_TERM.value, state.get('session_id')),
            key='conversations'
        )
        if conversation_memory is not None:
            conversations = json.loads(conversation_memory.value['conversations'])
        else:
            conversations = []
        conversations.insert(0,
            {
                "user_query": state["input"],
                "ai_response": state["output"]
            }
        )
        conversations = conversations[:20]
        await self.memory_store.aput(
            (MemoryType.SHORT_TERM.value, state.get('session_id')),
            key='conversations',
            value={
                'conversations': json.dumps(conversations, ensure_ascii=False)
            }
        )

        # Summarise the recent conversation within a character budget
        # (budget measured on the JSON-serialised exchanges).
        word_threshold = 5000
        word_count = 0
        conversations_for_zip = []
        for conversation in conversations:
            word_count += len(json.dumps(conversation, ensure_ascii=False))
            if word_count >= word_threshold:
                break
            conversations_for_zip.append(conversation)

        # Reverse back to chronological order for the prompt.
        zip_prompt = zip_prompt_template.format(
            inputs={
                'conversations': conversations_for_zip[::-1]
            },
            remove_template_variables=True
        )
        response = await self.llm_manager.ainvoke([HumanMessage(content=zip_prompt)], state.get('session_id'))

        await self.memory_store.aput(
            (MemoryType.SUMMARY.value, state.get('session_id')),
            key='summary',
            value={
                'summary': response.content
            }
        )

    async def _store_fact_memory(self, session_id: str, query: str, messages: List):
        """Extract facts from the finished exchange and persist them.

        Retrieves up to five semantically similar stored facts, asks the LLM
        to reconcile them with the new conversation, then applies every
        returned fact operation concurrently via ``_store_semantic``.
        """
        found = await self.memory_store.asearch(
            (MemoryType.SEMANTIC.value, session_id),
            query=query,
            limit=5
        )
        prior_facts = [item.value['semantic'] for item in (found or []) if item is not None]

        prompt = semantic_prompt_template.format(
            inputs={
                'similar_semantics': prior_facts,
                'messages': str(messages),
            },
            remove_template_variables=True
        )
        response = await self.llm_manager.ainvoke([HumanMessage(content=prompt)], session_id)

        parsed = repair_json(response.content, return_objects=True)
        if not isinstance(parsed, dict):
            return
        facts = parsed.get('facts')
        if not isinstance(facts, list):
            return

        valid_facts = [fact for fact in facts if isinstance(fact, dict)]
        if not valid_facts:
            return
        await asyncio.gather(
            *(self._store_semantic(session_id, query, fact) for fact in valid_facts)
        )

    async def _store_semantic(self, session_id: str, query: str, semantic: Dict):
        """Apply a single fact operation ('新增'/'修改'/'删除') to the store.

        Args:
            session_id: namespace component for the semantic memory.
            query: original user query, stored (and vector-indexed) alongside
                the fact for later similarity search.
            semantic: fact dict produced by the LLM; the ``operation`` and
                rationale fields are stripped before persisting.
        """
        fact_id = semantic.get('fact_id', '')
        operation = semantic.pop('operation', None)
        if operation == '新增':
            # New facts always get a fresh id, regardless of what the LLM sent.
            fact_id = str(uuid.uuid4())
            semantic['fact_id'] = fact_id
        semantic.pop('操作理由说明', None)

        if operation in ['新增', '修改']:
            await self.memory_store.aput(
                (MemoryType.SEMANTIC.value, session_id),
                key=str(fact_id),
                value={
                    'query': query,
                    'semantic': json.dumps(semantic, ensure_ascii=False)
                },
                index=['query']
            )
        elif operation == '删除':
            # Bug fix: use the async delete — the synchronous ``delete`` call
            # was never awaited, blocks the event loop, and is inconsistent
            # with the ``aput`` used above.
            await self.memory_store.adelete(
                (MemoryType.SEMANTIC.value, session_id),
                key=str(fact_id)
            )
        # Any other operation value is deliberately ignored.

    async def _store_success_episode(self, state: AgentState):
        """Store the completed run as an Episode in episodic memory.

        The episode records the query, answer, formatted reasoning steps and
        the set of tools used, vector-indexed by the query.
        NOTE(review): duration/execution_time are hard-coded to 0.0 and
        importance to 0.7 — confirm whether real values should be threaded in.
        """
        steps = self._format_steps(state)
        success = not state.get("has_error", False)

        # Plans are pydantic models; serialise before embedding in metadata.
        current_plan = state.get("current_plan") if isinstance(state, dict) else None
        if isinstance(current_plan, Plan):
            current_plan = current_plan.model_dump_json()

        episode = Episode(
            id=state.get("session_id"),
            query=state.get("input"),
            response=state.get("output", "无"),
            reasoning_steps=steps,
            tools_used=list(set(step.get("action", "") for step in steps if step.get("action"))),
            success=success,
            duration=0.0,
            timestamp=time.time(),
            importance=0.7,
            metadata={
                **(state.get("metadata", {}) if isinstance(state, dict) else {}),
                "mode": state.get("mode") if isinstance(state, dict) else self.mode,
                "session_id": state.get("session_id"),
                "chosen_approach": state.get("chosen_approach") \
                    if isinstance(state, dict) else None,
                "current_plan": current_plan,
                "execution_time": f'{0.0:.2f}s'
            }
        )
        await self.memory_store.aput(
            (MemoryType.EPISODIC.value, state.get('session_id')),
            key=str(uuid.uuid4()),
            value={
                'query': state.get("input"),
                'episode': episode.model_dump_json()
            },
            index=['query']
        )

    async def _reflect_node(self, state: AgentState) -> AgentState:
        """反思节点 - 进行自我批评与响应优化."""
        if not self.reflection_module:
            if self.verbose:
                print("🔍 未启用反思, 跳过...")
            return state

        if self.verbose:
            print(f"\n🔍 开始反思过程...")

        try:
            state["reflection_enabled"] = True

            # 保存原响应，用于比较
            original_response = state.get("output", "")
            state["original_response"] = original_response

            if not original_response:
                if self.verbose:
                    print("⚠️ 无可供反思的响应输出，跳过反思环节。")
                return state

            reasoning_steps = []
            for i, thought in enumerate(state.get("thoughts", [])):
                reasoning_steps.append({
                    "step": i + 1,
                    "thought": thought,
                    "action": None,
                    "observation": None
                })

            actions = state.get("actions", [])
            observations = state.get("observations", [])

            for i, action in enumerate(actions):
                step_data = {
                    "step": action.get("step", i + 1),
                    "thought": None,
                    "action": action.get("name"),
                    "action_input": action.get("input"),
                    "observation": observations[i] if i < len(observations) else None
                }

                # 找到对应的推理步骤，或创建新的推理步骤。
                step_found = False
                for rs in reasoning_steps:
                    if rs["step"] == step_data["step"]:
                        rs.update({k: v for k, v in step_data.items() if v is not None})
                        step_found = True
                        break

                if not step_found:
                    reasoning_steps.append(step_data)

            # 按步骤编号对推理步骤进行排序
            reasoning_steps.sort(key=lambda x: x["step"])

            # 进行反思优化
            refined_response, reflection_metadata = await self.reflection_module.reflect_and_refine(
                state, original_response, reasoning_steps
            )

            state["output"] = refined_response
            state["reflection_iterations"] = reflection_metadata["reflection_iterations"]
            state["reflection_history"] = reflection_metadata["reflection_history"]
            state["final_quality_score"] = reflection_metadata["final_quality_score"]
            state["reflection_improvements"] = reflection_metadata["total_improvements"]

            if "metadata" not in state:
                state["metadata"] = {}
            state["metadata"]["reflection"] = reflection_metadata

            if self.verbose:
                print(f"🎉 完成反思!")
                print(f"📊 回复质量评分: {reflection_metadata['final_quality_score']:.2f}")
                print(f"🔧 改进点: {len(reflection_metadata['total_improvements'])}")
                if reflection_metadata['total_improvements']:
                    for improvement in reflection_metadata['total_improvements']:
                        print(f"  • {improvement}")

            return state

        except Exception as e:
            logger.error(traceback.format_exc())
            if self.verbose:
                print(f"❌ 反思失败: {str(e)}")

            if "metadata" not in state:
                state["metadata"] = {}
            state["metadata"]["reflection_error"] = str(e)

            return state

    def _should_continue_after_think(self, state: AgentState) -> str:
        """决定是继续采取行动还是就此结束。."""
        if state["current_step"] >= state["max_steps"]:
            return "finish"

        if state["has_error"]:
            return "finish"

        # 检查是否还存在需执行的操作
        if state["actions"] and len(state["actions"]) > len(state["observations"]):
            return "act"

        # 检查最后一个思路是否表明我们应当结束。
        if state["thoughts"]:
            last_thought = state["thoughts"][-1]
            state["output"] = last_thought
            return "finish"

        return "finish"

    async def _store_result_in_context(self, action_name: str, action_input: Dict, result: Any, state: AgentState):
        """Store important tool results as shared context variables.

        Best-effort: any failure is logged and swallowed so a memory problem
        never breaks the act node. Only calculator, database and search tools
        are persisted; everything else is ignored.
        """
        try:
            if result.success:
                # Calculator: remember the latest expression/result pair plus a
                # timestamped history entry.
                if action_name == "calculator" and result.data:
                    calculation_result = result.data
                    expression = action_input.get("expression")
                    if calculation_result is not None:
                        self.context_manager.set_shared_variable(
                            "last_calculation_result",
                            calculation_result,
                            source_tool="calculator"
                        )
                        self.context_manager.set_shared_variable(
                            "last_calculation_expression",
                            expression,
                            source_tool="calculator"
                        )
                        # Also store with a timestamped key for history
                        timestamp_key = f"calculation_{int(time.time())}"
                        self.context_manager.set_shared_variable(
                            timestamp_key,
                            {"expression": expression, "result": calculation_result},
                            source_tool="calculator"
                        )

                # Store database results that might be important
                elif action_name == "database" and result.data:
                    if "get" in str(action_input).lower():
                        # Store retrieved data under a key parsed from the input
                        key_match = re.search(r'get\s+(\w+)', str(action_input).lower())
                        if key_match:
                            key = key_match.group(1)
                            self.context_manager.set_shared_variable(
                                f"db_retrieved_{key}",
                                result.data,
                                source_tool="database"
                            )
                    elif "set" in str(action_input).lower():
                        # Store confirmation of data storage
                        self.context_manager.set_shared_variable(
                            "last_db_operation",
                            {"operation": "set", "input": action_input, "result": result.data},
                            source_tool="database"
                        )

                # Store a truncated web/wikipedia search results summary
                elif action_name in ["web_search", "wikipedia"] and result.data:
                    search_summary = str(result.data)[:200] + "..." if len(str(result.data)) > 200 else str(result.data)
                    self.context_manager.set_shared_variable(
                        f"last_{action_name}_result",
                        search_summary,
                        source_tool=action_name
                    )
        except Exception as e:
            logger.error(traceback.format_exc())
            if self.verbose:
                print(f"⚠️ 警告！上下文结果存储失败: {str(e)}")

    async def _create_thinking_prompt(self, state: AgentState) -> str:
        """Build the prompt for the next think step from memory and history."""

        # Relevant material recalled from the memory stores.
        memory_context = await self._get_relevant_memory_context(state)

        # Replay the completed Thought -> Action -> Observation cycles.
        past_process = []
        actions = state["actions"]
        observations = state["observations"]
        for idx, thought in enumerate(state["thoughts"]):
            action = actions[idx] if idx < len(actions) else None
            observation = observations[idx] if idx < len(observations) else None
            # Only fully-completed cycles (both action and observation) count.
            if not (action and observation):
                continue
            past_process.extend([
                f"Thought: {thought}",
                f"Action: {action['name']}",
                f"Action Input: {action['input']}",
                f"Observation: {observation}",
            ])

        return think_prompt_template.format(
            inputs={
                'system_time': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                'tools_names': self.tool_manager.get_tool_names(state['session_id']),
                'tools_description': self.tool_manager.format_tools_for_prompt(state['session_id']),
                'user_input': state['input'],
                'memory_context': memory_context,
                'past_process': past_process,
            },
            remove_template_variables=True
        )

    async def _get_relevant_memory_context(self, state: AgentState) -> str:
        """Collect memory context relevant to the current query.

        Aggregates, best-effort, four session-scoped sources: episodic
        memories similar to the query, semantic (factual) memories, the
        rolling conversation summary, and recent conversation details. Each
        source is fetched inside its own try/except so one failing store does
        not lose the others.

        Returns:
            Newline-joined context text, or "" when nothing was retrieved.
        """
        context_parts = []

        try:
            # Shared variables for the current session (debug visibility only).
            shared_vars = self.context_manager.get_all_shared_variables()
            if self.verbose:
                print(f"🔍 Debug: 共享变量: {shared_vars}")

            # Episodic memory: similar past interactions.
            try:
                similar_memories = await self.memory_store.asearch(
                    (MemoryType.EPISODIC.value, state.get('session_id')),
                    query=state['input'],
                    limit=5
                )
                similar_memories = similar_memories or []
                # Guard memory.value as well, consistent with the semantic /
                # summary lookups below: a hit with a None value is skipped.
                similar_episodes = [
                    Episode(**json.loads(memory.value['episode']))
                    for memory in similar_memories
                    if memory is not None and memory.value is not None
                ]
                # The keyword test depends only on the query, so evaluate it
                # once instead of per episode (it was loop-invariant).
                calc_keywords = ['calculate', 'calculation', 'previous', 'result', 'math',
                                 '计算', '运算', '先前', '之前', '前面', '结果', '数学']
                query_lower = state['input'].lower()
                is_calc_query = any(keyword in query_lower for keyword in calc_keywords)
                for episode in similar_episodes:
                    if is_calc_query:
                        context_parts.append(f"  Previous Query: {episode.query}")
                        context_parts.append(f"  Previous Result: {episode.response}")
                        context_parts.append(f"  Tools used: {', '.join(episode.tools_used)}")
                        if 'calculator' in episode.tools_used:
                            # Surface the first recorded calculator invocation.
                            for step in episode.reasoning_steps:
                                if isinstance(step, dict) and step.get('action') == 'calculator':
                                    context_parts.append(
                                        f"  Calculation: {step.get('input', 'N/A')} = {episode.response}")
                                    break
                        context_parts.append("")
                    else:
                        context_parts.append(f"  Query: {episode.query}")
                        context_parts.append(f"  Response: {episode.response}")
                        context_parts.append(f"  Tools used: {', '.join(episode.tools_used)}")
                        context_parts.append("")
            except Exception as e:
                logger.error(traceback.format_exc())
                if self.verbose:
                    print(f"⚠️ 警告：未能获取情景记忆: {str(e)}")

            # Semantic (factual) memories.
            try:
                relevant_memories = await self.memory_store.asearch(
                    (MemoryType.SEMANTIC.value, state.get('session_id')),
                    query=state['input'],
                    limit=5,
                )
                relevant_memories = relevant_memories or []
                if len(relevant_memories) > 0:
                    context_parts.append("\n相关事实性记忆（利用这些信息辅助回答当前查询）:")
                    for memory in relevant_memories:
                        if memory.value is not None:
                            context_parts.append(f"- {memory.value['semantic']}")

            except Exception as e:
                logger.error(traceback.format_exc())
                if self.verbose:
                    print(f"⚠️ 警告：未能获取事实记忆: {str(e)}")

            # Rolling summary of recent memory.
            try:
                zipped_memory = await self.memory_store.aget(
                    (MemoryType.SUMMARY.value, state.get('session_id')),
                    key='summary'
                )
                if zipped_memory is not None:
                    context_parts.append("\n近期记忆摘要:")
                    context_parts.append(zipped_memory.value['summary'])

            except Exception as e:
                logger.error(traceback.format_exc())
                if self.verbose:
                    print(f"⚠️ 警告：获取近期记忆摘要: {str(e)}")

            # Recent conversation details, capped by a character budget.
            try:
                recent_memory = await self.memory_store.aget(
                    (MemoryType.SHORT_TERM.value, state.get('session_id')),
                    key='conversations'
                )
                if recent_memory is not None:
                    recent_conversations = json.loads(recent_memory.value['conversations'])
                    context_parts.append("\n近期会话记录详情:")

                    # Budget counts serialized-JSON characters, not words.
                    word_threshold = 1000
                    word_count = 0
                    limited_conversations = []
                    for conversation in recent_conversations:
                        word_count += len(json.dumps(conversation, ensure_ascii=False))
                        if word_count > word_threshold:
                            break
                        limited_conversations.append(conversation)

                    # Reversed before rendering — presumably the stored list is
                    # newest-first; TODO confirm against the writer side.
                    for conversation in limited_conversations[::-1]:
                        context_parts.append(f"- {json.dumps(conversation, ensure_ascii=False)}")

            except Exception as e:
                logger.error(traceback.format_exc())
                if self.verbose:
                    print(f"⚠️ 警告：获取近期记忆详情: {str(e)}")

        except Exception as e:
            logger.error(traceback.format_exc())
            if self.verbose:
                print(f"⚠️ 警告：未能获取记忆上下文: {str(e)}")

        return "\n".join(context_parts) if context_parts else ""

    async def _create_final_answer_prompt(self, state: AgentState) -> str:
        """Build the prompt used to generate the final answer.

        Combines the plan-execution trace (if any), the ReAct
        thought/action/observation history, semantic memories, the rolling
        summary, and recent conversation details into the final prompt
        template. Memory lookups are best-effort: each is wrapped in its own
        try/except and failures are logged, not raised.
        """

        # Full interaction history: first the plan-execution trace, if present.
        past_process = []
        if state["execution_result"]:
            current_plan = state["current_plan"]
            # Map step ids to plan steps once (was a manual loop).
            id2step = {step.id: step for step in current_plan.steps}

            for step_result in state["execution_result"].step_results:
                step = id2step[step_result.step_id]
                past_process.append(f"Action: {step.description}")
                past_process.append(f"Action Input: {step.input_template}")
                past_process.append(f"Observation: {step_result.output}")

        # Then the ReAct loop. Pad actions/observations with None so zip
        # always covers every recorded thought (pad built once, not twice).
        pad = [None] * len(state["thoughts"])
        for thought, action, observation in zip(
                state["thoughts"],
                state["actions"] + pad,
                state["observations"] + pad
        ):
            past_process.append(f"Thought: {thought}")
            if action:
                past_process.append(f"Action: {action['name']}")
                past_process.append(f"Action Input: {action['input']}")
            if observation:
                past_process.append(f"Observation: {observation}")

        # Semantic (factual) memories.
        context_parts = []
        try:
            relevant_memories = await self.memory_store.asearch(
                (MemoryType.SEMANTIC.value, state.get('session_id')),
                query=state['input'],
                limit=10,
            )
            relevant_memories = relevant_memories or []
            for memory in relevant_memories:
                if memory.value is not None:
                    context_parts.append(f"- {memory.value['semantic']}")

        except Exception as e:
            logger.error(traceback.format_exc())
            if self.verbose:
                print(f"⚠️ 警告：未能获取事实记忆: {str(e)}")

        # Rolling summary of recent memory.
        summarize_context_parts = []
        try:
            zipped_memory = await self.memory_store.aget(
                (MemoryType.SUMMARY.value, state.get('session_id')),
                key='summary'
            )
            if zipped_memory is not None:
                summarize_context_parts.append(zipped_memory.value['summary'])

        except Exception as e:
            logger.error(traceback.format_exc())
            if self.verbose:
                print(f"⚠️ 警告：获取近期记忆摘要: {str(e)}")

        # Recent conversation details, capped by a character budget.
        conversation_context_parts = []
        try:
            recent_memory = await self.memory_store.aget(
                (MemoryType.SHORT_TERM.value, state.get('session_id')),
                key='conversations'
            )
            if recent_memory is not None:
                recent_conversations = json.loads(recent_memory.value['conversations'])

                # Budget counts serialized-JSON characters, not words.
                word_threshold = 1000
                word_count = 0
                limited_conversations = []
                for conversation in recent_conversations:
                    word_count += len(json.dumps(conversation, ensure_ascii=False))
                    if word_count > word_threshold:
                        break
                    limited_conversations.append(conversation)

                # Reversed before rendering — presumably the stored list is
                # newest-first; TODO confirm against the writer side.
                for conversation in limited_conversations[::-1]:
                    conversation_context_parts.append(f"- {json.dumps(conversation, ensure_ascii=False)}")

        except Exception as e:
            logger.error(traceback.format_exc())
            if self.verbose:
                print(f"⚠️ 警告：获取近期记忆详情: {str(e)}")


        return final_prompt_template.format(
            inputs = {
                'system_time': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                'question': state['input'],
                'semantics': '\n'.join(context_parts),
                'summary': '\n'.join(summarize_context_parts),
                'conversations': '\n'.join(conversation_context_parts),
                'past_process': past_process,
            },
            remove_template_variables=True
        )

    def _format_steps(self, state: AgentState) -> List[Dict[str, Any]]:
        """将推理步骤整理成便于输出的格式"""
        steps = []

        # 处理状态可能并非有效字典的情况
        if not isinstance(state, dict):
            print(f"Warning: Expected dict for state, got {type(state)}: {state}")
            return []

        # 使用默认值安全获取所需字段
        thoughts = state.get("thoughts", [])
        actions = state.get("actions", [])
        observations = state.get("observations", [])
        for i, thought in enumerate(thoughts):
            step = {
                "step": i + 1,
                "thought": thought,
                "action": None,
                "action_input": None,
                "observation": None
            }

            if i < len(actions):
                action = actions[i]
                if isinstance(action, dict):
                    step["action"] = action.get("name")
                    step["action_input"] = action.get("input")
                else:
                    step["action"] = str(action)

            if i < len(observations):
                step["observation"] = observations[i]

            steps.append(step)

        return steps

    async def _decide_approach_node(self, state: AgentState) -> AgentState:
        """Decide whether to answer with Simple, ReAct, or Plan-Execute.

        Similar past episodes are retrieved to ground the decision, the LLM
        is asked to choose, and the result is written to
        ``state["chosen_approach"]``. Any failure falls back to "react".
        """
        if self.verbose:
            print(f"\n🤔 确定处理方式: {state['input']}")

        try:
            # Retrieve similar past episodes to inform the decision.
            similar_memories = await self.memory_store.asearch(
                (MemoryType.EPISODIC.value, state.get('session_id')),
                query = state['input'],
                limit=3
            )
            similar_memories = similar_memories or []
            # Fix: also guard memory.value (consistent with the semantic /
            # summary lookups elsewhere) — a hit with a None value would have
            # raised inside the comprehension.
            similar_episodes = [
                Episode(**json.loads(memory.value['episode']))
                for memory in similar_memories
                if memory is not None and memory.value is not None
            ]

            decision_prompt = await self._create_decision_prompt(state, similar_episodes)
            messages = [
                HumanMessage(content=decision_prompt)
            ]

            response = await self.llm_manager.ainvoke(messages, state.get('session_id'))
            decision_text = response.content.lower()

            # Priority: "simple" wins outright; plan-execute requires both
            # keywords; anything else defaults to react.
            if "simple" in decision_text:
                state["chosen_approach"] = "simple"
            elif "plan" in decision_text and "execute" in decision_text:
                state["chosen_approach"] = "plan_execute"
            else:
                state["chosen_approach"] = "react"
            if self.verbose:
                print(f"📋 所选方法：{state['chosen_approach']}")

            return state

        except Exception as e:
            logger.error(traceback.format_exc())
            state["chosen_approach"] = "react"
            if self.verbose:
                print(f"⚠️ 决策失败，默认采用 ReAct 模式: {str(e)}")
            return state

    async def _plan_node(self, state: AgentState) -> AgentState:
        """Planning node: build an execution plan for the current query."""
        if self.verbose:
            print(f"\n📋 规划: {state['input']}")

        try:
            planner_context = await self.context_manager.get_relevant_context("planner", state['input'])

            new_plan = await self.planner.create_plan(
                query=state['input'],
                available_tools=self.tool_manager.format_tools_for_prompt(state['session_id']),
                context=planner_context
            )

            # Record the plan and its bookkeeping metadata on the state.
            state["current_plan"] = new_plan
            state["metadata"]["plan_id"] = new_plan.id
            state["metadata"]["plan_confidence"] = new_plan.confidence

            if self.verbose:
                print(f"📝 创建了一份包含 {len(new_plan.steps)} 步的计划 (可执行度: {new_plan.confidence:.2f})")

            return state

        except Exception as e:
            logger.error(traceback.format_exc())
            state["has_error"] = True
            state["error_message"] = f"Planning failed: {str(e)}"
            return state

    async def _execute_node(self, state: AgentState) -> AgentState:
        """Execution node: run the current plan and record the outcome."""
        if self.verbose:
            print(f"\n⚡ 执行计划...")

        try:
            plan = state.get("current_plan")
            if not plan:
                state["has_error"] = True
                state["error_message"] = "没有可执行的计划."
                return state

            # Run the plan through the executor.
            execution_result = await self.executor.execute_plan(state.get("session_id"), plan)

            # Persist the result and its metrics on the state.
            state["execution_result"] = execution_result
            state["metadata"]["execution_success_rate"] = execution_result.success_rate
            state["metadata"]["execution_time"] = execution_result.total_time

            if execution_result.status == ExecutionStatus.COMPLETED:
                # Completed: surface the final output and mark the run done.
                state["output"] = execution_result.final_output
                state["is_complete"] = True
                if self.verbose:
                    print(f"✅ 计划执行成功 (完成率: {execution_result.success_rate:.2f})")
            else:
                # Anything else is treated as a plan failure for routing.
                state["plan_failed"] = True
                if self.verbose:
                    print(f"⚠️ 计划执行失败 (完成率: {execution_result.success_rate:.2f})")

            return state

        except Exception as e:
            logger.error(traceback.format_exc())
            state["has_error"] = True
            state["error_message"] = f"计划执行失败: {str(e)}"
            return state

    def _route_after_decision(self, state: AgentState) -> str:
        """决策路由"""
        chosen_approach = state.get("chosen_approach", "simple")
        if chosen_approach == 'simple':
            return "finish"
        elif chosen_approach == "plan_execute":
            return "plan"
        else:
            return "think"

    def _should_continue_after_execute(self, state: AgentState) -> str:
        """计划执行后确定后续行动"""
        if state.get("is_complete", False):
            return "finish"

        if state.get("plan_failed", False) and state["current_step"] < state["max_steps"]:
            return "think"

        return "finish"

    async def _create_decision_prompt(self, state: AgentState, similar_episodes: List[Episode]) -> str:
        """Assemble the prompt that asks the LLM to choose an approach."""
        query = state['input']

        # Phrases (English and Chinese) that hint at multi-step requests.
        complexity_indicators = [
            "multiple steps", "first", "then", "after that", "calculate and",
            "search and", "find and", "compare", "analyze", "complex",
            "多个步骤", "首先", "然后", "之后", "计算并",
            "搜索并", "找到并", "比较", "分析", "复杂",
        ]
        has_complexity = any(hint in query.lower() for hint in complexity_indicators)

        similar_episodes_text = ""
        if similar_episodes:
            episode_lines = ["\n类似的过往事件:"]
            for episode in similar_episodes:
                # Heuristic label: heavy tool usage suggests Plan-Execute.
                approach = "Plan-Execute" if len(episode.tools_used) > 2 else "ReAct"
                episode_lines.append(f"- 查询: '{episode.query}' | 选用方法: {approach} | 成功: {episode.success} | 工具调用次数: {len(episode.tools_used)}")
            similar_episodes_text = "\n".join(episode_lines) + "\n"

        zipped_memory = await self.memory_store.aget(
            (MemoryType.SHORT_TERM.value, state.get('session_id')),
            key='zipped_conversation'
        )
        zipped_conversation = zipped_memory.value['summary'] if zipped_memory is not None else ''

        return decision_prompt_template.format(
            inputs={
                'query': query,
                'has_complexity': has_complexity,
                # NOTE(review): len(query) is a character count, not a word
                # count — presumably a deliberate proxy for mixed-language
                # input; confirm against the prompt template's expectation.
                'word_count': len(query),
                'similar_episodes_text': similar_episodes_text,
                'zipped_conversation': zipped_conversation
            },
            remove_template_variables=True
        )

    async def _evaluate_execution_node(self, state: AgentState) -> AgentState:
        """Evaluate the execution result and decide whether a replan is needed."""
        if self.verbose:
            print(f"\n🔍 评估执行结果...")

        try:
            current_plan = state.get("current_plan")
            execution_result = state.get("execution_result")

            # Nothing to evaluate without both a plan and its result.
            if not current_plan or not execution_result:
                state["evaluation_result"] = "no_plan_or_result"
                return state

            # Build the adaptation context consumed by the replanner
            # (getattr-with-default replaces the hasattr checks).
            adaptation_context = AdaptationContext(
                original_query=state["input"],
                current_plan=current_plan,
                execution_results=getattr(execution_result, 'step_results', []),
                partial_outputs=state.get("partial_outputs", {}),
                failed_attempts=state.get("failed_attempts", []),
                available_tools=self.tool_manager.get_tool_names(state['session_id']),
                # Rough estimate of the remaining time budget.
                time_budget_remaining=max(0, 300 - (state.get("current_step", 0) * 10)),
                success_probability=getattr(execution_result, 'success_rate', 0.5),
                context_variables=state.get("context_variables", {})
            )

            replan_decision = await self.adaptive_replanner.should_replan(
                adaptation_context,
                state.get("session_id")
            )

            # Persist the evaluation outcome for the routing functions.
            state["evaluation_result"] = "replan_needed" if replan_decision.should_replan else "continue"
            state["replan_decision"] = replan_decision
            state["adaptation_context"] = adaptation_context

            if self.verbose:
                print(f"📊 Evaluation: {state['evaluation_result']}")
                if replan_decision.should_replan:
                    print(f"🔄 建议重新规划：{replan_decision.reasoning}")
                    print(f"📈 预期改进：{replan_decision.estimated_improvement:.2f}")

            return state

        except Exception as e:
            logger.error(traceback.format_exc())
            if self.verbose:
                print(f"❌ 评估失败：{str(e)}")
            state["evaluation_result"] = "evaluation_failed"
            state["error_message"] = f"评估失败：{str(e)}"
            return state

    async def _adaptive_replan_node(self, state: AgentState) -> AgentState:
        """Run the adaptive replanner and swap in the new plan on success."""
        if self.verbose:
            print(f"\n🔄 运行自适应重新规划 ...")

        try:
            replan_decision = state.get("replan_decision")
            adaptation_context = state.get("adaptation_context")

            # Both the decision and its context are required to replan.
            if not replan_decision or not adaptation_context:
                state["replan_result"] = "no_decision_or_context"
                return state

            new_plan, replan_record = await self.adaptive_replanner.execute_adaptive_replan(
                replan_decision,
                adaptation_context,
                state.get("session_id")
            )

            state["current_plan"] = new_plan
            state["replan_result"] = "success"
            state["replan_record"] = replan_record

            # Drop the stale execution result so the new plan starts clean.
            state.pop("execution_result", None)

            # Count replanning attempts so we can cap them below.
            replanning_attempts = state.get("replanning_attempts", 0) + 1
            state["replanning_attempts"] = replanning_attempts

            if self.verbose:
                print(f"✅ 重新规划成功: {replan_decision.strategy.value}")
                print(f"🎯 新计划: {new_plan.goal}")
                print(f"📝 步骤: {len(new_plan.steps)}")
                print(f"🔢 重新规划的尝试次数: {replanning_attempts}")

            # Hard cap: after three replans, signal a fallback to ReAct.
            if replanning_attempts >= 3:
                state["replan_result"] = "max_attempts_reached"
                if self.verbose:
                    print(f"⚠️ 已达到最大重新规划尝试次数，切换回 ReAct 方法。")

            return state

        except Exception as e:
            logger.error(traceback.format_exc())
            if self.verbose:
                print(f"❌ 重新计划失败：{str(e)}")
            state["replan_result"] = "failed"
            state["error_message"] = f"重新计划失败：{str(e)}"
            return state

    def _should_finish_after_evaluation(self, state: AgentState) -> str:
        """执行评估后确定下一步（行动 / 步骤）。"""
        evaluation_result = state.get("evaluation_result", "no_result")
        execution_result = state.get("execution_result")

        # 检查执行结果是否足够成功以完成（任务 / 流程）
        if (evaluation_result == "continue" and execution_result and
                hasattr(execution_result, 'success_rate') and execution_result.success_rate >= 0.7):
            return "finish"

        # 若评估失败，或计划已完成但结果不令人满意，则退回到 ReAct 方法
        return "think"

    def _should_replan_after_evaluation(self, state: AgentState) -> str:
        """结果评估之后决定是否重新规划"""
        evaluation_result = state.get("evaluation_result", "no_result")
        execution_result = state.get("execution_result")

        # 检查执行结果是否足够成功以完成（任务 / 流程）
        if (evaluation_result == "continue" and execution_result and
                hasattr(execution_result, 'success_rate') and execution_result.success_rate >= 0.7):
            return "finish"

        if evaluation_result == "replan_needed":
            return "adaptive_replan"

        # 若评估失败，或计划已完成但结果不令人满意，则退回到 ReAct 方法
        return "think"

    def _route_after_replan(self, state: AgentState) -> str:
        """重新规划后的路径执行。"""
        replan_result = state.get("replan_result", "no_result")

        # 若重新规划失败，则退回到 ReAct 方法。
        if replan_result in ["failed", "max_attempts_reached", "no_decision_or_context"]:
            return "think"

        # 如果重新规划成功，则切换为新计划
        if replan_result == "success":
            replan_record = state.get("replan_record", {})

            # 检查该策略是否建议切换到 ReAct 方法。
            if replan_record.get("strategy") == "switch_approach":
                return "think"
            else:
                return "plan"

        return "finish"

    @staticmethod
    def keywords_exist(text: str, keywords: List[str]) -> bool:
        """Return True when *text* contains at least one of *keywords*.

        Delegates to flashtext's KeywordProcessor, which (per its docs)
        extracts whole keywords rather than arbitrary substrings.
        """
        processor = KeywordProcessor()
        processor.add_keywords_from_list(keywords)
        return bool(processor.extract_keywords(text))

    async def _refine_websearch_result(self, session_id: str, query: str, result: ToolResult) -> ToolResult:
        """Refine every web-search hit concurrently and drop unusable ones.

        Robustness fix: gather with ``return_exceptions=True`` so a single
        failing refinement (e.g. one LLM call erroring) no longer aborts the
        whole batch — failed items are logged and dropped instead of
        propagating and losing all refined results.

        Args:
            session_id: Session used for the per-item LLM calls.
            query: The user query the results should be condensed against.
            result: Tool result whose ``data`` is a list of search-hit dicts.

        Returns:
            The same ``result`` object with ``data`` replaced by the refined,
            non-None items.
        """
        if not result.data:
            return result
        refine_tasks = [self._refine_websearch_item(session_id, query, item) for item in result.data]
        outcomes = await asyncio.gather(*refine_tasks, return_exceptions=True)
        refined = []
        for outcome in outcomes:
            if isinstance(outcome, BaseException):
                # Keep the rest of the batch; just record the failure.
                logger.error(f"websearch refine failed: {outcome}")
                continue
            if outcome is not None:
                refined.append(outcome)
        result.data = refined
        return result

    async def _refine_websearch_item(self, session_id: str, query: str, data: Dict) -> Dict | None:
        """Condense one web-search hit (dict with title / href / body fields).

        Returns the dict with its 'body' rewritten by the LLM, or None when
        the original or refined body is too short (< 10 chars) to be useful.
        """
        content = str(data.get('body', ''))
        # Skip bodies too short to contain anything worth refining.
        if len(content) < 10:
            return None

        prompt = refine_websearch_prompt_template.format(
            inputs={
                'query': query,
                'content': content,
            },
            remove_template_variables=True
        )
        response = await self.llm_manager.ainvoke([HumanMessage(content=prompt)], session_id)

        # Discard refinements that came back essentially empty.
        if len(response.content) < 10:
            return None

        data['body'] = response.content
        return data