"""
React智能体
"""
import json
import re
import uuid
import time
import traceback

from langchain.schema import HumanMessage
from langgraph.graph import StateGraph, END
from langgraph.graph.state import CompiledStateGraph
from json_repair import repair_json
from typing import Any, Dict
from loguru import logger

from tools.tool_manager import ToolManager
from agent.react_state import AgentState, create_initial_state
from model import ReasoningStep, ToolContext
from config import Config
from llm.llm_manager import LLMManager
from tools import ToolResult
from memory import MemoryManager
from agent.prompts import (
    think_prompt_template,
    final_prompt_template, compress_prompt_template
)

class ReactAgent:
    """ReAct (Reason + Act) agent driven by a LangGraph state machine.

    The compiled graph loops think -> act -> observe until the LLM emits a
    final answer, an error occurs, or the step budget is exhausted; the
    ``finish`` node then produces the final output.
    """

    def __init__( self,
        tool_manager: ToolManager
    ):
        # Memory backend (episodic/semantic/conversation stores); the
        # concrete store type comes from configuration.
        self.memory_store = MemoryManager(Config.MEMORY_STORE_TYPE)
        self.tool_manager = tool_manager
        self.llm_manager = LLMManager()
        # Compile the state graph once; it is reused across run() calls.
        self.graph = self._create_graph()

    def _create_graph(self) -> CompiledStateGraph:
        """Build and compile the think/act/observe/finish state graph."""
        workflow = StateGraph(AgentState)
        workflow.add_node("think", self._think_node)
        workflow.add_node("act", self._act_node)
        workflow.add_node("observe", self._observe_node)
        workflow.add_node("finish", self._finish_node)

        workflow.set_entry_point("think")
        # After each thought, either execute the planned action or wrap up.
        workflow.add_conditional_edges(
            "think",
            self._should_continue_after_think,
            {
                "act": "act",
                "finish": "finish"
            }
        )
        workflow.add_edge("act", "observe")
        workflow.add_edge("observe", "think")
        workflow.add_edge("finish", END)
        return workflow.compile(store=self.memory_store)

    async def run(
            self, session_id: str = None, query: str = None, max_steps: int = None
    ) -> ToolResult:
        """Run the full ReAct loop for a single user query.

        Args:
            session_id: Conversation identifier; a random UUID is generated
                when omitted so every call belongs to some session.
            query: The user question to answer.
            max_steps: Iteration budget; defaults to ``Config.MAX_ITERATIONS``.

        Returns:
            A ``ToolResult`` whose ``data`` is the final answer text, or an
            error description when the graph raised or flagged an error.
        """
        if not session_id:
            session_id = str(uuid.uuid4())

        # Lazily load per-session tools the first time a session appears.
        if session_id not in self.tool_manager.user_tools:
            await self.tool_manager.load_session_tools(session_id)

        if not max_steps:
            max_steps = Config.MAX_ITERATIONS

        initial_state = create_initial_state(session_id, query, max_steps)
        initial_state["session_id"] = session_id

        config = {
            "configurable": {"thread_id": f"react_agent_{session_id}"},
            "recursion_limit": 100
        }
        try:
            final_state = await self.graph.ainvoke(initial_state, config)
            result = ToolResult(
                success=not final_state.get("has_error", False),
                data=final_state.get("output", "无输出"),
                error=final_state.get("error_message")
            )
        except Exception as e:
            logger.error(traceback.format_exc())
            result = ToolResult(
                success=False,
                data=None,
                error=f"运行失败: {str(e)}"
            )
        return result

    async def _think_node(self, state: AgentState) -> AgentState:
        """Think node -- ask the LLM for a thought and extract any planned action.

        On parse failure the thought is still stored (as repaired JSON); on any
        exception the error is recorded in the state instead of propagating,
        so the graph can route to ``finish``.
        """
        try:
            prompt = await self._create_thinking_prompt(state)
            messages = [
                HumanMessage(content=prompt)
            ]
            thought_content = await self.llm_manager.safe_ainvoke(messages)

            # Repair possibly-malformed JSON from the LLM and pull out the
            # planned action (tool name) and its input, if any.
            thought_dict = repair_json(thought_content, return_objects=True)
            action, action_input = None, {}
            if isinstance(thought_dict, dict):
                action = thought_dict.get("action", None)
                if action:
                    # Keep only the first tool if the model listed several
                    # separated by commas/semicolons.
                    action = re.split("[,;]", str(action).strip())[0]
                action_input = thought_dict.get('action_input', None)
                if not isinstance(action_input, dict):
                    action_input = {}

            thought_content = json.dumps(thought_dict, ensure_ascii=False)

            state["thoughts"].append(thought_content)
            state["current_step"] += 1
            if action:
                state["actions"].append({"name": action, "input": action_input, "step": state["current_step"]})

            reasoning_step = ReasoningStep(
                step_number=state["current_step"],
                thought=thought_content,
                planned_action=action,
                action_input=action_input,
                confidence=0.7
            )
            state['reasoning_steps'].append(reasoning_step)
            await self.memory_store.store_reasoning_step(state['session_id'], reasoning_step)
            return state

        except Exception as e:
            logger.error(traceback.format_exc())
            state["has_error"] = True
            state["error_message"] = f"思考失败: {str(e)}"
            return state

    async def _act_node(self, state: AgentState) -> AgentState:
        """Act node -- execute the most recently planned tool call.

        Appends the (possibly compressed) tool result to the state, persists
        it to shared variables and memory, and records errors in the state
        rather than raising.
        """
        if not state["actions"]:
            return state

        current_action = state["actions"][-1]
        action_name = current_action["name"]
        action_input = current_action["input"]
        try:
            result = await self.tool_manager.execute_tool(action_name, action_input)

            # Compress overly long tool output with the LLM before it enters
            # the state (and later, the prompt).
            if len(str(result.data)) > Config.TOOL_RESULTS_LIMIT:
                result = await self._compress_result(action_input, result)

            state["tool_results"].append({
                # BUGFIX: was hard-coded True; record the tool's real outcome.
                "success": result.success,
                "tool": action_name,
                "input": action_input,
                "result": result.model_dump(mode='json'),
                "step": state["current_step"]
            })

            # Persist notable results into shared variables for the session.
            await self._store_tool_result(action_name, action_input, result, state)

            # Store the tool-call context in memory.
            tool_context = ToolContext(
                tool_name=action_name,
                input_data=action_input,
                output_data=result.data if result.success else None,
                success=result.success,
                error_message=result.error if not result.success else None,
                execution_time=0.0,
                metadata=result.metadata if hasattr(result, 'metadata') else {}
            )
            session_id = state.get("session_id")
            await self.memory_store.store_tool_context(session_id, tool_context)

            return state

        except Exception as e:
            logger.error(traceback.format_exc())
            state["has_error"] = True
            state["error_message"] = f"动作执行失败: {str(e)}"
            return state

    async def _store_tool_result(self, action_name: str, action_input: Dict, result: Any, state: AgentState):
        """Persist important tool results into shared variables for the session.

        Only successful results are stored; failures are ignored, and any
        internal error here is logged but never propagated (best-effort).
        """
        try:
            if not result.success:
                return

            shared_variables = state.get("shared_variables", {})
            # Calculator results: keep the latest value/expression plus a
            # timestamped history entry.
            if action_name == "calculator" and result.data:
                calculation_result = result.data
                expression = action_input.get("expression")
                if calculation_result is not None:
                    shared_variables["last_calculation_result"] = {
                        "value": result.data,
                        "source_tool": "calculator",
                        "timestamp": time.time()
                    }
                    shared_variables["last_calculation_expression"] = {
                        "value": expression,
                        "source_tool": "calculator",
                        "timestamp": time.time()
                    }
                    timestamp_key = f"calculation_{int(time.time())}"
                    shared_variables[timestamp_key] = {
                        "value": {"expression": expression, "result": calculation_result},
                        "source_tool": "calculator",
                        "timestamp": time.time()
                    }

            # Web-search results: keep only the latest one.
            elif action_name == "web_search" and result.data:
                shared_variables[f"last_{action_name}_result"] = {
                    "value": result.data,
                    "source_tool": "web_search",
                    "timestamp": time.time()
                }
            state["shared_variables"] = shared_variables

        except Exception as e:
            logger.error(traceback.format_exc())

    async def _compress_result(self, query: Any, result: ToolResult) -> ToolResult:
        """Compress an overly long tool result via the LLM.

        Args:
            query: Context for the compression prompt. NOTE: the caller in
                ``_act_node`` passes the tool's ``action_input`` dict, so this
                is annotated ``Any`` rather than ``str``.
            result: The tool result whose ``data`` exceeds the size limit.

        Returns:
            The same ``ToolResult`` with ``data`` replaced by the LLM summary.
        """
        if isinstance(result.data, list):
            contents = [str(item) for item in result.data]
        else:
            contents = str(result.data)
        compress_prompt = compress_prompt_template.format(
            inputs={
                'query': query,
                'contents': contents,
                'results_limit': Config.TOOL_RESULTS_LIMIT,
            }
        )
        messages = [
            HumanMessage(content=compress_prompt)
        ]
        result.data = await self.llm_manager.safe_ainvoke(messages)
        return result

    async def _observe_node(self, state: AgentState) -> AgentState:
        """Observe node -- turn the latest tool result into an observation string."""
        if not state["tool_results"]:
            return state

        current_result = state["tool_results"][-1]
        # ``result`` holds the serialized ToolResult (model_dump output).
        tool_result = current_result["result"]

        if tool_result["success"]:
            observation = f"工具 {current_result['tool']} 调用成功. Result:{tool_result['data']}"
        else:
            observation = f"工具 {current_result['tool']} 调用失败. 错误: {tool_result['error']}"
        state["observations"].append(observation)

        return state

    def _should_continue_after_think(self, state: AgentState) -> str:
        """Route after the think node: continue acting ("act") or stop ("finish")."""
        # Step budget exhausted.
        if state["current_step"] >= state["max_steps"]:
            return "finish"

        if state["has_error"]:
            return "finish"

        # Check whether the latest thought already declares a final answer.
        if state["thoughts"]:
            last_thought = state["thoughts"][-1]
            if re.search(r'Final Answer:', last_thought, re.IGNORECASE):
                # Extract the final answer and make it the output.
                final_answer_match = re.search(r'Final Answer:\s*(.+)', last_thought, re.IGNORECASE | re.DOTALL)
                if final_answer_match:
                    state["output"] = final_answer_match.group(1).strip()
                    state["is_complete"] = True
                return "finish"

            # Other completion markers the model may emit.
            last_thought_lower = last_thought.lower()
            if "i now know" in last_thought_lower and "final answer" in last_thought_lower:
                return "finish"

        # Act only when there is a planned action that has not been observed yet.
        if state["actions"] and len(state["actions"]) > len(state["observations"]):
            return "act"

        return "finish"

    async def _create_thinking_prompt(self, state: AgentState) -> str:
        """Build the thinking prompt from memory context and the ReAct history."""

        # Relevant context retrieved from memory.
        memory_context = await self._get_relevant_context(state)

        # Completed Thought -> Action -> Observation cycles; incomplete
        # cycles (no action or no observation yet) are skipped.
        past_process = []
        for i in range(len(state["thoughts"])):
            thought = state["thoughts"][i]
            action = state["actions"][i] if i < len(state["actions"]) else None
            observation = state["observations"][i] if i < len(state["observations"]) else None
            if action and observation:
                past_process.append(f"Thought: {thought}")
                past_process.append(f"Action: {action['name']}")
                past_process.append(f"Action Input: {action['input']}")
                past_process.append(f"Observation: {observation}")

        return think_prompt_template.format(
            inputs={
                'system_time': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                'tools_names': self.tool_manager.get_tool_names(state['session_id']),
                'tools_description': self.tool_manager.format_tools_for_prompt(state['session_id']),
                'user_input': state['input'],
                'memory_context': memory_context,
                'past_process': past_process,
            },
            remove_template_variables=True
        )

    async def _finish_node(self, state: AgentState) -> AgentState:
        """Finish node -- generate the final output (unless already set)."""
        try:
            # The output may already have been set during the think phase.
            if state.get("output") and state.get("is_complete"):
                return state

            # Prompt for generating the final answer.
            prompt = await self._create_final_answer_prompt(state)
            messages = [
                HumanMessage(content=prompt)
            ]
            final_answer = await self.llm_manager.safe_ainvoke(messages)

            # Strip the "Final Answer:" prefix when the model includes it.
            answer_match = re.search(r'Final Answer:\s*(.+)', final_answer, re.IGNORECASE | re.DOTALL)
            if answer_match:
                final_answer = answer_match.group(1).strip()

            state["output"] = final_answer
            logger.debug(f"最终回答: {final_answer}")
            state["is_complete"] = True

        except Exception as e:
            logger.error(traceback.format_exc())
            state["has_error"] = True
            state["error_message"] = f"最终回答生成失败: {str(e)}"

        return state

    async def _get_relevant_context(self, state: AgentState) -> str:
        """Collect memory context relevant to the current user input.

        Best-effort: any retrieval error is logged and whatever was gathered
        so far is returned (possibly the empty string).
        """
        context_parts = []

        try:
            # Shared variables of the current session.
            shared_variables = state["shared_variables"]
            if len(shared_variables) != 0:
                context_parts.append("\n当前会话共享变量：")
                context_parts.append(f"{shared_variables}")

            # Similar episodic memories.
            episodes_contexts = await self.memory_store.get_similar_episodes(state["session_id"], state['input'])
            if len(episodes_contexts) != 0:
                context_parts.append("\n相似情景记忆：")
                for item in episodes_contexts:
                    context_parts.append(f"- {item}")

            # Relevant semantic (factual) memories.
            semantic_contexts = await self.memory_store.get_relevant_semantics(state["session_id"], state['input'])
            if len(semantic_contexts) != 0:
                context_parts.append("\n相关事实性记忆：")
                for item in semantic_contexts:
                    context_parts.append(f"- {item}")

            # Recent conversation history.
            conversations = await self.memory_store.get_conversations(state['session_id'])
            if len(conversations) != 0:
                context_parts.append("\n近期会话记录详情:")
                for item in conversations:
                    context_parts.append(f"- {item}")

        except Exception as e:
            logger.error(traceback.format_exc())

        return "\n".join(context_parts) if context_parts else ""

    async def _create_final_answer_prompt(self, state: AgentState) -> str:
        """Build the prompt used to generate the final answer."""

        # Full ReAct history; actions/observations are padded with None so
        # zip runs over every thought even when they are fewer in number.
        past_process = []
        for i, (thought, action, observation) in enumerate(zip(
                state["thoughts"],
                state["actions"] + [None] * len(state["thoughts"]),
                state["observations"] + [None] * len(state["thoughts"])
        )):
            past_process.append(f"{i}: Thought: {thought}")
            if action:
                past_process.append(f" Action: {action['name']}")
                past_process.append(f" Action Input: {action['input']}")
            if observation:
                past_process.append(f" Observation: {observation}")
        past_process_context = '\n'.join(past_process)

        # Relevant semantic (factual) memories.
        semantics = await self.memory_store.get_relevant_semantics(state["session_id"], state['input'])
        semantic_contexts = '\n'.join([f"- {item}" for item in semantics])

        # Recent conversation history.
        conversations = await self.memory_store.get_conversations(state["session_id"])
        conversation_contexts = '\n'.join([f"- {item}" for item in conversations])

        return final_prompt_template.format(
            inputs = {
                'system_time': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                'question': state['input'],
                'semantics': semantic_contexts,
                'conversations': conversation_contexts,
                'past_process': past_process_context,
            },
            remove_template_variables=True
        )