"""
React智能体
"""
import asyncio
import json
import re
from typing import Any, Dict
import uuid
import time

from langchain.schema import HumanMessage, AIMessage
from langgraph.graph import StateGraph, END
from langgraph.graph.state import CompiledStateGraph
from loguru import logger
from json_repair import repair_json
import traceback

from agent.tool_manager import ToolManager
from react.react_state import AgentState, create_initial_state
from config import Config
from llm.llm_manager import get_llm_manager
from model.episodic_model import Episode
from model.context_model import ReasoningStep
from model.memory_model import MemoryType
from memory import ContextManager
from memory.context_manager import ToolContext
from react.agent_prompts import (
    think_prompt_template,
    final_prompt_template,
    compress_prompt_template,
    refine_websearch_prompt_template
)

from tools import ToolResult
from langgraph.store.memory import BaseStore


class ReactNode:

    def __init__(self, memory_store: BaseStore, tool_manager: ToolManager):
        """Wire up the agent's collaborators and compile the reasoning graph.

        Args:
            memory_store: Backing store for episodic/semantic/summary memory.
            tool_manager: Registry used to resolve and execute tools.
        """
        self.memory_store = memory_store
        self.tool_manager = tool_manager
        # The context manager shares the same store so memories persist per session.
        self.context_manager = ContextManager(self.memory_store)
        self.llm_manager = get_llm_manager()
        # Per-run scratch session id for LLM bookkeeping; assigned in run().
        self.tmp_session = None
        # Build the think -> act -> observe loop once, up front.
        self.graph = self._create_graph()

    def _create_graph(self) -> CompiledStateGraph:
        """Assemble the ReAct state machine: think -> (act -> observe -> think)* -> finish."""
        workflow = StateGraph(AgentState)

        # One node per phase of the loop.
        for node_name, handler in (
            ("think", self._think_node),
            ("act", self._act_node),
            ("observe", self._observe_node),
            ("finish", self._finish_node),
        ):
            workflow.add_node(node_name, handler)

        workflow.set_entry_point("think")
        # After thinking, either execute a tool or wrap up with a final answer.
        workflow.add_conditional_edges(
            "think",
            self._should_continue_after_think,
            {"act": "act", "finish": "finish"},
        )
        workflow.add_edge("act", "observe")
        workflow.add_edge("observe", "think")
        workflow.add_edge("finish", END)
        return workflow.compile(store=self.memory_store)

    async def run(
            self, session_id: str | None = None, query: str | None = None, max_steps: int | None = None
    ) -> ToolResult:
        """Run the ReAct loop for one query and return the final result.

        Args:
            session_id: Conversation id; a fresh UUID is generated when None/empty.
            query: The user question to answer.
            max_steps: Upper bound on think/act iterations; defaults to
                Config.MAX_ITERATIONS.

        Returns:
            ToolResult whose ``data`` holds the agent's answer, or an error
            description when the graph invocation fails.
        """
        if max_steps is None:
            max_steps = Config.MAX_ITERATIONS
        if not session_id:  # treat None and "" uniformly
            session_id = str(uuid.uuid4())
        # Throwaway LLM session so per-run caches can be dropped afterwards.
        self.tmp_session = str(uuid.uuid4())
        self.context_manager.start_session(session_id, query)

        # Lazily load session-scoped tools the first time this session runs.
        if session_id not in self.tool_manager.user_tools:
            await self.tool_manager.load_session_tools(session_id)

        initial_state = create_initial_state(query, max_steps)
        initial_state["session_id"] = session_id

        config = {
            "configurable": {"thread_id": f"react_agent_{session_id}"},
            "recursion_limit": 100
        }
        try:
            final_state = await self.graph.ainvoke(initial_state, config)
            result = ToolResult(
                success=not final_state.get("has_error", False),
                data=final_state.get("output", "无输出"),
                error=final_state.get("error_message")
            )
        except Exception as e:
            logger.error(traceback.format_exc())
            result = ToolResult(
                success=False,
                data=None,
                error=f"运行失败: {str(e)}"
            )
        finally:
            # Always release session resources, even if end_session itself raises
            # part-way or an unexpected control-flow path is taken.
            await self.context_manager.end_session()
            self.llm_manager.cleanup_session(self.tmp_session)
        return result

    async def _think_node(self, state: AgentState) -> AgentState:
        """Think node: ask the LLM for the next thought and an optional action."""
        try:
            prompt = await self._create_thinking_prompt(state)
            response = await self.llm_manager.ainvoke(
                [HumanMessage(content=prompt)], self.tmp_session
            )

            # Repair the (possibly malformed) JSON reply, then pull out the
            # planned action and its input, if present.
            thought_dict = repair_json(response.content, return_objects=True)
            action = None
            action_input = {}
            if isinstance(thought_dict, dict):
                action = thought_dict.get("action", None)
                if action:
                    # Keep only the first tool name when several are listed.
                    action = re.split("[,;]", str(action).strip())[0]
                action_input = thought_dict.get('action_input', None)
                if not isinstance(action_input, dict):
                    action_input = {}

            thought_content = json.dumps(thought_dict, ensure_ascii=False)
            state["thoughts"].append(thought_content)
            state["current_step"] += 1

            # Persist this reasoning step in the context manager.
            await self.context_manager.add_reasoning_step(ReasoningStep(
                step_number=state["current_step"],
                thought=thought_content,
                planned_action=action,
                action_input=action_input,
                confidence=0.7
            ))

            # Queue the action so the act node can execute it.
            if action:
                state["actions"].append({
                    "name": action,
                    "input": action_input,
                    "step": state["current_step"]
                })

            return state

        except Exception as e:
            logger.error(traceback.format_exc())
            state["has_error"] = True
            state["error_message"] = f"思考失败: {str(e)}"
            return state

    async def _act_node(self, state: AgentState) -> AgentState:
        """Act node: execute the most recently planned tool call.

        Post-processes web-search output, compresses oversized results, and
        records the invocation both in the agent state and in the context
        manager (now with a measured execution time instead of the previous
        hard-coded 0.0).
        """
        if not state["actions"]:
            return state

        current_action = state["actions"][-1]
        action_name = current_action["name"]
        action_input = current_action["input"]
        session_id = state.get("session_id")
        try:
            # Time the tool call itself (monotonic clock: immune to wall-clock jumps).
            start_time = time.monotonic()
            result = await self.tool_manager.execute_tool(action_name, action_input)
            execution_time = time.monotonic() - start_time

            if action_name == "web_search":
                result = await self._refine_websearch_result(session_id, action_input, result)

            # Compress overly long tool output before it re-enters the prompt.
            if len(str(result.data)) > 1024:
                result = await self._compress_result(action_input, result)

            state["tool_results"].append({
                # "success" here means the call completed without raising;
                # the tool's own status lives in result["success"].
                "success": True,
                "tool": action_name,
                "input": action_input,
                "result": result.model_dump(mode='json'),
                "step": state["current_step"]
            })

            # Persist important results in context memory for session continuity.
            await self._store_result_in_context(action_name, action_input, result, state)

            # Record the tool invocation context.
            tool_context = ToolContext(
                tool_name=action_name,
                input_data=action_input,
                output_data=result.data if result.success else None,
                success=result.success,
                error_message=result.error if not result.success else None,
                execution_time=execution_time,
                metadata=result.metadata if hasattr(result, 'metadata') else {}
            )
            await self.context_manager.add_tool_context(tool_context)

            return state

        except Exception as e:
            logger.error(traceback.format_exc())
            state["has_error"] = True
            state["error_message"] = f"动作执行失败: {str(e)}"
            return state

    async def _compress_result(self, query: Any, result: ToolResult) -> ToolResult:
        """Compress an oversized tool result with the LLM.

        Args:
            query: The tool input the result answers (callers pass the
                action-input dict, hence ``Any`` rather than ``str``).
            result: Tool result whose ``data`` exceeded the length budget.

        Returns:
            The same ToolResult with ``data`` replaced by the compressed text.
        """
        if isinstance(result.data, list):
            contents = [str(item) for item in result.data]
        else:
            contents = str(result.data)
        compress_prompt = compress_prompt_template.format(
            inputs={
                'query': query,
                'contents': contents,
                'results_limit': 1024,
            }
        )
        messages = [
            HumanMessage(content=compress_prompt)
        ]
        response = await self.llm_manager.ainvoke(messages, self.tmp_session)
        # BUG FIX: store the message text, not the response object, so later
        # len(str(...)) checks and model_dump() see plain text. This matches
        # every other ainvoke call site in this class, which uses .content.
        result.data = response.content
        return result

    async def _observe_node(self, state: AgentState) -> AgentState:
        """观察节点进程工具的结果，并创建观察记录。"""

        if not state["tool_results"]:
            return state

        current_result = state["tool_results"][-1]
        tool_result = current_result["result"]

        if tool_result["success"]:
            observation = f"工具 {current_result['tool']} 调用成功. Result:{tool_result['data']}"
        else:
            observation = f"工具 {current_result['tool']} 调用失败. 错误: {tool_result['error']}"

        state["observations"].append(observation)

        return state

    def _should_continue_after_think(self, state: AgentState) -> str:
        """决定是继续采取行动还是就此结束。."""
        if state["current_step"] >= state["max_steps"]:
            return "finish"

        if state["has_error"]:
            return "finish"

        # 检查最后一个思路是否表明我们应当结束。
        if state["thoughts"]:
            last_thought = state["thoughts"][-1]
            if re.search(r'Final Answer:', last_thought, re.IGNORECASE):
                # 提取最终答案，并将其设为输出。
                final_answer_match = re.search(r'Final Answer:\s*(.+)', last_thought, re.IGNORECASE | re.DOTALL)
                if final_answer_match:
                    state["output"] = final_answer_match.group(1).strip()
                    state["is_complete"] = True
                return "finish"

            # 检查其他完成标识
            last_thought_lower = last_thought.lower()
            if "i now know" in last_thought_lower and "final answer" in last_thought_lower:
                return "finish"

        # 检查是否还存在需执行的操作
        if state["actions"] and len(state["actions"]) > len(state["observations"]):
            return "act"

        return "finish"

    async def _store_result_in_context(self, action_name: str, action_input: Dict, result: Any, state: AgentState):
        """Persist noteworthy tool results as shared variables for session continuity."""
        try:
            if not result.success:
                return

            if action_name == "calculator" and result.data:
                calculation_result = result.data
                expression = action_input.get("expression")
                if calculation_result is not None:
                    set_var = self.context_manager.set_shared_variable
                    set_var(
                        "last_calculation_result",
                        calculation_result,
                        source_tool="calculator"
                    )
                    set_var(
                        "last_calculation_expression",
                        expression,
                        source_tool="calculator"
                    )
                    # Also keep a timestamped entry so past calculations stay queryable.
                    set_var(
                        f"calculation_{int(time.time())}",
                        {"expression": expression, "result": calculation_result},
                        source_tool="calculator"
                    )

            # Database results that might matter later.
            elif action_name == "database" and result.data:
                input_text = str(action_input).lower()
                if "get" in input_text:
                    # Remember what was read, keyed by the retrieved name.
                    key_match = re.search(r'get\s+(\w+)', input_text)
                    if key_match:
                        self.context_manager.set_shared_variable(
                            f"db_retrieved_{key_match.group(1)}",
                            result.data,
                            source_tool="database"
                        )
                elif "set" in input_text:
                    # Remember that a write happened and what it produced.
                    self.context_manager.set_shared_variable(
                        "last_db_operation",
                        {"operation": "set", "input": action_input, "result": result.data},
                        source_tool="database"
                    )

            # Short summary of web-search style output.
            elif action_name in ["web_search", "wikipedia"] and result.data:
                text = str(result.data)
                search_summary = text[:200] + "..." if len(text) > 200 else text
                self.context_manager.set_shared_variable(
                    f"last_{action_name}_result",
                    search_summary,
                    source_tool=action_name
                )
        except Exception:
            logger.error(traceback.format_exc())

    async def _create_thinking_prompt(self, state: AgentState) -> str:
        """Build the think-node prompt from memory and completed ReAct cycles."""
        # Material recalled from memory that is relevant to this query.
        memory_context = await self._get_relevant_memory_context(state)

        # Replay only the completed Thought -> Action -> Observation cycles.
        past_process = []
        for idx, thought in enumerate(state["thoughts"]):
            action = state["actions"][idx] if idx < len(state["actions"]) else None
            observation = state["observations"][idx] if idx < len(state["observations"]) else None
            if action and observation:
                past_process.extend([
                    f"Thought: {thought}",
                    f"Action: {action['name']}",
                    f"Action Input: {action['input']}",
                    f"Observation: {observation}",
                ])

        return think_prompt_template.format(
            inputs={
                'system_time': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                'tools_names': self.tool_manager.get_tool_names(state['session_id']),
                'tools_description': self.tool_manager.format_tools_for_prompt(state['session_id']),
                'question': state['input'],
                'memory_context': memory_context,
                'past_process': past_process,
            },
            remove_template_variables=True
        )

    async def _get_relevant_memory_context(self, state: AgentState) -> str:
        """Collect memory context relevant to the current query.

        Best-effort aggregation over four memory sources: similar past
        episodes, semantic (factual) memories, the rolling summary, and recent
        conversation details. Each source is queried in its own try/except so
        one failure never hides the others.

        Returns:
            Newline-joined context text, or "" when nothing was found.
        """
        context_parts = []
        session_id = state.get('session_id')
        query = state['input']

        try:
            # NOTE(review): fetched but not used below — presumably kept for a
            # side effect in the context manager; confirm before removing.
            shared_vars = self.context_manager.get_all_shared_variables()

            # Episodic memory: similar past interactions.
            try:
                similar_memories = await self.memory_store.asearch(
                    (MemoryType.EPISODIC.value, session_id),
                    query=query,
                    limit=5
                )
                similar_memories = similar_memories or []
                similar_episodes = [
                    Episode(**json.loads(memory.value['episode']))
                    for memory in similar_memories if memory is not None
                ]
                # Hoisted out of the loop: this test depends only on the query,
                # not on the episode being rendered.
                is_calculation_query = any(
                    keyword in query.lower() for keyword in
                    ['calculate', 'calculation', 'previous', 'result', 'math',
                     '计算', '运算', '先前', '之前', '前面', '结果', '数学']
                )
                for episode in similar_episodes:
                    if is_calculation_query:
                        context_parts.append(f"  Previous Query: {episode.query}")
                        context_parts.append(f"  Previous Result: {episode.response}")
                        context_parts.append(f"  Tools used: {', '.join(episode.tools_used)}")
                        if 'calculator' in episode.tools_used:
                            # Surface the first recorded calculator step verbatim.
                            for step in episode.reasoning_steps:
                                if isinstance(step, dict) and step.get('action') == 'calculator':
                                    context_parts.append(
                                        f"  Calculation: {step.get('input', 'N/A')} = {episode.response}")
                                    break
                        context_parts.append("")
                    else:
                        context_parts.append(f"  Query: {episode.query}")
                        context_parts.append(f"  Response: {episode.response}")
                        context_parts.append(f"  Tools used: {', '.join(episode.tools_used)}")
                        context_parts.append("")
            except Exception:
                logger.error(traceback.format_exc())

            # Semantic (factual) memories.
            try:
                relevant_memories = await self.memory_store.asearch(
                    (MemoryType.SEMANTIC.value, session_id),
                    query=query,
                    limit=5,
                )
                relevant_memories = relevant_memories or []
                if len(relevant_memories) > 0:
                    context_parts.append("\n相关事实性记忆（利用这些信息辅助回答当前查询）:")
                    for memory in relevant_memories:
                        if memory.value is not None:
                            context_parts.append(f"- {memory.value['semantic']}")

            except Exception:
                logger.error(traceback.format_exc())

            # Rolling summary of recent memory.
            try:
                zipped_memory = await self.memory_store.aget(
                    (MemoryType.SUMMARY.value, session_id),
                    key='summary'
                )
                if zipped_memory is not None:
                    context_parts.append("\n近期记忆摘要:")
                    context_parts.append(zipped_memory.value['summary'])

            except Exception:
                logger.error(traceback.format_exc())

            # Recent conversation details, truncated to a character budget.
            try:
                recent_memory = await self.memory_store.aget(
                    (MemoryType.SHORT_TERM.value, session_id),
                    key='conversations'
                )
                if recent_memory is not None:
                    recent_conversations = json.loads(recent_memory.value['conversations'])
                    context_parts.append("\n近期会话记录详情:")

                    # Keep conversations (oldest-first) until the budget is
                    # exceeded; the conversation that overflows is dropped.
                    word_threshold = 1000
                    word_count = 0
                    limited_conversations = []
                    for conversation in recent_conversations:
                        word_count += len(json.dumps(conversation, ensure_ascii=False))
                        if word_count > word_threshold:
                            break
                        limited_conversations.append(conversation)

                    # Emit newest-first.
                    for conversation in limited_conversations[::-1]:
                        context_parts.append(f"- {json.dumps(conversation, ensure_ascii=False)}")

            except Exception:
                logger.error(traceback.format_exc())

        except Exception:
            logger.error(traceback.format_exc())

        return "\n".join(context_parts) if context_parts else ""

    async def _finish_node(self, state: AgentState) -> AgentState:
        """Finish node: produce the final answer unless one was already set."""
        try:
            # The think phase may already have extracted a final answer.
            if state.get("output") and state.get("is_complete"):
                return state

            # Ask the LLM to synthesize the final answer from the full trace.
            prompt = await self._create_final_answer_prompt(state)
            response = await self.llm_manager.ainvoke(
                [HumanMessage(content=prompt)], self.tmp_session
            )
            final_answer = response.content

            # Strip the "Final Answer:" preamble when the model includes it.
            answer_match = re.search(r'Final Answer:\s*(.+)', final_answer, re.IGNORECASE | re.DOTALL)
            if answer_match:
                final_answer = answer_match.group(1).strip()

            state["output"] = final_answer
            state["is_complete"] = True

        except Exception as e:
            logger.error(traceback.format_exc())
            state["has_error"] = True
            state["error_message"] = f"最终回答生成失败: {str(e)}"

        return state

    async def _create_final_answer_prompt(self, state: AgentState) -> str:
        """Build the prompt used to generate the final answer."""
        # Full trace: every thought, with its action/observation where present.
        past_process = []
        actions = state["actions"]
        observations = state["observations"]
        for idx, thought in enumerate(state["thoughts"]):
            past_process.append(f"Thought: {thought}")
            action = actions[idx] if idx < len(actions) else None
            if action:
                past_process.append(f"Action: {action['name']}")
                past_process.append(f"Action Input: {action['input']}")
            observation = observations[idx] if idx < len(observations) else None
            if observation:
                past_process.append(f"Observation: {observation}")

        # Semantic (factual) memories relevant to the question.
        context_parts = []
        try:
            relevant_memories = await self.memory_store.asearch(
                (MemoryType.SEMANTIC.value, state.get('session_id')),
                query=state['input'],
                limit=10,
            )
            for memory in relevant_memories or []:
                if memory.value is not None:
                    context_parts.append(f"- {memory.value['semantic']}")

        except Exception:
            logger.error(traceback.format_exc())

        # Rolling summary of recent memory.
        summarize_context_parts = []
        try:
            zipped_memory = await self.memory_store.aget(
                (MemoryType.SUMMARY.value, state.get('session_id')),
                key='summary'
            )
            if zipped_memory is not None:
                summarize_context_parts.append(zipped_memory.value['summary'])

        except Exception:
            logger.error(traceback.format_exc())

        # Recent conversation details, truncated to a character budget.
        conversation_context_parts = []
        try:
            recent_memory = await self.memory_store.aget(
                (MemoryType.SHORT_TERM.value, state.get('session_id')),
                key='conversations'
            )
            if recent_memory is not None:
                recent_conversations = json.loads(recent_memory.value['conversations'])

                # Keep oldest-first until the budget is exceeded; drop the overflow.
                word_threshold = 1000
                word_count = 0
                limited_conversations = []
                for conversation in recent_conversations:
                    word_count += len(json.dumps(conversation, ensure_ascii=False))
                    if word_count > word_threshold:
                        break
                    limited_conversations.append(conversation)

                # Render newest-first.
                for conversation in limited_conversations[::-1]:
                    conversation_context_parts.append(f"- {json.dumps(conversation, ensure_ascii=False)}")

        except Exception:
            logger.error(traceback.format_exc())

        return final_prompt_template.format(
            inputs={
                'system_time': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                'question': state['input'],
                'semantics': '\n'.join(context_parts),
                'summary': '\n'.join(summarize_context_parts),
                'conversations': '\n'.join(conversation_context_parts),
                'past_process': past_process,
            },
            remove_template_variables=True
        )

    async def _refine_websearch_result(self, session_id: str, query:str, result: ToolResult) -> ToolResult:
        """Refine every web-search hit concurrently, dropping unusable ones."""
        if not result.data:
            return result
        # Refine all hits in parallel; items deemed unusable come back as None.
        refined = await asyncio.gather(
            *(self._refine_websearch_item(session_id, query, item) for item in result.data),
            return_exceptions=False
        )
        result.data = [item for item in refined if item is not None]
        return result

    async def _refine_websearch_item(self, session_id: str, query: str, data: Dict) -> Dict | None:
        """Condense one web-search hit's body via the LLM.

        ``data`` must carry the three web-search fields: title, href, body.
        Returns None when the body (or the refined text) is too short to use.
        """
        content = str(data.get('body', ''))
        if len(content) < 10:
            return None
        refine_websearch_prompt = refine_websearch_prompt_template.format(
            inputs={
                'query': query,
                'content': content,
            },
            remove_template_variables=True
        )
        response = await self.llm_manager.ainvoke(
            [HumanMessage(content=refine_websearch_prompt)], self.tmp_session
        )
        if len(response.content) < 10:
            return None
        data['body'] = response.content
        return data