"""
ReAct 架构控制器 - 心理健康聊天机器人
实现 Reasoning + Acting 框架，结合推理和行动
"""

import asyncio
import json
import logging
import time
from typing import Dict, List, Any, Optional, AsyncGenerator
from datetime import datetime

from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate

from .react_tools import (
    KnowledgeRetrievalTool,
    EmotionAnalysisTool,
    SafetyCheckTool,
    ConversationHistoryTool,
    ConversationStorageTool,
    ProfessionalAdviceTool
)
from .react_prompts import REACT_SYSTEM_PROMPT, REACT_HUMAN_PROMPT
from app.configs.settings import api_settings

logger = logging.getLogger(__name__)


class ReActController:
    """ReAct (Reasoning + Acting) controller specialised for mental-health chat.

    Runs an iterative thought -> action -> observation loop: the LLM proposes a
    thought and a tool action, the tool is executed, and its observation is fed
    back into the next prompt, until a final answer is produced or a stop
    condition triggers. Progress can be streamed to the caller via an async
    ``stream_callback``.
    """
    
    def __init__(self):
        """Initialise the LLM client, the tool registry and the prompt template."""
        self.llm = ChatOpenAI(
            model="deepseek-chat",
            temperature=0.1,  # low temperature keeps the reasoning steps consistent
            max_tokens=2000,
            openai_api_key=api_settings.OPENAI_API_KEY,
            openai_api_base=api_settings.OPENAI_BASE_URL,
            streaming=True  # enable token streaming (used by astream below)
        )
        
        # Tool registry; keys must match the action names the LLM emits.
        self.tools = {
            "knowledge_retrieval": KnowledgeRetrievalTool(),
            "emotion_analysis": EmotionAnalysisTool(),
            "safety_check": SafetyCheckTool(),
            "conversation_history": ConversationHistoryTool(),
            "conversation_storage": ConversationStorageTool(),
            "professional_advice": ProfessionalAdviceTool()
        }
        
        # ReAct prompt template (system + human message pair)
        self.prompt_template = ChatPromptTemplate.from_messages([
            ("system", REACT_SYSTEM_PROMPT),
            ("human", REACT_HUMAN_PROMPT)
        ])
        
        self.max_iterations = 5  # hard cap on reasoning-action loop iterations
        
    async def process_message(
        self,
        user_input: str,
        chat_history: Optional[List[Dict[str, str]]] = None,
        timeout: int = 30,
        stream_callback=None
    ) -> Dict[str, Any]:
        """
        Process a user message with the ReAct loop.

        Args:
            user_input: Raw user message text.
            chat_history: Prior turns; each dict has "role" and "content" keys.
            timeout: Timeout in seconds. NOTE(review): not referenced anywhere
                in this body — confirm whether enforcement was intended.
            stream_callback: Optional async callable receiving progress events
                (dicts with a "type" key: iteration_start / thinking_chunk /
                thought / action / observation / final_answer_start / content /
                final_answer_complete).

        Returns:
            Result dict with the response text, emotion/crisis metadata,
            execution time, tools used, and the full thought/action/observation
            trace under "react_trace". On failure, a fallback dict with an
            "error" key is returned instead of raising.
        """
        start_time = time.time()
        chat_history = chat_history or []
        
        try:
            logger.info(f"[ReAct] 开始处理消息: {user_input[:50]}...")
            
            # Mutable loop state, shared with the tools via _execute_action.
            react_state = {
                "user_input": user_input,
                "chat_history": chat_history,
                "thoughts": [],
                "actions": [],
                "observations": [],
                "final_answer": None,
                "stream_callback": stream_callback,  # also read inside _generate_thought_action
                "metadata": {
                    "emotion": "unknown",
                    "confidence": 0.5,
                    "crisis_level": "low",
                    "safety_triggered": False,
                    "tools_used": [],
                    "iteration_count": 0
                }
            }
            
            # Run the reasoning-acting loop, capped at max_iterations.
            for iteration in range(self.max_iterations):
                react_state["metadata"]["iteration_count"] = iteration + 1
                logger.info(f"[ReAct] 开始第 {iteration + 1} 次推理-行动循环")

                # Streaming: announce the start of this iteration.
                if stream_callback:
                    await stream_callback({
                        "type": "iteration_start",
                        "iteration": iteration + 1,
                        "message": f"开始第 {iteration + 1} 次推理循环"
                    })
                    logger.info(f"[ReAct] 发送迭代开始信号: {iteration + 1}")

                # Ask the LLM for the next thought and action.
                thought_action = await self._generate_thought_action(react_state)

                # Thinking fragments were already streamed inside _generate_thought_action.
                
                if thought_action.get("action") in ["Final Answer", "最终回答"]:
                    # The model produced a final answer; stop looping.
                    react_state["final_answer"] = thought_action.get("action_input", "")
                    logger.info(f"[ReAct] 达到最终答案，结束循环")
                    break
                
                # Execute the chosen tool action.
                action_name = thought_action.get("action")
                action_input = thought_action.get("action_input")
                logger.info(f"[ReAct] 准备执行行动: {action_name}, 输入: {action_input[:50]}...")

                # The action event was already emitted inside _generate_thought_action.

                observation = await self._execute_action(
                    action_name,
                    action_input,
                    react_state
                )

                # Streaming: emit the observation (the "observe" phase of standard ReAct).
                if stream_callback:
                    # Format the observation for display.
                    if action_name == 'conversation_history':
                        observation_text = self._format_conversation_history_observation(observation)
                    else:
                        observation_text = observation.get('result', str(observation))

                    await stream_callback({
                        "type": "observation",
                        "iteration": iteration + 1,
                        "action": action_name,
                        "observation": observation_text,
                        "full_result": observation
                    })
                    logger.info(f"[ReAct] 发送观察结果: {observation_text[:50]}...")
                    # Brief pause so clients can render the observation.
                    await asyncio.sleep(0.3)
                
                # Record this thought/action/observation triple in the trace.
                react_state["thoughts"].append(thought_action.get("thought", ""))
                react_state["actions"].append({
                    "action": thought_action.get("action"),
                    "action_input": thought_action.get("action_input")
                })
                react_state["observations"].append(observation)

                # Smart stop: bail out on repeated tool calls or excessive tool usage.
                if self._should_force_stop(react_state, action_name, iteration):
                    logger.info(f"[ReAct] 检测到重复调用或已获得足够信息，强制生成最终答案")
                    break

                # Early exit for safety-critical observations (e.g. crisis detection).
                if observation.get("requires_immediate_response"):
                    safety_response = observation.get("response", "")
                    react_state["metadata"]["safety_triggered"] = True
                    logger.warning(f"[ReAct] 安全检查触发，提前结束循环")

                    # Stream the prepared safety response as well.
                    if stream_callback and safety_response:
                        logger.info(f"[ReAct] 开始流式输出安全检查回应...")

                        # Signal that the (safety) final answer is starting.
                        await stream_callback({
                            "type": "final_answer_start",
                            "message": "安全检查触发，正在生成回应..."
                        })

                        # Simulate token streaming by chunking the prepared text.
                        chunk_size = 10  # characters per chunk
                        for i in range(0, len(safety_response), chunk_size):
                            chunk = safety_response[i:i+chunk_size]
                            await stream_callback({
                                "type": "content",
                                "content": chunk,
                                "chunk_id": i // chunk_size + 1
                            })
                            # Small delay to mimic streaming output.
                            await asyncio.sleep(0.05)

                        # Signal completion of the safety answer.
                        await stream_callback({
                            "type": "final_answer_complete",
                            "full_content": safety_response
                        })

                    react_state["final_answer"] = safety_response
                    break
            
            # If the loop ended without a final answer, synthesise one from the trace.
            if not react_state["final_answer"]:
                react_state["final_answer"] = await self._generate_final_answer(react_state, stream_callback)

            # Best-effort: persist the exchange to the vector store; failures only log a warning.
            try:
                storage_tool = self.tools["conversation_storage"]
                storage_result = await storage_tool.execute(react_state["final_answer"], react_state)
                logger.info(f"[ReAct] 对话记录存储结果: {storage_result.get('result', 'Unknown')}")
            except Exception as e:
                logger.warning(f"[ReAct] 对话记录存储失败: {e}")

            execution_time = time.time() - start_time
            
            # Assemble the result payload for the caller.
            result = {
                "response": react_state["final_answer"],
                "emotion": react_state["metadata"]["emotion"],
                "confidence": react_state["metadata"]["confidence"],
                "crisis_level": react_state["metadata"]["crisis_level"],
                "safety_triggered": react_state["metadata"]["safety_triggered"],
                "execution_time": execution_time,
                "tools_used": react_state["metadata"]["tools_used"],
                "iteration_count": react_state["metadata"]["iteration_count"],
                "react_trace": {
                    "thoughts": react_state["thoughts"],
                    "actions": react_state["actions"],
                    "observations": react_state["observations"]
                }
            }
            
            logger.info(f"[ReAct] 处理完成，用时 {execution_time:.2f}秒，迭代 {react_state['metadata']['iteration_count']} 次")
            return result
            
        except Exception as e:
            execution_time = time.time() - start_time
            logger.error(f"[ReAct] 处理失败: {e}")
            return {
                "response": "抱歉，我在处理您的消息时遇到了问题。请稍后再试。",
                "emotion": "unknown",
                "confidence": 0.0,
                "crisis_level": "unknown",
                "safety_triggered": False,
                "execution_time": execution_time,
                "error": str(e),
                "tools_used": [],
                "iteration_count": 0
            }
    
    async def _generate_thought_action(self, react_state: Dict[str, Any]) -> Dict[str, str]:
        """Generate the next thought and action via a streaming LLM call.

        Streams raw "thinking_chunk" events while the LLM responds, then emits
        parsed "thought" and "action" events. Returns a dict with "thought",
        "action" and "action_input" keys; falls back to a safe Final Answer on
        any exception.
        """
        try:
            # Build the context string from prior thoughts/actions/observations.
            context = self._build_context(react_state)
            
            # Format the ReAct prompt messages for the LLM.
            messages = self.prompt_template.format_messages(
                user_input=react_state["user_input"],
                chat_history=self._format_chat_history(react_state["chat_history"]),
                context=context,
                available_tools=self._get_available_tools_description()
            )
            
            # Stream the LLM call so the thinking process can be forwarded live.
            full_response = ""
            stream_callback = react_state.get('stream_callback')

            logger.info(f"[ReAct] 开始流式LLM调用，回调函数: {stream_callback is not None}")
            logger.info(f"[ReAct] 消息内容: {messages}")

            chunk_count = 0
            logger.info(f"[ReAct] 准备开始流式循环...")

            stream_generator = self.llm.astream(messages)
            logger.info(f"[ReAct] 流式生成器创建成功: {stream_generator}")

            logger.info(f"[ReAct] 开始迭代流式生成器...")
            async for chunk in stream_generator:
                chunk_count += 1
                logger.info(f"[ReAct] 收到LLM块 #{chunk_count}: '{chunk.content}'")

                if chunk.content:
                    full_response += chunk.content
                    # Forward each thinking fragment as it arrives.
                    if stream_callback:
                        logger.info(f"[ReAct] 发送思考片段到回调: '{chunk.content}'")
                        await stream_callback({
                            "type": "thinking_chunk",
                            "content": chunk.content,
                            "iteration": react_state["metadata"]["iteration_count"],
                            "full_content_so_far": full_response
                        })
                        logger.debug(f"[ReAct] 思考片段已发送")
                    else:
                        logger.warning(f"[ReAct] 没有回调函数，跳过思考片段")
                else:
                    logger.info(f"[ReAct] 收到空内容块 #{chunk_count}")

            logger.info(f"[ReAct] 流式循环结束")
            logger.info(f"[ReAct] 流式LLM调用完成，总共收到 {chunk_count} 个块")

            # Debug: log (a prefix of) the full LLM response.
            logger.info(f"[ReAct] LLM 响应: {full_response[:200]}...")

            # Parse thought / action / action-input out of the raw text.
            parsed_result = self._parse_thought_action(full_response)

            # Emit the parsed thought and action as structured events.
            if stream_callback and parsed_result:
                if parsed_result.get("thought"):
                    await stream_callback({
                        "type": "thought",
                        "iteration": react_state["metadata"]["iteration_count"],
                        "content": parsed_result["thought"]
                    })

                if parsed_result.get("action"):
                    await stream_callback({
                        "type": "action",
                        "iteration": react_state["metadata"]["iteration_count"],
                        "action": parsed_result["action"],
                        "input": parsed_result.get("action_input", "")
                    })

            return parsed_result
            
        except Exception as e:
            logger.error(f"[ReAct] 生成思考行动失败: {e}")
            return {
                "thought": "我需要为用户提供帮助",
                "action": "Final Answer",
                "action_input": "抱歉，我在思考过程中遇到了问题。请告诉我您需要什么帮助？"
            }
    
    async def _execute_action(
        self,
        action: str,
        action_input: str,
        react_state: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Execute the named tool and fold its output into the shared metadata.

        Returns the tool's observation dict; unknown tools and tool exceptions
        yield an error observation instead of raising.
        """
        try:
            logger.info(f"[ReAct] 检查工具: {action}, 可用工具: {list(self.tools.keys())}")

            if action in self.tools:
                logger.info(f"[ReAct] 找到工具 {action}，开始执行...")
                tool = self.tools[action]
                observation = await tool.execute(action_input, react_state)

                # Update shared metadata from the tool's observation.
                react_state["metadata"]["tools_used"].append(action)
                logger.info(f"[ReAct] 工具 {action} 执行完成")
                if "emotion" in observation:
                    react_state["metadata"]["emotion"] = observation["emotion"]
                if "confidence" in observation:
                    react_state["metadata"]["confidence"] = observation["confidence"]
                if "crisis_level" in observation:
                    react_state["metadata"]["crisis_level"] = observation["crisis_level"]
                
                return observation
            else:
                return {
                    "result": f"未知的工具: {action}",
                    "error": True
                }
                
        except Exception as e:
            logger.error(f"[ReAct] 执行行动失败 {action}: {e}")
            return {
                "result": f"执行 {action} 时出错: {str(e)}",
                "error": True
            }
    
    def _build_context(self, react_state: Dict[str, Any]) -> str:
        """Render prior thought/action/observation triples as prompt context."""
        context_parts = []

        # Replay every recorded thought, action and observation in order.
        for i, (thought, action, observation) in enumerate(zip(
            react_state["thoughts"],
            react_state["actions"],
            react_state["observations"]
        )):
            context_parts.append(f"思考 {i+1}: {thought}")
            context_parts.append(f"行动 {i+1}: {action['action']}[{action['action_input']}]")

            # conversation_history observations get richer formatting.
            if action['action'] == 'conversation_history':
                observation_text = self._format_conversation_history_observation(observation)
            else:
                observation_text = observation.get('result', str(observation))

            context_parts.append(f"观察 {i+1}: {observation_text}")

        return "\n".join(context_parts)

    def _should_force_stop(self, react_state: Dict[str, Any], current_action: str, iteration: int) -> bool:
        """Decide whether to force-terminate the ReAct loop.

        Stops when the current tool has hit its per-tool call limit, when too
        many distinct tools have been used, or when the total number of tool
        calls is excessive. Errors in this check never abort the loop
        (returns False on exception).
        """
        try:
            actions = react_state.get("actions", [])
            observations = react_state.get("observations", [])

            # Per-tool call limits — deliberately lenient.
            tool_limits = {
                "emotion_analysis": 2,      # emotion analysis: up to 2 (re-evaluation possible)
                "conversation_history": 2,   # history retrieval: up to 2 (different queries)
                "safety_check": 2,          # safety check: up to 2 (situation may change)
                "knowledge_retrieval": 3,   # knowledge retrieval: up to 3 (different angles)
                "professional_advice": 2    # professional advice: up to 2 (combined advice)
            }

            # Count how many times each tool has been called so far.
            action_counts = {}
            for action in actions:
                action_name = action.get("action", "")
                if action_name:  # skip empty action names
                    action_counts[action_name] = action_counts.get(action_name, 0) + 1

            # Has the current tool exceeded its limit?
            current_count = action_counts.get(current_action, 0)
            limit = tool_limits.get(current_action, 2)  # default limit: 2 calls

            if current_count >= limit:
                logger.info(f"[ReAct] 工具 {current_action} 已达到调用上限 {limit}，强制停止")
                return True

            # Sufficiency-based stopping was deemed too strict; the model decides.
            # (Kept for reference; _has_sufficient_information is otherwise unused.)
            # if self._has_sufficient_information(react_state, current_action):
            #     logger.info(f"[ReAct] 已获得足够信息，无需调用 {current_action}，强制停止")
            #     return True

            # Global stop conditions — very lenient, mainly to prevent infinite loops.
            unique_tools = len([tool for tool in action_counts.keys() if tool])
            total_calls = sum(action_counts.values())

            # Only force-stop in extreme cases.
            if unique_tools >= 5:  # allow at most 4 distinct tools
                logger.info(f"[ReAct] 已调用 {unique_tools} 个不同工具，可能过多，强制停止")
                return True

            # Force-stop if the total call count is extreme.
            if total_calls >= 8:  # generous overall cap
                logger.info(f"[ReAct] 总工具调用次数 {total_calls} 过多，强制停止")
                return True

            return False

        except Exception as e:
            logger.error(f"[ReAct] 检查强制停止条件失败: {e}")
            return False

    def _has_sufficient_information(self, react_state: Dict[str, Any], current_action: str) -> bool:
        """Heuristic: is there already enough information to skip this tool?

        NOTE(review): currently only referenced from commented-out code in
        _should_force_stop — effectively dead, kept for possible re-enabling.
        """
        try:
            actions = react_state.get("actions", [])
            observations = react_state.get("observations", [])

            # Count prior calls of the tool under consideration.
            current_tool_calls = sum(1 for action in actions if action.get("action") == current_action)

            # Never been called -> definitely still needed.
            if current_tool_calls == 0:
                return False  # not called yet; the call is required

            # Tool-specific checks — require an explicit result in the observations.
            if current_action == "emotion_analysis":
                # Sufficient only once a concrete emotion-analysis result exists.
                emotion_found = False
                for obs in observations:
                    obs_str = str(obs).lower()
                    if any(keyword in obs_str for keyword in ["检测到主要情绪", "emotion", "置信度"]):
                        emotion_found = True
                        break
                return emotion_found and current_tool_calls >= 1

            elif current_action == "conversation_history":
                # Sufficient only once related conversation records were retrieved.
                history_found = False
                for obs in observations:
                    if obs.get("related_conversations") or "相关对话记录" in str(obs):
                        history_found = True
                        break
                return history_found and current_tool_calls >= 1

            elif current_action == "safety_check":
                # Sufficient only once a safety-check result exists.
                safety_found = False
                for obs in observations:
                    obs_str = str(obs).lower()
                    if any(keyword in obs_str for keyword in ["安全检查完成", "风险等级", "crisis"]):
                        safety_found = True
                        break
                return safety_found and current_tool_calls >= 1

            # Other tools: a looser rule.
            return current_tool_calls >= 2  # allow up to 2 calls

        except Exception as e:
            logger.error(f"[ReAct] 检查信息充分性失败: {e}")
            return False

    def _format_conversation_history_observation(self, observation: Dict[str, Any]) -> str:
        """Format a conversation_history observation into readable text.

        Appends up to three related conversation snippets (source, similarity
        score, truncated content) after the tool's base result string. Falls
        back to the raw result on any formatting error.
        """
        try:
            result_text = observation.get('result', '')
            related_conversations = observation.get('related_conversations', [])

            if not related_conversations:
                return result_text

            # Build a detailed listing of the retrieved conversations.
            formatted_parts = [result_text]
            formatted_parts.append("\n相关对话记录:")

            for i, conv in enumerate(related_conversations[:3]):  # only the top 3 most relevant
                content = conv.get('content', '')
                score = conv.get('score', 0)
                metadata = conv.get('metadata', {})
                source = metadata.get('source', 'unknown')

                # Truncate overly long content.
                if len(content) > 200:
                    content = content[:200] + "..."

                formatted_parts.append(f"  {i+1}. 来源: {source} (相似度: {score:.3f})")
                formatted_parts.append(f"     内容: {content}")

            return "\n".join(formatted_parts)

        except Exception as e:
            logger.error(f"[ReAct] 格式化conversation_history观察结果失败: {e}")
            return observation.get('result', str(observation))
    
    def _format_chat_history(self, chat_history: List[Dict[str, str]]) -> str:
        """Format chat history for the prompt (most recent 5 turns only)."""
        if not chat_history:
            return "无对话历史"
        
        formatted = []
        for msg in chat_history[-5:]:  # keep only the last 5 messages
            role = "用户" if msg["role"] == "human" else "助手"
            formatted.append(f"{role}: {msg['content']}")
        
        return "\n".join(formatted)
    
    def _get_available_tools_description(self) -> str:
        """Render the tool registry as a "- name: description" list for the prompt."""
        descriptions = []
        for name, tool in self.tools.items():
            descriptions.append(f"- {name}: {tool.description}")
        return "\n".join(descriptions)
    
    def _parse_thought_action(self, response: str) -> Dict[str, str]:
        """Parse the first thought/action/action-input from a raw LLM response.

        Supports both Chinese and English ReAct labels. If no action is found,
        either promotes the thought to a direct final answer (when the thought
        signals one) or defaults to emotion analysis as the first step.
        """
        lines = response.strip().split('\n')

        thought = ""
        action = ""
        action_input = ""

        # Take the first valid thought/action; stop at observations or final answers.
        for line in lines:
            line = line.strip()
            if not line:
                continue

            # Observations and final answers are system-generated — stop parsing there.
            if (line.startswith("观察:") or line.startswith("Observation:") or
                line.startswith("最终回答:") or line.startswith("Final Answer:")):
                break

            # Accept both Chinese and English labels.
            if line.startswith("思考:") or line.startswith("Thought:"):
                if not thought:  # keep only the first thought
                    thought = line.split(":", 1)[1].strip()
            elif line.startswith("行动:") or line.startswith("Action:"):
                if not action:  # keep only the first action
                    action = line.split(":", 1)[1].strip()
            elif line.startswith("行动输入:") or line.startswith("Action Input:"):
                if not action_input:  # keep only the first action input
                    action_input = line.split(":", 1)[1].strip()

        # No explicit action: check whether the thought signals a direct answer.
        if not action:
            # Keywords indicating the model believes it can answer directly.
            if thought and any(keyword in thought.lower() for keyword in [
                "可以直接", "直接回应", "直接回答", "不需要调用", "不需要工具",
                "简单问候", "简单的问候", "直接友好回应", "已经获得", "足以回答",
                "现在可以直接回答", "不需要再次调用", "立即回答"
            ]):
                action = "最终回答"
                action_input = thought
            else:
                # Default first step: emotion analysis.
                action = "emotion_analysis"
                action_input = "分析用户的情绪状态"

        # Action without input: reuse the thought (or a default) as the input.
        if action and not action_input:
            action_input = thought if thought else "分析用户输入"

        logger.info(f"[ReAct] 解析结果 - 思考: {thought[:50]}..., 行动: {action}, 输入: {action_input[:50]}...")

        return {
            "thought": thought,
            "action": action,
            "action_input": action_input
        }
    
    async def _generate_final_answer(self, react_state: Dict[str, Any], stream_callback=None) -> str:
        """Generate the final answer from the accumulated trace (streamed if possible).

        Summarises all tool observations into a prompt and calls the LLM; with
        a stream_callback, emits final_answer_start / content / final_answer_complete
        events. Returns a fixed fallback message on any exception.
        """
        try:
            # Build a detailed summary of the tool observations.
            observations_summary = []
            conversation_history_found = False

            for i, (action, observation) in enumerate(zip(react_state["actions"], react_state["observations"])):
                action_name = action.get("action", "unknown")

                if action_name == "conversation_history":
                    # conversation_history results get the richer formatting.
                    formatted_obs = self._format_conversation_history_observation(observation)
                    observations_summary.append(f"对话历史检索结果: {formatted_obs}")
                    conversation_history_found = True
                else:
                    observations_summary.append(f"{action_name}结果: {observation.get('result', str(observation))}")

            observations_text = "\n".join(observations_summary)

            # Build the final-answer prompt.
            final_prompt = f"""
基于以下信息为用户提供最终回复：

用户问题: {react_state["user_input"]}
工具执行结果: {observations_text}

请注意：
1. 如果用户询问之前的对话内容，请直接引用对话历史检索结果中的具体内容
2. 如果检索到了相关对话记录，请明确告诉用户找到了什么内容
3. 提供专业、有帮助的心理健康建议回复
4. 保持温暖、理解和支持的语调
"""

            # Stream the final answer when a callback is available.
            if stream_callback:
                logger.info(f"[ReAct] 开始流式生成最终答案...")

                # Signal that the final answer is starting.
                await stream_callback({
                    "type": "final_answer_start",
                    "message": "正在生成最终回答..."
                })

                full_response = ""
                chunk_count = 0

                stream_generator = self.llm.astream([HumanMessage(content=final_prompt)])
                async for chunk in stream_generator:
                    chunk_count += 1
                    content = chunk.content if hasattr(chunk, 'content') else str(chunk)

                    if content:
                        full_response += content
                        # Forward the content fragment.
                        await stream_callback({
                            "type": "content",
                            "content": content,
                            "chunk_id": chunk_count
                        })

                logger.info(f"[ReAct] 流式最终答案生成完成，总共 {chunk_count} 个块")

                # Signal completion with the full assembled text.
                await stream_callback({
                    "type": "final_answer_complete",
                    "full_content": full_response
                })

                return full_response
            else:
                # Non-streaming fallback (compatibility path).
                response = await self.llm.ainvoke([HumanMessage(content=final_prompt)])
                return response.content

        except Exception as e:
            logger.error(f"[ReAct] 生成最终答案失败: {e}")
            return "感谢您的信任。基于我们的对话，我建议您保持积极的心态，如有需要请寻求专业帮助。"


# Module-level singleton instance.
# NOTE(review): constructing ReActController here creates the ChatOpenAI client
# at import time — confirm this side effect is intended.
react_controller = ReActController()
