"""用于自我批判和响应优化的反思模块。"""

import re
from typing import Any, Dict, List, Tuple
from json_repair import repair_json

from langchain.schema import HumanMessage
from llm.llm_manager import get_llm_manager
from .agent_state import AgentState
from model.reflection_model import (
    ReflectionType, CritiqueResult, ReflectionIssue, ReflectionSeverity, RefinementResult
)
from agent.agent_prompts import critique_prompt_template, reflection_refinement_prompt_template


class ReflectionModule:
    """Reflection module for self-critique and response refinement.

    Iteratively asks the LLM to critique a generated response and, when the
    critique flags problems, asks it to refine the response — until the
    quality threshold is met or the iteration budget is exhausted.
    """

    def __init__(self,
                 quality_threshold: float = 0.7,
                 max_refinement_iterations: int = 3,
                 verbose: bool = False):
        """Initialize the reflection module.

        Args:
            quality_threshold: Minimum quality score required to accept a response.
            max_refinement_iterations: Maximum number of refinement-loop passes.
            verbose: Whether to print the detailed reflection process.
        """
        self.quality_threshold = quality_threshold
        self.max_refinement_iterations = max_refinement_iterations
        self.verbose = verbose
        self.llm_manager = get_llm_manager()

    async def reflect_and_refine(self,
                                state: AgentState,
                                response: str,
                                reasoning_steps: List[Dict[str, Any]]) -> Tuple[str, Dict[str, Any]]:
        """Main reflect-and-refine loop.

        Args:
            state: Current agent state.
            response: Initial response to reflect on.
            reasoning_steps: Reasoning steps that produced the response.

        Returns:
            Tuple of (refined_response, reflection_metadata), where
            reflection_metadata describes the process: iteration count,
            final quality score, improvements made, and the full history.
        """
        if self.verbose:
            print("\n🔍 开始反思 ...")

        current_response = response
        reflection_history: List[Dict[str, Any]] = []
        total_improvements: List[str] = []

        for iteration in range(self.max_refinement_iterations):
            if self.verbose:
                print(f"\n📝 反思迭代轮次：{iteration + 1}/{self.max_refinement_iterations}")

            critique = await self.self_critique(
                state, current_response, reasoning_steps
            )

            reflection_history.append({
                "iteration": iteration + 1,
                "critique": critique,
                "response_at_iteration": current_response
            })

            if self.verbose:
                print(f"🎯 回复质量评分: {critique.overall_quality:.2f}")
                print(f"🔍 问题个数: {len(critique.issues)}")
                print(f"✅ 优点个数: {len(critique.strengths)}")

            # Accept the response once it clears the threshold AND the
            # critique does not explicitly request further refinement.
            if critique.overall_quality >= self.quality_threshold and not critique.needs_refinement:
                if self.verbose:
                    # Fixed typo: 以满足 -> 已满足
                    print("✅ 已满足回复质量评分阈值，停止优化.")
                break

            if critique.needs_refinement:
                refinement = await self.refine_response(
                    state, current_response, critique, reasoning_steps
                )

                current_response = refinement.refined_response
                total_improvements.extend(refinement.improvements_made)

                if self.verbose:
                    print("🔧 完成优化.")
                    print(f"📈 回复质量评分改进：+{refinement.quality_improvement:.2f}")
            else:
                # Quality is below threshold but the critique offered no
                # refinement — nothing further can be done automatically.
                if self.verbose:
                    print("🛑 尽管质量得分较低，但无法优化")
                break

        # Hoist the final critique lookup instead of repeating the
        # conditional indexing for each metadata field.
        final_critique = reflection_history[-1]["critique"] if reflection_history else None
        metadata = {
            "reflection_iterations": len(reflection_history),
            "final_quality_score": final_critique.overall_quality if final_critique else 0.0,
            "total_improvements": total_improvements,
            "reflection_history": reflection_history,
            "quality_threshold": self.quality_threshold,
            "threshold_met": (final_critique.overall_quality >= self.quality_threshold
                              if final_critique else False)
        }

        if self.verbose:
            print("\n🎉 Reflection complete!")
            print(f"📊 Final quality: {metadata['final_quality_score']:.2f}")
            print(f"🔧 Total improvements: {len(total_improvements)}")

        return current_response, metadata

    async def self_critique(self,
                           state: AgentState,
                           response: str,
                           reasoning_steps: List[Dict[str, Any]]) -> CritiqueResult:
        """Ask the LLM to critique a response.

        Args:
            state: Current agent state.
            response: Response to critique.
            reasoning_steps: Reasoning steps already executed.

        Returns:
            CritiqueResult with the detailed analysis; on LLM failure, a
            neutral fallback critique that does not request refinement.
        """
        if self.verbose:
            print("🤔 进行自我批评...")

        critique_prompt = self._create_critique_prompt(state, response, reasoning_steps)
        messages = [
            HumanMessage(content=critique_prompt)
        ]

        try:
            llm_response = await self.llm_manager.ainvoke(messages, state.get("session_id"))
            critique_text = llm_response.content

            if self.verbose:
                print("🔍 批评分析已完成.")
            return self._parse_critique_response(critique_text)

        except Exception as e:
            if self.verbose:
                print(f"❌ 批评执行失败: {str(e)}")

            # Fall back to a neutral critique so the caller can continue.
            return CritiqueResult(
                overall_quality=0.5,
                confidence=0.3,
                issues=[ReflectionIssue(
                    type=ReflectionType.REASONING,
                    severity=ReflectionSeverity.MAJOR,
                    description="由于错误，无法进行自我批判。",
                    suggestion="建议进行人工审核",
                    confidence=0.3
                )],
                strengths=[],
                needs_refinement=False,
                reasoning=f"批判失败: {str(e)}",
                metadata={"error": str(e)}
            )

    async def refine_response(self,
                             state: AgentState,
                             original_response: str,
                             critique: CritiqueResult,
                             reasoning_steps: List[Dict[str, Any]]) -> RefinementResult:
        """Refine a response based on critique feedback.

        Args:
            state: Current agent state.
            original_response: Original response to refine.
            critique: Critique result containing the issues to address.
            reasoning_steps: Original reasoning steps.

        Returns:
            RefinementResult with the refined response; on LLM failure, a
            fallback result that keeps the original response unchanged.
        """
        if self.verbose:
            print("🔧 根据批评意见优化响应...")

        refinement_prompt = self._create_refinement_prompt(
            state, original_response, critique, reasoning_steps
        )
        messages = [
            HumanMessage(content=refinement_prompt)
        ]

        try:
            llm_response = await self.llm_manager.ainvoke(messages, state.get("session_id"))
            refinement_text = llm_response.content

            if self.verbose:
                print("✨ 响应优化已完成.")
            return self._parse_refinement_response(refinement_text, critique)

        except Exception as e:
            if self.verbose:
                print(f"❌ 反思失败: {str(e)}")

            return RefinementResult(
                refined_response=original_response,
                improvements_made=[],
                quality_improvement=0.0,
                confidence=0.3,
                refinement_reasoning=f"反思失败: {str(e)}",
                metadata={"error": str(e)}
            )

    def _create_critique_prompt(self,
                               state: AgentState,
                               response: str,
                               reasoning_steps: List[Dict[str, Any]]) -> str:
        """Build the self-critique prompt from the state and reasoning trace."""
        # Accumulate in a list and join once instead of string +=.
        parts: List[str] = []
        for i, step in enumerate(reasoning_steps, 1):
            if step.get("thought"):
                parts.append(f"Step {i} - Thought: {step['thought']}\n")
            if step.get("action"):
                parts.append(f"Step {i} - Action: {step['action']} with input: {step.get('action_input', 'N/A')}\n")
            if step.get("observation"):
                parts.append(f"Step {i} - Observation: {step['observation']}\n")
            parts.append("\n")

        return critique_prompt_template.format(
            inputs={
                'original_question': state['input'],
                'reasoning_steps': "".join(parts),
                'generated_response': response,
            },
            remove_template_variables=True
        )

    def _create_refinement_prompt(self,
                                 state: AgentState,
                                 original_response: str,
                                 critique: CritiqueResult,
                                 reasoning_steps: List[Dict[str, Any]]) -> str:
        """Build the response-refinement prompt from the critique findings."""
        issue_parts: List[str] = []
        for issue in critique.issues:
            issue_parts.append(f"- {issue.type.value.upper()}: {issue.description}\n")
            issue_parts.append(f"  Suggestion: {issue.suggestion}\n")
            issue_parts.append(f"  Severity: {issue.severity.value}\n\n")

        return reflection_refinement_prompt_template.format(
            inputs={
                "original_question": state['input'],
                "original_response": original_response,
                "overall_quality": f'{critique.overall_quality:.2f}',
                "issues_text": "".join(issue_parts),
                "strengths": '\n'.join(f'- {strength}' for strength in critique.strengths)
            },
            remove_template_variables=True
        )

    def _parse_critique_response(self, critique_text: str) -> CritiqueResult:
        """Parse the LLM critique response into a structured CritiqueResult."""
        try:
            # Prefer the outermost JSON object if one is embedded in prose.
            json_match = re.search(r'\{.*\}', critique_text, re.DOTALL)
            raw = json_match.group() if json_match else critique_text
            critique_data = repair_json(raw, return_objects=True)
            # repair_json may return a list/str; route that to the fallback
            # explicitly instead of failing on an AttributeError below.
            if not isinstance(critique_data, dict):
                raise ValueError("critique response is not a JSON object")

            issues: List[ReflectionIssue] = []
            dimensions = critique_data.get("dimensions", {})

            for dim_name, dim_data in dimensions.items():
                # A malformed dimension must not abort the whole parse.
                if not isinstance(dim_data, dict) or not dim_data.get("issues"):
                    continue
                # Unknown dimension names are mapped to REASONING rather
                # than raising ValueError and discarding the entire critique.
                try:
                    issue_type = ReflectionType(dim_name.lower())
                except ValueError:
                    issue_type = ReflectionType.REASONING
                # `or` guards against a present-but-empty suggestions list,
                # which would make [0] raise IndexError.
                suggestions = dim_data.get("suggestions") or ["Review and improve"]
                for issue_text in dim_data["issues"]:
                    issues.append(ReflectionIssue(
                        type=issue_type,
                        severity=ReflectionSeverity.MAJOR,  # default severity
                        description=issue_text,
                        suggestion=suggestions[0],
                        confidence=critique_data.get("confidence", 0.7)
                    ))

            return CritiqueResult(
                overall_quality=critique_data.get("overall_quality", 0.5),
                confidence=critique_data.get("confidence", 0.7),
                issues=issues,
                strengths=critique_data.get("strengths", []),
                needs_refinement=critique_data.get("needs_refinement", True),
                reasoning=critique_data.get("reasoning", "Automated critique analysis"),
                metadata={"raw_response": critique_text}
            )

        except Exception as e:
            return CritiqueResult(
                overall_quality=0.5,
                confidence=0.3,
                issues=[ReflectionIssue(
                    type=ReflectionType.REASONING,
                    severity=ReflectionSeverity.MAJOR,
                    description="解析评论响应失败",
                    suggestion="建议进行人工审核",
                    confidence=0.3
                )],
                strengths=[],
                needs_refinement=False,
                reasoning=f"解析失败：{str(e)}",
                metadata={"error": str(e), "raw_response": critique_text}
            )

    def _parse_refinement_response(self,
                                  refinement_text: str,
                                  original_critique: CritiqueResult) -> RefinementResult:
        """Parse the LLM refinement response into a structured RefinementResult."""
        try:
            json_match = re.search(r'\{.*\}', refinement_text, re.DOTALL)
            raw = json_match.group() if json_match else refinement_text
            refinement_data = repair_json(raw, return_objects=True)
            # Same guard as in critique parsing: non-dict output goes to
            # the fallback path cleanly.
            if not isinstance(refinement_data, dict):
                raise ValueError("refinement response is not a JSON object")

            return RefinementResult(
                refined_response=refinement_data.get("refined_response", "优化失败"),
                improvements_made=refinement_data.get("improvements_made", []),
                quality_improvement=refinement_data.get("quality_improvement", 0.0),
                confidence=refinement_data.get("confidence", 0.7),
                refinement_reasoning=refinement_data.get("refinement_reasoning", "Automated refinement"),
                metadata={"raw_response": refinement_text}
            )

        except Exception as e:
            # Fall back to the raw LLM text as the refined response.
            return RefinementResult(
                refined_response=refinement_text,
                improvements_made=["基于批评意见进行的优化尝试"],
                quality_improvement=0.1,
                confidence=0.3,
                refinement_reasoning=f"解析错误，使用原始响应: {str(e)}",
                metadata={"error": str(e), "raw_response": refinement_text}
            )