"""
性能优化模块
为提示词工程工作流提供性能优化功能，包括执行时间优化、迭代算法优化和缓存机制
"""

import time
import hashlib
from typing import Dict, Any, Optional, List
from functools import lru_cache
from src.research_core.prompt_eng_state import PromptEngineeringState


class PerformanceOptimizer:
    """
    Performance optimizer for the prompt-engineering workflow.

    Responsibilities:
    - Reduce workflow execution time for faster responses.
    - Help the iterative loop converge to a high-quality prompt sooner.
    - Cache analysis / evaluation / optimization results so identical
      work is never recomputed.
    """

    def __init__(self) -> None:
        # Result caches, keyed by an MD5 digest of the relevant inputs
        # (see _generate_hash).
        self._prompt_cache: Dict[str, Dict[str, Any]] = {}
        self._evaluation_cache: Dict[str, Dict[str, Any]] = {}
        self._optimization_cache: Dict[str, str] = {}

        # Raw per-event samples, aggregated on demand by
        # get_performance_stats().
        self._performance_stats: Dict[str, List[float]] = {
            "analysis_time": [],
            "evaluation_time": [],
            "optimization_time": [],
            "cache_hits": [],
            "cache_misses": []
        }

    def optimize_workflow_execution(self, state: 'PromptEngineeringState') -> 'PromptEngineeringState':
        """
        Optimize one workflow step and record its execution time.

        Dispatches to a stage-specific optimizer based on
        ``state['current_stage']`` and stores the measured wall-clock time
        both on the state and in the internal statistics.

        Args:
            state: Current workflow state (dict-like).

        Returns:
            The possibly-updated workflow state.
        """
        start_time = time.time()
        current_stage = state.get('current_stage', 'unknown')

        # Stage-specific fast paths (mostly cache lookups).
        if current_stage == "analysis":
            state = self._optimize_analysis_stage(state)
        elif current_stage == "evaluation":
            state = self._optimize_evaluation_stage(state)
        elif current_stage == "optimization":
            state = self._optimize_optimization_stage(state)

        # Record wall-clock time on the state itself...
        execution_time = time.time() - start_time
        if 'execution_time' not in state:
            state['execution_time'] = {}
        state['execution_time'][current_stage] = execution_time

        # ...and in this optimizer's own statistics. The three tracked
        # stages always have a matching "<stage>_time" key from __init__.
        if current_stage in ("analysis", "evaluation", "optimization"):
            self._performance_stats[f"{current_stage}_time"].append(execution_time)

        return state

    def _record_cache_event(self, hit: bool) -> None:
        """Record one cache lookup outcome (hit=True, miss=False)."""
        self._performance_stats['cache_hits'].append(1 if hit else 0)
        self._performance_stats['cache_misses'].append(0 if hit else 1)

    def _optimize_analysis_stage(self, state: 'PromptEngineeringState') -> 'PromptEngineeringState':
        """
        Speed up the analysis stage by reusing cached analysis results.

        Args:
            state: Current workflow state.

        Returns:
            The state, populated from the cache on a hit.
        """
        requirement = state.get('requirement', '')
        requirement_hash = self._generate_hash(requirement)

        cached_result = self._prompt_cache.get(requirement_hash)
        if cached_result is not None:
            # Cache hit: reuse the stored analysis directly.
            state['requirement_analysis'] = cached_result['analysis']
            state['context_info'] = cached_result.get('context_info', {})
            self._record_cache_event(hit=True)
        else:
            self._record_cache_event(hit=False)
            # Cache miss: the analysis agent runs normally; its result is
            # stored later via cache_analysis_result(). Future optimizations
            # could include parallel analysis tasks, cheaper analysis
            # algorithms, or early termination of unnecessary analysis.

        return state

    def _optimize_evaluation_stage(self, state: 'PromptEngineeringState') -> 'PromptEngineeringState':
        """
        Speed up the evaluation stage by reusing cached evaluation results.

        Args:
            state: Current workflow state.

        Returns:
            The state, populated from the cache on a hit.
        """
        current_prompt = state.get('current_prompt', '')
        requirement = state.get('requirement', '')

        # Nothing to evaluate yet.
        if not current_prompt:
            return state

        # Key combines prompt and requirement so the same prompt evaluated
        # against a different requirement is not served a stale result.
        eval_key = self._generate_hash(f"{current_prompt}||{requirement}")

        cached_evaluation = self._evaluation_cache.get(eval_key)
        if cached_evaluation is not None:
            state['prompt_evaluation'] = cached_evaluation.get('evaluation', '')
            state['quality_score'] = cached_evaluation.get('quality_score', 0.0)
            state['quality_evaluation_details'] = cached_evaluation.get('details', {})
            self._record_cache_event(hit=True)
        else:
            self._record_cache_event(hit=False)
            # Cache miss: the workflow stores the real result afterwards via
            # cache_evaluation_result(). Possible future optimizations:
            # incremental evaluation of changed parts only, early termination
            # when quality is clearly sufficient/insufficient, and parallel
            # evaluation of independent dimensions.

        return state

    def _optimize_optimization_stage(self, state: 'PromptEngineeringState') -> 'PromptEngineeringState':
        """
        Speed up the optimization stage by reusing cached optimized prompts.

        Args:
            state: Current workflow state.

        Returns:
            The state, with ``current_prompt`` replaced on a cache hit.
        """
        current_prompt = state.get('current_prompt', '')
        requirement = state.get('requirement', '')

        # No prompt to optimize yet.
        if not current_prompt:
            return state

        optimization_key = self._generate_hash(f"{current_prompt}||{requirement}")

        if optimization_key in self._optimization_cache:
            # Cache hit: replace the prompt with the known-optimized version.
            state['current_prompt'] = self._optimization_cache[optimization_key]
            self._record_cache_event(hit=True)
        else:
            self._record_cache_event(hit=False)
            # Cache miss: the optimization agent runs normally; results are
            # stored via cache_optimization_result(). Candidate algorithms
            # for the future: learning from past optimizations, genetic /
            # simulated-annealing search, gradient-style methods, and early
            # convergence detection when quality gains stall.

        return state

    def cache_analysis_result(self, requirement: str, analysis: str, context_info: Optional[Dict[str, Any]] = None) -> None:
        """
        Store an analysis result for later reuse.

        Args:
            requirement: User requirement that was analyzed.
            analysis: Analysis result text.
            context_info: Optional extra context gathered during analysis.
        """
        requirement_hash = self._generate_hash(requirement)
        self._prompt_cache[requirement_hash] = {
            'analysis': analysis,
            'context_info': context_info or {},
            'timestamp': time.time()
        }

    def cache_evaluation_result(self, prompt: str, requirement: str, evaluation: str,
                              quality_score: float, details: Optional[Dict[str, Any]] = None) -> None:
        """
        Store an evaluation result for later reuse.

        Args:
            prompt: Prompt that was evaluated.
            requirement: User requirement it was evaluated against.
            evaluation: Evaluation result text.
            quality_score: Numeric quality score.
            details: Optional detailed evaluation breakdown.
        """
        eval_key = self._generate_hash(f"{prompt}||{requirement}")
        self._evaluation_cache[eval_key] = {
            'evaluation': evaluation,
            'quality_score': quality_score,
            'details': details or {},
            'timestamp': time.time()
        }

    def cache_optimization_result(self, original_prompt: str, requirement: str, optimized_prompt: str) -> None:
        """
        Store an optimized prompt for later reuse.

        Args:
            original_prompt: Prompt before optimization.
            requirement: User requirement driving the optimization.
            optimized_prompt: Prompt after optimization.
        """
        optimization_key = self._generate_hash(f"{original_prompt}||{requirement}")
        self._optimization_cache[optimization_key] = optimized_prompt

    def _calculate_complexity_factor(self, question: str) -> float:
        """
        Estimate how complex a research question is.

        Args:
            question: Research question text.

        Returns:
            Complexity factor clamped to [0.5, 2.0].
        """
        complexity = 1.0

        # Length-based adjustment.
        # FIX: the original code tested `> 50` twice, which made the 1.2
        # tier unreachable; the tiers now mirror the 100/50 thresholds used
        # by _calculate_length_factor.
        if len(question) > 100:
            complexity *= 1.5
        elif len(question) > 50:
            complexity *= 1.2
        elif len(question) < 10:
            complexity *= 0.8

        # Keyword-based adjustment (keywords signalling breadth/depth).
        complex_keywords = ["全面", "深入", "详细", "综合", "系统", "全方位", "多维度"]
        medium_keywords = ["研究", "分析", "探讨", "调查", "了解"]

        if any(keyword in question for keyword in complex_keywords):
            complexity *= 1.5
        elif any(keyword in question for keyword in medium_keywords):
            complexity *= 1.2

        # Clamp to the documented range.
        return max(0.5, min(2.0, complexity))

    def _calculate_length_factor(self, question: str) -> float:
        """
        Map question length to a scaling factor.

        Args:
            question: Research question text.

        Returns:
            Length factor in [0.8, 2.0].
        """
        length = len(question)

        if length > 100:
            return 2.0
        elif length > 50:
            return 1.2
        elif length > 20:
            return 1.0
        else:
            return 0.8

    def calculate_dynamic_timeout(self, question: str) -> int:
        """
        Compute a timeout proportional to question complexity and length.

        Args:
            question: Research question text.

        Returns:
            Timeout in seconds, clamped to [60, 1800].
        """
        # 5-minute baseline, scaled by the two heuristics below.
        base_timeout = 300

        complexity_factor = self._calculate_complexity_factor(question)
        length_factor = self._calculate_length_factor(question)

        timeout = int(base_timeout * complexity_factor * length_factor)

        # Clamp to between 1 and 30 minutes.
        return max(60, min(1800, timeout))

    def get_workflow_config(self, question: str) -> Dict[str, Any]:
        """
        Build a workflow configuration tuned to the question.

        Args:
            question: Research question text.

        Returns:
            Configuration dict with ``use_checkpointer``, ``recursion_limit``,
            ``max_iterations`` and ``timeout`` keys.
        """
        complexity_factor = self._calculate_complexity_factor(question)

        # Baseline configuration.
        config = {
            "use_checkpointer": True,
            "recursion_limit": 50,
            "max_iterations": 10,
            "timeout": self.calculate_dynamic_timeout(question)
        }

        # Complex questions get tighter limits to bound total work.
        if complexity_factor > 1.5:
            config["recursion_limit"] = 30
            config["max_iterations"] = 5
        elif complexity_factor > 1.0:
            config["recursion_limit"] = 40
            config["max_iterations"] = 8
        else:
            config["recursion_limit"] = 60
            config["max_iterations"] = 15

        # Length overrides: fewer iterations for long questions, more for
        # short ones.
        if len(question) > 100:
            config["max_iterations"] = 5
        elif len(question) < 20:
            config["max_iterations"] = 15

        # Trend/recency questions get extra exploration headroom.
        if any(keyword in question.lower() for keyword in ["最新", "趋势", "发展"]):
            config["recursion_limit"] = 100
            config["max_iterations"] = 20

        return config

    def _generate_hash(self, content: str) -> str:
        """
        Hash *content* for use as a cache key.

        MD5 is used purely as a fast, stable fingerprint — not for security.

        Args:
            content: Text to fingerprint.

        Returns:
            Hex digest string.
        """
        return hashlib.md5(content.encode('utf-8')).hexdigest()

    def get_performance_stats(self) -> Dict[str, Any]:
        """
        Aggregate the recorded performance samples.

        Returns:
            Mapping of metric name to count/total/average/min/max.
        """
        stats = {}
        for key, values in self._performance_stats.items():
            if values:
                stats[key] = {
                    'count': len(values),
                    'total': sum(values),
                    'average': sum(values) / len(values),
                    'min': min(values),
                    'max': max(values)
                }
            else:
                # No samples yet: report an all-zero summary.
                stats[key] = {
                    'count': 0,
                    'total': 0,
                    'average': 0,
                    'min': 0,
                    'max': 0
                }
        return stats

    def clear_cache(self) -> None:
        """Empty every cache (performance statistics are kept)."""
        self._prompt_cache.clear()
        self._evaluation_cache.clear()
        self._optimization_cache.clear()

    def get_cache_info(self) -> Dict[str, int]:
        """
        Report the current size of each cache.

        Returns:
            Mapping of cache name to entry count.
        """
        return {
            'prompt_cache_size': len(self._prompt_cache),
            'evaluation_cache_size': len(self._evaluation_cache),
            'optimization_cache_size': len(self._optimization_cache)
        }


# Module-level singleton so caches and statistics persist across workflow
# invocations (accessed through get_performance_optimizer()).
#
# NOTE(review): a duplicate `__all__` declaration used to live here as well;
# the canonical `__all__` at the bottom of the file is the one kept.
_performance_optimizer = PerformanceOptimizer()


def get_performance_optimizer() -> PerformanceOptimizer:
    """Return the shared module-level :class:`PerformanceOptimizer` singleton."""
    return _performance_optimizer


def apply_performance_optimization(state: PromptEngineeringState) -> PromptEngineeringState:
    """
    Run the shared performance optimizer over a workflow state.

    Args:
        state: Workflow state to optimize.

    Returns:
        The state after stage-specific optimization has been applied.
    """
    return get_performance_optimizer().optimize_workflow_execution(state)


def get_workflow_config(question: str) -> Dict[str, Any]:
    """
    Build a workflow configuration for *question* via the shared optimizer.

    Args:
        question: Research question text.

    Returns:
        Workflow configuration dictionary.
    """
    return get_performance_optimizer().get_workflow_config(question)

# Public API of this module.
__all__ = [
    'PerformanceOptimizer',
    'get_performance_optimizer',
    'apply_performance_optimization',
    'get_workflow_config'
]