#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
智能算法选择器

使用机器学习技术自动选择最适合的优化算法：
1. 问题特征提取
2. 算法性能预测
3. 自适应参数调优
4. 历史性能学习
"""

import numpy as np
import pandas as pd
from typing import Dict, List, Any, Optional, Tuple, Callable
from dataclasses import dataclass, asdict
import json
import pickle
import os
from datetime import datetime
import logging
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, r2_score
import joblib

try:
    from .models import (
        OptimizationProblem, DecisionVariable, ObjectiveFunction,
        VariableType, OptimizationType
    )
    from .advanced_algorithms import AdvancedAlgorithmEngine, AlgorithmResult
except ImportError:
    from models import (
        OptimizationProblem, DecisionVariable, ObjectiveFunction,
        VariableType, OptimizationType
    )
    from advanced_algorithms import AdvancedAlgorithmEngine, AlgorithmResult


@dataclass
class ProblemFeatures:
    """Structural features extracted from an optimization problem.

    Instances are the input representation for the algorithm-performance
    prediction models; ``to_vector`` produces the numeric encoding.
    """
    num_variables: int
    num_objectives: int
    num_constraints: int
    continuous_ratio: float
    integer_ratio: float
    binary_ratio: float
    variable_range_avg: float
    variable_range_std: float
    problem_complexity: float
    constraint_density: float
    objective_type: str
    has_bounds: bool
    dimension_ratio: float
    
    def to_vector(self) -> np.ndarray:
        """Encode the features as a flat 13-element numeric vector."""
        # Categorical / boolean fields are mapped to 0/1 indicator values.
        maximize_flag = 1.0 if self.objective_type == 'maximize' else 0.0
        bounds_flag = 1.0 if self.has_bounds else 0.0
        components = [
            self.num_variables,
            self.num_objectives,
            self.num_constraints,
            self.continuous_ratio,
            self.integer_ratio,
            self.binary_ratio,
            self.variable_range_avg,
            self.variable_range_std,
            self.problem_complexity,
            self.constraint_density,
            maximize_flag,
            bounds_flag,
            self.dimension_ratio,
        ]
        return np.array(components)


@dataclass
class AlgorithmPerformance:
    """Single performance record for one algorithm run on one problem.

    Records are accumulated over time and used to (re)train the
    per-algorithm performance-prediction models.
    """
    algorithm_name: str  # name of the optimization algorithm that was run
    problem_features: ProblemFeatures  # feature snapshot of the problem instance
    execution_time: float  # wall-clock runtime of the run, in seconds
    best_fitness: float  # best objective value reached by the run
    convergence_rate: float  # fraction of history steps that improved the fitness
    success_rate: float  # 1.0 when a finite best fitness was found, else 0.0
    parameter_config: Dict[str, Any]  # algorithm parameters used for this run
    timestamp: datetime  # when the run was recorded
    problem_id: str  # identifier of the problem instance


class ProblemAnalyzer:
    """Extracts numeric features from an optimization problem.

    The resulting :class:`ProblemFeatures` feed the performance predictor
    that drives algorithm selection.
    """
    
    def __init__(self):
        self.logger = logging.getLogger(__name__)
    
    def extract_features(self, problem: OptimizationProblem) -> ProblemFeatures:
        """Compute a ProblemFeatures summary for *problem*.

        A problem with zero variables is handled gracefully: every ratio
        and density defaults to 0.
        """
        num_variables = len(problem.variables)
        num_objectives = len(problem.objectives)
        num_constraints = len(problem.constraints)
        
        # Variable-type mix.
        continuous_count = sum(1 for var in problem.variables 
                             if var.variable_type == VariableType.CONTINUOUS)
        integer_count = sum(1 for var in problem.variables 
                          if var.variable_type == VariableType.INTEGER)
        binary_count = sum(1 for var in problem.variables 
                         if var.variable_type == VariableType.BINARY)
        
        continuous_ratio = continuous_count / num_variables if num_variables > 0 else 0
        integer_ratio = integer_count / num_variables if num_variables > 0 else 0
        binary_ratio = binary_count / num_variables if num_variables > 0 else 0
        
        # Bound-range statistics; a variable lacking either bound gets a
        # nominal unit range and flips has_bounds off for the whole problem.
        ranges = []
        has_bounds = True
        
        for var in problem.variables:
            if var.lower_bound is not None and var.upper_bound is not None:
                ranges.append(var.upper_bound - var.lower_bound)
            else:
                has_bounds = False
                ranges.append(1.0)  # default range for unbounded variables
        
        variable_range_avg = np.mean(ranges) if ranges else 0.0
        variable_range_std = np.std(ranges) if ranges else 0.0
        
        # Heuristic complexity estimate (see _calculate_complexity).
        problem_complexity = self._calculate_complexity(problem)
        
        # Constraints per variable.
        constraint_density = num_constraints / num_variables if num_variables > 0 else 0
        
        # A single maximization objective marks the whole problem 'maximize'.
        objective_type = 'maximize' if any(
            obj.optimization_type == OptimizationType.MAXIMIZE 
            for obj in problem.objectives
        ) else 'minimize'
        
        # Objectives per variable.
        dimension_ratio = num_objectives / num_variables if num_variables > 0 else 0
        
        return ProblemFeatures(
            num_variables=num_variables,
            num_objectives=num_objectives,
            num_constraints=num_constraints,
            continuous_ratio=continuous_ratio,
            integer_ratio=integer_ratio,
            binary_ratio=binary_ratio,
            variable_range_avg=variable_range_avg,
            variable_range_std=variable_range_std,
            problem_complexity=problem_complexity,
            constraint_density=constraint_density,
            objective_type=objective_type,
            has_bounds=has_bounds,
            dimension_ratio=dimension_ratio
        )
    
    def _calculate_complexity(self, problem: OptimizationProblem) -> float:
        """Heuristic complexity score from size, constraints and discreteness.

        Returns 0.0 for a problem with no variables (the previous version
        divided by ``len(problem.variables)`` unconditionally and raised
        ZeroDivisionError in that case).
        """
        num_variables = len(problem.variables)
        if num_variables == 0:
            return 0.0
        
        # Base term grows with variables x objectives.
        base_complexity = num_variables * len(problem.objectives)
        
        # Each constraint adds 10% complexity.
        constraint_factor = 1 + 0.1 * len(problem.constraints)
        
        # Discrete (integer/binary) variables add up to 20% complexity.
        discrete_count = sum(1 for var in problem.variables 
                           if var.variable_type in [VariableType.INTEGER, VariableType.BINARY])
        discrete_factor = 1 + 0.2 * discrete_count / num_variables
        
        return base_complexity * constraint_factor * discrete_factor


class PerformancePredictor:
    """Per-algorithm performance prediction from problem features.

    For each algorithm with enough history, three regressors predict
    execution time, best fitness and convergence rate. Models and the
    shared feature scaler are pickled to ``<model_path>.pkl``.
    """
    
    def __init__(self, model_path: Optional[str] = None):
        # models[algorithm][metric] -> fitted regressor
        self.models = {}
        # scalers['main'] -> StandardScaler fitted during train()
        self.scalers = {}
        self.feature_names = None
        self.model_path = model_path or "models/performance_predictor"
        self.logger = logging.getLogger(__name__)
        
        # Create the model directory only when the path has one: a bare
        # filename has dirname "" and os.makedirs("") raises FileNotFoundError.
        model_dir = os.path.dirname(self.model_path)
        if model_dir:
            os.makedirs(model_dir, exist_ok=True)
        
        # Load previously persisted models, if any.
        self._load_models()
    
    def train(self, performance_data: List[AlgorithmPerformance]):
        """Fit per-algorithm regressors from *performance_data*.

        Algorithms with fewer than 5 records are skipped; the feature
        scaler is fitted once on the pooled feature matrix.
        """
        if not performance_data:
            self.logger.warning("没有性能数据用于训练")
            return
        
        # Assemble the pooled training matrices.
        X = []
        y_time = []
        y_fitness = []
        y_convergence = []
        algorithms = []
        
        for perf in performance_data:
            features = perf.problem_features.to_vector()
            X.append(features)
            y_time.append(perf.execution_time)
            y_fitness.append(perf.best_fitness)
            y_convergence.append(perf.convergence_rate)
            algorithms.append(perf.algorithm_name)
        
        X = np.array(X)
        
        # Standardize features over all algorithms' data.
        scaler = StandardScaler()
        X_scaled = scaler.fit_transform(X)
        
        # Train an independent model set per algorithm.
        unique_algorithms = list(set(algorithms))
        
        for algorithm in unique_algorithms:
            # Rows belonging to this algorithm.
            alg_indices = [i for i, alg in enumerate(algorithms) if alg == algorithm]
            
            if len(alg_indices) < 5:  # too little data for this algorithm
                continue
            
            X_alg = X_scaled[alg_indices]
            y_time_alg = np.array(y_time)[alg_indices]
            y_fitness_alg = np.array(y_fitness)[alg_indices]
            y_convergence_alg = np.array(y_convergence)[alg_indices]
            
            # One regressor per predicted metric.
            models = {
                'time': RandomForestRegressor(n_estimators=100, random_state=42),
                'fitness': GradientBoostingRegressor(n_estimators=100, random_state=42),
                'convergence': RandomForestRegressor(n_estimators=100, random_state=42)
            }
            
            targets = {
                'time': y_time_alg,
                'fitness': y_fitness_alg,
                'convergence': y_convergence_alg
            }
            
            self.models[algorithm] = {}
            
            for target_name, model in models.items():
                try:
                    model.fit(X_alg, targets[target_name])
                    self.models[algorithm][target_name] = model
                    
                    # In-sample fit quality, logged for diagnostics only.
                    if len(X_alg) > 1:
                        y_pred = model.predict(X_alg)
                        r2 = r2_score(targets[target_name], y_pred)
                        self.logger.info(f"{algorithm} {target_name} 模型 R² 分数: {r2:.3f}")
                        
                except Exception as e:
                    self.logger.error(f"训练 {algorithm} {target_name} 模型失败: {e}")
        
        # Keep the scaler so predictions use the same normalization.
        self.scalers['main'] = scaler
        
        # Persist everything.
        self._save_models()
        
        self.logger.info(f"完成 {len(unique_algorithms)} 个算法的性能预测模型训练")
    
    def predict_performance(self, problem_features: ProblemFeatures, 
                          algorithm_name: str) -> Dict[str, float]:
        """Predict time/fitness/convergence for one algorithm.

        Returns neutral defaults with low confidence when no model exists
        for the algorithm or prediction fails.
        """
        if algorithm_name not in self.models:
            # No trained model: neutral fallback values.
            return {
                'predicted_time': 10.0,
                'predicted_fitness': 0.5,
                'predicted_convergence': 0.5,
                'confidence': 0.1
            }
        
        try:
            # Single-sample feature matrix.
            features = problem_features.to_vector().reshape(1, -1)
            
            if 'main' in self.scalers:
                features_scaled = self.scalers['main'].transform(features)
            else:
                features_scaled = features
            
            # Predict each metric independently.
            predictions = {}
            confidence_scores = []
            
            for target_name in ['time', 'fitness', 'convergence']:
                if target_name in self.models[algorithm_name]:
                    model = self.models[algorithm_name][target_name]
                    pred = model.predict(features_scaled)[0]
                    predictions[f'predicted_{target_name}'] = pred
                    
                    # Crude confidence proxy: mean feature importance.
                    if hasattr(model, 'feature_importances_'):
                        confidence = np.mean(model.feature_importances_)
                        confidence_scores.append(confidence)
                else:
                    # Missing metric model: neutral default, low confidence.
                    default_values = {'time': 10.0, 'fitness': 0.5, 'convergence': 0.5}
                    predictions[f'predicted_{target_name}'] = default_values[target_name]
                    confidence_scores.append(0.1)
            
            predictions['confidence'] = np.mean(confidence_scores)
            return predictions
            
        except Exception as e:
            self.logger.error(f"预测 {algorithm_name} 性能失败: {e}")
            return {
                'predicted_time': 10.0,
                'predicted_fitness': 0.5,
                'predicted_convergence': 0.5,
                'confidence': 0.1
            }
    
    def _save_models(self):
        """Pickle models and scalers to ``<model_path>.pkl``."""
        try:
            model_data = {
                'models': self.models,
                'scalers': self.scalers,
                'timestamp': datetime.now().isoformat()
            }
            
            with open(f"{self.model_path}.pkl", 'wb') as f:
                pickle.dump(model_data, f)
                
            self.logger.info(f"模型已保存到 {self.model_path}.pkl")
            
        except Exception as e:
            self.logger.error(f"保存模型失败: {e}")
    
    def _load_models(self):
        """Load pickled models and scalers if the file exists.

        NOTE(security): ``pickle.load`` executes arbitrary code embedded
        in the file — only load model files from trusted locations.
        """
        try:
            if os.path.exists(f"{self.model_path}.pkl"):
                with open(f"{self.model_path}.pkl", 'rb') as f:
                    model_data = pickle.load(f)
                
                self.models = model_data.get('models', {})
                self.scalers = model_data.get('scalers', {})
                
                self.logger.info(f"已加载模型从 {self.model_path}.pkl")
            
        except Exception as e:
            self.logger.error(f"加载模型失败: {e}")


class IntelligentAlgorithmSelector:
    """Selects the best optimization algorithm for a given problem.

    Combines problem-feature extraction, learned performance prediction
    and heuristic parameter recommendation, and keeps a persistent
    JSON history of observed algorithm performance for retraining.
    """
    
    def __init__(self, data_path: str = "data/performance_history.json"):
        self.analyzer = ProblemAnalyzer()
        self.predictor = PerformancePredictor()
        self.algorithm_engine = AdvancedAlgorithmEngine()
        self.data_path = data_path
        self.performance_history: List[AlgorithmPerformance] = []
        self.logger = logging.getLogger(__name__)
        
        # Create the data directory only when the path has one: a bare
        # filename has dirname "" and os.makedirs("") raises FileNotFoundError.
        data_dir = os.path.dirname(self.data_path)
        if data_dir:
            os.makedirs(data_dir, exist_ok=True)
        
        # Load persisted history, if any.
        self._load_performance_history()
        
        # Bootstrap the predictor from the loaded history.
        if self.performance_history:
            self.predictor.train(self.performance_history)
    
    def select_best_algorithm(self, problem: OptimizationProblem,
                            candidate_algorithms: Optional[List[str]] = None) -> Tuple[str, Dict[str, Any]]:
        """Return ``(algorithm_name, recommended_params)`` for *problem*.

        Candidates default to every algorithm the engine exposes.

        Raises:
            ValueError: if the candidate list is empty (previously this
                surfaced as a cryptic ``max() arg is an empty sequence``).
        """
        if candidate_algorithms is None:
            candidate_algorithms = self.algorithm_engine.get_available_algorithms()
        if not candidate_algorithms:
            raise ValueError("candidate_algorithms is empty")
        
        # Extract the problem's feature vector once.
        features = self.analyzer.extract_features(problem)
        
        # Score every candidate by predicted performance.
        algorithm_scores = {}
        
        for algorithm in candidate_algorithms:
            predictions = self.predictor.predict_performance(features, algorithm)
            
            # Normalize each predicted metric so higher is better.
            time_score = 1.0 / (1.0 + predictions['predicted_time'])  # shorter time is better
            fitness_score = predictions['predicted_fitness']
            convergence_score = predictions['predicted_convergence']
            confidence_score = predictions['confidence']
            
            # Weighted composite score (weights are tunable).
            composite_score = (
                0.3 * time_score +
                0.4 * fitness_score +
                0.2 * convergence_score +
                0.1 * confidence_score
            )
            
            algorithm_scores[algorithm] = {
                'score': composite_score,
                'predictions': predictions
            }
        
        # Pick the highest composite score.
        best_algorithm = max(algorithm_scores, 
                           key=lambda name: algorithm_scores[name]['score'])
        
        # Derive a parameter config for the winner.
        recommended_params = self._recommend_parameters(features, best_algorithm)
        
        self.logger.info(f"为问题选择算法: {best_algorithm}")
        self.logger.info(f"预测性能: {algorithm_scores[best_algorithm]['predictions']}")
        
        return best_algorithm, recommended_params
    
    def _recommend_parameters(self, features: ProblemFeatures, 
                            algorithm: str) -> Dict[str, Any]:
        """Heuristic parameter config scaled by problem dimensionality.

        Returns an empty dict for algorithms without a tuning rule.
        """
        base_params = {}
        
        if algorithm == 'differential_evolution':
            # Population and generation budget grow with dimensionality,
            # clamped to sensible lower/upper bounds.
            population_size = min(100, max(20, features.num_variables * 5))
            max_generations = min(200, max(50, features.num_variables * 10))
            
            base_params = {
                'population_size': population_size,
                'max_generations': max_generations,
                'F': 0.8,   # differential weight
                'CR': 0.9   # crossover rate
            }
            
        elif algorithm == 'ant_colony':
            num_ants = min(50, max(10, features.num_variables * 3))
            max_iterations = min(150, max(30, features.num_variables * 8))
            
            base_params = {
                'num_ants': num_ants,
                'max_iterations': max_iterations,
                'alpha': 1.0,  # pheromone influence
                'beta': 2.0,   # heuristic influence
                'rho': 0.1,    # evaporation rate
                'Q': 1.0       # pheromone deposit factor
            }
            
        elif algorithm == 'artificial_bee_colony':
            colony_size = min(80, max(20, features.num_variables * 4))
            max_cycles = min(150, max(40, features.num_variables * 8))
            
            base_params = {
                'colony_size': colony_size,
                'max_cycles': max_cycles,
                'limit': 10  # abandonment limit for stagnant sources
            }
        
        return base_params
    
    def record_performance(self, algorithm_name: str, problem: OptimizationProblem,
                         result: AlgorithmResult, parameter_config: Dict[str, Any]):
        """Append one run's outcome to the history and retrain if possible."""
        features = self.analyzer.extract_features(problem)
        
        # Improvement ratio over the convergence history.
        convergence_rate = self._calculate_convergence_rate(result.convergence_history)
        
        # Simplified success criterion: any finite best fitness counts.
        success_rate = 1.0 if result.best_fitness > float('-inf') else 0.0
        
        performance = AlgorithmPerformance(
            algorithm_name=algorithm_name,
            problem_features=features,
            execution_time=result.execution_time,
            best_fitness=result.best_fitness,
            convergence_rate=convergence_rate,
            success_rate=success_rate,
            parameter_config=parameter_config,
            timestamp=datetime.now(),
            problem_id=f"problem_{len(self.performance_history)}"
        )
        
        self.performance_history.append(performance)
        
        # Persist immediately so history survives crashes.
        self._save_performance_history()
        
        # Retrain the predictor once enough records have accumulated.
        if len(self.performance_history) >= 10:
            self.predictor.train(self.performance_history)
        
        self.logger.info(f"记录了 {algorithm_name} 的性能数据")
    
    def _calculate_convergence_rate(self, convergence_history: List[float]) -> float:
        """Fraction of steps in which fitness strictly improved.

        NOTE: assumes maximization — an "improvement" is a strictly
        increasing fitness value between consecutive steps.
        """
        if len(convergence_history) < 2:
            return 0.0
        
        improvements = sum(
            1 for prev, curr in zip(convergence_history, convergence_history[1:])
            if curr > prev
        )
        
        return improvements / (len(convergence_history) - 1)
    
    def _save_performance_history(self):
        """Persist the performance history as a JSON file."""
        try:
            serializable_data = []
            for perf in self.performance_history:
                # asdict converts nested dataclasses (problem_features)
                # recursively; only the datetime needs manual encoding.
                data = asdict(perf)
                data['timestamp'] = perf.timestamp.isoformat()
                serializable_data.append(data)
            
            with open(self.data_path, 'w', encoding='utf-8') as f:
                json.dump(serializable_data, f, indent=2, ensure_ascii=False)
                
        except Exception as e:
            self.logger.error(f"保存性能历史失败: {e}")
    
    def _load_performance_history(self):
        """Rebuild the in-memory history from the JSON file, if present."""
        try:
            if os.path.exists(self.data_path):
                with open(self.data_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                
                for item in data:
                    # Reconstruct the dataclass objects from plain dicts.
                    features_data = item['problem_features']
                    features = ProblemFeatures(**features_data)
                    
                    performance = AlgorithmPerformance(
                        algorithm_name=item['algorithm_name'],
                        problem_features=features,
                        execution_time=item['execution_time'],
                        best_fitness=item['best_fitness'],
                        convergence_rate=item['convergence_rate'],
                        success_rate=item['success_rate'],
                        parameter_config=item['parameter_config'],
                        timestamp=datetime.fromisoformat(item['timestamp']),
                        problem_id=item['problem_id']
                    )
                    
                    self.performance_history.append(performance)
                
                self.logger.info(f"加载了 {len(self.performance_history)} 条性能历史记录")
                
        except Exception as e:
            self.logger.error(f"加载性能历史失败: {e}")
    
    def get_algorithm_statistics(self) -> Dict[str, Any]:
        """Aggregate per-algorithm averages over the recorded history."""
        if not self.performance_history:
            return {}
        
        stats = {}
        algorithms = set(perf.algorithm_name for perf in self.performance_history)
        
        for algorithm in algorithms:
            alg_perfs = [perf for perf in self.performance_history 
                        if perf.algorithm_name == algorithm]
            
            if alg_perfs:
                stats[algorithm] = {
                    'count': len(alg_perfs),
                    'avg_time': np.mean([p.execution_time for p in alg_perfs]),
                    'avg_fitness': np.mean([p.best_fitness for p in alg_perfs]),
                    'avg_convergence': np.mean([p.convergence_rate for p in alg_perfs]),
                    'success_rate': np.mean([p.success_rate for p in alg_perfs])
                }
        
        return stats