"""
优化决策代理
负责参数优化和决策支持
"""

from scipy.optimize import differential_evolution, minimize
import numpy as np

class OptimizationAgent:
    """Agent for parameter optimization and decision support."""

    def __init__(self):
        """Initialize the agent with an empty optimization history."""
        # Each entry is the record dict returned by optimize_parameters.
        self.optimization_history = []

    def optimize_parameters(self, objective_func, bounds, method='differential_evolution', constraints=None, max_iter=100):
        """
        Optimize the parameters of an objective function.

        Args:
            objective_func: Callable to minimize; receives an array of parameters.
            bounds: Parameter bounds as a sequence of (min, max) pairs.
            method: Optimization method: 'differential_evolution' or 'minimize'.
            constraints: Optional scipy constraint object(s); None means unconstrained.
            max_iter: Maximum number of iterations passed to the solver.

        Returns:
            A JSON-serializable record dict with keys 'method', 'optimal_params',
            'optimal_value', 'success', 'message'; the record is also appended
            to self.optimization_history.

        Raises:
            ValueError: If `method` is not one of the supported names.
        """
        # scipy expects an empty tuple (not None) for "no constraints";
        # passing None to differential_evolution raises a TypeError deep
        # inside its constraint-wrapping code.
        scipy_constraints = constraints if constraints is not None else ()

        if method == 'differential_evolution':
            result = differential_evolution(
                objective_func,
                bounds,
                constraints=scipy_constraints,
                maxiter=max_iter
            )
        elif method == 'minimize':
            # Use the midpoint of each bound as the initial guess.
            x0 = np.array([(low + high) / 2 for low, high in bounds])
            result = minimize(
                objective_func,
                x0,
                bounds=bounds,
                constraints=scipy_constraints,
                options={'maxiter': max_iter}
            )
        else:
            raise ValueError(f"不支持的优化方法: {method}")

        # Store only plain Python types so the record is serializable.
        optimization_record = {
            'method': method,
            'optimal_params': result.x.tolist(),
            'optimal_value': float(result.fun),
            'success': bool(result.success),
            'message': str(result.message)
        }
        self.optimization_history.append(optimization_record)

        return optimization_record

    def get_optimization_history(self):
        """Return the list of all optimization records so far."""
        return self.optimization_history

    def analyze_optimization_results(self, results):
        """
        Analyze an optimization result against the stored history.

        Args:
            results: An optimization record as returned by optimize_parameters.

        Returns:
            A report dict with the optimal parameters/value, a convergence
            status string, and — when at least two runs are in the history —
            the improvement over the previous run plus a simple
            parameter-sensitivity vector (each parameter's deviation from
            the mean of the optimal parameters).
        """
        report = {
            'optimal_parameters': results['optimal_params'],
            'optimal_value': results['optimal_value'],
            'convergence_status': '成功' if results['success'] else '失败',
            'performance_improvement': None,
            'parameter_sensitivity': None
        }

        if len(self.optimization_history) > 1:
            # Improvement is measured against the second-to-last run
            # (the last entry is normally `results` itself).
            prev_result = self.optimization_history[-2]
            improvement = prev_result['optimal_value'] - results['optimal_value']
            report['performance_improvement'] = improvement

            # Crude sensitivity proxy: deviation of each parameter from the mean.
            params = np.array(results['optimal_params'])
            report['parameter_sensitivity'] = (params - np.mean(params)).tolist()

        return report