import numpy as np
import matplotlib.pyplot as plt
from typing import Callable, Tuple, List, Dict, Any

# Configure matplotlib to use a generic sans-serif (English-capable) font
plt.rcParams["font.family"] = ["sans-serif"]
plt.rcParams["axes.unicode_minus"] = False  # render minus signs correctly with non-default fonts

# Prefer the interactive TkAgg backend for on-screen display; if Tk is not
# available, fall back to the non-interactive Agg backend (file output only).
try:
    plt.switch_backend('TkAgg')
except Exception as e:
    print(f"TkAgg backend not available ({str(e)}), switching to Agg backend")
    plt.switch_backend('Agg')


class OptimizerComparison:
    """Compare core performance metrics of first-order optimizers on the
    Rosenbrock function, with visualization of parameter updates.

    Each optimizer method shares the same interface:
    ``(grad_func, x0, ..., max_iter) -> (path, losses)`` where ``path`` is the
    list of visited points (length ``max_iter + 1``, including ``x0``) and
    ``losses`` is the Rosenbrock value at each of those points.
    """

    def __init__(self):
        # Registry of optimizers to compare (display name -> bound method).
        self.optimizers = {
            'SGD': self.sgd,
            'Momentum': self.momentum,
            'AdaGrad': self.adagrad,
            'RMSProp': self.rmsprop,
            'Adam': self.adam
        }

    def rosenbrock(self, x: np.ndarray) -> float:
        """Rosenbrock test function (non-convex; global minimum 0 at (1, 1)).

        Written purely with NumPy ufuncs, so ``x[0]``/``x[1]`` may be scalars
        or same-shaped arrays — arrays broadcast elementwise, which the
        contour plot relies on.
        """
        return 100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2

    def calculate_loss(self, x: np.ndarray) -> float:
        """Loss at ``x`` (the Rosenbrock function value)."""
        return self.rosenbrock(x)

    def rosenbrock_gradient(self, x: np.ndarray) -> np.ndarray:
        """Analytic gradient of the Rosenbrock function at ``x``."""
        dx0 = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
        dx1 = 200 * (x[1] - x[0]**2)
        return np.array([dx0, dx1])

    @staticmethod
    def _as_float_copy(x0: np.ndarray) -> np.ndarray:
        """Return an independent float64 copy of ``x0``.

        A forced float copy means the in-place ``x -= ...`` updates below can
        never mutate the caller's array and never fail on an integer-typed
        starting point.
        """
        return np.array(x0, dtype=float)

    def sgd(self, grad_func: Callable, x0: np.ndarray, lr: float = 0.001,
            max_iter: int = 1000) -> Tuple[List[np.ndarray], List[float]]:
        """Vanilla stochastic gradient descent: ``x <- x - lr * grad``.

        Args:
            grad_func: callable returning the gradient at a point.
            x0: starting point (copied; never modified).
            lr: learning rate.
            max_iter: number of update steps.

        Returns:
            ``(path, losses)`` as described in the class docstring.
        """
        x = self._as_float_copy(x0)
        path = [x.copy()]
        losses = [self.calculate_loss(x)]

        for _ in range(max_iter):
            grad = grad_func(x)
            x -= lr * grad
            path.append(x.copy())
            losses.append(self.calculate_loss(x))

        return path, losses

    def momentum(self, grad_func: Callable, x0: np.ndarray, lr: float = 0.001,
                 gamma: float = 0.9, max_iter: int = 1000) -> Tuple[List[np.ndarray], List[float]]:
        """Gradient descent with classical (heavy-ball) momentum.

        Args:
            grad_func: callable returning the gradient at a point.
            x0: starting point (copied; never modified).
            lr: learning rate.
            gamma: momentum (velocity decay) coefficient.
            max_iter: number of update steps.

        Returns:
            ``(path, losses)`` as described in the class docstring.
        """
        x = self._as_float_copy(x0)
        velocity = np.zeros_like(x)
        path = [x.copy()]
        losses = [self.calculate_loss(x)]

        for _ in range(max_iter):
            grad = grad_func(x)
            velocity = gamma * velocity + lr * grad
            x -= velocity
            path.append(x.copy())
            losses.append(self.calculate_loss(x))

        return path, losses

    def adagrad(self, grad_func: Callable, x0: np.ndarray, lr: float = 0.5,
                eps: float = 1e-8, max_iter: int = 1000) -> Tuple[List[np.ndarray], List[float]]:
        """AdaGrad: per-coordinate step sizes from accumulated squared gradients.

        Args:
            grad_func: callable returning the gradient at a point.
            x0: starting point (copied; never modified).
            lr: base learning rate (large values work because the
                accumulated denominator shrinks the effective step).
            eps: numerical-stability constant inside the square root.
            max_iter: number of update steps.

        Returns:
            ``(path, losses)`` as described in the class docstring.
        """
        x = self._as_float_copy(x0)
        G = np.zeros_like(x)  # running sum of squared gradients
        path = [x.copy()]
        losses = [self.calculate_loss(x)]

        for _ in range(max_iter):
            grad = grad_func(x)
            G += grad ** 2
            x -= lr / (np.sqrt(G + eps)) * grad
            path.append(x.copy())
            losses.append(self.calculate_loss(x))

        return path, losses

    def rmsprop(self, grad_func: Callable, x0: np.ndarray, lr: float = 0.05,
                gamma: float = 0.9, eps: float = 1e-8, max_iter: int = 1000) -> Tuple[List[np.ndarray], List[float]]:
        """RMSProp: AdaGrad with an exponential moving average of squared gradients.

        Args:
            grad_func: callable returning the gradient at a point.
            x0: starting point (copied; never modified).
            lr: learning rate.
            gamma: decay rate of the squared-gradient moving average.
            eps: numerical-stability constant inside the square root.
            max_iter: number of update steps.

        Returns:
            ``(path, losses)`` as described in the class docstring.
        """
        x = self._as_float_copy(x0)
        avg_sq_grad = np.zeros_like(x)
        path = [x.copy()]
        losses = [self.calculate_loss(x)]

        for _ in range(max_iter):
            grad = grad_func(x)
            avg_sq_grad = gamma * avg_sq_grad + (1 - gamma) * grad ** 2
            x -= lr / (np.sqrt(avg_sq_grad + eps)) * grad
            path.append(x.copy())
            losses.append(self.calculate_loss(x))

        return path, losses

    def adam(self, grad_func: Callable, x0: np.ndarray, lr: float = 0.1,
             beta1: float = 0.9, beta2: float = 0.999, eps: float = 1e-8,
             max_iter: int = 1000) -> Tuple[List[np.ndarray], List[float]]:
        """Adam: bias-corrected first and second gradient moments.

        Args:
            grad_func: callable returning the gradient at a point.
            x0: starting point (copied; never modified).
            lr: learning rate.
            beta1: decay rate of the first-moment estimate.
            beta2: decay rate of the second-moment estimate.
            eps: numerical-stability constant added to the denominator.
            max_iter: number of update steps.

        Returns:
            ``(path, losses)`` as described in the class docstring.
        """
        x = self._as_float_copy(x0)
        m, v = np.zeros_like(x), np.zeros_like(x)
        path = [x.copy()]
        losses = [self.calculate_loss(x)]

        for i in range(max_iter):
            grad = grad_func(x)
            m = beta1 * m + (1 - beta1) * grad
            v = beta2 * v + (1 - beta2) * grad ** 2
            # Bias correction: compensates for the zero-initialized moments.
            m_hat = m / (1 - beta1 ** (i + 1))
            v_hat = v / (1 - beta2 ** (i + 1))
            x -= lr * m_hat / (np.sqrt(v_hat) + eps)
            path.append(x.copy())
            losses.append(self.calculate_loss(x))

        return path, losses

    def compare_optimizers(self, x0: np.ndarray = None,
                           max_iter: int = 1000) -> Dict[str, Any]:
        """Run every registered optimizer from ``x0`` and collect metrics.

        Args:
            x0: starting point; ``None`` (the default) means the origin.
                A ``None`` sentinel is used instead of a module-level array
                default to avoid sharing a mutable default argument.
            max_iter: iteration budget for each optimizer.

        Returns:
            Mapping of optimizer name to a dict with keys ``'path'``,
            ``'losses'``, ``'distances'``, ``'final_loss'``,
            ``'convergence_time'`` and ``'final_distance'``.
        """
        if x0 is None:
            x0 = np.array([0.0, 0.0])

        # Hyperparameters tuned per optimizer for the Rosenbrock problem.
        # Driving the calls from this table (instead of an if/elif chain)
        # guarantees path/losses are always bound, even if a new optimizer
        # is registered later: unknown names simply run with their defaults.
        hyperparams = {
            'SGD': {'lr': 0.001},
            'Momentum': {'lr': 0.001, 'gamma': 0.9},
            'AdaGrad': {'lr': 0.5, 'eps': 1e-8},
            'RMSProp': {'lr': 0.05, 'gamma': 0.9, 'eps': 1e-8},
            'Adam': {'lr': 0.1, 'beta1': 0.9, 'beta2': 0.999},
        }

        optimal_point = np.array([1.0, 1.0])  # known global minimum
        target_loss = 1e-3                    # convergence threshold
        results = {}

        for name, optimizer in self.optimizers.items():
            print(f"Running {name} optimizer...")
            path, losses = optimizer(self.rosenbrock_gradient, x0,
                                     max_iter=max_iter,
                                     **hyperparams.get(name, {}))

            # Key performance metrics.
            distances = [np.linalg.norm(point - optimal_point) for point in path]
            converged_idx = np.where(np.array(losses) < target_loss)[0]
            # max_iter doubles as the "did not converge" sentinel.
            convergence_time = converged_idx[0] if len(converged_idx) > 0 else max_iter

            results[name] = {
                'path': np.array(path),
                'losses': np.array(losses),
                'distances': np.array(distances),
                'final_loss': losses[-1],
                'convergence_time': convergence_time,
                'final_distance': distances[-1]
            }

        return results

    def plot_performance_comparison(self, results: dict, save_path: str = "optimizer_performance.png"):
        """Render a 2x2 performance dashboard and save it to ``save_path``.

        Panels: optimization paths (top-left), convergence-speed bars
        (top-right), loss curves (bottom-left), x0 trajectory (bottom-right).
        The figure is saved first, then shown if the backend supports it.
        """
        fig, axes = plt.subplots(2, 2, figsize=(16, 14))
        optimizers = list(results.keys())
        colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd']

        # 1. Top-left: optimization paths over the Rosenbrock contours
        #    (parameter-space exploration efficiency).
        x_grid = np.linspace(-2, 2, 100)
        y_grid = np.linspace(-1, 3, 100)
        X, Y = np.meshgrid(x_grid, y_grid)
        # rosenbrock is written with NumPy ufuncs, so evaluating it on the
        # stacked grid broadcasts in one vectorized pass — no Python loops.
        Z = self.rosenbrock(np.stack([X, Y]))

        contour = axes[0, 0].contour(X, Y, Z, levels=np.logspace(-1, 5, 30), alpha=0.6, cmap='viridis')
        axes[0, 0].clabel(contour, inline=True, fontsize=8, fmt='%.1e')
        for i, name in enumerate(optimizers):
            path = results[name]['path']
            axes[0, 0].plot(path[:, 0], path[:, 1], color=colors[i], label=name, linewidth=2, alpha=0.8)
            # Circle marks the start of a path, cross marks the end.
            axes[0, 0].scatter(path[0, 0], path[0, 1], color=colors[i], marker='o', s=100, edgecolor='black')
            axes[0, 0].scatter(path[-1, 0], path[-1, 1], color=colors[i], marker='x', s=120, edgecolor='black')

        axes[0, 0].scatter(1, 1, color='gold', marker='*', s=250, edgecolor='black', label='Global Minimum')
        axes[0, 0].set_xlabel('Parameter x0')
        axes[0, 0].set_ylabel('Parameter x1')
        axes[0, 0].set_title('Optimization Paths (Parameter Space Exploration)')
        axes[0, 0].legend(fontsize=9)
        axes[0, 0].grid(alpha=0.3)

        # 2. Top-right: iterations needed to reach the target loss.
        conv_times = [results[name]['convergence_time'] for name in optimizers]
        bars = axes[0, 1].bar(optimizers, conv_times, color=colors, alpha=0.8, edgecolor='black')
        for bar, time in zip(bars, conv_times):
            axes[0, 1].text(bar.get_x() + bar.get_width()/2, bar.get_height(),
                          str(time), ha='center', va='bottom', fontsize=9)

        axes[0, 1].set_xlabel('Optimizer')
        axes[0, 1].set_ylabel('Iterations to Reach Target Loss')
        axes[0, 1].set_title('Convergence Speed Comparison')
        axes[0, 1].tick_params(axis='x', rotation=45)
        axes[0, 1].grid(axis='y', alpha=0.3)

        # 3. Bottom-left: loss convergence curves (speed and stability).
        for i, name in enumerate(optimizers):
            axes[1, 0].plot(range(len(results[name]['losses'])), results[name]['losses'],
                          color=colors[i], label=name, linewidth=2, alpha=0.8)

        axes[1, 0].set_yscale('log')
        axes[1, 0].set_xlabel('Iteration')
        axes[1, 0].set_ylabel('Loss Value (log scale)')
        axes[1, 0].set_title('Loss Convergence Curves')
        axes[1, 0].legend(fontsize=9)
        axes[1, 0].grid(alpha=0.3)

        # 4. Bottom-right: trajectory of parameter x0 over iterations.
        for i, name in enumerate(optimizers):
            path = results[name]['path']
            axes[1, 1].plot(range(len(path)), path[:, 0],
                          color=colors[i], label=name, linewidth=2, alpha=0.8)

        # Reference line at the optimal value of x0.
        axes[1, 1].axhline(y=1.0, color='red', linestyle='--', linewidth=2,
                         alpha=0.7, label='Optimal x0=1.0')
        axes[1, 1].set_xlabel('Iteration')
        axes[1, 1].set_ylabel('Parameter x0 Value')
        axes[1, 1].set_title('Parameter x0 Update Trajectory')
        axes[1, 1].legend(fontsize=9)
        axes[1, 1].grid(alpha=0.3)

        # Save first so the figure exists even if display fails (Agg backend).
        plt.tight_layout()
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        print(f"Performance comparison plot saved to: {save_path}")

        try:
            plt.show(block=True)
        except Exception as e:
            print(f"Failed to display figure: {str(e)}")

    def print_performance_summary(self, results: dict):
        """Print a ranked summary table of the optimizer results.

        Rows are ordered by final loss (best first) and include the final
        x0 value and the convergence time (iterations to target loss).
        """
        print("\n" + "="*80)
        print("                      OPTIMIZER PERFORMANCE COMPARISON SUMMARY")
        print("="*80)

        # Rank by final loss value (best performance first).
        sorted_results = sorted(results.items(), key=lambda x: x[1]['final_loss'])

        print(f"{'Rank':<6} {'Optimizer':<10} {'Final Loss':<15} {'x0 Final Value':<15} {'Convergence Time':<20}")
        print("-"*80)

        for rank, (name, res) in enumerate(sorted_results, 1):
            # Final value of the x0 parameter for this optimizer.
            final_x0 = res['path'][-1, 0]
            print(f"{rank:<6} {name:<10} {res['final_loss']:<15.6e} {final_x0:<15.6f} {res['convergence_time']:<20}")

        print("="*80 + "\n")


# Script entry point: run the full optimizer comparison experiment.
if __name__ == "__main__":
    comparison = OptimizerComparison()
    start = np.array([-1.0, 1.0])  # starting point far from the optimum
    iterations = 1000

    print(f"Starting optimization comparison (initial point: {start})")
    outcome = comparison.compare_optimizers(x0=start, max_iter=iterations)

    # Print the ranked summary table (includes parameter-update info).
    comparison.print_performance_summary(outcome)

    # Render the comparison figure (includes parameter trajectories).
    comparison.plot_performance_comparison(outcome)
