"""
Week 13: 随机梯度下降算法实现
Stochastic Gradient Descent Algorithms Implementation
"""

from typing import Callable, Dict, Optional

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_regression, load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class SGDOptimizer:
    """Base class for SGD-style optimizers.

    Holds the learning rate and a bookkeeping dict; concrete optimizers
    override :meth:`update`.
    """

    def __init__(self, learning_rate: float = 0.01):
        self.learning_rate = learning_rate
        # Optional training-curve bookkeeping for subclasses/callers.
        self.history = {'loss': [], 'weights': []}

    def update(self, weights: np.ndarray, gradient: np.ndarray) -> np.ndarray:
        """Return the updated weights; subclasses must implement this."""
        raise NotImplementedError

class VanillaSGD(SGDOptimizer):
    """Plain stochastic gradient descent: w <- w - lr * g."""

    def update(self, weights: np.ndarray, gradient: np.ndarray) -> np.ndarray:
        """Take one gradient step scaled by the learning rate."""
        step = self.learning_rate * gradient
        return weights - step

class MomentumSGD(SGDOptimizer):
    """SGD with classical (heavy-ball) momentum."""

    def __init__(self, learning_rate: float = 0.01, momentum: float = 0.9):
        super().__init__(learning_rate)
        self.momentum = momentum
        # Lazily initialised on the first update to match the weight shape.
        self.velocity = None

    def update(self, weights: np.ndarray, gradient: np.ndarray) -> np.ndarray:
        """Blend the previous velocity with the current gradient step."""
        if self.velocity is None:
            self.velocity = np.zeros_like(weights)

        step = self.learning_rate * gradient
        self.velocity = self.momentum * self.velocity - step
        return weights + self.velocity

class NesterovSGD(SGDOptimizer):
    """Nesterov accelerated gradient (Sutskever-style look-ahead form)."""

    def __init__(self, learning_rate: float = 0.01, momentum: float = 0.9):
        super().__init__(learning_rate)
        self.momentum = momentum
        # Lazily initialised on the first update to match the weight shape.
        self.velocity = None

    def update(self, weights: np.ndarray, gradient: np.ndarray) -> np.ndarray:
        """Apply the look-ahead correction: w + (1+m)*v_new - m*v_old."""
        if self.velocity is None:
            self.velocity = np.zeros_like(weights)

        old_velocity = self.velocity
        new_velocity = self.momentum * old_velocity - self.learning_rate * gradient
        self.velocity = new_velocity
        return weights - self.momentum * old_velocity + (1 + self.momentum) * new_velocity

class AdaGrad(SGDOptimizer):
    """AdaGrad: per-coordinate learning rates from accumulated squared gradients."""

    def __init__(self, learning_rate: float = 0.01, epsilon: float = 1e-8):
        super().__init__(learning_rate)
        # Small constant to avoid division by zero.
        self.epsilon = epsilon
        # Running sum of squared gradients; lazily initialised.
        self.sum_squared_gradients = None

    def update(self, weights: np.ndarray, gradient: np.ndarray) -> np.ndarray:
        """Scale each coordinate's step by 1/sqrt(accumulated g^2)."""
        if self.sum_squared_gradients is None:
            self.sum_squared_gradients = np.zeros_like(weights)

        self.sum_squared_gradients = self.sum_squared_gradients + gradient ** 2
        per_coord_lr = self.learning_rate / (np.sqrt(self.sum_squared_gradients) + self.epsilon)
        return weights - per_coord_lr * gradient

class RMSprop(SGDOptimizer):
    """RMSprop: AdaGrad with an exponential moving average of squared gradients."""

    def __init__(self, learning_rate: float = 0.01, decay_rate: float = 0.9, epsilon: float = 1e-8):
        super().__init__(learning_rate)
        # EMA coefficient for the squared-gradient accumulator.
        self.decay_rate = decay_rate
        # Small constant to avoid division by zero.
        self.epsilon = epsilon
        # EMA of squared gradients; lazily initialised.
        self.sum_squared_gradients = None

    def update(self, weights: np.ndarray, gradient: np.ndarray) -> np.ndarray:
        """Step each coordinate by lr / sqrt(EMA of g^2)."""
        if self.sum_squared_gradients is None:
            self.sum_squared_gradients = np.zeros_like(weights)

        decayed = self.decay_rate * self.sum_squared_gradients
        self.sum_squared_gradients = decayed + (1 - self.decay_rate) * gradient ** 2
        per_coord_lr = self.learning_rate / (np.sqrt(self.sum_squared_gradients) + self.epsilon)
        return weights - per_coord_lr * gradient

class Adam(SGDOptimizer):
    """Adam: bias-corrected first/second moment estimates with adaptive steps."""

    def __init__(self, learning_rate: float = 0.001, beta1: float = 0.9,
                 beta2: float = 0.999, epsilon: float = 1e-8):
        super().__init__(learning_rate)
        self.beta1 = beta1      # decay for the first-moment EMA
        self.beta2 = beta2      # decay for the second-moment EMA
        self.epsilon = epsilon  # numerical-stability constant
        self.m = None           # first-moment estimate (lazily initialised)
        self.v = None           # second-moment estimate (lazily initialised)
        self.t = 0              # step counter for bias correction

    def update(self, weights: np.ndarray, gradient: np.ndarray) -> np.ndarray:
        """One Adam step with bias-corrected moment estimates."""
        if self.m is None:
            self.m = np.zeros_like(weights)
            self.v = np.zeros_like(weights)

        self.t += 1

        b1, b2 = self.beta1, self.beta2

        # Update the exponential moving averages of g and g^2.
        self.m = b1 * self.m + (1 - b1) * gradient
        self.v = b2 * self.v + (1 - b2) * gradient ** 2

        # Bias correction compensates for zero initialisation of m and v.
        m_hat = self.m / (1 - b1 ** self.t)
        v_hat = self.v / (1 - b2 ** self.t)

        return weights - self.learning_rate * m_hat / (np.sqrt(v_hat) + self.epsilon)

class SGDComparison:
    """Compare the SGD-family optimizers on two problems.

    Problem 1: an ill-conditioned 2-D quadratic (exact gradients).
    Problem 2: mini-batch logistic regression on the breast-cancer dataset.
    Both runs end with a matplotlib summary figure.
    """

    def __init__(self):
        # Registry of optimizer classes keyed by display name.
        self.optimizers = {
            'SGD': VanillaSGD,
            'Momentum': MomentumSGD,
            'Nesterov': NesterovSGD,
            'AdaGrad': AdaGrad,
            'RMSprop': RMSprop,
            'Adam': Adam
        }

    def quadratic_function(self, x: np.ndarray, A: np.ndarray, b: np.ndarray) -> float:
        """Quadratic objective: f(x) = 0.5 * x^T A x - b^T x."""
        return 0.5 * x.T @ A @ x - b.T @ x

    def quadratic_gradient(self, x: np.ndarray, A: np.ndarray, b: np.ndarray) -> np.ndarray:
        """Gradient of the quadratic objective: ∇f(x) = A x - b."""
        return A @ x - b

    def compare_on_quadratic(self, n_dims: int = 2, n_iterations: int = 100):
        """Run every registered optimizer on an ill-conditioned quadratic.

        NOTE: the eigenvalues, target point and starting point below are
        hard-coded for two dimensions, so ``n_dims`` must be 2.

        Returns a dict mapping optimizer name to its trajectory, per-step
        losses, final point and final loss.
        """
        print("=== 二次函数优化比较 ===")

        # Build an ill-conditioned quadratic (condition number = 100) by
        # rotating a diagonal spectrum with a random orthogonal matrix.
        np.random.seed(42)
        eigenvalues = np.array([1, 100])  # condition number = 100
        Q = np.random.randn(n_dims, n_dims)
        Q, _ = np.linalg.qr(Q)  # orthogonalize
        A = Q @ np.diag(eigenvalues) @ Q.T

        # Choosing b = A @ x_star makes x_star the unique minimiser.
        x_star = np.array([1.0, -0.5])
        b = A @ x_star

        # Common starting point for every optimizer.
        x0 = np.array([3.0, 2.0])

        results = {}

        for name, optimizer_class in self.optimizers.items():
            print(f"运行 {name}...")

            # Per-optimizer hyper-parameters, hand-tuned for this problem.
            if name == 'SGD':
                optimizer = optimizer_class(learning_rate=0.01)
            elif name in ['Momentum', 'Nesterov']:
                optimizer = optimizer_class(learning_rate=0.01, momentum=0.9)
            elif name == 'AdaGrad':
                optimizer = optimizer_class(learning_rate=0.1)
            elif name == 'RMSprop':
                optimizer = optimizer_class(learning_rate=0.01, decay_rate=0.9)
            elif name == 'Adam':
                optimizer = optimizer_class(learning_rate=0.01)

            # Deterministic descent loop: gradients here are exact, the
            # stochastic behaviour only appears in the mini-batch experiment.
            x = x0.copy()
            trajectory = [x.copy()]
            losses = [self.quadratic_function(x, A, b)]

            for i in range(n_iterations):
                gradient = self.quadratic_gradient(x, A, b)
                x = optimizer.update(x, gradient)

                trajectory.append(x.copy())
                losses.append(self.quadratic_function(x, A, b))

            results[name] = {
                'trajectory': np.array(trajectory),
                'losses': losses,
                'final_x': x,
                'final_loss': losses[-1]
            }

        self.visualize_quadratic_comparison(results, A, b, x_star)
        return results

    def visualize_quadratic_comparison(self, results: Dict, A: np.ndarray,
                                     b: np.ndarray, x_star: np.ndarray):
        """Plot trajectories, convergence curves and final-performance bars.

        ``results`` is the dict produced by :meth:`compare_on_quadratic`.
        """
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))

        # 1. Optimization trajectories over the loss surface.
        ax1 = axes[0, 0]

        # Evaluate the quadratic on a grid for the contour background.
        x_range = np.linspace(-1, 4, 100)
        y_range = np.linspace(-2, 3, 100)
        X, Y = np.meshgrid(x_range, y_range)
        Z = np.zeros_like(X)

        for i in range(X.shape[0]):
            for j in range(X.shape[1]):
                point = np.array([X[i, j], Y[i, j]])
                Z[i, j] = self.quadratic_function(point, A, b)

        ax1.contour(X, Y, Z, levels=20, alpha=0.6)

        # One colour per optimizer (order matches results iteration order).
        colors = ['blue', 'red', 'green', 'orange', 'purple', 'brown']

        for i, (name, result) in enumerate(results.items()):
            trajectory = result['trajectory']
            ax1.plot(trajectory[:, 0], trajectory[:, 1],
                    color=colors[i], linewidth=2, marker='o',
                    markersize=3, label=name, alpha=0.8)

        # Mark the known optimum.
        ax1.plot(x_star[0], x_star[1], 'k*', markersize=15, label='最优解')

        ax1.set_xlabel('x₁')
        ax1.set_ylabel('x₂')
        ax1.set_title('优化轨迹比较')
        ax1.legend()
        ax1.grid(True, alpha=0.3)

        # 2. Loss curves (log scale makes convergence rates comparable).
        ax2 = axes[0, 1]

        for i, (name, result) in enumerate(results.items()):
            losses = result['losses']
            ax2.semilogy(losses, color=colors[i], linewidth=2, label=name)

        ax2.set_xlabel('迭代次数')
        ax2.set_ylabel('损失值 (对数尺度)')
        ax2.set_title('收敛速度比较')
        ax2.legend()
        ax2.grid(True, alpha=0.3)

        # 3. Final loss reached by each optimizer.
        ax3 = axes[1, 0]

        names = list(results.keys())
        final_losses = [results[name]['final_loss'] for name in names]

        bars = ax3.bar(names, final_losses, color=colors[:len(names)], alpha=0.7)
        ax3.set_ylabel('最终损失值')
        ax3.set_title('最终性能比较')
        ax3.set_yscale('log')

        # Annotate each bar with its numeric value.
        for bar, loss in zip(bars, final_losses):
            height = bar.get_height()
            ax3.text(bar.get_x() + bar.get_width()/2., height,
                    f'{loss:.2e}', ha='center', va='bottom', fontsize=8)

        ax3.grid(True, alpha=0.3)

        # 4. Iterations needed to reach 1% of the initial loss.
        ax4 = axes[1, 1]

        # Threshold: 1% of the initial loss (same start for all optimizers).
        target_loss = 0.01 * results['SGD']['losses'][0]
        convergence_iterations = []

        for name in names:
            losses = results[name]['losses']
            converged_iter = len(losses)  # default: never reached the target

            for i, loss in enumerate(losses):
                if loss <= target_loss:
                    converged_iter = i
                    break

            convergence_iterations.append(converged_iter)

        bars = ax4.bar(names, convergence_iterations, color=colors[:len(names)], alpha=0.7)
        ax4.set_ylabel('收敛迭代次数')
        ax4.set_title('收敛速度 (到达1%初始损失)')

        # Annotate each bar with the iteration count.
        for bar, iter_count in zip(bars, convergence_iterations):
            height = bar.get_height()
            ax4.text(bar.get_x() + bar.get_width()/2., height,
                    f'{iter_count}', ha='center', va='bottom')

        ax4.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

    def compare_on_logistic_regression(self, n_samples: int = 1000, n_features: int = 20):
        """Train logistic regression with every optimizer and compare them.

        ``n_samples`` and ``n_features`` are currently unused: the real
        breast-cancer dataset fixes both (kept for interface compatibility).

        Returns a dict mapping optimizer name to loss curves, accuracies
        and final weights.
        """
        print("\n=== 逻辑回归优化比较 ===")

        # Load the dataset; targets are coded 0/1.
        data = load_breast_cancer()
        X, y = data.data, data.target

        # Standardize features to zero mean / unit variance.
        scaler = StandardScaler()
        X = scaler.fit_transform(X)

        # Prepend a bias column of ones.
        X = np.column_stack([np.ones(X.shape[0]), X])

        # Train/test split.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=42
        )

        # BUG FIX: the loss/gradient below use the y ∈ {-1, +1} convention
        # (log(1 + exp(-y*z))). With the raw 0/1 labels every negative
        # sample contributed zero gradient, so the model only fitted the
        # positive class. Map labels to ±1 for optimization and keep the
        # original 0/1 labels for accuracy computation.
        y_train_pm = 2 * y_train - 1
        y_test_pm = 2 * y_test - 1

        def sigmoid(z):
            # Clip the argument to avoid overflow in exp for large |z|.
            return 1 / (1 + np.exp(-np.clip(z, -250, 250)))

        def logistic_loss(w, X, y):
            # Mean logistic loss; y must be in {-1, +1}.
            z = X @ w
            return np.mean(np.log(1 + np.exp(-y * z)))

        def logistic_gradient(w, X, y):
            # Gradient of the mean logistic loss; y must be in {-1, +1}.
            z = X @ w
            return -X.T @ (y * sigmoid(-y * z)) / len(y)

        # Small random initial weights (shared by all optimizers).
        np.random.seed(42)
        w0 = np.random.randn(X_train.shape[1]) * 0.01

        results = {}
        n_epochs = 100
        batch_size = 32

        for name, optimizer_class in self.optimizers.items():
            print(f"训练 {name}...")

            # Per-optimizer hyper-parameters.
            if name == 'SGD':
                optimizer = optimizer_class(learning_rate=0.01)
            elif name in ['Momentum', 'Nesterov']:
                optimizer = optimizer_class(learning_rate=0.01, momentum=0.9)
            elif name == 'AdaGrad':
                optimizer = optimizer_class(learning_rate=0.1)
            elif name == 'RMSprop':
                optimizer = optimizer_class(learning_rate=0.01)
            elif name == 'Adam':
                optimizer = optimizer_class(learning_rate=0.001)

            w = w0.copy()
            train_losses = []
            test_losses = []

            for epoch in range(n_epochs):
                # Reshuffle the sample order every epoch.
                indices = np.random.permutation(len(X_train))

                # Mini-batch updates over the shuffled training set.
                for i in range(0, len(X_train), batch_size):
                    batch_indices = indices[i:i+batch_size]
                    X_batch = X_train[batch_indices]
                    y_batch = y_train_pm[batch_indices]

                    gradient = logistic_gradient(w, X_batch, y_batch)
                    w = optimizer.update(w, gradient)

                # Track full-dataset losses once per epoch.
                train_losses.append(logistic_loss(w, X_train, y_train_pm))
                test_losses.append(logistic_loss(w, X_test, y_test_pm))

            # Accuracy against the original 0/1 labels.
            train_pred = sigmoid(X_train @ w) > 0.5
            test_pred = sigmoid(X_test @ w) > 0.5

            train_acc = np.mean(train_pred == y_train)
            test_acc = np.mean(test_pred == y_test)

            results[name] = {
                'train_losses': train_losses,
                'test_losses': test_losses,
                'train_acc': train_acc,
                'test_acc': test_acc,
                'final_weights': w
            }

        self.visualize_logistic_comparison(results)
        return results

    def visualize_logistic_comparison(self, results: Dict):
        """Plot train/test loss curves, accuracies and final losses.

        ``results`` is the dict produced by
        :meth:`compare_on_logistic_regression`.
        """
        fig, axes = plt.subplots(2, 2, figsize=(15, 10))

        colors = ['blue', 'red', 'green', 'orange', 'purple', 'brown']

        # 1. Training loss per epoch.
        ax1 = axes[0, 0]
        for i, (name, result) in enumerate(results.items()):
            ax1.plot(result['train_losses'], color=colors[i],
                    linewidth=2, label=name)

        ax1.set_xlabel('训练轮次')
        ax1.set_ylabel('训练损失')
        ax1.set_title('训练损失比较')
        ax1.legend()
        ax1.grid(True, alpha=0.3)

        # 2. Test loss per epoch.
        ax2 = axes[0, 1]
        for i, (name, result) in enumerate(results.items()):
            ax2.plot(result['test_losses'], color=colors[i],
                    linewidth=2, label=name)

        ax2.set_xlabel('训练轮次')
        ax2.set_ylabel('测试损失')
        ax2.set_title('测试损失比较')
        ax2.legend()
        ax2.grid(True, alpha=0.3)

        # 3. Train/test accuracy side by side.
        ax3 = axes[1, 0]

        names = list(results.keys())
        train_accs = [results[name]['train_acc'] for name in names]
        test_accs = [results[name]['test_acc'] for name in names]

        x_pos = np.arange(len(names))
        width = 0.35

        ax3.bar(x_pos - width/2, train_accs, width,
               label='训练准确率', color='lightblue', alpha=0.7)
        ax3.bar(x_pos + width/2, test_accs, width,
               label='测试准确率', color='lightcoral', alpha=0.7)

        ax3.set_xlabel('优化器')
        ax3.set_ylabel('准确率')
        ax3.set_title('分类准确率比较')
        ax3.set_xticks(x_pos)
        ax3.set_xticklabels(names)
        ax3.legend()
        ax3.grid(True, alpha=0.3)

        # 4. Final train/test losses side by side.
        ax4 = axes[1, 1]

        final_train_losses = [results[name]['train_losses'][-1] for name in names]
        final_test_losses = [results[name]['test_losses'][-1] for name in names]

        ax4.bar(x_pos - width/2, final_train_losses, width,
               label='训练损失', color='lightblue', alpha=0.7)
        ax4.bar(x_pos + width/2, final_test_losses, width,
               label='测试损失', color='lightcoral', alpha=0.7)

        ax4.set_xlabel('优化器')
        ax4.set_ylabel('最终损失')
        ax4.set_title('最终损失比较')
        ax4.set_xticks(x_pos)
        ax4.set_xticklabels(names)
        ax4.legend()
        ax4.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

def demonstrate_sgd_algorithms():
    """Run both optimizer-comparison experiments and print a short summary."""
    print("=== 随机梯度下降算法比较 ===\n")

    runner = SGDComparison()

    # Experiment 1: deterministic quadratic objective.
    print("1. 二次函数优化")
    quad_results = runner.compare_on_quadratic(n_dims=2, n_iterations=100)

    # Experiment 2: mini-batch logistic regression.
    print("2. 逻辑回归优化")
    logit_results = runner.compare_on_logistic_regression()

    # Closing summary of each algorithm's character.
    print("\n=== SGD算法总结 ===")
    summary_lines = (
        "1. SGD: 简单但可能震荡",
        "2. Momentum: 加速收敛，减少震荡",
        "3. Nesterov: 更智能的动量方法",
        "4. AdaGrad: 自适应学习率，适合稀疏数据",
        "5. RMSprop: 解决AdaGrad学习率衰减问题",
        "6. Adam: 结合动量和自适应学习率，通常表现最好",
    )
    for line in summary_lines:
        print(line)

if __name__ == "__main__":
    # Run the full demo only when executed as a script, not on import.
    demonstrate_sgd_algorithms()