"""
Week 11: 过拟合分析与防止
Overfitting Analysis and Prevention
"""

from typing import Dict, List, Optional, Tuple

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import Ridge, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import validation_curve, learning_curve
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures

# Configure matplotlib so Chinese labels and minus signs render correctly.
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class OverfittingDetector:
    """Demonstrates how overfitting emerges as polynomial degree increases."""

    def __init__(self):
        self.name = "Overfitting Detector"

    def true_function(self, x: np.ndarray) -> np.ndarray:
        """Ground-truth target: linear trend + sinusoid + mild quadratic."""
        return 0.5 * x + 0.3 * np.sin(4 * np.pi * x) + 0.1 * x**2

    def generate_data(self, n_samples: int, noise_std: float = 0.2,
                     x_range: Tuple[float, float] = (0, 1)) -> Tuple[np.ndarray, np.ndarray]:
        """Draw noisy samples of the true function.

        Args:
            n_samples: number of points to sample.
            noise_std: standard deviation of the additive Gaussian noise.
            x_range: (low, high) interval from which x is sampled uniformly.

        Returns:
            Tuple (X, y): X with shape (n_samples, 1), y with shape (n_samples,).
        """
        # Fixed seed keeps every demo run reproducible.
        np.random.seed(42)
        x = np.random.uniform(x_range[0], x_range[1], n_samples)
        y_true = self.true_function(x)
        noise = np.random.normal(0, noise_std, n_samples)
        y = y_true + noise
        return x.reshape(-1, 1), y

    def demonstrate_overfitting_progression(self, max_degree: int = 15):
        """Fit polynomials of increasing degree and visualize the error gap.

        Args:
            max_degree: highest polynomial degree to fit.

        Returns:
            (degrees, train_errors, test_errors) for the fitted degrees.
        """
        # A small training set makes overfitting easy to provoke.
        X_train, y_train = self.generate_data(20, noise_std=0.2)
        X_test, y_test = self.generate_data(100, noise_std=0.2)

        # Dense grid used only for plotting the fitted curves.
        x_plot = np.linspace(0, 1, 200).reshape(-1, 1)
        y_plot_true = self.true_function(x_plot.flatten())

        degrees = list(range(1, max_degree + 1))
        train_errors = []
        test_errors = []

        fig, axes = plt.subplots(3, 5, figsize=(20, 12))
        axes = axes.flatten()

        for i, degree in enumerate(degrees):
            # Expand inputs into polynomial features of the current degree.
            poly_features = PolynomialFeatures(degree=degree)
            X_train_poly = poly_features.fit_transform(X_train)
            X_test_poly = poly_features.transform(X_test)
            X_plot_poly = poly_features.transform(x_plot)

            # Least-squares fit via lstsq: numerically stable even when the
            # design matrix is ill-conditioned at high degrees, unlike
            # explicitly forming and inverting X^T X.
            theta, *_ = np.linalg.lstsq(X_train_poly, y_train, rcond=None)

            # Predictions on the train set, test set, and plotting grid.
            y_train_pred = X_train_poly @ theta
            y_test_pred = X_test_poly @ theta
            y_plot_pred = X_plot_poly @ theta

            train_error = mean_squared_error(y_train, y_train_pred)
            test_error = mean_squared_error(y_test, y_test_pred)

            train_errors.append(train_error)
            test_errors.append(test_error)

            # The 3x5 subplot grid only has room for the first 15 degrees.
            if i < 15:
                ax = axes[i]

                # True function, training points, and the fitted curve.
                ax.plot(x_plot.flatten(), y_plot_true, 'g-', linewidth=2,
                       label='真实函数', alpha=0.8)
                ax.scatter(X_train.flatten(), y_train, color='blue', s=50,
                          alpha=0.7, label='训练数据')
                ax.plot(x_plot.flatten(), y_plot_pred, 'r-', linewidth=2,
                       label=f'度数={degree}')

                ax.set_title(f'度数={degree}\n训练误差={train_error:.3f}, 测试误差={test_error:.3f}')
                ax.set_xlim(0, 1)
                ax.set_ylim(-1, 2)
                ax.grid(True, alpha=0.3)

                if i == 0:
                    ax.legend()

        plt.tight_layout()
        plt.show()

        # Summary curves: train vs. test error and the overfitting gap.
        self.plot_overfitting_curve(degrees, train_errors, test_errors)

        return degrees, train_errors, test_errors

    def plot_overfitting_curve(self, degrees: List[int], train_errors: List[float],
                              test_errors: List[float]):
        """Plot train/test error versus degree plus the overfitting gap."""
        plt.figure(figsize=(12, 5))

        # Left panel: error curves on a log scale.
        plt.subplot(1, 2, 1)
        plt.plot(degrees, train_errors, 'bo-', linewidth=2, label='训练误差')
        plt.plot(degrees, test_errors, 'ro-', linewidth=2, label='测试误差')

        # Mark the degree with the lowest test error.
        optimal_degree = degrees[np.argmin(test_errors)]
        min_test_error = min(test_errors)
        plt.plot(optimal_degree, min_test_error, 'g*', markersize=15,
                label=f'最优度数={optimal_degree}')

        plt.xlabel('多项式度数')
        plt.ylabel('均方误差')
        plt.title('过拟合现象：训练误差 vs 测试误差')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.yscale('log')

        # Right panel: test-minus-train gap as a measure of overfitting.
        plt.subplot(1, 2, 2)
        overfitting_gap = np.array(test_errors) - np.array(train_errors)
        plt.plot(degrees, overfitting_gap, 'mo-', linewidth=2)
        plt.axhline(y=0, color='k', linestyle='--', alpha=0.5)

        plt.xlabel('多项式度数')
        plt.ylabel('过拟合程度 (测试误差 - 训练误差)')
        plt.title('过拟合程度随模型复杂度变化')
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

class EarlyStoppingDemo:
    """Simulates a training run and shows patience-based early stopping."""

    def __init__(self):
        self.name = "Early Stopping Demo"

    def simulate_training_process(self, n_epochs: int = 200, patience: int = 20):
        """Simulate train/validation loss curves and apply early stopping.

        Args:
            n_epochs: total number of simulated epochs.
            patience: epochs without validation improvement before stopping.

        Returns:
            (train_loss, val_loss, best_epoch, early_stop_epoch); the last is
            None when early stopping never triggered.
        """
        np.random.seed(42)

        # Training loss: monotone exponential decay plus small noise,
        # clipped to stay positive.
        train_loss_base = np.exp(-np.linspace(0, 3, n_epochs))
        train_noise = 0.02 * np.random.randn(n_epochs)
        train_loss = train_loss_base + train_noise
        train_loss = np.maximum(train_loss, 0.01)

        # Validation loss: decays for the first `turn` epochs, then rises
        # linearly (the overfitting phase). Clamping `turn` to n_epochs keeps
        # the arrays length-consistent when n_epochs < 80; the original
        # hard-coded 80 crashed with a shape mismatch in that case. At the
        # default n_epochs=200 the behavior is unchanged.
        turn = min(80, n_epochs)
        val_loss_base = np.exp(-np.linspace(0, 2, turn))
        val_loss_base = np.concatenate([
            val_loss_base,
            val_loss_base[-1] + 0.01 * np.arange(n_epochs - turn)
        ])
        val_noise = 0.03 * np.random.randn(n_epochs)
        val_loss = val_loss_base + val_noise
        val_loss = np.maximum(val_loss, 0.01)

        # Early-stopping loop: track the best validation loss seen so far and
        # stop once `patience` consecutive epochs fail to improve on it.
        best_val_loss = float('inf')
        best_epoch = 0
        patience_counter = 0
        early_stop_epoch = None

        for epoch in range(n_epochs):
            if val_loss[epoch] < best_val_loss:
                best_val_loss = val_loss[epoch]
                best_epoch = epoch
                patience_counter = 0
            else:
                patience_counter += 1

            if patience_counter >= patience:
                early_stop_epoch = epoch
                break

        self.visualize_early_stopping(
            train_loss, val_loss, best_epoch, early_stop_epoch, patience
        )

        return train_loss, val_loss, best_epoch, early_stop_epoch

    def visualize_early_stopping(self, train_loss: np.ndarray, val_loss: np.ndarray,
                                best_epoch: int, early_stop_epoch: Optional[int],
                                patience: int):
        """Plot the loss curves, a zoomed-in window, and the patience counter.

        Args:
            train_loss: per-epoch training loss.
            val_loss: per-epoch validation loss.
            best_epoch: epoch with the lowest validation loss.
            early_stop_epoch: epoch where early stopping fired, or None.
            patience: the patience threshold used.
        """
        epochs = np.arange(len(train_loss))

        plt.figure(figsize=(15, 5))

        # Panel 1: full loss curves with the key epochs marked.
        plt.subplot(1, 3, 1)
        plt.plot(epochs, train_loss, 'b-', linewidth=2, label='训练损失')
        plt.plot(epochs, val_loss, 'r-', linewidth=2, label='验证损失')

        plt.axvline(x=best_epoch, color='g', linestyle='--',
                   label=f'最佳模型 (epoch {best_epoch})')

        if early_stop_epoch is not None:
            plt.axvline(x=early_stop_epoch, color='orange', linestyle='--',
                       label=f'早停 (epoch {early_stop_epoch})')

        plt.xlabel('训练轮次')
        plt.ylabel('损失')
        plt.title('早停法演示')
        plt.legend()
        plt.grid(True, alpha=0.3)

        # Panel 2: zoom into the window around the best epoch.
        plt.subplot(1, 3, 2)
        start_idx = max(0, best_epoch - 30)
        end_idx = min(len(train_loss), best_epoch + 50)

        plt.plot(epochs[start_idx:end_idx], train_loss[start_idx:end_idx],
                'b-', linewidth=2, label='训练损失')
        plt.plot(epochs[start_idx:end_idx], val_loss[start_idx:end_idx],
                'r-', linewidth=2, label='验证损失')

        plt.axvline(x=best_epoch, color='g', linestyle='--', label='最佳模型')
        if early_stop_epoch is not None:
            plt.axvline(x=early_stop_epoch, color='orange', linestyle='--', label='早停')

        plt.xlabel('训练轮次')
        plt.ylabel('损失')
        plt.title('早停区域放大')
        plt.legend()
        plt.grid(True, alpha=0.3)

        # Panel 3: the patience counter, recomputed epoch by epoch.
        plt.subplot(1, 3, 3)

        patience_count = []
        current_patience = 0
        # Start from +inf so epoch 0 counts as an improvement, matching the
        # actual early-stopping loop (the original seeded with val_loss[0],
        # which made the counter start at 1).
        current_best = float('inf')

        for loss in val_loss:
            if loss < current_best:
                current_best = loss
                current_patience = 0
            else:
                current_patience += 1
            patience_count.append(current_patience)

        plt.plot(epochs, patience_count, 'purple', linewidth=2, label='耐心计数')
        plt.axhline(y=patience, color='red', linestyle='--',
                   label=f'耐心阈值 = {patience}')

        if early_stop_epoch is not None:
            plt.axvline(x=early_stop_epoch, color='orange', linestyle='--',
                       label='触发早停')

        plt.xlabel('训练轮次')
        plt.ylabel('耐心计数')
        plt.title('耐心机制')
        plt.legend()
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

class RegularizationComparison:
    """Compares Ridge, Lasso, and ElasticNet regularization on noisy data."""

    def __init__(self):
        self.detector = OverfittingDetector()

    def compare_regularization_methods(self, degree: int = 10):
        """Sweep the regularization strength for each method and plot results.

        Args:
            degree: polynomial degree of the feature expansion.

        Returns:
            Mapping of method name -> dict with keys 'alphas', 'train_scores'
            and 'test_scores'.
        """
        X_train, y_train = self.detector.generate_data(30, noise_std=0.2)
        X_test, y_test = self.detector.generate_data(100, noise_std=0.2)

        # Log-spaced regularization strengths.
        alphas = np.logspace(-4, 2, 50)

        # Uniform factories (alpha -> estimator) remove the need to
        # special-case ElasticNet when building the pipeline below.
        methods = {
            'Ridge (L2)': lambda a: Ridge(alpha=a),
            'Lasso (L1)': lambda a: Lasso(alpha=a),
            'ElasticNet': lambda a: ElasticNet(alpha=a, l1_ratio=0.5)
        }

        results = {}

        for method_name, make_estimator in methods.items():
            print(f"分析 {method_name}...")

            train_scores = []
            test_scores = []

            for alpha in alphas:
                model = Pipeline([
                    ('poly', PolynomialFeatures(degree=degree)),
                    ('reg', make_estimator(alpha))
                ])

                # NOTE: Lasso/ElasticNet may emit convergence warnings at
                # very small alphas; results remain usable for this demo.
                model.fit(X_train, y_train)

                train_pred = model.predict(X_train)
                test_pred = model.predict(X_test)

                train_scores.append(mean_squared_error(y_train, train_pred))
                test_scores.append(mean_squared_error(y_test, test_pred))

            results[method_name] = {
                'alphas': alphas,
                'train_scores': train_scores,
                'test_scores': test_scores
            }

        self.visualize_regularization_comparison(results, degree)
        return results

    def visualize_regularization_comparison(self, results: Dict, degree: int):
        """Render four panels: validation curves, best performance, the
        overfitting gap, and a Ridge coefficient path.

        Args:
            results: output of compare_regularization_methods.
            degree: polynomial degree used (shown in titles).
        """
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))

        colors = ['blue', 'red', 'green']

        # Panel 1: train/test error versus regularization strength.
        ax1 = axes[0, 0]

        for i, (method_name, result) in enumerate(results.items()):
            alphas = result['alphas']
            train_scores = result['train_scores']
            test_scores = result['test_scores']

            ax1.semilogx(alphas, train_scores, '--', color=colors[i],
                        alpha=0.7, label=f'{method_name} (训练)')
            ax1.semilogx(alphas, test_scores, '-', color=colors[i],
                        linewidth=2, label=f'{method_name} (测试)')

        ax1.set_xlabel('正则化强度 α')
        ax1.set_ylabel('均方误差')
        ax1.set_title(f'正则化验证曲线 (度数={degree})')
        ax1.legend()
        ax1.grid(True, alpha=0.3)

        # Panel 2: best test error per method, annotated with the optimal α.
        ax2 = axes[0, 1]

        method_names = list(results.keys())
        optimal_alphas = []
        min_test_errors = []

        for method_name in method_names:
            test_scores = results[method_name]['test_scores']
            alphas = results[method_name]['alphas']

            min_idx = np.argmin(test_scores)
            optimal_alphas.append(alphas[min_idx])
            min_test_errors.append(test_scores[min_idx])

        x_pos = np.arange(len(method_names))
        bars = ax2.bar(x_pos, min_test_errors, color=colors[:len(method_names)], alpha=0.7)

        ax2.set_xlabel('正则化方法')
        ax2.set_ylabel('最小测试误差')
        ax2.set_title('不同正则化方法的最佳性能')
        ax2.set_xticks(x_pos)
        ax2.set_xticklabels(method_names)

        # Annotate each bar with the α that achieved its minimum.
        for bar, alpha in zip(bars, optimal_alphas):
            height = bar.get_height()
            ax2.text(bar.get_x() + bar.get_width()/2., height,
                    f'α={alpha:.3f}', ha='center', va='bottom', fontsize=8)

        ax2.grid(True, alpha=0.3)

        # Panel 3: overfitting gap (test - train) versus α.
        ax3 = axes[1, 0]

        for i, (method_name, result) in enumerate(results.items()):
            alphas = result['alphas']
            overfitting_gap = np.array(result['test_scores']) - np.array(result['train_scores'])
            ax3.semilogx(alphas, overfitting_gap, color=colors[i],
                        linewidth=2, label=method_name)

        ax3.axhline(y=0, color='black', linestyle='--', alpha=0.5)
        ax3.set_xlabel('正则化强度 α')
        ax3.set_ylabel('过拟合程度 (测试误差 - 训练误差)')
        ax3.set_title('过拟合程度随正则化强度变化')
        ax3.legend()
        ax3.grid(True, alpha=0.3)

        # Panel 4: Ridge coefficient shrinkage path.
        ax4 = axes[1, 1]

        # Refit Ridge models across α to trace the coefficient path
        # (generate_data reseeds, so this is the same training set as above).
        X_train, y_train = self.detector.generate_data(30, noise_std=0.2)

        alphas_path = np.logspace(-4, 2, 20)
        coef_paths = []

        for alpha in alphas_path:
            model = Pipeline([
                ('poly', PolynomialFeatures(degree=degree)),
                ('ridge', Ridge(alpha=alpha))
            ])
            model.fit(X_train, y_train)
            coef_paths.append(model.named_steps['ridge'].coef_)

        coef_paths = np.array(coef_paths)

        # Show only the first 10 coefficients; legend labels only the first 5.
        for i in range(min(10, coef_paths.shape[1])):
            ax4.semilogx(alphas_path, coef_paths[:, i],
                        linewidth=1, alpha=0.7, label=f'θ_{i}' if i < 5 else "")

        ax4.set_xlabel('正则化强度 α')
        ax4.set_ylabel('系数值')
        ax4.set_title('Ridge回归系数路径')
        if coef_paths.shape[1] <= 5:
            ax4.legend()
        ax4.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

def demonstrate_overfitting_analysis():
    """Run the full Week-11 demo: overfitting progression, early stopping,
    and regularization comparison, then print a strategy summary."""
    print("=== 过拟合现象分析 ===\n")

    # 1. Overfitting progression with increasing polynomial degree.
    print("1. 过拟合发展过程演示")
    detector = OverfittingDetector()
    degrees, train_errors, test_errors = detector.demonstrate_overfitting_progression(max_degree=15)

    # 2. Early stopping on a simulated training run.
    print("\n2. 早停法演示")
    n_epochs = 200
    early_stopping = EarlyStoppingDemo()
    train_loss, val_loss, best_epoch, early_stop_epoch = early_stopping.simulate_training_process(
        n_epochs=n_epochs, patience=20
    )

    print(f"   最佳模型在第 {best_epoch} 轮")
    # Explicit None check: a truthiness test would wrongly skip epoch 0.
    if early_stop_epoch is not None:
        print(f"   早停触发在第 {early_stop_epoch} 轮")
        # Derive saved epochs from n_epochs rather than a second hard-coded 200.
        print(f"   节省了 {n_epochs - early_stop_epoch} 轮训练")

    # 3. Regularization method comparison.
    print("\n3. 正则化方法比较")
    reg_comparison = RegularizationComparison()
    reg_results = reg_comparison.compare_regularization_methods(degree=10)

    # 4. Summary of prevention strategies.
    print("\n=== 过拟合防止策略总结 ===")
    print("1. 监控验证误差，及时发现过拟合")
    print("2. 使用早停法防止过度训练")
    print("3. 应用正则化技术控制模型复杂度")
    print("4. L1正则化可以进行特征选择")
    print("5. L2正则化可以防止系数过大")
    print("6. ElasticNet结合了L1和L2的优点")
# Entry point: run the complete demonstration when executed as a script.
if __name__ == "__main__":
    demonstrate_overfitting_analysis()