"""
Week 8: 泛化理论分析实现
Generalization Theory Analysis Implementation
"""

from typing import Callable, List, Optional, Tuple

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score, learning_curve
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures

# Configure Matplotlib to render CJK labels and minus signs correctly.
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class GeneralizationAnalyzer:
    """Generalization analyzer.

    Studies how polynomial degree (model complexity) and training-set size
    drive training/test error, the generalization gap, and the optimal
    complexity, on a synthetic regression task with a known true function.
    """

    def __init__(self):
        # Human-readable identifier; not used by the analysis itself.
        self.name = "Generalization Analyzer"

    def true_function(self, x: np.ndarray) -> np.ndarray:
        """Ground-truth degree-4 polynomial used to synthesize targets."""
        return 0.5 * x + 0.3 * x**2 - 0.1 * x**3 + 0.05 * x**4

    def generate_data(self, n_samples: int, noise_std: float = 0.3,
                      x_range: Tuple[float, float] = (-2, 2)) -> Tuple[np.ndarray, np.ndarray]:
        """Sample noisy observations of the true function.

        Args:
            n_samples: number of points to draw.
            noise_std: standard deviation of the additive Gaussian noise.
            x_range: (low, high) interval for the uniform inputs.

        Returns:
            X of shape (n_samples, 1) and y of shape (n_samples,).
        """
        # Fixed seed: every call with the same size yields identical data,
        # which makes the different analyses comparable.
        np.random.seed(42)
        x = np.random.uniform(x_range[0], x_range[1], n_samples)
        y_true = self.true_function(x)
        noise = np.random.normal(0, noise_std, n_samples)
        return x.reshape(-1, 1), y_true + noise

    def polynomial_model_complexity_analysis(self, max_degree: int = 15,
                                             train_sizes: Optional[List[int]] = None) -> dict:
        """Measure train/test error across polynomial degrees and sample sizes.

        Args:
            max_degree: highest polynomial degree evaluated (degrees 1..max_degree).
            train_sizes: training-set sizes to compare; defaults to a spread
                from 20 to 500 samples.

        Returns:
            Mapping train_size -> dict with 'train_errors', 'test_errors',
            and 'degrees' lists.
        """
        if train_sizes is None:
            train_sizes = [20, 50, 100, 200, 500]

        degrees = range(1, max_degree + 1)

        # A large held-out set approximates the true generalization error.
        X_test_large, y_test_large = self.generate_data(1000, noise_std=0.3)

        results = {}

        for train_size in train_sizes:
            print(f"分析训练集大小: {train_size}")

            X_train, y_train = self.generate_data(train_size, noise_std=0.3)

            train_errors = []
            test_errors = []

            for degree in degrees:
                poly_features = PolynomialFeatures(degree=degree, include_bias=True)
                X_train_poly = poly_features.fit_transform(X_train)
                X_test_poly = poly_features.transform(X_test_large)

                # Solve the normal equations directly; np.linalg.solve is
                # numerically safer than forming an explicit inverse. Fall
                # back to the pseudo-inverse (minimum-norm least squares)
                # when the Gram matrix is singular.
                try:
                    theta = np.linalg.solve(X_train_poly.T @ X_train_poly,
                                            X_train_poly.T @ y_train)
                except np.linalg.LinAlgError:
                    theta = np.linalg.pinv(X_train_poly) @ y_train

                train_pred = X_train_poly @ theta
                test_pred = X_test_poly @ theta

                train_errors.append(np.mean((train_pred - y_train) ** 2))
                test_errors.append(np.mean((test_pred - y_test_large) ** 2))

            results[train_size] = {
                'train_errors': train_errors,
                'test_errors': test_errors,
                'degrees': list(degrees),
            }

        self.visualize_complexity_analysis(results)
        return results

    def visualize_complexity_analysis(self, results: dict) -> None:
        """Plot a 2x2 summary: error curves, generalization gap, optimal
        degree vs sample size, and best test error vs sample size."""
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        # One color per training-set size, shared across panels 1 and 2.
        colors = plt.cm.viridis(np.linspace(0, 1, len(results)))

        # Panel 1: train (dashed) / test (solid) error vs polynomial degree.
        ax1 = axes[0, 0]
        for i, (train_size, result) in enumerate(results.items()):
            degrees = result['degrees']
            ax1.plot(degrees, result['train_errors'], '--', color=colors[i],
                     label=f'训练误差 (n={train_size})', alpha=0.7)
            ax1.plot(degrees, result['test_errors'], '-', color=colors[i],
                     label=f'测试误差 (n={train_size})', linewidth=2)

        ax1.set_xlabel('多项式度数')
        ax1.set_ylabel('均方误差')
        ax1.set_title('模型复杂度 vs 泛化误差')
        ax1.legend()
        ax1.grid(True, alpha=0.3)
        ax1.set_yscale('log')

        # Panel 2: generalization gap (test - train) vs degree.
        ax2 = axes[0, 1]
        for i, (train_size, result) in enumerate(results.items()):
            degrees = result['degrees']
            gap = np.array(result['test_errors']) - np.array(result['train_errors'])
            ax2.plot(degrees, gap, '-o', color=colors[i],
                     label=f'n={train_size}', markersize=4)

        ax2.set_xlabel('多项式度数')
        ax2.set_ylabel('泛化间隙')
        ax2.set_title('泛化间隙 vs 模型复杂度')
        ax2.legend()
        ax2.grid(True, alpha=0.3)

        # Panel 3: degree minimizing test error, per training-set size.
        ax3 = axes[1, 0]
        train_sizes = list(results.keys())
        optimal_degrees = []
        min_test_errors = []

        for train_size in train_sizes:
            result = results[train_size]
            test_errors = result['test_errors']
            # Index the stored degree list instead of assuming degrees
            # start at 1 — robust if the degree range ever changes.
            optimal_degrees.append(result['degrees'][int(np.argmin(test_errors))])
            min_test_errors.append(min(test_errors))

        ax3.plot(train_sizes, optimal_degrees, 'bo-', markersize=8, linewidth=2)
        ax3.set_xlabel('训练集大小')
        ax3.set_ylabel('最优多项式度数')
        ax3.set_title('最优模型复杂度 vs 训练集大小')
        ax3.grid(True, alpha=0.3)

        # Panel 4: best achievable test error vs training-set size.
        ax4 = axes[1, 1]
        ax4.plot(train_sizes, min_test_errors, 'ro-', markersize=8, linewidth=2)
        ax4.set_xlabel('训练集大小')
        ax4.set_ylabel('最小测试误差')
        ax4.set_title('最佳泛化性能 vs 训练集大小')
        ax4.grid(True, alpha=0.3)
        ax4.set_yscale('log')

        plt.tight_layout()
        plt.show()

class CrossValidationAnalyzer:
    """Cross-validation analyzer.

    Selects polynomial-model complexity via k-fold cross-validation and
    visualizes the resulting training/CV error curves.
    """

    def __init__(self):
        # Human-readable identifier only.
        self.name = "Cross Validation Analyzer"

    def analyze_cross_validation(self, X: np.ndarray, y: np.ndarray,
                                 max_degree: int = 10, cv_folds: int = 5):
        """Run k-fold CV over polynomial degrees 1..max_degree.

        Args:
            X: inputs of shape (n_samples, 1).
            y: targets of shape (n_samples,).
            max_degree: highest polynomial degree evaluated.
            cv_folds: number of cross-validation folds.

        Returns:
            (degrees, cv_scores_mean, cv_scores_std, train_scores).
        """
        degrees = range(1, max_degree + 1)

        cv_scores_mean: List[float] = []
        cv_scores_std: List[float] = []
        train_scores: List[float] = []

        for degree in degrees:
            # Polynomial regression pipeline; the tiny ridge penalty guards
            # against numerical problems at high degrees.
            poly_reg = Pipeline([
                ('poly', PolynomialFeatures(degree=degree)),
                ('ridge', Ridge(alpha=0.01))
            ])

            # scikit-learn returns negated MSE for this scorer; flip the sign.
            cv_scores = cross_val_score(poly_reg, X, y, cv=cv_folds,
                                        scoring='neg_mean_squared_error')
            cv_scores = -cv_scores

            # Training error of the model refit on the full data set.
            poly_reg.fit(X, y)
            train_pred = poly_reg.predict(X)
            train_score = np.mean((train_pred - y) ** 2)

            cv_scores_mean.append(np.mean(cv_scores))
            cv_scores_std.append(np.std(cv_scores))
            train_scores.append(train_score)

        self.visualize_cross_validation(degrees, cv_scores_mean, cv_scores_std, train_scores)

        # The degree with the lowest mean CV error wins.
        optimal_degree = degrees[np.argmin(cv_scores_mean)]
        print(f"交叉验证选择的最优度数: {optimal_degree}")

        return degrees, cv_scores_mean, cv_scores_std, train_scores

    def visualize_cross_validation(self, degrees: List[int], cv_mean: List[float],
                                   cv_std: List[float], train_scores: List[float]) -> None:
        """Plot CV model-selection curves and highlight the chosen degree."""
        plt.figure(figsize=(12, 5))

        # Left panel: train vs CV error with a +/-1 std band around CV.
        plt.subplot(1, 2, 1)

        plt.plot(degrees, train_scores, 'o-', color='blue', label='训练误差', linewidth=2)
        plt.plot(degrees, cv_mean, 'o-', color='red', label='交叉验证误差', linewidth=2)

        cv_mean_array = np.array(cv_mean)
        cv_std_array = np.array(cv_std)
        plt.fill_between(degrees, cv_mean_array - cv_std_array,
                         cv_mean_array + cv_std_array, alpha=0.3, color='red')

        plt.xlabel('多项式度数')
        plt.ylabel('均方误差')
        plt.title('交叉验证模型选择')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.yscale('log')

        # Right panel: bar chart with the optimal degree highlighted in red.
        plt.subplot(1, 2, 2)

        optimal_idx = np.argmin(cv_mean)
        optimal_degree = degrees[optimal_idx]

        plt.bar(degrees, cv_mean, alpha=0.7, color='lightblue')
        plt.bar(optimal_degree, cv_mean[optimal_idx], color='red',
                label=f'最优度数: {optimal_degree}')

        plt.xlabel('多项式度数')
        plt.ylabel('交叉验证误差')
        plt.title('模型选择结果')
        plt.legend()
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

class LearningCurveAnalyzer:
    """Learning-curve analyzer.

    Plots training/validation error as a function of training-set size for
    several polynomial degrees, to contrast under- and over-fitting regimes.
    """

    def __init__(self):
        # Human-readable identifier only.
        self.name = "Learning Curve Analyzer"

    def analyze_learning_curves(self, X: np.ndarray, y: np.ndarray,
                                degrees: Optional[List[int]] = None) -> None:
        """Draw one learning-curve subplot per polynomial degree.

        Args:
            X: inputs of shape (n_samples, 1).
            y: targets of shape (n_samples,).
            degrees: polynomial degrees to compare; defaults to [2, 5, 10].
                (A None sentinel replaces the original mutable default
                list, avoiding the shared-default pitfall.)
        """
        if degrees is None:
            degrees = [2, 5, 10]

        # Fractions of the data used as training sets: 10% .. 100%.
        train_sizes = np.linspace(0.1, 1.0, 10)

        plt.figure(figsize=(15, 5))

        for i, degree in enumerate(degrees):
            plt.subplot(1, len(degrees), i + 1)

            # Polynomial regression with a tiny ridge penalty for stability.
            poly_reg = Pipeline([
                ('poly', PolynomialFeatures(degree=degree)),
                ('ridge', Ridge(alpha=0.01))
            ])

            train_sizes_abs, train_scores, val_scores = learning_curve(
                poly_reg, X, y, train_sizes=train_sizes, cv=5,
                scoring='neg_mean_squared_error', n_jobs=-1
            )

            # Scorer returns negated MSE; convert back to positive values.
            train_scores = -train_scores
            val_scores = -val_scores

            # Mean and spread across the CV folds.
            train_mean = np.mean(train_scores, axis=1)
            train_std = np.std(train_scores, axis=1)
            val_mean = np.mean(val_scores, axis=1)
            val_std = np.std(val_scores, axis=1)

            # Curves with +/-1 std shading.
            plt.plot(train_sizes_abs, train_mean, 'o-', color='blue',
                     label='训练误差', linewidth=2)
            plt.fill_between(train_sizes_abs, train_mean - train_std,
                             train_mean + train_std, alpha=0.3, color='blue')

            plt.plot(train_sizes_abs, val_mean, 'o-', color='red',
                     label='验证误差', linewidth=2)
            plt.fill_between(train_sizes_abs, val_mean - val_std,
                             val_mean + val_std, alpha=0.3, color='red')

            plt.xlabel('训练集大小')
            plt.ylabel('均方误差')
            plt.title(f'学习曲线 (度数={degree})')
            plt.legend()
            plt.grid(True, alpha=0.3)
            plt.yscale('log')

        plt.tight_layout()
        plt.show()

def demonstrate_generalization_theory():
    """Run the full generalization-theory walkthrough: complexity analysis,
    cross-validation model selection, and learning curves."""
    print("=== 泛化理论演示 ===\n")

    # Shared synthetic data set for all three analyses.
    gen_analyzer = GeneralizationAnalyzer()
    X, y = gen_analyzer.generate_data(n_samples=200, noise_std=0.3)

    print("1. 模型复杂度与泛化分析")
    # How does model complexity interact with training-set size?
    gen_analyzer.polynomial_model_complexity_analysis(
        max_degree=12, train_sizes=[30, 50, 100, 200]
    )

    print("\n2. 交叉验证模型选择")
    # Pick the best polynomial degree by k-fold cross-validation.
    CrossValidationAnalyzer().analyze_cross_validation(X, y, max_degree=10, cv_folds=5)

    print("\n3. 学习曲线分析")
    # Learning curves for an under-fit, well-fit, and over-fit model.
    LearningCurveAnalyzer().analyze_learning_curves(X, y, degrees=[2, 5, 8])

def analyze_sample_complexity():
    """Empirically measure how test error shrinks as the training set grows,
    for several model complexities, and compare against the O(1/n) rate."""
    print("\n=== 样本复杂度分析 ===\n")

    data_gen = GeneralizationAnalyzer()

    # 20 training-set sizes, log-spaced between 10 and 1000.
    sample_sizes = np.logspace(1, 3, 20).astype(int)
    target_degrees = [2, 4, 6, 8]

    results = {}

    for target_degree in target_degrees:
        print(f"分析目标复杂度: 度数 {target_degree}")

        errors = []
        for n_samples in sample_sizes:
            X_train, y_train = data_gen.generate_data(n_samples, noise_std=0.2)
            # Fixed-size held-out test set for every training size.
            X_test, y_test = data_gen.generate_data(500, noise_std=0.2)

            # Expand inputs to the target polynomial degree.
            features = PolynomialFeatures(degree=target_degree, include_bias=True)
            X_train_poly = features.fit_transform(X_train)
            X_test_poly = features.transform(X_test)

            # A small ridge penalty keeps the fit stable at small n.
            model = Ridge(alpha=0.01)
            model.fit(X_train_poly, y_train)

            residuals = model.predict(X_test_poly) - y_test
            errors.append(np.mean(residuals ** 2))

        results[target_degree] = errors

    # Visualize every complexity level on shared log-log axes.
    plt.figure(figsize=(10, 6))

    palette = ['blue', 'red', 'green', 'orange']
    for color, (degree, degree_errors) in zip(palette, results.items()):
        plt.loglog(sample_sizes, degree_errors, 'o-', color=color,
                   label=f'度数 {degree}', linewidth=2, markersize=4)

    plt.xlabel('训练样本数')
    plt.ylabel('测试误差')
    plt.title('样本复杂度分析')
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Reference decay rate O(1/n) for comparison.
    plt.loglog(sample_sizes, 1.0 / sample_sizes, '--', color='black',
               alpha=0.7, label='理论 O(1/n)')
    plt.legend()

    plt.tight_layout()
    plt.show()

if __name__ == "__main__":
    demonstrate_generalization_theory()
    analyze_sample_complexity()