"""
Week 10: 方差-偏差权衡深入分析
Variance-Bias Tradeoff Deep Analysis
"""

import numpy as np
import matplotlib.pyplot as plt
from typing import Callable
from sklearn.ensemble import BaggingRegressor, RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class BiasVarianceDecomposer:
    """偏差-方差分解器"""
    
    def __init__(self):
        self.name = "Bias-Variance Decomposer"
    
    def true_function(self, x: np.ndarray) -> np.ndarray:
        """真实函数: f(x) = sin(1.5πx)"""
        return np.sin(1.5 * np.pi * x)
    
    def generate_dataset(self, n_samples: int, noise_std: float = 0.3, 
                        x_range: tuple[float, float] = (0, 1)) -> tuple[np.ndarray, np.ndarray]:
        """生成数据集"""
        x = np.random.uniform(x_range[0], x_range[1], n_samples)
        y_true = self.true_function(x)
        noise = np.random.normal(0, noise_std, n_samples)
        y = y_true + noise
        return x.reshape(-1, 1), y
    
    def polynomial_model_factory(self, degree: int, alpha: float = 0.0):
        """多项式模型工厂"""
        def create_model():
            return Pipeline([
                ('poly', PolynomialFeatures(degree=degree)),
                ('ridge', Ridge(alpha=alpha))
            ])
        return create_model
    
    def tree_model_factory(self, max_depth: int = None, min_samples_leaf: int = 1):
        """决策树模型工厂"""
        def create_model():
            return DecisionTreeRegressor(
                max_depth=max_depth, 
                min_samples_leaf=min_samples_leaf,
                random_state=None  # 每次都不同
            )
        return create_model
    
    def bias_variance_decomposition(self, model_factory: Callable, 
                                  n_experiments: int = 100, 
                                  n_samples: int = 50, 
                                  noise_std: float = 0.3,
                                  test_points: np.ndarray = None):
        """
        执行偏差-方差分解
        """
        if test_points is None:
            test_points = np.linspace(0, 1, 50).reshape(-1, 1)
        
        # 真实函数值
        y_true = self.true_function(test_points.flatten())
        
        # 存储所有实验的预测
        predictions = []
        
        print(f"执行 {n_experiments} 次实验...")
        
        for exp in range(n_experiments):
            if (exp + 1) % 20 == 0:
                print(f"  完成 {exp + 1}/{n_experiments} 次实验")
            
            # 生成训练数据
            X_train, y_train = self.generate_dataset(n_samples, noise_std)
            
            # 训练模型
            model = model_factory()
            model.fit(X_train, y_train)
            
            # 预测
            y_pred = model.predict(test_points)
            predictions.append(y_pred)
        
        # 转换为数组
        predictions = np.array(predictions)  # shape: (n_experiments, n_test_points)
        
        # 计算偏差、方差和噪声
        mean_prediction = np.mean(predictions, axis=0)
        
        # 偏差²
        bias_squared = np.mean((mean_prediction - y_true) ** 2)
        
        # 方差
        variance = np.mean(np.var(predictions, axis=0))
        
        # 噪声
        noise = noise_std ** 2
        
        # 总误差 (经验估计)
        total_error = np.mean(np.mean((predictions - y_true) ** 2, axis=1))
        
        # 可约误差
        reducible_error = bias_squared + variance
        
        results = {
            'bias_squared': bias_squared,
            'variance': variance,
            'noise': noise,
            'total_error': total_error,
            'reducible_error': reducible_error,
            'predictions': predictions,
            'mean_prediction': mean_prediction,
            'test_points': test_points,
            'y_true': y_true
        }
        
        print(f"偏差²: {bias_squared:.4f}")
        print(f"方差: {variance:.4f}")
        print(f"噪声: {noise:.4f}")
        print(f"总误差: {total_error:.4f}")
        print(f"理论总误差: {bias_squared + variance + noise:.4f}")
        print(f"可约误差: {reducible_error:.4f}")
        
        return results

class ModelComplexityAnalyzer:
    """Sweeps polynomial degree and ridge regularization strength and plots
    how bias^2, variance, and total error respond to model complexity."""

    def __init__(self):
        # Reuses the shared decomposition engine.
        self.decomposer = BiasVarianceDecomposer()

    def analyze_polynomial_complexity(self, degrees: list[int] = None,
                                    regularization_strengths: list[float] = None):
        """Run the bias-variance decomposition over a degree x alpha grid.

        Args:
            degrees: polynomial degrees to evaluate (default 1..15 subset).
            regularization_strengths: ridge ``alpha`` values to evaluate.

        Returns:
            Nested dict ``results[alpha][degree] -> decomposition dict``.
        """
        if degrees is None:
            degrees = [1, 2, 3, 5, 8, 12, 15]
        if regularization_strengths is None:
            regularization_strengths = [0.0, 0.01, 0.1, 1.0]

        results = {}

        # Shared evaluation grid so all models are compared on equal footing.
        test_points = np.linspace(0, 1, 50).reshape(-1, 1)

        for alpha in regularization_strengths:
            print(f"\n分析正则化强度 α = {alpha}")
            results[alpha] = {}

            for degree in degrees:
                print(f"  度数 {degree}...")

                model_factory = self.decomposer.polynomial_model_factory(degree, alpha)
                result = self.decomposer.bias_variance_decomposition(
                    model_factory, n_experiments=50, n_samples=30,
                    test_points=test_points
                )

                results[alpha][degree] = result

        self.visualize_complexity_analysis(results, degrees, regularization_strengths)
        return results

    def visualize_complexity_analysis(self, results: dict, degrees: list[int],
                                    alphas: list[float]):
        """Render a 2x3 panel summarizing the complexity sweep.

        Fix over the original version: the panels that single out "a medium
        alpha" / "a medium degree" used hard-coded indices (``alphas[1]``,
        ``degrees[4]``, ``degrees[5]``) which raised IndexError for shorter
        user-supplied lists; indices are now clamped to the available range.
        """
        fig, axes = plt.subplots(2, 3, figsize=(18, 12))

        colors = plt.cm.viridis(np.linspace(0, 1, len(alphas)))

        # 1. bias^2 vs complexity
        ax1 = axes[0, 0]
        for i, alpha in enumerate(alphas):
            bias_values = [results[alpha][d]['bias_squared'] for d in degrees]
            ax1.plot(degrees, bias_values, 'o-', color=colors[i],
                    label=f'α={alpha}', linewidth=2)

        ax1.set_xlabel('多项式度数')
        ax1.set_ylabel('偏差²')
        ax1.set_title('偏差² vs 模型复杂度')
        ax1.legend()
        ax1.grid(True, alpha=0.3)
        ax1.set_yscale('log')

        # 2. variance vs complexity
        ax2 = axes[0, 1]
        for i, alpha in enumerate(alphas):
            variance_values = [results[alpha][d]['variance'] for d in degrees]
            ax2.plot(degrees, variance_values, 's-', color=colors[i],
                    label=f'α={alpha}', linewidth=2)

        ax2.set_xlabel('多项式度数')
        ax2.set_ylabel('方差')
        ax2.set_title('方差 vs 模型复杂度')
        ax2.legend()
        ax2.grid(True, alpha=0.3)
        ax2.set_yscale('log')

        # 3. total error vs complexity
        ax3 = axes[0, 2]
        for i, alpha in enumerate(alphas):
            total_errors = [results[alpha][d]['total_error'] for d in degrees]
            ax3.plot(degrees, total_errors, '^-', color=colors[i],
                    label=f'α={alpha}', linewidth=2)

        ax3.set_xlabel('多项式度数')
        ax3.set_ylabel('总误差')
        ax3.set_title('总误差 vs 模型复杂度')
        ax3.legend()
        ax3.grid(True, alpha=0.3)

        # 4. bias-variance tradeoff at one regularization strength
        ax4 = axes[1, 0]
        # Prefer a moderate regularization; clamp for short alpha lists.
        alpha_selected = alphas[min(1, len(alphas) - 1)]

        bias_values = [results[alpha_selected][d]['bias_squared'] for d in degrees]
        variance_values = [results[alpha_selected][d]['variance'] for d in degrees]
        noise_values = [results[alpha_selected][d]['noise'] for d in degrees]
        total_errors = [results[alpha_selected][d]['total_error'] for d in degrees]

        ax4.plot(degrees, bias_values, 'r-o', label='偏差²', linewidth=2)
        ax4.plot(degrees, variance_values, 'b-s', label='方差', linewidth=2)
        ax4.plot(degrees, noise_values, 'g--', label='噪声', linewidth=2)
        ax4.plot(degrees, total_errors, 'k-^', label='总误差', linewidth=3)

        ax4.set_xlabel('多项式度数')
        ax4.set_ylabel('误差')
        ax4.set_title(f'偏差-方差权衡 (α={alpha_selected})')
        ax4.legend()
        ax4.grid(True, alpha=0.3)

        # 5. effect of regularization at one degree
        ax5 = axes[1, 1]
        # Prefer a mid-range degree; clamp for short degree lists.
        degree_selected = degrees[min(4, len(degrees) - 1)]

        bias_reg = [results[alpha][degree_selected]['bias_squared'] for alpha in alphas]
        variance_reg = [results[alpha][degree_selected]['variance'] for alpha in alphas]
        total_reg = [results[alpha][degree_selected]['total_error'] for alpha in alphas]

        ax5.semilogx(alphas, bias_reg, 'r-o', label='偏差²', linewidth=2)
        ax5.semilogx(alphas, variance_reg, 'b-s', label='方差', linewidth=2)
        ax5.semilogx(alphas, total_reg, 'k-^', label='总误差', linewidth=3)

        ax5.set_xlabel('正则化强度 α')
        ax5.set_ylabel('误差')
        ax5.set_title(f'正则化效果 (度数={degree_selected})')
        ax5.legend()
        ax5.grid(True, alpha=0.3)

        # 6. mean predictions (with +/- 1 std band) for a few models
        ax6 = axes[1, 2]

        # Ground truth taken from any stored result (grid is shared).
        test_points = results[alphas[0]][degrees[0]]['test_points']
        y_true = results[alphas[0]][degrees[0]]['y_true']
        ax6.plot(test_points.flatten(), y_true, 'k-', linewidth=3, label='真实函数')

        # A few representative complexities; skip indices beyond the list.
        selected_degrees = [degrees[i] for i in (1, 3, 5) if i < len(degrees)]
        selected_colors = ['blue', 'red', 'green']

        for degree, color in zip(selected_degrees, selected_colors):
            mean_pred = results[alphas[0]][degree]['mean_prediction']
            predictions = results[alphas[0]][degree]['predictions']

            # +/- 1 std band across the repeated fits.
            pred_std = np.std(predictions, axis=0)
            ax6.fill_between(test_points.flatten(),
                           mean_pred - pred_std, mean_pred + pred_std,
                           alpha=0.3, color=color)
            ax6.plot(test_points.flatten(), mean_pred, color=color,
                    linewidth=2, label=f'度数={degree}')

        ax6.set_xlabel('x')
        ax6.set_ylabel('y')
        ax6.set_title('不同复杂度模型的预测')
        ax6.legend()
        ax6.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

class EnsembleAnalyzer:
    """Compares the bias-variance profiles of single trees versus
    bagging/random-forest ensembles."""

    def __init__(self):
        # Reuses the shared decomposition engine.
        self.decomposer = BiasVarianceDecomposer()

    def compare_ensemble_methods(self):
        """Decompose bias/variance for several tree-based models and plot.

        Returns:
            dict mapping model display name -> decomposition result dict.
        """

        # Candidate models. NOTE: BaggingRegressor's base-model keyword was
        # renamed from ``base_estimator`` to ``estimator`` in scikit-learn
        # 1.2 (``base_estimator`` removed in 1.4); using the current name.
        models = {
            '单个决策树': lambda: DecisionTreeRegressor(max_depth=8, random_state=None),
            'Bagging': lambda: BaggingRegressor(
                estimator=DecisionTreeRegressor(max_depth=8),
                n_estimators=50, random_state=None
            ),
            '随机森林': lambda: RandomForestRegressor(
                n_estimators=50, max_depth=8, random_state=None
            ),
            '深度树': lambda: DecisionTreeRegressor(max_depth=None, random_state=None),
            'Bagging深度树': lambda: BaggingRegressor(
                estimator=DecisionTreeRegressor(max_depth=None),
                n_estimators=50, random_state=None
            )
        }

        results = {}
        test_points = np.linspace(0, 1, 50).reshape(-1, 1)

        for name, model_factory in models.items():
            print(f"\n分析 {name}...")

            result = self.decomposer.bias_variance_decomposition(
                model_factory, n_experiments=30, n_samples=50,
                test_points=test_points
            )

            results[name] = result

        self.visualize_ensemble_comparison(results)
        return results

    def visualize_ensemble_comparison(self, results: dict):
        """Render a 2x2 panel comparing the analyzed models.

        Expects ``results`` keyed by the display names produced in
        ``compare_ensemble_methods`` (panels 3 and 4 index specific keys).
        """
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))

        model_names = list(results.keys())

        # 1. grouped bars: bias^2 / variance / noise per model
        ax1 = axes[0, 0]

        bias_values = [results[name]['bias_squared'] for name in model_names]
        variance_values = [results[name]['variance'] for name in model_names]
        noise_values = [results[name]['noise'] for name in model_names]

        x_pos = np.arange(len(model_names))
        width = 0.25

        ax1.bar(x_pos - width, bias_values, width, label='偏差²', alpha=0.8)
        ax1.bar(x_pos, variance_values, width, label='方差', alpha=0.8)
        ax1.bar(x_pos + width, noise_values, width, label='噪声', alpha=0.8)

        ax1.set_xlabel('模型')
        ax1.set_ylabel('误差')
        ax1.set_title('偏差-方差-噪声分解')
        ax1.set_xticks(x_pos)
        ax1.set_xticklabels(model_names, rotation=45, ha='right')
        ax1.legend()
        ax1.grid(True, alpha=0.3)

        # 2. total error per model
        ax2 = axes[0, 1]

        total_errors = [results[name]['total_error'] for name in model_names]
        colors = plt.cm.viridis(np.linspace(0, 1, len(model_names)))

        bars = ax2.bar(model_names, total_errors, color=colors, alpha=0.8)
        ax2.set_ylabel('总误差')
        ax2.set_title('总误差比较')
        ax2.tick_params(axis='x', rotation=45)
        ax2.grid(True, alpha=0.3)

        # Annotate each bar with its value.
        for bar, error in zip(bars, total_errors):
            height = bar.get_height()
            ax2.text(bar.get_x() + bar.get_width()/2., height,
                    f'{error:.3f}', ha='center', va='bottom')

        # 3. mean predictions of a few representative models
        ax3 = axes[1, 0]

        # Ground truth taken from any stored result (grid is shared).
        test_points = results[model_names[0]]['test_points']
        y_true = results[model_names[0]]['y_true']
        ax3.plot(test_points.flatten(), y_true, 'k-', linewidth=3, label='真实函数')

        selected_models = [model_names[0], model_names[1], model_names[2]]
        colors = ['red', 'blue', 'green']

        for model, color in zip(selected_models, colors):
            mean_pred = results[model]['mean_prediction']
            ax3.plot(test_points.flatten(), mean_pred, color=color,
                    linewidth=2, label=model)

        ax3.set_xlabel('x')
        ax3.set_ylabel('y')
        ax3.set_title('不同模型的平均预测')
        ax3.legend()
        ax3.grid(True, alpha=0.3)

        # 4. variance reduction: single model vs bagging ensemble
        ax4 = axes[1, 1]

        single_tree_var = results['单个决策树']['variance']
        bagging_var = results['Bagging']['variance']
        rf_var = results['随机森林']['variance']

        single_deep_var = results['深度树']['variance']
        bagging_deep_var = results['Bagging深度树']['variance']

        categories = ['浅树', '深树']
        single_vars = [single_tree_var, single_deep_var]
        ensemble_vars = [bagging_var, bagging_deep_var]

        x_pos = np.arange(len(categories))
        width = 0.35

        ax4.bar(x_pos - width/2, single_vars, width, label='单个模型', alpha=0.8)
        ax4.bar(x_pos + width/2, ensemble_vars, width, label='Bagging集成', alpha=0.8)

        ax4.set_xlabel('模型类型')
        ax4.set_ylabel('方差')
        ax4.set_title('集成方法的方差减少效果')
        ax4.set_xticks(x_pos)
        ax4.set_xticklabels(categories)
        ax4.legend()
        ax4.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

def demonstrate_variance_bias_tradeoff():
    """Run the complete variance-bias tradeoff demonstration end to end."""
    print("=== 方差-偏差权衡深入分析 ===\n")

    # Part 1: sweep polynomial degree and ridge strength.
    print("1. 多项式模型复杂度分析")
    poly_analyzer = ModelComplexityAnalyzer()
    poly_analyzer.analyze_polynomial_complexity(
        degrees=[1, 2, 3, 5, 8, 12],
        regularization_strengths=[0.0, 0.01, 0.1, 1.0]
    )

    # Part 2: compare single trees against bagging-style ensembles.
    print("\n2. 集成方法偏差-方差分析")
    EnsembleAnalyzer().compare_ensemble_methods()

    # Part 3: print the key takeaways.
    print("\n=== 方差-偏差权衡总结 ===")
    takeaways = (
        "1. 模型复杂度增加 → 偏差减少，方差增加",
        "2. 正则化 → 偏差略增，方差显著减少",
        "3. Bagging → 偏差不变，方差减少",
        "4. 最优模型在偏差和方差间找到平衡",
        "5. 集成方法是减少方差的有效途径",
    )
    for line in takeaways:
        print(line)

# Script entry point: run the full demonstration only when executed
# directly, not when imported as a module.
if __name__ == "__main__":
    demonstrate_variance_bias_tradeoff()