"""
Week 16: 核方法实现
Kernel Methods Implementation
"""

from typing import Callable, Dict, Optional

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification, make_regression, make_circles, make_swiss_roll
from sklearn.decomposition import PCA, KernelPCA
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Configure matplotlib to render CJK labels and minus signs correctly.
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class KernelLibrary:
    """Collection of common kernel functions.

    Every kernel takes ``X`` of shape (n, d) and ``Y`` of shape (m, d) and
    returns the (n, m) Gram matrix ``K`` with ``K[i, j] = k(X[i], Y[j])``.
    """
    
    @staticmethod
    def linear_kernel(X: np.ndarray, Y: np.ndarray) -> np.ndarray:
        """Linear kernel: k(x, y) = x . y"""
        return X @ Y.T
    
    @staticmethod
    def polynomial_kernel(X: np.ndarray, Y: np.ndarray, 
                         degree: int = 3, gamma: float = 1.0, coef0: float = 0.0) -> np.ndarray:
        """Polynomial kernel: k(x, y) = (gamma * x.y + coef0) ** degree"""
        return (gamma * X @ Y.T + coef0) ** degree
    
    @staticmethod
    def rbf_kernel(X: np.ndarray, Y: np.ndarray, gamma: float = 1.0) -> np.ndarray:
        """Gaussian / RBF kernel: k(x, y) = exp(-gamma * ||x - y||^2)"""
        # Squared Euclidean distances via the expansion ||x||^2 + ||y||^2 - 2 x.y
        X_norm = np.sum(X**2, axis=1, keepdims=True)
        Y_norm = np.sum(Y**2, axis=1, keepdims=True)
        distances_sq = X_norm + Y_norm.T - 2 * X @ Y.T
        # FIX: floating-point cancellation can make the expansion slightly
        # negative for (near-)identical points, which would yield kernel
        # values > 1; clamp at zero so k(x, x) == 1 exactly.
        distances_sq = np.maximum(distances_sq, 0.0)
        return np.exp(-gamma * distances_sq)
    
    @staticmethod
    def sigmoid_kernel(X: np.ndarray, Y: np.ndarray, 
                      gamma: float = 1.0, coef0: float = 0.0) -> np.ndarray:
        """Sigmoid kernel: k(x, y) = tanh(gamma * x.y + coef0)"""
        return np.tanh(gamma * X @ Y.T + coef0)
    
    @staticmethod
    def laplacian_kernel(X: np.ndarray, Y: np.ndarray, gamma: float = 1.0) -> np.ndarray:
        """Laplacian kernel: k(x, y) = exp(-gamma * ||x - y||_1)"""
        # Pairwise Manhattan (L1) distances via broadcasting: (n, 1, d) - (1, m, d)
        distances = np.sum(np.abs(X[:, np.newaxis, :] - Y[np.newaxis, :, :]), axis=2)
        return np.exp(-gamma * distances)
    
    @staticmethod
    def cosine_kernel(X: np.ndarray, Y: np.ndarray) -> np.ndarray:
        """Cosine-similarity kernel: k(x, y) = x.y / (||x|| * ||y||).

        NOTE: rows with zero norm produce NaN/inf (division by zero) —
        callers are expected to pass non-zero vectors.
        """
        # Normalize each row to unit length, then take plain dot products.
        X_norm = X / np.linalg.norm(X, axis=1, keepdims=True)
        Y_norm = Y / np.linalg.norm(Y, axis=1, keepdims=True)
        return X_norm @ Y_norm.T

class KernelPCAImplementation:
    """Kernel principal component analysis (kernel PCA).

    Projects data onto the leading eigenvectors of the double-centered
    training kernel matrix, i.e. PCA in the implicit feature space induced
    by the chosen kernel.
    """
    
    def __init__(self, n_components: int = 2, kernel: str = 'rbf', 
                 gamma: float = 1.0, degree: int = 3, coef0: float = 0.0):
        self.n_components = n_components
        self.kernel = kernel    # one of 'linear', 'poly', 'rbf', 'sigmoid'
        self.gamma = gamma      # scale for 'rbf'/'poly'/'sigmoid'
        self.degree = degree    # degree for 'poly'
        self.coef0 = coef0      # bias for 'poly'/'sigmoid'
        
        # Fitted state (None until fit() is called)
        self.X_train = None         # training data kernels are evaluated against
        self.eigenvalues = None     # eigenvalues of centered kernel, descending
        self.eigenvectors = None    # matching eigenvectors (first n_components scaled)
        self.K_train = None         # centered training kernel matrix
        self._K_uncentered = None   # raw training kernel, needed to center test kernels
    
    def _get_kernel_function(self) -> Callable:
        """Return a callable K(X, Y) for the configured kernel name.

        Raises:
            ValueError: if ``self.kernel`` is not a supported name.
        """
        if self.kernel == 'linear':
            return KernelLibrary.linear_kernel
        elif self.kernel == 'poly':
            return lambda X, Y: KernelLibrary.polynomial_kernel(
                X, Y, self.degree, self.gamma, self.coef0)
        elif self.kernel == 'rbf':
            return lambda X, Y: KernelLibrary.rbf_kernel(X, Y, self.gamma)
        elif self.kernel == 'sigmoid':
            return lambda X, Y: KernelLibrary.sigmoid_kernel(X, Y, self.gamma, self.coef0)
        else:
            raise ValueError(f"未知核函数: {self.kernel}")
    
    def fit(self, X: np.ndarray):
        """Fit kernel PCA by eigendecomposing the centered training kernel."""
        self.X_train = X
        n_samples = X.shape[0]
        
        # Training Gram matrix
        kernel_func = self._get_kernel_function()
        K = kernel_func(X, X)
        # Keep the raw kernel: transform() needs it to center test kernels
        # consistently with the training centering.
        self._K_uncentered = K
        
        # Double-center the kernel (zero-mean data in feature space)
        one_n = np.ones((n_samples, n_samples)) / n_samples
        K_centered = K - one_n @ K - K @ one_n + one_n @ K @ one_n
        
        self.K_train = K_centered
        
        # Symmetric eigendecomposition (eigh returns ascending eigenvalues)
        eigenvalues, eigenvectors = np.linalg.eigh(K_centered)
        
        # Reorder to descending eigenvalues
        idx = np.argsort(eigenvalues)[::-1]
        self.eigenvalues = eigenvalues[idx]
        self.eigenvectors = eigenvectors[:, idx]
        
        # Scale retained eigenvectors by 1/sqrt(lambda) so the corresponding
        # feature-space principal axes have unit norm.
        for i in range(self.n_components):
            if self.eigenvalues[i] > 0:
                self.eigenvectors[:, i] /= np.sqrt(self.eigenvalues[i])
    
    def transform(self, X: np.ndarray) -> np.ndarray:
        """Project data onto the learned kernel principal components.

        Raises:
            ValueError: if called before :meth:`fit`.
        """
        if self.X_train is None:
            raise ValueError("模型未训练")
        
        kernel_func = self._get_kernel_function()
        
        # Raw kernel between new points and the training points
        K_test = kernel_func(X, self.X_train)
        
        n_train = self.X_train.shape[0]
        one_n = np.ones((n_train, n_train)) / n_train
        one_m = np.ones((X.shape[0], n_train)) / n_train
        
        # BUG FIX: the test-kernel centering formula requires the *uncentered*
        # training kernel. The previous code plugged in the centered matrix,
        # whose row/column means are zero, so two centering terms vanished and
        # out-of-sample projections disagreed with the training projection.
        K = self._K_uncentered
        K_test_centered = (K_test - one_m @ K - 
                          K_test @ one_n + one_m @ K @ one_n)
        
        # Project onto the retained (scaled) eigenvectors
        return K_test_centered @ self.eigenvectors[:, :self.n_components]
    
    def fit_transform(self, X: np.ndarray) -> np.ndarray:
        """Fit on X and return its projection."""
        self.fit(X)
        return self.transform(X)

class KernelRegression:
    """Kernel ridge regression.

    Fits weights ``w`` by solving ``(K + alpha*I) w = y`` on the training
    Gram matrix and predicts with ``k(x, X_train) @ w``.
    """
    
    def __init__(self, kernel: str = 'rbf', gamma: float = 1.0, 
                 alpha: float = 1e-6, degree: int = 3, coef0: float = 0.0):
        """
        Args:
            kernel: 'linear', 'rbf' or 'poly'.
            gamma: kernel scale parameter.
            alpha: ridge regularization strength.
            degree: polynomial degree (previously hard-wired to the library
                default of 3; now configurable, default unchanged).
            coef0: polynomial bias term (previously the implicit default 0).
        """
        self.kernel = kernel
        self.gamma = gamma
        self.alpha = alpha  # regularization strength
        self.degree = degree
        self.coef0 = coef0
        
        # Fitted state (None until fit() is called)
        self.X_train = None
        self.y_train = None
        self.weights = None
    
    def _get_kernel_function(self) -> Callable:
        """Return a callable K(X, Y) for the configured kernel name.

        Raises:
            ValueError: if ``self.kernel`` is not a supported name.
        """
        if self.kernel == 'linear':
            return KernelLibrary.linear_kernel
        elif self.kernel == 'rbf':
            return lambda X, Y: KernelLibrary.rbf_kernel(X, Y, self.gamma)
        elif self.kernel == 'poly':
            # FIX: forward degree/coef0 — previously the degree was silently
            # pinned to the library default, so callers could not vary it.
            return lambda X, Y: KernelLibrary.polynomial_kernel(
                X, Y, self.degree, self.gamma, self.coef0)
        else:
            raise ValueError(f"未知核函数: {self.kernel}")
    
    def fit(self, X: np.ndarray, y: np.ndarray):
        """Fit the regressor by solving the regularized kernel system."""
        self.X_train = X
        self.y_train = y
        
        # Training Gram matrix
        kernel_func = self._get_kernel_function()
        K = kernel_func(X, X)
        
        # Ridge term keeps the system well-conditioned and invertible
        K_reg = K + self.alpha * np.eye(K.shape[0])
        
        # Solve (K + alpha*I) w = y
        self.weights = np.linalg.solve(K_reg, y)
    
    def predict(self, X: np.ndarray) -> np.ndarray:
        """Predict targets for X.

        Raises:
            ValueError: if called before :meth:`fit`.
        """
        if self.X_train is None:
            raise ValueError("模型未训练")
        
        kernel_func = self._get_kernel_function()
        K_test = kernel_func(X, self.X_train)
        
        return K_test @ self.weights

class KernelComparison:
    """Run side-by-side comparisons of kernel PCA and kernel regression."""
    
    def __init__(self):
        # Kernel configurations shared by the kernel-PCA experiments.
        self.kernels = {
            'linear': {'kernel': 'linear'},
            'poly_2': {'kernel': 'poly', 'degree': 2, 'gamma': 1.0},
            'poly_3': {'kernel': 'poly', 'degree': 3, 'gamma': 1.0},
            'rbf_0.1': {'kernel': 'rbf', 'gamma': 0.1},
            'rbf_1.0': {'kernel': 'rbf', 'gamma': 1.0},
            'rbf_10': {'kernel': 'rbf', 'gamma': 10.0}
        }
    
    def compare_kernel_pca(self):
        """Compare plain PCA and kernel PCA on toy datasets and plot the results.

        Returns:
            dict: per-dataset original/scaled data plus the 2-D embeddings
            produced by each method.
        """
        print("=== 核PCA比较 ===")
        
        # Datasets with different intrinsic (nonlinear) structure.
        datasets = {
            'circles': make_circles(n_samples=300, noise=0.1, factor=0.3, random_state=42),
            'swiss_roll': make_swiss_roll(n_samples=300, noise=0.1, random_state=42)
        }
        
        results = {}
        
        for data_name, (X, y) in datasets.items():
            print(f"处理 {data_name} 数据...")
            
            if data_name == 'swiss_roll':
                # The Swiss roll is 3-D; keep dims 0 and 2 for 2-D visualization.
                X_2d = X[:, [0, 2]]
            else:
                X_2d = X
            
            # Standardize features before any PCA variant.
            scaler = StandardScaler()
            X_scaled = scaler.fit_transform(X_2d)
            
            data_results = {}
            
            # Baseline: ordinary (linear) PCA.
            pca = PCA(n_components=2)
            X_pca = pca.fit_transform(X_scaled)
            data_results['PCA'] = X_pca
            
            # Kernel PCA with each configured kernel.
            for kernel_name, kernel_params in self.kernels.items():
                if kernel_name.startswith('poly') and data_name == 'swiss_roll':
                    continue  # skip polynomial kernels on the Swiss roll
                
                kpca = KernelPCAImplementation(n_components=2, **kernel_params)
                X_kpca = kpca.fit_transform(X_scaled)
                data_results[f'KPCA_{kernel_name}'] = X_kpca
            
            results[data_name] = {
                'original': (X_2d, y),
                'scaled': (X_scaled, y),
                'results': data_results
            }
        
        self.visualize_kernel_pca_comparison(results)
        return results
    
    def visualize_kernel_pca_comparison(self, results: dict):
        """Plot original data next to each method's 2-D embedding, one row per dataset."""
        n_datasets = len(results)
        n_methods = len(list(results.values())[0]['results'])
        
        fig, axes = plt.subplots(n_datasets, min(n_methods, 6), figsize=(24, 8))
        if n_datasets == 1:
            axes = axes.reshape(1, -1)
        
        for i, (data_name, data_info) in enumerate(results.items()):
            X_original, y = data_info['original']
            
            # First column: the original (2-D) data.
            ax = axes[i, 0]
            scatter = ax.scatter(X_original[:, 0], X_original[:, 1], c=y, cmap='viridis', alpha=0.7)
            ax.set_title(f'{data_name.title()} - 原始数据')
            ax.set_xlabel('特征 1')
            ax.set_ylabel('特征 2')
            
            # Remaining columns: embeddings from each method.
            method_names = list(data_info['results'].keys())
            for j, method_name in enumerate(method_names[:5]):  # show at most 5 methods
                if j + 1 >= axes.shape[1]:
                    break
                
                ax = axes[i, j + 1]
                X_transformed = data_info['results'][method_name]
                
                scatter = ax.scatter(X_transformed[:, 0], X_transformed[:, 1], 
                                   c=y, cmap='viridis', alpha=0.7)
                ax.set_title(f'{method_name}')
                ax.set_xlabel('主成分 1')
                ax.set_ylabel('主成分 2')
        
        plt.tight_layout()
        plt.show()
    
    def compare_kernel_regression(self):
        """Compare kernel regression with several kernels on noisy sine data.

        Returns:
            dict: per-kernel fitted model, train/test MSE and predictions.
        """
        print("\n=== 核回归比较 ===")
        
        # Nonlinear 1-D regression target: sin(x) plus Gaussian noise.
        np.random.seed(42)
        X = np.linspace(0, 4*np.pi, 100).reshape(-1, 1)
        y = np.sin(X).ravel() + 0.1 * np.random.randn(100)
        
        # Train/test split.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.3, random_state=42
        )
        
        # Standardize X only; y stays in its original units.
        scaler_X = StandardScaler()
        X_train_scaled = scaler_X.fit_transform(X_train)
        X_test_scaled = scaler_X.transform(X_test)
        
        results = {}
        
        # Kernel configurations to evaluate.
        # NOTE(review): the two 'Poly' entries do not pass a 'degree' value and
        # KernelRegression's poly branch does not forward one, so both appear
        # to use the library's default degree — confirm and pass 'degree'
        # explicitly if distinct degrees are intended.
        kernel_configs = {
            'Linear': {'kernel': 'linear'},
            'RBF (γ=0.1)': {'kernel': 'rbf', 'gamma': 0.1},
            'RBF (γ=1.0)': {'kernel': 'rbf', 'gamma': 1.0},
            'RBF (γ=10)': {'kernel': 'rbf', 'gamma': 10.0},
            'Poly (d=2)': {'kernel': 'poly', 'gamma': 1.0},
            'Poly (d=3)': {'kernel': 'poly', 'gamma': 1.0}
        }
        
        for kernel_name, kernel_params in kernel_configs.items():
            print(f"训练 {kernel_name} 核回归...")
            
            kr = KernelRegression(**kernel_params, alpha=1e-3)
            kr.fit(X_train_scaled, y_train)
            
            # Predictions on both splits.
            y_train_pred = kr.predict(X_train_scaled)
            y_test_pred = kr.predict(X_test_scaled)
            
            # Mean squared errors.
            train_mse = mean_squared_error(y_train, y_train_pred)
            test_mse = mean_squared_error(y_test, y_test_pred)
            
            results[kernel_name] = {
                'model': kr,
                'train_mse': train_mse,
                'test_mse': test_mse,
                'train_pred': y_train_pred,
                'test_pred': y_test_pred
            }
        
        self.visualize_kernel_regression_comparison(
            X_train_scaled, y_train, X_test_scaled, y_test, results
        )
        
        return results
    
    def visualize_kernel_regression_comparison(self, X_train: np.ndarray, y_train: np.ndarray,
                                            X_test: np.ndarray, y_test: np.ndarray, 
                                            results: dict):
        """Plot train/test points and each kernel's fitted curve with its MSEs."""
        fig, axes = plt.subplots(2, 3, figsize=(18, 10))
        axes = axes.flatten()
        
        # Dense grid over the training range for a smooth prediction curve.
        X_plot = np.linspace(X_train.min(), X_train.max(), 200).reshape(-1, 1)
        
        for i, (kernel_name, result) in enumerate(results.items()):
            if i >= 6:
                break
            
            ax = axes[i]
            
            # Predict on the dense grid.
            y_plot = result['model'].predict(X_plot)
            
            # Scatter the train/test points.
            ax.scatter(X_train, y_train, color='blue', alpha=0.6, s=30, label='训练数据')
            ax.scatter(X_test, y_test, color='red', alpha=0.6, s=30, label='测试数据')
            
            # Fitted curve.
            ax.plot(X_plot, y_plot, color='green', linewidth=2, label='预测')
            
            ax.set_title(f'{kernel_name}\n训练MSE: {result["train_mse"]:.4f}, '
                        f'测试MSE: {result["test_mse"]:.4f}')
            ax.set_xlabel('X')
            ax.set_ylabel('y')
            ax.legend()
            ax.grid(True, alpha=0.3)
        
        plt.tight_layout()
        plt.show()
        
        # Follow-up summary bar charts.
        self.plot_regression_performance(results)
    
    def plot_regression_performance(self, results: dict):
        """Bar-chart train/test MSE and the test-minus-train gap per kernel."""
        fig, axes = plt.subplots(1, 2, figsize=(12, 5))
        
        kernel_names = list(results.keys())
        train_mses = [results[name]['train_mse'] for name in kernel_names]
        test_mses = [results[name]['test_mse'] for name in kernel_names]
        
        # 1. Train vs. test MSE, grouped bars.
        ax1 = axes[0]
        x_pos = np.arange(len(kernel_names))
        width = 0.35
        
        ax1.bar(x_pos - width/2, train_mses, width, label='训练MSE', alpha=0.7)
        ax1.bar(x_pos + width/2, test_mses, width, label='测试MSE', alpha=0.7)
        
        ax1.set_xlabel('核函数')
        ax1.set_ylabel('均方误差')
        ax1.set_title('不同核函数的MSE比较')
        ax1.set_xticks(x_pos)
        ax1.set_xticklabels(kernel_names, rotation=45)
        ax1.legend()
        ax1.grid(True, alpha=0.3)
        
        # 2. Overfitting gap (test MSE minus train MSE).
        ax2 = axes[1]
        overfitting = np.array(test_mses) - np.array(train_mses)
        
        bars = ax2.bar(kernel_names, overfitting, alpha=0.7)
        ax2.axhline(y=0, color='red', linestyle='--', alpha=0.5)
        
        ax2.set_xlabel('核函数')
        ax2.set_ylabel('过拟合程度 (测试MSE - 训练MSE)')
        ax2.set_title('过拟合程度比较')
        ax2.tick_params(axis='x', rotation=45)
        ax2.grid(True, alpha=0.3)
        
        # Annotate each bar with its numeric value, above or below the bar.
        for bar, value in zip(bars, overfitting):
            height = bar.get_height()
            ax2.text(bar.get_x() + bar.get_width()/2., height,
                    f'{value:.4f}', ha='center', 
                    va='bottom' if height >= 0 else 'top')
        
        plt.tight_layout()
        plt.show()

def demonstrate_kernel_methods():
    """End-to-end demo: kernel matrices, kernel PCA, and kernel regression."""
    print("=== 核方法综合演示 ===\n")
    
    # 1. Visualize a few kernel matrices on two tiny point sets.
    print("1. 核函数可视化")
    
    X = np.array([[0, 0], [1, 1], [2, 0]])
    Y = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
    
    # (label, Gram matrix) pairs for the four kernels shown.
    kernel_matrices = [
        ('Linear', KernelLibrary.linear_kernel(X, Y)),
        ('RBF (γ=1)', KernelLibrary.rbf_kernel(X, Y, gamma=1.0)),
        ('Poly (d=2)', KernelLibrary.polynomial_kernel(X, Y, degree=2)),
        ('Cosine', KernelLibrary.cosine_kernel(X, Y)),
    ]
    
    fig, axes = plt.subplots(1, 4, figsize=(16, 4))
    
    for ax, (label, K) in zip(axes, kernel_matrices):
        image = ax.imshow(K, cmap='viridis', aspect='auto')
        ax.set_title(f'{label} 核矩阵')
        ax.set_xlabel('Y样本')
        ax.set_ylabel('X样本')
        plt.colorbar(image, ax=ax)
        
        # Overlay the numeric value of every kernel entry.
        for (row, col), value in np.ndenumerate(K):
            ax.text(col, row, f'{value:.2f}', 
                    ha='center', va='center', color='white')
    
    plt.tight_layout()
    plt.show()
    
    # 2. Kernel PCA comparison across datasets and kernels.
    print("\n2. 核PCA比较")
    comparison = KernelComparison()
    comparison.compare_kernel_pca()
    
    # 3. Kernel regression comparison across kernels.
    print("\n3. 核回归比较")
    comparison.compare_kernel_regression()
    
    # 4. Printed takeaways.
    print("\n=== 核方法总结 ===")
    for summary_line in (
        "1. 核函数允许在高维空间中进行计算而无需显式映射",
        "2. RBF核是最常用的核函数，适用于大多数问题",
        "3. 多项式核适合具有多项式关系的数据",
        "4. 核PCA可以发现非线性的主成分",
        "5. 核回归可以拟合复杂的非线性关系",
        "6. 核参数的选择对性能有重要影响",
        "7. 核方法的计算复杂度通常较高",
    ):
        print(summary_line)
if __name__ == "__main__":
    # Run the full demo only when executed as a script (not on import).
    demonstrate_kernel_methods()