"""
Week 15: 支持向量机实现
Support Vector Machine Implementation
"""

from typing import Callable, Dict, List, Optional, Tuple

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification, make_circles, make_moons
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class LinearSVM:
    """Linear SVM trained with a simplified SMO (Sequential Minimal
    Optimization) solver.

    Labels are mapped internally to {-1, +1}; predict() therefore returns
    np.sign values (-1.0 / +1.0, or 0.0 exactly on the decision boundary).
    """
    
    def __init__(self, C: float = 1.0, max_iter: int = 1000, tol: float = 1e-6):
        """Initialize hyperparameters.

        Args:
            C: Soft-margin penalty (upper bound on each Lagrange multiplier).
            max_iter: Maximum number of outer SMO passes over the data.
            tol: Tolerance used both for the KKT check and the convergence test.
        """
        self.C = C
        self.max_iter = max_iter
        self.tol = tol
        
        # Model parameters, all populated by fit()
        self.w = None                       # weight vector, shape (n_features,)
        self.b = None                       # scalar bias
        self.alpha = None                   # Lagrange multipliers, shape (n_samples,)
        self.support_vectors = None         # rows of X whose alpha > 1e-5
        self.support_vector_labels = None   # their labels, in {-1, +1}
        self.support_vector_indices = None  # their row indices in X
    
    def _kernel(self, X1: np.ndarray, X2: np.ndarray) -> np.ndarray:
        """Linear kernel: the Gram matrix X1 @ X2.T."""
        return X1 @ X2.T
    
    def fit(self, X: np.ndarray, y: np.ndarray):
        """Train the SVM with a simplified SMO loop.

        Args:
            X: Training data, shape (n_samples, n_features).
            y: Labels; any value <= 0 is treated as the negative class.
        """
        n_samples, n_features = X.shape
        
        # Map labels to {-1, +1}
        y = np.where(y <= 0, -1, 1)
        
        # Initialize the Lagrange multipliers and the bias
        self.alpha = np.zeros(n_samples)
        self.b = 0
        
        # Precompute the full Gram matrix once (O(n^2) memory)
        K = self._kernel(X, X)
        
        # Simplified SMO: sweep i over all samples, pick j at random
        for iteration in range(self.max_iter):
            alpha_prev = self.alpha.copy()
            
            for i in range(n_samples):
                # Current decision value for sample i
                prediction = np.sum(self.alpha * y * K[i]) + self.b
                
                # Prediction error for sample i
                E_i = prediction - y[i]
                
                # Only optimize pairs that violate the KKT conditions
                if (y[i] * E_i < -self.tol and self.alpha[i] < self.C) or \
                   (y[i] * E_i > self.tol and self.alpha[i] > 0):
                    
                    # Choose the second working-set variable j
                    j = self._select_j(i, n_samples, E_i, X, y, K)
                    
                    if j == i:
                        continue
                    
                    # Box constraints [L, H] for the updated alpha_j
                    if y[i] != y[j]:
                        L = max(0, self.alpha[j] - self.alpha[i])
                        H = min(self.C, self.C + self.alpha[j] - self.alpha[i])
                    else:
                        L = max(0, self.alpha[i] + self.alpha[j] - self.C)
                        H = min(self.C, self.alpha[i] + self.alpha[j])
                    
                    if L == H:
                        continue
                    
                    # eta is the second derivative of the objective along the
                    # constraint line; a non-negative eta gives no progress
                    eta = 2 * K[i, j] - K[i, i] - K[j, j]
                    if eta >= 0:
                        continue
                    
                    # Analytic (unclipped) update for alpha_j
                    E_j = np.sum(self.alpha * y * K[j]) + self.b - y[j]
                    alpha_j_new = self.alpha[j] - y[j] * (E_i - E_j) / eta
                    
                    # Clip alpha_j into [L, H]
                    if alpha_j_new > H:
                        alpha_j_new = H
                    elif alpha_j_new < L:
                        alpha_j_new = L
                    
                    if abs(alpha_j_new - self.alpha[j]) < 1e-5:
                        continue
                    
                    # Update alpha_i so the equality constraint is preserved
                    alpha_i_new = self.alpha[i] + y[i] * y[j] * (self.alpha[j] - alpha_j_new)
                    
                    # Recompute the bias from either updated multiplier
                    b1 = self.b - E_i - y[i] * (alpha_i_new - self.alpha[i]) * K[i, i] - \
                         y[j] * (alpha_j_new - self.alpha[j]) * K[i, j]
                    b2 = self.b - E_j - y[i] * (alpha_i_new - self.alpha[i]) * K[i, j] - \
                         y[j] * (alpha_j_new - self.alpha[j]) * K[j, j]
                    
                    if 0 < alpha_i_new < self.C:
                        self.b = b1
                    elif 0 < alpha_j_new < self.C:
                        self.b = b2
                    else:
                        self.b = (b1 + b2) / 2
                    
                    # Commit the pair update
                    self.alpha[i] = alpha_i_new
                    self.alpha[j] = alpha_j_new
            
            # Converged when the multipliers barely moved over a full pass
            if np.linalg.norm(self.alpha - alpha_prev) < self.tol:
                break
        
        # Recover the primal weight vector: w = sum_i alpha_i * y_i * x_i
        self.w = np.sum((self.alpha * y).reshape(-1, 1) * X, axis=0)
        
        # Samples with non-negligible alpha are the support vectors
        sv_indices = self.alpha > 1e-5
        self.support_vector_indices = np.where(sv_indices)[0]
        self.support_vectors = X[sv_indices]
        self.support_vector_labels = y[sv_indices]
        
        print(f"训练完成，支持向量数量: {len(self.support_vectors)}")
    
    def _select_j(self, i: int, n_samples: int, E_i: float, 
                  X: np.ndarray, y: np.ndarray, K: np.ndarray) -> int:
        """Pick the second working-set index j != i uniformly at random.

        Full SMO would instead pick j to maximize |E_i - E_j|; this
        simplified version relies on np.random's global state.
        """
        j = i
        while j == i:
            j = np.random.randint(0, n_samples)
        return j
    
    def predict(self, X: np.ndarray) -> np.ndarray:
        """Return class labels in {-1, +1} (0.0 exactly on the boundary).

        Raises:
            ValueError: If fit() has not been called yet.
        """
        if self.w is None:
            raise ValueError("模型未训练")
        
        return np.sign(X @ self.w + self.b)
    
    def decision_function(self, X: np.ndarray) -> np.ndarray:
        """Return the raw decision values X @ w + b.

        Raises:
            ValueError: If fit() has not been called yet.
        """
        if self.w is None:
            raise ValueError("模型未训练")
        
        return X @ self.w + self.b

class KernelSVM:
    """Kernel SVM.

    Training is delegated to sklearn's SVC as a reference solver; the
    decision function is then re-evaluated from scratch using the extracted
    dual coefficients, support vectors and intercept.
    """
    
    def __init__(self, kernel: str = 'rbf', C: float = 1.0, 
                 gamma: float = 1.0, degree: int = 3, coef0: float = 0.0):
        """Store the kernel configuration; no training happens here."""
        self.kernel = kernel
        self.C = C
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        
        # Learned state, filled in by fit()
        self.alpha = None
        self.b = None
        self.X_train = None
        self.y_train = None
        self.support_vectors = None
        self.support_vector_labels = None
        self.support_vector_alphas = None
    
    def _kernel_function(self, X1: np.ndarray, X2: np.ndarray) -> np.ndarray:
        """Evaluate the configured kernel between every row of X1 and X2."""
        if self.kernel == 'linear':
            return X1 @ X2.T
        if self.kernel == 'poly':
            return (self.gamma * X1 @ X2.T + self.coef0) ** self.degree
        if self.kernel == 'rbf':
            # Squared Euclidean distances via the expansion
            # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
            row_sq = np.sum(X1**2, axis=1, keepdims=True)
            col_sq = np.sum(X2**2, axis=1, keepdims=True)
            sq_dist = row_sq + col_sq.T - 2 * X1 @ X2.T
            return np.exp(-self.gamma * sq_dist)
        raise ValueError(f"未知核函数: {self.kernel}")
    
    def fit(self, X: np.ndarray, y: np.ndarray):
        """Fit by training an sklearn SVC and caching its dual solution."""
        # Work with labels in {-1, +1}
        signed = np.where(y <= 0, -1, 1)
        
        self.X_train = X
        self.y_train = signed
        
        # Build the reference solver for the configured kernel
        if self.kernel == 'rbf':
            reference = SVC(kernel='rbf', C=self.C, gamma=self.gamma)
        elif self.kernel == 'poly':
            reference = SVC(kernel='poly', C=self.C, gamma=self.gamma, 
                     degree=self.degree, coef0=self.coef0)
        else:
            reference = SVC(kernel='linear', C=self.C)
        
        reference.fit(X, signed)
        
        # dual_coef_[0] holds alpha_i * y_i; split it into magnitude (alpha)
        # and sign (the support-vector label)
        self.support_vectors = reference.support_vectors_
        self.support_vector_labels = signed[reference.support_]
        self.support_vector_alphas = np.abs(reference.dual_coef_[0])
        self.b = reference.intercept_[0]
        
        print(f"训练完成，支持向量数量: {len(self.support_vectors)}")
    
    def predict(self, X: np.ndarray) -> np.ndarray:
        """Return np.sign of the decision values (in {-1, 0, +1})."""
        return np.sign(self.decision_function(X))
    
    def decision_function(self, X: np.ndarray) -> np.ndarray:
        """Evaluate f(x) = sum_i alpha_i * y_i * K(x, sv_i) + b.

        Raises:
            ValueError: If fit() has not been called yet.
        """
        if self.support_vectors is None:
            raise ValueError("模型未训练")
        
        kernel_vals = self._kernel_function(X, self.support_vectors)
        coeffs = self.support_vector_alphas * self.support_vector_labels
        return np.sum(coeffs * kernel_vals, axis=1) + self.b

class SVMComparison:
    """Compare linear, polynomial and RBF kernel SVMs on three synthetic
    2-D datasets (linearly separable, concentric circles, two moons)."""
    
    def __init__(self):
        # Dataset name -> generator returning (X, y) with y in {0, 1}
        self.datasets = {
            'linear': self._generate_linear_data,
            'circles': self._generate_circle_data,
            'moons': self._generate_moon_data
        }
    
    def _generate_linear_data(self) -> tuple[np.ndarray, np.ndarray]:
        """Generate a (mostly) linearly separable 2-D dataset."""
        X, y = make_classification(n_samples=200, n_features=2, n_redundant=0,
                                 n_informative=2, n_clusters_per_class=1,
                                 random_state=42)
        return X, y
    
    def _generate_circle_data(self) -> tuple[np.ndarray, np.ndarray]:
        """Generate concentric-circles data (not linearly separable)."""
        X, y = make_circles(n_samples=200, noise=0.1, factor=0.3, random_state=42)
        return X, y
    
    def _generate_moon_data(self) -> tuple[np.ndarray, np.ndarray]:
        """Generate interleaved half-moons data (not linearly separable)."""
        X, y = make_moons(n_samples=200, noise=0.1, random_state=42)
        return X, y
    
    def compare_svm_methods(self):
        """Train each SVM configuration on each dataset and visualize results.

        Returns:
            dict: per-dataset entries holding the scaled data, the
            train/test split and per-model accuracy / support-vector counts.
        """
        print("=== SVM方法比较 ===")
        
        # SVM configurations under comparison
        svm_configs = {
            'Linear SVM': {'kernel': 'linear', 'C': 1.0},
            'Polynomial SVM': {'kernel': 'poly', 'C': 1.0, 'degree': 3, 'gamma': 1.0},
            'RBF SVM': {'kernel': 'rbf', 'C': 1.0, 'gamma': 1.0}
        }
        
        results = {}
        
        for dataset_name, data_generator in self.datasets.items():
            print(f"\n处理 {dataset_name} 数据集...")
            
            # Generate and standardize the data
            X, y = data_generator()
            scaler = StandardScaler()
            X_scaled = scaler.fit_transform(X)
            
            # Train/test split
            X_train, X_test, y_train, y_test = train_test_split(
                X_scaled, y, test_size=0.3, random_state=42
            )
            
            # BUG FIX: KernelSVM.predict returns labels in {-1, +1} while the
            # generators produce y in {0, 1}. Compare against signed labels;
            # the original comparison only ever credited the positive class.
            y_train_signed = np.where(y_train <= 0, -1, 1)
            y_test_signed = np.where(y_test <= 0, -1, 1)
            
            dataset_results = {}
            
            for svm_name, config in svm_configs.items():
                print(f"  训练 {svm_name}...")
                
                # Train this SVM configuration
                svm = KernelSVM(**config)
                svm.fit(X_train, y_train)
                
                # Predict (labels in {-1, +1})
                train_pred = svm.predict(X_train)
                test_pred = svm.predict(X_test)
                
                # Accuracy against the signed labels
                train_acc = np.mean(train_pred == y_train_signed)
                test_acc = np.mean(test_pred == y_test_signed)
                
                dataset_results[svm_name] = {
                    'svm': svm,
                    'train_acc': train_acc,
                    'test_acc': test_acc,
                    'n_support_vectors': len(svm.support_vectors)
                }
            
            results[dataset_name] = {
                'data': (X_scaled, y),
                'train_test_split': (X_train, X_test, y_train, y_test),
                'results': dataset_results
            }
        
        self.visualize_svm_comparison(results)
        return results
    
    def visualize_svm_comparison(self, results: dict):
        """Plot each dataset, the three decision boundaries and the
        support vectors (one row per dataset, four columns)."""
        fig, axes = plt.subplots(3, 4, figsize=(20, 15))
        
        dataset_names = list(results.keys())
        svm_names = ['Linear SVM', 'Polynomial SVM', 'RBF SVM']
        
        for i, dataset_name in enumerate(dataset_names):
            data_info = results[dataset_name]
            X, y = data_info['data']
            X_train, X_test, y_train, y_test = data_info['train_test_split']
            
            # Column 0: the raw (scaled) data
            ax_data = axes[i, 0]
            scatter = ax_data.scatter(X[:, 0], X[:, 1], c=y, cmap='viridis', alpha=0.7)
            ax_data.set_title(f'{dataset_name.title()} 数据集')
            ax_data.set_xlabel('特征 1')
            ax_data.set_ylabel('特征 2')
            plt.colorbar(scatter, ax=ax_data)
            
            # Columns 1-3: decision boundary of each SVM
            for j, svm_name in enumerate(svm_names):
                ax = axes[i, j + 1]
                
                svm_result = data_info['results'][svm_name]
                svm = svm_result['svm']
                
                # Evaluation grid covering the data with a 1-unit margin
                h = 0.02
                x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
                y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
                xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                                   np.arange(y_min, y_max, h))
                
                # Decision values on the grid
                grid_points = np.c_[xx.ravel(), yy.ravel()]
                Z = svm.decision_function(grid_points)
                Z = Z.reshape(xx.shape)
                
                # Filled decision values plus the f(x)=0 boundary
                ax.contourf(xx, yy, Z, levels=50, alpha=0.8, cmap='RdYlBu')
                ax.contour(xx, yy, Z, levels=[0], colors='black', linewidths=2)
                
                # Training points
                ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, 
                          cmap='viridis', alpha=0.7, s=50, edgecolors='black')
                
                # Highlight the support vectors
                if hasattr(svm, 'support_vectors') and svm.support_vectors is not None:
                    ax.scatter(svm.support_vectors[:, 0], svm.support_vectors[:, 1],
                              s=100, facecolors='none', edgecolors='red', linewidths=2)
                
                ax.set_title(f'{svm_name}\n测试准确率: {svm_result["test_acc"]:.3f}\n'
                           f'支持向量: {svm_result["n_support_vectors"]}')
                ax.set_xlabel('特征 1')
                ax.set_ylabel('特征 2')
        
        plt.tight_layout()
        plt.show()
        
        # Follow-up summary charts
        self.plot_performance_comparison(results)
    
    def plot_performance_comparison(self, results: dict):
        """Plot summary bar/line charts: test accuracy, support-vector
        counts, and train-vs-test accuracy per dataset."""
        fig, axes = plt.subplots(1, 3, figsize=(18, 5))
        
        dataset_names = list(results.keys())
        svm_names = ['Linear SVM', 'Polynomial SVM', 'RBF SVM']
        
        # 1. Test accuracy, grouped by dataset
        ax1 = axes[0]
        
        test_accs = []
        for dataset_name in dataset_names:
            dataset_accs = []
            for svm_name in svm_names:
                acc = results[dataset_name]['results'][svm_name]['test_acc']
                dataset_accs.append(acc)
            test_accs.append(dataset_accs)
        
        test_accs = np.array(test_accs)
        
        x = np.arange(len(dataset_names))
        width = 0.25
        
        for i, svm_name in enumerate(svm_names):
            ax1.bar(x + i * width, test_accs[:, i], width, label=svm_name, alpha=0.8)
        
        ax1.set_xlabel('数据集')
        ax1.set_ylabel('测试准确率')
        ax1.set_title('测试准确率比较')
        ax1.set_xticks(x + width)
        ax1.set_xticklabels([name.title() for name in dataset_names])
        ax1.legend()
        ax1.grid(True, alpha=0.3)
        
        # 2. Support-vector counts, grouped by dataset
        ax2 = axes[1]
        
        n_svs = []
        for dataset_name in dataset_names:
            dataset_svs = []
            for svm_name in svm_names:
                n_sv = results[dataset_name]['results'][svm_name]['n_support_vectors']
                dataset_svs.append(n_sv)
            n_svs.append(dataset_svs)
        
        n_svs = np.array(n_svs)
        
        for i, svm_name in enumerate(svm_names):
            ax2.bar(x + i * width, n_svs[:, i], width, label=svm_name, alpha=0.8)
        
        ax2.set_xlabel('数据集')
        ax2.set_ylabel('支持向量数量')
        ax2.set_title('支持向量数量比较')
        ax2.set_xticks(x + width)
        ax2.set_xticklabels([name.title() for name in dataset_names])
        ax2.legend()
        ax2.grid(True, alpha=0.3)
        
        # 3. Train vs. test accuracy (over/under-fitting view)
        ax3 = axes[2]
        
        for i, dataset_name in enumerate(dataset_names):
            train_accs = []
            test_accs = []
            
            for svm_name in svm_names:
                result = results[dataset_name]['results'][svm_name]
                train_accs.append(result['train_acc'])
                test_accs.append(result['test_acc'])
            
            ax3.plot(train_accs, test_accs, 'o-', label=dataset_name.title(), 
                    markersize=8, linewidth=2)
        
        # Diagonal = identical train/test accuracy
        ax3.plot([0.5, 1.0], [0.5, 1.0], 'k--', alpha=0.5, label='完美拟合')
        
        ax3.set_xlabel('训练准确率')
        ax3.set_ylabel('测试准确率')
        ax3.set_title('训练 vs 测试准确率')
        ax3.legend()
        ax3.grid(True, alpha=0.3)
        ax3.set_xlim(0.5, 1.0)
        ax3.set_ylim(0.5, 1.0)
        
        plt.tight_layout()
        plt.show()

def demonstrate_svm():
    """Run the full SVM demo: train a LinearSVM on separable data, plot its
    margin and support vectors, then run the kernel-SVM comparison suite."""
    print("=== 支持向量机实现与比较 ===\n")
    
    # 1. Linear SVM demo
    print("1. 线性SVM演示")
    
    # Generate a 2-D, (mostly) linearly separable dataset
    X, y = make_classification(n_samples=100, n_features=2, n_redundant=0,
                             n_informative=2, n_clusters_per_class=1,
                             random_state=42)
    
    # Standardize features (SVMs are scale-sensitive)
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    
    # Train the from-scratch linear SVM
    linear_svm = LinearSVM(C=1.0)
    linear_svm.fit(X_scaled, y)
    
    # Visualize: data, support vectors and the margin
    plt.figure(figsize=(10, 4))
    
    plt.subplot(1, 2, 1)
    plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=y, cmap='viridis', alpha=0.7)
    plt.scatter(linear_svm.support_vectors[:, 0], linear_svm.support_vectors[:, 1],
               s=100, facecolors='none', edgecolors='red', linewidths=2, label='支持向量')
    
    # Decision boundary plus the +/-1 margin lines on a coarse grid
    ax = plt.gca()
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    
    xx = np.linspace(xlim[0], xlim[1], 30)
    yy = np.linspace(ylim[0], ylim[1], 30)
    YY, XX = np.meshgrid(yy, xx)
    xy = np.vstack([XX.ravel(), YY.ravel()]).T
    Z = linear_svm.decision_function(xy).reshape(XX.shape)
    
    # Solid line: f(x)=0 boundary; dashed lines: the f(x)=+/-1 margins
    ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5,
              linestyles=['--', '-', '--'])
    
    plt.title('线性SVM')
    plt.xlabel('特征 1')
    plt.ylabel('特征 2')
    plt.legend()
    
    # 2. Kernel SVM comparison across datasets
    print("\n2. SVM方法比较")
    comparison = SVMComparison()
    comparison_results = comparison.compare_svm_methods()
    
    # 3. Summary takeaways
    print("\n=== SVM总结 ===")
    print("1. 线性SVM适用于线性可分数据")
    print("2. 核SVM可以处理非线性问题")
    print("3. RBF核通常是好的默认选择")
    print("4. 支持向量决定了模型的复杂度")
    print("5. C参数控制间隔和错误的权衡")
    print("6. SVM在高维数据上表现良好")

# Entry point: run the full demonstration when executed as a script.
if __name__ == "__main__":
    demonstrate_svm()