import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt

class SimpleSVM:
    """Linear soft-margin SVM trained with the simplified SMO algorithm.

    Training labels may be given as {0, 1} (0 is internally remapped to -1).
    ``predict`` returns labels in {0, 1} so predictions compare directly
    against the labels passed to ``fit``.
    """

    def __init__(self, C=1.0, tol=0.001, max_passes=5, standardize=True):
        """
        Parameters
        ----------
        C : float
            Regularization parameter controlling the soft margin.
        tol : float
            Tolerance used when checking KKT-condition violations.
        max_passes : int
            Number of consecutive full passes with no alpha update
            required before training stops.
        standardize : bool
            Whether to standardize features (zero mean, unit variance)
            before training; the same transform is applied in ``predict``.
        """
        self.C = C
        self.tol = tol
        self.max_passes = max_passes
        self.standardize = standardize
        self.alphas = None   # Lagrange multipliers, one per training sample
        self.b = 0           # bias term
        self.w = None        # weight vector, computed after training
        self.X_train = None
        self.y_train = None
        self.scaler = None   # fitted StandardScaler, or None

    def fit(self, X, y):
        """Train the SVM via simplified SMO; returns ``self``.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
        y : ndarray of shape (n_samples,) with labels in {0, 1} or {-1, 1}
        """
        n_samples, n_features = X.shape
        y = y.copy()
        y[y == 0] = -1  # map {0,1} labels to {-1,+1}

        # BUG FIX: reset model state so a refit does not reuse a stale
        # weight vector inside _decision_function during training.
        self.w = None
        self.b = 0

        # Standardize features if requested; keep the fitted scaler so
        # predict() can apply the identical transform.
        if self.standardize:
            self.scaler = StandardScaler()
            X = self.scaler.fit_transform(X)
        else:
            self.scaler = None

        # Keep the (possibly transformed) training data for the kernel
        # expansion used by _decision_function before w is available.
        self.X_train = X.copy()
        self.y_train = y.copy()

        self.alphas = np.zeros(n_samples)
        E = np.zeros(n_samples)  # error cache E_i = f(x_i) - y_i

        passes = 0
        while passes < self.max_passes:
            alpha_changed = 0

            for i in range(n_samples):
                E[i] = self._decision_function(X[i]) - y[i]

                # Optimize alpha_i only if it violates the KKT conditions.
                if ((y[i] * E[i] < -self.tol and self.alphas[i] < self.C) or
                        (y[i] * E[i] > self.tol and self.alphas[i] > 0)):

                    # Pick the second multiplier at random (simplified SMO).
                    j = self._select_second_alpha(i, n_samples)
                    E[j] = self._decision_function(X[j]) - y[j]

                    alpha_i_old = self.alphas[i].copy()
                    alpha_j_old = self.alphas[j].copy()

                    # Box bounds L and H keep alpha_j inside [0, C] while
                    # respecting the equality constraint with alpha_i.
                    if y[i] != y[j]:
                        L = max(0, self.alphas[j] - self.alphas[i])
                        H = min(self.C, self.C + self.alphas[j] - self.alphas[i])
                    else:
                        L = max(0, self.alphas[i] + self.alphas[j] - self.C)
                        H = min(self.C, self.alphas[i] + self.alphas[j])

                    if L == H:
                        continue

                    # eta = 2*K(x_i,x_j) - K(x_i,x_i) - K(x_j,x_j); the
                    # analytic update is only valid when eta < 0.
                    eta = 2 * X[i] @ X[j] - X[i] @ X[i] - X[j] @ X[j]
                    if eta >= 0:
                        continue

                    # Update alpha_j along the unconstrained optimum, then clip.
                    self.alphas[j] -= y[j] * (E[i] - E[j]) / eta
                    self.alphas[j] = np.clip(self.alphas[j], L, H)

                    # Skip negligible updates.
                    if abs(self.alphas[j] - alpha_j_old) < 1e-5:
                        continue

                    # Update alpha_i to keep sum(alpha_i * y_i) unchanged.
                    self.alphas[i] += y[i] * y[j] * (alpha_j_old - self.alphas[j])

                    # Recompute the bias; prefer the estimate coming from an
                    # unbound multiplier (0 < alpha < C), else average.
                    b1 = (self.b - E[i] - y[i] * (self.alphas[i] - alpha_i_old) *
                          (X[i] @ X[i]) - y[j] * (self.alphas[j] - alpha_j_old) * (X[i] @ X[j]))
                    b2 = (self.b - E[j] - y[i] * (self.alphas[i] - alpha_i_old) *
                          (X[i] @ X[j]) - y[j] * (self.alphas[j] - alpha_j_old) * (X[j] @ X[j]))

                    if 0 < self.alphas[i] < self.C:
                        self.b = b1
                    elif 0 < self.alphas[j] < self.C:
                        self.b = b2
                    else:
                        self.b = (b1 + b2) / 2

                    alpha_changed += 1

            # Stop after max_passes consecutive sweeps with no update.
            passes = passes + 1 if alpha_changed == 0 else 0

        # w = sum_i alpha_i * y_i * x_i, vectorized.
        self.w = (self.alphas * y) @ X

        return self

    def _decision_function(self, x):
        """Decision value f(x) = w.x + b.

        Falls back to the support-vector kernel expansion while training,
        before ``w`` has been computed.
        """
        if self.w is not None:
            return self.w @ x + self.b
        if self.X_train is None or self.y_train is None:
            return 0
        result = 0
        for i in range(len(self.alphas)):
            if self.alphas[i] > 0:  # support vectors only
                result += self.alphas[i] * self.y_train[i] * (self.X_train[i] @ x)
        return result + self.b

    def _select_second_alpha(self, i, n_samples):
        """Return a random index j != i (simplified SMO heuristic)."""
        j = i
        while j == i:
            j = np.random.randint(0, n_samples)
        return j

    def predict(self, X):
        """Predict class labels for X (a single 1-D sample or a 2-D batch).

        Returns labels in {0, 1}, matching the label convention of ``fit``.
        """
        # Apply the same standardization used during training, if any.
        if self.scaler is not None:
            X = self.scaler.transform(X)

        if len(X.shape) == 1:
            decision_value = self._decision_function(X)
        else:
            decision_value = np.array([self._decision_function(x) for x in X])

        # BUG FIX: the original returned np.sign(...) in {-1, 0, +1}, which
        # can never match the {0, 1} labels given to fit(), so accuracy
        # computations against the original labels undercounted the
        # negative class. Map the decision sign back to {0, 1} instead.
        return (np.asarray(decision_value) > 0).astype(int)

# Data preparation: watermelon dataset, categorical features encoded as ints.
X = np.array([
    [0, 0, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0],
    [1, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0],
    [2, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 1, 1],
    [1, 1, 0, 1, 1, 1],
    [1, 1, 0, 0, 1, 0],
    [1, 1, 1, 1, 1, 0],
    [0, 2, 2, 0, 2, 1],
    [2, 2, 2, 2, 2, 0],
    [2, 0, 0, 2, 2, 1],
    [0, 1, 0, 1, 0, 0],
    [2, 1, 1, 1, 0, 0],
    [1, 1, 0, 0, 1, 1],
    [2, 0, 0, 2, 2, 0],
    [0, 0, 1, 1, 1, 0],
])

# First 8 samples are positive (good melons), remaining 9 are negative.
y = np.array([1] * 8 + [0] * 9)

# Hold out 20% of the samples for testing.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)

feature_mean = np.mean(X_train, axis=0)
feature_std = np.std(X_train, axis=0)

print("原始数据统计:")
print(f"训练集形状: {X_train.shape}")
print(f"测试集形状: {X_test.shape}")
print(f"特征均值: {feature_mean}")
print(f"特征标准差: {feature_std}")

# Train and evaluate an SVM with feature standardization enabled.
print("\n=== 使用标准化 ===")
svm_standardized = SimpleSVM(C=1.0, standardize=True).fit(X_train, y_train)

y_pred_std = svm_standardized.predict(X_test)
accuracy_std = np.mean(y_pred_std == y_test)
n_support_std = np.sum(svm_standardized.alphas > 1e-5)

print(f"测试集准确率: {accuracy_std:.2f}")
print(f"支持向量数量: {n_support_std}")
print(f"权重向量w: {svm_standardized.w}")
print(f"偏置项b: {svm_standardized.b:.4f}")

# Train and evaluate an SVM on the raw (unstandardized) features.
print("\n=== 不使用标准化 ===")
svm_raw = SimpleSVM(C=1.0, standardize=False).fit(X_train, y_train)

y_pred_raw = svm_raw.predict(X_test)
accuracy_raw = np.mean(y_pred_raw == y_test)
n_support_raw = np.sum(svm_raw.alphas > 1e-5)

print(f"测试集准确率: {accuracy_raw:.2f}")
print(f"支持向量数量: {n_support_raw}")
print(f"权重向量w: {svm_raw.w}")
print(f"偏置项b: {svm_raw.b:.4f}")

def _plot_class_scatter(points, labels, sv_indices, xlabel, ylabel, title):
    """Scatter the first two features of each sample and circle the support
    vectors; adds exactly one legend entry per class."""
    seen = set()
    for x, lab in zip(points, labels):
        if lab == 1:
            color, marker, name = 'red', 'o', '好瓜'
        else:
            color, marker, name = 'blue', 's', '坏瓜'
        # BUG FIX: the original only labelled the sample at index 0, so
        # whichever class sample 0 did NOT belong to never appeared in the
        # legend. Label each class the first time it occurs instead.
        plt.scatter(x[0], x[1], c=color, marker=marker, alpha=0.7,
                    label=name if name not in seen else "")
        seen.add(name)
    # Circle the support vectors.
    for i in sv_indices:
        plt.scatter(points[i, 0], points[i, 1], s=200, facecolors='none',
                    edgecolors='black', linewidth=2)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.legend()
    plt.grid(True, alpha=0.3)


# Visualize support vectors using the first two features.
plt.figure(figsize=(12, 5))

# Left panel: training data in standardized coordinates.
plt.subplot(1, 2, 1)
if svm_standardized.scaler is not None:
    X_train_std = svm_standardized.scaler.transform(X_train)
else:
    X_train_std = X_train
_plot_class_scatter(X_train_std, y_train,
                    np.where(svm_standardized.alphas > 1e-5)[0],
                    '特征1 (标准化后)', '特征2 (标准化后)',
                    'SVM分类结果 - 标准化数据')

# Right panel: training data in raw coordinates.
plt.subplot(1, 2, 2)
_plot_class_scatter(X_train, y_train,
                    np.where(svm_raw.alphas > 1e-5)[0],
                    '特征1 (原始)', '特征2 (原始)',
                    'SVM分类结果 - 原始数据')

plt.tight_layout()
plt.show()

# Summarize how standardization affected accuracy and the support-vector count.
print("\n=== 标准化效果比较 ===")
sv_count_std = np.sum(svm_standardized.alphas > 1e-5)
sv_count_raw = np.sum(svm_raw.alphas > 1e-5)
print(f"标准化准确率: {accuracy_std:.3f}")
print(f"原始数据准确率: {accuracy_raw:.3f}")
print(f"标准化支持向量数: {sv_count_std}")
print(f"原始数据支持向量数: {sv_count_raw}")