import numpy as np
from sklearn.discriminant_analysis import StandardScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

"""
用了SOM
SMO算法通过将原始的优化问题分解为多个子问题来求解，每次仅优化两个拉格朗日乘子α1和α2，其余乘子保持不变。
"""
class LinearSVM:
    """
    Linear SVM trained with a simplified SMO (Sequential Minimal Optimization).

    SMO decomposes the dual QP into two-variable subproblems: each step
    optimizes one pair of Lagrange multipliers (alpha_i, alpha_j) while all
    other multipliers are held fixed.

    Attributes:
        alphas: Lagrange multipliers, shape (n_samples,).
        w: weight vector of the separating hyperplane, shape (n_features,).
        b: intercept term.
        C: regularization parameter (box constraint 0 <= alpha <= C).
        tol: tolerance used when checking KKT violations.
        max_passes: number of consecutive full sweeps without any alpha
            update required before training stops.
        X_train, y_train: training data cached so the decision function can
            be evaluated from the alphas while training (before w exists).
    """
    def __init__(self, C=1.0, tol=1e-3, max_passes=5):
        self.alphas = None
        self.w = None
        self.b = 0.0
        self.C = C
        self.tol = tol
        self.max_passes = max_passes
        self.X_train = None
        self.y_train = None

    def fit(self, X, y):
        """Train the SVM.

        Args:
            X: array of shape (n_samples, n_features).
            y: labels in {0, 1} or {-1, 1}; 0 is mapped to -1 internally.
        Returns:
            self, to allow fluent use.
        """
        n_samples, n_features = X.shape
        # Map {0, 1} labels to the {-1, +1} convention the SVM dual requires.
        y = y.copy()
        y[y == 0] = -1
        # Cache training data for the alpha-based decision function.
        self.X_train = X.copy()
        self.y_train = y.copy()
        # Reset all trained state. Bug fix: w must be cleared here, otherwise
        # refitting a trained model would use the stale w from the previous
        # fit inside _decision_function during training.
        self.alphas = np.zeros(n_samples)
        self.b = 0.0
        self.w = None
        E = np.zeros(n_samples)  # error cache: E[i] = f(x_i) - y_i

        # Bug fix: the original ran a fixed number of sweeps and ignored
        # `passes`. Simplified SMO stops only after `max_passes` consecutive
        # sweeps make no update. A hard sweep cap guards against pathological
        # non-convergence caused by the random second-alpha choice.
        passes = 0
        sweeps = 0
        max_sweeps = 10000
        while passes < self.max_passes and sweeps < max_sweeps:
            sweeps += 1
            alpha_changed = 0

            for i in range(n_samples):
                # Prediction error for sample i.
                E[i] = self._decision_function(X[i]) - y[i]

                # Optimize this pair only if alpha_i violates its KKT
                # condition (within tolerance).
                if ((y[i] * E[i] < -self.tol and self.alphas[i] < self.C) or
                        (y[i] * E[i] > self.tol and self.alphas[i] > 0)):
                    # Randomly pick the second multiplier.
                    j = self._select_second_alpha(i, n_samples)
                    E[j] = self._decision_function(X[j]) - y[j]
                    alpha_i_old = self.alphas[i].copy()
                    alpha_j_old = self.alphas[j].copy()
                    # Clipping bounds [L, H] keep 0 <= alpha <= C while
                    # preserving the equality constraint sum(alpha_k * y_k).
                    if y[i] != y[j]:
                        L = max(0, self.alphas[j] - self.alphas[i])
                        H = min(self.C, self.C + self.alphas[j] - self.alphas[i])
                    else:
                        L = max(0, self.alphas[i] + self.alphas[j] - self.C)
                        H = min(self.C, self.alphas[i] + self.alphas[j])

                    if L == H:
                        continue

                    # eta = 2*K_ij - K_ii - K_jj must be negative for the
                    # one-dimensional subproblem to have a proper maximum.
                    eta = 2 * X[i] @ X[j] - X[i] @ X[i] - X[j] @ X[j]
                    if eta >= 0:
                        continue

                    # Unconstrained optimum for alpha_j, clipped to [L, H].
                    self.alphas[j] -= y[j] * (E[i] - E[j]) / eta
                    self.alphas[j] = np.clip(self.alphas[j], L, H)

                    if abs(self.alphas[j] - alpha_j_old) < 1e-5:
                        continue  # negligible progress; skip the update
                    # alpha_i moves in the opposite direction to keep the
                    # equality constraint satisfied.
                    self.alphas[i] += y[i] * y[j] * (alpha_j_old - self.alphas[j])
                    # Recompute the threshold b from either updated multiplier.
                    b1 = (self.b - E[i] - y[i] * (self.alphas[i] - alpha_i_old) *
                          (X[i] @ X[i]) - y[j] * (self.alphas[j] - alpha_j_old) * (X[i] @ X[j]))
                    b2 = (self.b - E[j] - y[i] * (self.alphas[i] - alpha_i_old) *
                          (X[i] @ X[j]) - y[j] * (self.alphas[j] - alpha_j_old) * (X[j] @ X[j]))

                    # Prefer the b consistent with a non-bound (support
                    # vector) multiplier; otherwise average the two.
                    if 0 < self.alphas[i] < self.C:
                        self.b = b1
                    elif 0 < self.alphas[j] < self.C:
                        self.b = b2
                    else:
                        self.b = (b1 + b2) / 2
                    alpha_changed += 1

            if alpha_changed == 0:
                passes += 1
            else:
                passes = 0

        # w = sum_i alpha_i * y_i * x_i, vectorized instead of a Python loop.
        self.w = (self.alphas * y) @ X
        return self

    def _decision_function(self, x):
        """Return the raw decision value w^T x + b for a single sample x."""
        if self.w is not None:
            return self.w @ x + self.b
        # Before w is computed, evaluate from the dual expansion using only
        # samples with alpha > 0 (the current support vectors).
        if self.X_train is None or self.y_train is None:
            return 0
        result = 0
        for i in range(len(self.alphas)):
            if self.alphas[i] > 0:
                result += self.alphas[i] * self.y_train[i] * (self.X_train[i] @ x)
        return result + self.b

    def _select_second_alpha(self, i, n_samples):
        """Pick a random index j != i for the second multiplier."""
        j = i
        while j == i:
            j = np.random.randint(0, n_samples)
        return j

    def predict(self, X):
        """Predict class labels in {-1, +1}.

        Args:
            X: a single sample of shape (n_features,) or a batch of shape
               (n_samples, n_features).
        Returns:
            An int (single sample) or int array (batch) of sign values.
        """
        if len(X.shape) == 1:
            decision_value = self._decision_function(X)
        else:
            decision_value = np.array([self._decision_function(x) for x in X])
        return np.sign(decision_value).astype(int)

# Hand-written support vector machine demo
def main():
    """Train the hand-written SVM on the watermelon dataset and report results."""
    # Watermelon dataset: 6 ordinal-encoded features per sample.
    X = np.array([
        [0, 0, 0, 0, 0, 0],
        [1, 0, 1, 0, 0, 0],
        [1, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0],
        [2, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 1],
        [1, 1, 0, 1, 1, 1],
        [1, 1, 0, 0, 1, 0],
        [1, 1, 1, 1, 1, 0],
        [0, 2, 2, 0, 2, 1],
        [2, 2, 2, 2, 2, 0],
        [2, 0, 0, 2, 2, 1],
        [0, 1, 0, 1, 0, 0],
        [2, 1, 1, 1, 0, 0],
        [1, 1, 0, 0, 1, 1],
        [2, 0, 0, 2, 2, 0],
        [0, 0, 1, 1, 1, 0]
    ])
    # Standardize features to zero mean / unit variance.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    # Labels: 0 = bad melon, 1 = good melon.
    y = np.array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    # feature_names = ['色泽', '根蒂', '敲声', '纹理', '脐部', '触感']
    X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)

    # Train the hand-written SVM.
    svm = LinearSVM(C=1.0)
    svm.fit(X_train, y_train)

    # Predict and evaluate. Bug fix: predict() returns labels in {-1, +1}
    # while y_test is in {0, 1}; the original compared them directly, so
    # every correct negative prediction (-1 vs 0) counted as wrong.
    y_pred = svm.predict(X_test)
    y_test_pm = np.where(y_test == 0, -1, 1)
    accuracy = np.mean(y_pred == y_test_pm)
    print(f"测试集准确率: {accuracy:.2f}")
    print(f"支持向量数量: {np.sum(svm.alphas > 1e-5)}")
    print(f"权重向量w: {svm.w}")
    print(f"偏置项b: {svm.b:.4f}")

    # Visualize the training points using the first two features.
    plt.figure(figsize=(10, 6))

    # Bug fix: the original only attached a legend label when i == 0, so at
    # most one class ever appeared in the legend. Label each class exactly
    # once, on its first occurrence.
    labeled = set()
    for x, label in zip(X_train, y_train):
        if label == 1:
            plt.scatter(x[0], x[1], c='red', marker='o',
                        label='好瓜' if 1 not in labeled else "")
            labeled.add(1)
        else:
            plt.scatter(x[0], x[1], c='blue', marker='s',
                        label='坏瓜' if 0 not in labeled else "")
            labeled.add(0)

    # Bug fix: the figure was built but never displayed.
    plt.legend()
    plt.show()

if __name__ == "__main__":
    main()