import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt

class SimpleSVM:
    """Linear soft-margin SVM trained with a simplified SMO algorithm.

    fit() expects binary labels in {0, 1}; they are mapped internally to
    {-1, +1}. predict() returns labels back in {0, 1} to match fit().
    Training uses np.random for the second-alpha choice, so seed NumPy's
    global RNG for reproducible runs.
    """

    def __init__(self, C=1.0, tol=0.001, max_passes=20):
        """
        C: regularization parameter (box constraint 0 <= alpha <= C).
        tol: tolerance used when checking KKT violations.
        max_passes: number of CONSECUTIVE full passes with no alpha update
            required before training stops (not a hard iteration cap).
        """
        self.C = C
        self.tol = tol
        self.max_passes = max_passes
        self.E = None        # error cache, E[i] = f(x_i) - y_i
        self.alphas = None   # Lagrange multipliers, one per training sample
        self.b = 0           # bias term
        self.w = None        # primal weight vector, computed after training
        self.X_train = None  # stored training data (for the dual decision fn)
        self.y_train = None  # stored training labels in {-1, +1}

    def fit(self, X, y):
        """Train on X of shape (n_samples, n_features), labels y in {0, 1}.

        Returns self.
        """
        n_samples, n_features = X.shape
        y = y.copy()
        y[y == 0] = -1  # map {0, 1} -> {-1, +1}

        # Keep the training set so the dual form of the decision function
        # can be evaluated while the alphas are still being optimized.
        self.X_train = X.copy()
        self.y_train = y.copy()

        # Fix: reset w, otherwise a refit would evaluate the decision
        # function with the stale primal weights of a previous fit().
        self.w = None
        self.alphas = np.zeros(n_samples)
        self.b = 0
        self.E = np.zeros(n_samples)  # error cache
        E = self.E

        passes = 0
        while passes < self.max_passes:
            alpha_changed = 0

            for i in range(n_samples):
                E[i] = self._decision_function(X[i]) - y[i]

                # Optimize only pairs where alpha_i violates the KKT
                # conditions within tolerance.
                if ((y[i] * E[i] < -self.tol and self.alphas[i] < self.C) or
                        (y[i] * E[i] > self.tol and self.alphas[i] > 0)):

                    # Simplified SMO: pick the second alpha at random.
                    j = self._select_second_alpha(i, n_samples)
                    E[j] = self._decision_function(X[j]) - y[j]

                    alpha_i_old = float(self.alphas[i])
                    alpha_j_old = float(self.alphas[j])

                    # Box-constraint clipping bounds [L, H] for alpha_j.
                    if y[i] != y[j]:
                        L = max(0, self.alphas[j] - self.alphas[i])
                        H = min(self.C, self.C + self.alphas[j] - self.alphas[i])
                    else:
                        L = max(0, self.alphas[i] + self.alphas[j] - self.C)
                        H = min(self.C, self.alphas[i] + self.alphas[j])
                    if L == H:
                        continue

                    # eta is the second derivative of the objective along the
                    # constraint line; it must be negative to make progress.
                    eta = 2.0 * X[i] @ X[j] - X[i] @ X[i] - X[j] @ X[j]
                    if eta >= 0:
                        continue

                    # Unconstrained optimum for alpha_j, clipped to [L, H].
                    self.alphas[j] -= y[j] * (E[i] - E[j]) / eta
                    self.alphas[j] = min(max(self.alphas[j], L), H)

                    # Skip the pair if alpha_j barely moved.
                    if abs(self.alphas[j] - alpha_j_old) < 1e-8:
                        continue

                    # alpha_i moves oppositely to keep sum(alpha*y) constant.
                    self.alphas[i] += y[i] * y[j] * (alpha_j_old - self.alphas[j])

                    # Recompute the bias from whichever multiplier is strictly
                    # inside (0, C); average b1/b2 when both are at a bound.
                    b1 = (self.b - E[i]
                          - y[i] * (self.alphas[i] - alpha_i_old) * (X[i] @ X[i])
                          - y[j] * (self.alphas[j] - alpha_j_old) * (X[i] @ X[j]))
                    b2 = (self.b - E[j]
                          - y[i] * (self.alphas[i] - alpha_i_old) * (X[i] @ X[j])
                          - y[j] * (self.alphas[j] - alpha_j_old) * (X[j] @ X[j]))
                    if 0 < self.alphas[i] < self.C:
                        self.b = b1
                    elif 0 < self.alphas[j] < self.C:
                        self.b = b2
                    else:
                        self.b = (b1 + b2) / 2.0

                    self._update_error_cache(X, y, i, j)
                    alpha_changed += 1

            # Stop only after max_passes consecutive sweeps with no update.
            passes = passes + 1 if alpha_changed == 0 else 0

        # Primal weight vector from the support vectors (alpha > 0).
        self.w = np.zeros(n_features)
        for i in range(n_samples):
            if self.alphas[i] > 0:
                self.w += self.alphas[i] * y[i] * X[i]
        self._compute_errors(X, y)
        return self

    def _decision_function(self, x):
        """Raw decision value f(x) = w.x + b.

        During training (w not yet computed) falls back to the dual form
        sum_i alpha_i * y_i * <x_i, x> + b over the support vectors.
        """
        if self.w is not None:
            return self.w @ x + self.b
        if self.X_train is None or self.y_train is None:
            return 0
        result = 0
        for i in range(len(self.alphas)):
            if self.alphas[i] > 0:  # support vectors only
                result += self.alphas[i] * self.y_train[i] * (self.X_train[i] @ x)
        return result + self.b

    def _select_second_alpha(self, i, n_samples):
        """Pick a random second index j != i (simplified SMO heuristic)."""
        j = i
        while j == i:
            j = np.random.randint(0, n_samples)
        return j

    def predict(self, X):
        """Predict labels in {0, 1} for one sample (1-D) or a batch (2-D).

        Fix: previously returned np.sign values in {-1, 0, 1}, which did
        not match the {0, 1} labels accepted by fit() and broke accuracy
        computations downstream. Decision >= 0 maps to 1, otherwise 0.
        """
        if X.ndim == 1:
            decision = self._decision_function(X)
        else:
            decision = np.array([self._decision_function(x) for x in X])
        return np.where(np.asarray(decision) >= 0, 1, 0)

    def _update_error_cache(self, X, y, i, j):
        """Refresh cached errors for i, j and all non-bound support vectors."""
        free = np.where((self.alphas > 1e-5) & (self.alphas < self.C - 1e-5))[0]
        self.E[i] = self._decision_function(X[i]) - y[i]
        self.E[j] = self._decision_function(X[j]) - y[j]
        for k in free:
            if k != i and k != j:
                self.E[k] = self._decision_function(X[k]) - y[k]

    def _compute_errors(self, X, y):
        """Recompute the full error cache (run once after training ends)."""
        for i in range(len(X)):
            self.E[i] = self._decision_function(X[i]) - y[i]
    


# Dataset: 17 watermelon samples, six ordinal-encoded features each.
X = np.array([
    [0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0], [1, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0], [2, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 1],
    [1, 1, 0, 1, 1, 1], [1, 1, 0, 0, 1, 0], [1, 1, 1, 1, 1, 0],
    [0, 2, 2, 0, 2, 1], [2, 2, 2, 2, 2, 0], [2, 0, 0, 2, 2, 1],
    [0, 1, 0, 1, 0, 0], [2, 1, 1, 1, 0, 0], [1, 1, 0, 0, 1, 1],
    [2, 0, 0, 2, 2, 0], [0, 0, 1, 1, 1, 0],
])

# The first eight samples are the positive class, the remaining nine negative.
y = np.array([1] * 8 + [0] * 9)

# Standardize features to zero mean / unit variance.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Hold out 20% of the samples for testing (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    X_scaled, y, test_size=0.2, random_state=42
)

# Train the SVM model.
svm = SimpleSVM(C=1.0, max_passes=500)
svm.fit(X_train, y_train)

# Predict and evaluate on the held-out set.
y_pred = svm.predict(X_test)
# Fix: predict() may yield labels as -1/+1 while y_test uses 0/1, so every
# negative-class prediction previously counted as wrong. Map positive
# predictions to 1 and non-positive to 0 before comparing (this is also a
# no-op if predict() already returns {0, 1}).
accuracy = np.mean(np.where(y_pred > 0, 1, 0) == y_test)

print(f"测试集准确率: {accuracy:.2f}")
print(f"支持向量数量: {np.sum(svm.alphas > 1e-5)}")
print(f"权重向量w: {svm.w}")
print(f"偏置项b: {svm.b:.4f}")

# Visualize the result using the first two (standardized) features.
plt.figure(figsize=(10, 6))

# Fix: the original gated both legend labels on `i == 0`, so only the very
# first training point got a legend entry and one class was always missing
# from the legend. Label each class exactly once instead.
labeled = set()
for x, label in zip(X_train, y_train):
    if label == 1:
        plt.scatter(x[0], x[1], c='red', marker='o',
                    label='好瓜' if 1 not in labeled else "")
        labeled.add(1)
    else:
        plt.scatter(x[0], x[1], c='blue', marker='s',
                    label='坏瓜' if 0 not in labeled else "")
        labeled.add(0)

# Circle the support vectors (alpha > 0).
support_vector_indices = np.where(svm.alphas > 1e-5)[0]
for i in support_vector_indices:
    plt.scatter(X_train[i, 0], X_train[i, 1], s=200, facecolors='none', edgecolors='black')

plt.xlabel('特征1 (标准化后)')
plt.ylabel('特征2 (标准化后)')
plt.title('SVM分类结果 - 西瓜数据集')
plt.legend()
plt.grid(True, alpha=0.3)
plt.show()