import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import matplotlib.pyplot as plt

class SimpleSVM:
    """Linear soft-margin SVM trained with a simplified SMO algorithm.

    Binary labels given as {0, 1} are mapped internally to {-1, +1}.
    Features are standardized (zero mean, unit variance) during fit, and
    the same transform is applied again in predict.

    Attributes:
        alphas: Lagrange multipliers, one per training sample (set by fit).
        b: bias term of the decision function.
        w: primal weight vector in standardized feature space (set by fit).
        X_train: standardized training features.
        y_train: training labels in {-1, +1}.
        scaler: fitted StandardScaler shared by fit and predict.
        E: error cache, E[i] = f(x_i) - y_i.
    """

    def __init__(self, C=1.0, tol=0.001, max_passes=20):
        """Configure the optimizer.

        Args:
            C: soft-margin penalty; bounds each alpha to [0, C].
            tol: numerical tolerance used in the KKT-violation test.
            max_passes: number of consecutive full sweeps with no alpha
                update required before training terminates.
        """
        self.C = C
        self.tol = tol
        self.max_passes = max_passes
        self.alphas = None
        self.b = 0
        self.w = None
        self.X_train = None
        self.y_train = None
        self.scaler = None
        self.E = None  # error cache

    def fit(self, X, y):
        """Train the model with simplified SMO and return self."""
        X = np.asarray(X, dtype=float)
        y = np.asarray(y).copy()
        n_samples, n_features = X.shape
        y[y == 0] = -1  # convert {0, 1} labels to {-1, +1}

        # Standardize features; the fitted scaler is reused by predict().
        self.scaler = StandardScaler()
        X = self.scaler.fit_transform(X)

        self.X_train = X.copy()
        self.y_train = y.copy()

        # Reset optimizer state. w MUST be cleared here: _decision_function
        # prefers w when it is set, so a stale w from a previous fit would
        # corrupt the error cache while retraining.
        self.alphas = np.zeros(n_samples)
        self.b = 0
        self.w = None
        # With all alphas at zero, f(x) = 0, hence E = f(x) - y = -y.
        self.E = -y.astype(float)

        passes = 0
        while passes < self.max_passes:
            alpha_changed = 0

            for i in range(n_samples):
                E_i = self.E[i]

                # KKT check: sample i can still improve the objective if it
                # violates its margin with alpha_i < C, or satisfies the
                # margin strictly while alpha_i > 0.
                violates_kkt = (
                    (y[i] * E_i < -self.tol and self.alphas[i] < self.C) or
                    (y[i] * E_i > self.tol and self.alphas[i] > 0)
                )
                if not violates_kkt:
                    continue

                # Choose the second multiplier heuristically.
                j = self._select_second_alpha(i, n_samples)
                E_j = self.E[j]

                alpha_i_old = self.alphas[i]
                alpha_j_old = self.alphas[j]

                # Feasible interval [L, H] for alpha_j under the box and
                # equality constraints.
                if y[i] != y[j]:
                    L = max(0, self.alphas[j] - self.alphas[i])
                    H = min(self.C, self.C + self.alphas[j] - self.alphas[i])
                else:
                    L = max(0, self.alphas[i] + self.alphas[j] - self.C)
                    H = min(self.C, self.alphas[i] + self.alphas[j])
                if L == H:
                    continue

                # Second derivative of the objective along the constraint
                # line; must be negative for an interior maximum.
                eta = (2.0 * np.dot(X[i], X[j])
                       - np.dot(X[i], X[i]) - np.dot(X[j], X[j]))
                if eta >= 0:
                    continue

                # Unconstrained optimum for alpha_j, clipped to [L, H].
                self.alphas[j] -= y[j] * (E_i - E_j) / eta
                self.alphas[j] = min(H, max(L, self.alphas[j]))

                # Skip numerically insignificant steps.
                if abs(self.alphas[j] - alpha_j_old) < 1e-8:
                    continue

                # Move alpha_i to keep sum(alpha * y) constant.
                self.alphas[i] += y[i] * y[j] * (alpha_j_old - self.alphas[j])

                # Recompute the bias so the updated multipliers satisfy the
                # KKT conditions on i and/or j (Platt's b1/b2 rule).
                d_i = y[i] * (self.alphas[i] - alpha_i_old)
                d_j = y[j] * (self.alphas[j] - alpha_j_old)
                b1 = (self.b - E_i - d_i * np.dot(X[i], X[i])
                      - d_j * np.dot(X[i], X[j]))
                b2 = (self.b - E_j - d_i * np.dot(X[i], X[j])
                      - d_j * np.dot(X[j], X[j]))
                if 0 < self.alphas[i] < self.C:
                    self.b = b1
                elif 0 < self.alphas[j] < self.C:
                    self.b = b2
                else:
                    self.b = (b1 + b2) / 2.0

                self._update_error_cache(X, y, i, j)
                alpha_changed += 1

            # Only consecutive sweeps with no movement count toward stopping.
            passes = passes + 1 if alpha_changed == 0 else 0

        # Recover the primal weight vector w = sum_i alpha_i * y_i * x_i
        # (terms with alpha_i == 0 contribute nothing).
        self.w = ((self.alphas * y)[:, None] * X).sum(axis=0)

        # Refresh the full error cache with the final model.
        self._compute_errors(X, y)
        return self

    def _update_error_cache(self, X, y, i, j):
        """Refresh cached errors for i, j and all non-bound support vectors."""
        self.E[i] = self._decision_function(X[i]) - y[i]
        self.E[j] = self._decision_function(X[j]) - y[j]
        # Non-bound samples (0 < alpha < C) are the ones the working-set
        # heuristic keeps selecting, so their cached errors must stay fresh.
        non_bound = np.where((self.alphas > 1e-5)
                             & (self.alphas < self.C - 1e-5))[0]
        for k in non_bound:
            if k != i and k != j:
                self.E[k] = self._decision_function(X[k]) - y[k]

    def _compute_errors(self, X, y):
        """Recompute E[k] = f(x_k) - y_k for every sample."""
        for k in range(len(X)):
            self.E[k] = self._decision_function(X[k]) - y[k]

    def _select_second_alpha(self, i, n_samples):
        """Pick the second working-set index j != i.

        Prefers the non-bound sample whose cached error differs most from
        E[i] (maximizing the expected step size); falls back to a uniformly
        random index when no suitable non-bound sample exists.
        """
        non_bound = np.where((self.alphas > 1e-5)
                             & (self.alphas < self.C - 1e-5))[0]
        if len(non_bound) > 1:
            best_j = -1
            best_gap = -1
            for idx in non_bound:
                if idx == i:
                    continue
                gap = abs(self.E[i] - self.E[idx])
                if gap > best_gap:
                    best_gap = gap
                    best_j = idx
            if best_j != -1:
                return best_j
        # Fallback: random j != i.
        j = i
        while j == i:
            j = np.random.randint(0, n_samples)
        return j

    def _decision_function(self, x):
        """Evaluate f(x) = w.x + b (x must already be standardized)."""
        if self.w is not None:
            return np.dot(self.w, x) + self.b
        # During training w is not yet available; use the dual form over
        # samples with non-zero multipliers.
        result = 0.0
        for k in range(len(self.alphas)):
            if self.alphas[k] > 1e-5:
                result += (self.alphas[k] * self.y_train[k]
                           * np.dot(self.X_train[k], x))
        return result + self.b

    def predict(self, X):
        """Predict class labels in {0, 1}.

        Accepts a single sample (1-D) or a batch (2-D) and always returns a
        1-D integer array. The input is promoted to 2-D before scaling:
        StandardScaler.transform rejects 1-D data, and a scalar prediction
        would not support the -1 -> 0 item assignment below.
        """
        X = np.atleast_2d(np.asarray(X, dtype=float))
        if self.scaler is not None:
            X = self.scaler.transform(X)
        decision = np.array([self._decision_function(x) for x in X])
        predictions = np.sign(decision).astype(int)
        predictions[predictions == -1] = 0  # map -1 back to class 0
        return predictions

# ---- Dataset -------------------------------------------------------------
# 17 samples, 6 ordinal features each, binary target.
X = np.array([
    [0, 0, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0],
    [1, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0],
    [2, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 1, 1],
    [1, 1, 0, 1, 1, 1],
    [1, 1, 0, 0, 1, 0],
    [1, 1, 1, 1, 1, 0],
    [0, 2, 2, 0, 2, 1],
    [2, 2, 2, 2, 2, 0],
    [2, 0, 0, 2, 2, 1],
    [0, 1, 0, 1, 0, 0],
    [2, 1, 1, 1, 0, 0],
    [1, 1, 0, 0, 1, 1],
    [2, 0, 0, 2, 2, 0],
    [0, 0, 1, 1, 1, 0],
])

y = np.array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])

# Hold out 20% for testing; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)

print("数据信息:")
print(f"训练集: {X_train.shape}, 测试集: {X_test.shape}")
print(f"训练标签分布: {np.bincount(y_train)}")
print(f"测试标签分布: {np.bincount(y_test)}")

# ---- Custom SVM ----------------------------------------------------------
print("\n=== 改进的自定义SVM ===")
svm_custom = SimpleSVM(C=1.0, max_passes=50)
svm_custom.fit(X_train, y_train)

y_pred_custom = svm_custom.predict(X_test)
accuracy_custom = (y_pred_custom == y_test).mean()

# Support vectors are the samples with non-negligible multipliers.
sv_mask = svm_custom.alphas > 1e-5
print(f"测试集准确率: {accuracy_custom:.3f}")
print(f"支持向量数量: {sv_mask.sum()}")
print(f"支持向量alpha值: {svm_custom.alphas[sv_mask]}")
print(f"偏置项b: {svm_custom.b:.6f}")

# ---- Reference: sklearn's linear SVC -------------------------------------
print("\n=== sklearn的SVM ===")
svm_sklearn = SVC(kernel='linear', C=1.0)
svm_sklearn.fit(X_train, y_train)
y_pred_sklearn = svm_sklearn.predict(X_test)
accuracy_sklearn = (y_pred_sklearn == y_test).mean()

print(f"测试集准确率: {accuracy_sklearn:.3f}")
print(f"支持向量数量: {svm_sklearn.support_vectors_.shape[0]}")
print(f"支持向量索引: {svm_sklearn.support_}")

# ---- Side-by-side prediction comparison ----------------------------------
print("\n=== 详细比较 ===")
print(f"真实标签:      {y_test}")
print(f"自定义SVM预测: {y_pred_custom}")
print(f"sklearn预测:   {y_pred_sklearn}")
print(f"自定义SVM正确: {y_pred_custom == y_test}")
print(f"sklearn正确:   {y_pred_sklearn == y_test}")

# ---- Visualization -------------------------------------------------------
plt.figure(figsize=(15, 5))

# Panel 1: custom SVM -- training points in standardized space (first two
# features only), support vectors circled in black.
plt.subplot(1, 3, 1)
X_train_scaled = svm_custom.scaler.transform(X_train)
for x, label in zip(X_train_scaled, y_train):
    color = 'red' if label == 1 else 'blue'
    marker = 'o' if label == 1 else 's'
    plt.scatter(x[0], x[1], c=color, marker=marker, alpha=0.7)

support_vector_indices = np.where(svm_custom.alphas > 1e-5)[0]
for i in support_vector_indices:
    plt.scatter(X_train_scaled[i, 0], X_train_scaled[i, 1], s=200,
                facecolors='none', edgecolors='black', linewidth=2)

plt.xlabel('特征1')
plt.ylabel('特征2')
plt.title(f'自定义SVM (准确率: {accuracy_custom:.3f})')
plt.grid(True, alpha=0.3)

# Panel 2: sklearn SVM -- training points in original feature space.
plt.subplot(1, 3, 2)
for x, label in zip(X_train, y_train):
    color = 'red' if label == 1 else 'blue'
    marker = 'o' if label == 1 else 's'
    plt.scatter(x[0], x[1], c=color, marker=marker, alpha=0.7)

for i in svm_sklearn.support_:
    plt.scatter(X_train[i, 0], X_train[i, 1], s=200,
                facecolors='none', edgecolors='black', linewidth=2)

plt.xlabel('特征1')
plt.ylabel('特征2')
plt.title(f'sklearn SVM (准确率: {accuracy_sklearn:.3f})')
plt.grid(True, alpha=0.3)

# Panel 3: decision boundary of the custom SVM in standardized space.
plt.subplot(1, 3, 3)
x_min, x_max = X_train_scaled[:, 0].min() - 0.5, X_train_scaled[:, 0].max() + 0.5
y_min, y_max = X_train_scaled[:, 1].min() - 0.5, X_train_scaled[:, 1].max() + 0.5
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                     np.arange(y_min, y_max, 0.02))

# BUG FIX: the grid is built in *standardized* coordinates, so it must NOT
# go through predict(), which would apply the scaler a second time and draw
# the boundary in the wrong place. Instead evaluate the learned decision
# function w.x + b directly, holding the remaining four features at 0
# (i.e. at their standardized mean).
grid = np.c_[xx.ravel(), yy.ravel(), np.zeros((xx.size, 4))]
decision = grid @ svm_custom.w + svm_custom.b
# Same label mapping as predict(): strictly positive side -> class 1.
Z_custom = np.where(decision > 0, 1, 0).reshape(xx.shape)

plt.contourf(xx, yy, Z_custom, alpha=0.3, cmap=plt.cm.RdBu)
for x, label in zip(X_train_scaled, y_train):
    color = 'red' if label == 1 else 'blue'
    marker = 'o' if label == 1 else 's'
    plt.scatter(x[0], x[1], c=color, marker=marker, alpha=0.7)

plt.xlabel('特征1')
plt.ylabel('特征2')
plt.title('决策边界')
plt.grid(True, alpha=0.3)

plt.tight_layout()
plt.show()