import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

# Generate a 2-D, two-class synthetic dataset: 100 samples, both features
# informative (no redundant ones), one cluster per class; random_state fixed
# for reproducibility.
X, y = make_classification(
    n_samples=100,
    n_features=2,
    n_redundant=0,
    n_informative=2,
    random_state=23,
    n_clusters_per_class=1
)

# Re-encode labels from {0, 1} to {-1, +1} — the convention hinge-loss SVMs expect.
y = np.where(y == 0, -1, 1)

# Hold out 30% of the samples as a test set (split seeded for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)


class SVM:
    """Linear soft-margin SVM trained with per-sample sub-gradient descent.

    Minimizes the regularized hinge loss
        lambda * ||w||^2 + mean_i max(0, 1 - y_i * (w . x_i + b))
    Labels must be encoded as -1 / +1 (see the dataset preparation above).
    """

    def __init__(self, learning_rate=0.001, lambda_param=0.01, n_iters=1000):
        self.lr = learning_rate
        self.lambda_param = lambda_param  # regularization strength (lambda)
        self.n_iters = n_iters            # number of full passes over the data
        self.w = None                     # weight vector, set by fit()
        self.b = None                     # bias term, set by fit()

    def fit(self, X, y):
        """Fit on X of shape (n_samples, n_features) and labels y in {-1, +1}.

        Returns self so calls can be chained.
        """
        # Accept lists or arrays; float weights require float samples.
        X = np.asarray(X, dtype=float)
        y = np.asarray(y)
        n_samples, n_features = X.shape

        self.w = np.zeros(n_features)
        self.b = 0.0

        for _ in range(self.n_iters):
            for idx, x_i in enumerate(X):
                # Margin condition y_i * (w . x_i + b) >= 1: the sample is
                # correctly classified and outside the margin, so its hinge
                # loss (and hinge sub-gradient) is zero.
                if y[idx] * (np.dot(x_i, self.w) + self.b) >= 1:
                    # Only the L2 regularizer contributes: grad_w = 2*lambda*w.
                    self.w -= self.lr * (2 * self.lambda_param * self.w)
                else:
                    # Hinge sub-gradient adds -y_i * x_i for w and -y_i for b.
                    self.w -= self.lr * (2 * self.lambda_param * self.w - y[idx] * x_i)
                    self.b -= self.lr * (-y[idx])
        return self

    def predict(self, X):
        """Return the predicted class label (-1.0 or +1.0) for each row of X."""
        linear_output = np.dot(X, self.w) + self.b
        labels = np.sign(linear_output)
        # np.sign returns 0 for a point exactly on the hyperplane; 0 is not a
        # valid class label (it would count as a miss in any accuracy check),
        # so map it to +1 while keeping the float dtype of np.sign.
        return np.where(labels == 0, 1.0, labels)


# Fit the hand-rolled SVM on the training split, then label the test split.
svm = SVM(learning_rate=0.01, lambda_param=0.01, n_iters=1000)
svm.fit(X_train, y_train)
predictions = svm.predict(X_test)

# Accuracy = fraction of test samples whose predicted label matches the truth.
accuracy = np.mean(predictions == y_test)
print(f"模型准确率: {accuracy:.2f}")

# Visualize the data, the decision boundary and the margin lines.
plt.figure(figsize=(10, 6))

# Training points, colored by their true label.
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train,
            cmap=plt.cm.Paired, edgecolors='k', label='训练数据')

# Test points drawn as squares, colored by the predicted label.
plt.scatter(X_test[:, 0], X_test[:, 1], c=predictions,
            cmap=plt.cm.Paired, marker='s', edgecolors='k', label='预测结果')

# Evaluate w . x + b on a 30x30 grid spanning the current axes limits.
ax = plt.gca()
x_lo, x_hi = ax.get_xlim()
y_lo, y_hi = ax.get_ylim()

grid_x = np.linspace(x_lo, x_hi, 30)
grid_y = np.linspace(y_lo, y_hi, 30)
YY, XX = np.meshgrid(grid_y, grid_x)
grid_points = np.column_stack([XX.ravel(), YY.ravel()])
Z = (grid_points @ svm.w + svm.b).reshape(XX.shape)

# Solid line: decision boundary (w . x + b = 0); dashed lines: margins (= +/-1).
ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5,
           linestyles=['--', '-', '--'])

# Approximately mark support vectors: training points whose distance to the
# hyperplane is close (atol=0.1) to the margin width 1/||w||.
w_norm = np.linalg.norm(svm.w)
margin = 1 / w_norm
distances = np.abs(X_train @ svm.w + svm.b) / w_norm
support_vectors = X_train[np.isclose(distances, margin, atol=0.1)]

if len(support_vectors):
    plt.scatter(support_vectors[:, 0], support_vectors[:, 1], s=100,
                linewidth=1, facecolors='none', edgecolors='k', label='支持向量')

# NOTE(review): these labels are Chinese; matplotlib needs a CJK-capable font
# configured to render them — confirm the runtime environment provides one.
plt.title('自定义SVM分类器的决策边界')
plt.xlabel('特征1')
plt.ylabel('特征2')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()