import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import random


class SVM:
    """Support Vector Machine classifier trained with a simplified SMO algorithm.

    Supports 'linear', 'poly' and 'rbf' kernels. Labels are internally mapped
    to {-1, +1}: any label <= 0 is treated as the negative class.
    """

    def __init__(self, max_iter=100, kernel_type='linear', C=1.0, epsilon=0.001,
                 degree=3, gamma=0.5):
        """Initialize the SVM model.

        :param max_iter: number of consecutive full passes with no alpha
            update before SMO stops
        :param kernel_type: kernel name, one of 'linear', 'poly', 'rbf'
        :param C: penalty (box-constraint) parameter
        :param epsilon: KKT-violation tolerance; also used as the threshold
            above which an alpha is kept as a support vector
        :param degree: degree of the polynomial kernel (default 3)
        :param gamma: bandwidth of the RBF kernel (default 0.5)
        """
        self.kernels = {
            'linear': self.kernel_linear,
            'poly': self.kernel_poly,
            'rbf': self.kernel_rbf,
        }
        self.max_iter = max_iter
        self.kernel_type = kernel_type
        self.C = C
        self.epsilon = epsilon
        self.degree = degree
        self.gamma = gamma
        self.b = 0
        self.alpha = None
        self.support_vectors = None
        self.support_vector_labels = None
        self.support_vector_indices = None
        self.w = None  # explicit weight vector, computed only for the linear kernel

    def kernel_linear(self, x1, x2):
        """Linear kernel: <x1, x2>."""
        return np.dot(x1, x2)

    def kernel_poly(self, x1, x2, p=None):
        """Polynomial kernel: (<x1, x2> + 1) ** p.

        :param p: polynomial degree; defaults to ``self.degree`` (3 unless
            overridden in the constructor), matching the previous hard-coded
            default.
        """
        p = self.degree if p is None else p
        return (np.dot(x1, x2) + 1) ** p

    def kernel_rbf(self, x1, x2, gamma=None):
        """Gaussian RBF kernel: exp(-gamma * ||x1 - x2||^2).

        :param gamma: kernel bandwidth; defaults to ``self.gamma`` (0.5
            unless overridden in the constructor), matching the previous
            hard-coded default.
        """
        gamma = self.gamma if gamma is None else gamma
        return np.exp(-gamma * np.linalg.norm(x1 - x2) ** 2)

    def fit(self, X, y):
        """Train the SVM with the simplified SMO algorithm.

        :param X: feature matrix of shape (n_samples, n_features)
        :param y: label vector; labels <= 0 are mapped to -1, others to +1
        :return: self, to allow call chaining
        :raises ValueError: if ``kernel_type`` is not a known kernel name
        """
        n_samples, n_features = X.shape

        # Map labels to the canonical {-1, +1} encoding used by the dual problem.
        y = np.where(y <= 0, -1, 1)

        # Initialize the dual variables and the threshold.
        self.alpha = np.zeros(n_samples)
        self.b = 0

        if self.kernel_type not in self.kernels:
            # Fail loudly instead of leaking a KeyError from the dict lookup.
            raise ValueError(
                "unknown kernel_type: %r (expected one of %s)"
                % (self.kernel_type, sorted(self.kernels)))
        kernel = self.kernels[self.kernel_type]

        # Precompute the Gram matrix once; every entry is reused many times
        # in the SMO loop. The kernel is symmetric, so only the upper
        # triangle is evaluated and then mirrored.
        K = np.zeros((n_samples, n_samples))
        for i in range(n_samples):
            for j in range(i, n_samples):
                K[i, j] = kernel(X[i], X[j])
                K[j, i] = K[i, j]

        # Simplified SMO: sweep over all samples until `max_iter` consecutive
        # full passes produce no alpha update.
        iter_count = 0
        while iter_count < self.max_iter:
            alpha_changed = 0  # number of alpha pairs updated during this pass

            for i in range(n_samples):
                # Prediction error for sample i under the current model.
                Ei = self._calculate_error(i, X, y, K)

                # Select i only if it violates the KKT conditions by more
                # than epsilon (the margin is wrong and alpha has room to move).
                if ((y[i] * Ei < -self.epsilon) and (self.alpha[i] < self.C)) or \
                   ((y[i] * Ei > self.epsilon) and (self.alpha[i] > 0)):

                    # Choose the second index j uniformly at random, j != i.
                    j = i
                    while j == i:
                        j = random.randint(0, n_samples - 1)

                    Ej = self._calculate_error(j, X, y, K)

                    # Remember the old values; the updates below need them.
                    alpha_i_old = self.alpha[i]
                    alpha_j_old = self.alpha[j]

                    # Box bounds [L, H] that keep alpha_i*y_i + alpha_j*y_j
                    # constant while both alphas stay in [0, C].
                    if y[i] != y[j]:
                        L = max(0, self.alpha[j] - self.alpha[i])
                        H = min(self.C, self.C + self.alpha[j] - self.alpha[i])
                    else:
                        L = max(0, self.alpha[i] + self.alpha[j] - self.C)
                        H = min(self.C, self.alpha[i] + self.alpha[j])

                    if L == H:
                        continue

                    # eta is the second derivative of the dual objective along
                    # the constraint line; it must be negative for the Newton
                    # step below to be a maximizer.
                    eta = 2 * K[i, j] - K[i, i] - K[j, j]
                    if eta >= 0:
                        continue

                    # Newton step for alpha_j, clipped into [L, H].
                    self.alpha[j] = alpha_j_old - (y[j] * (Ei - Ej)) / eta
                    self.alpha[j] = max(L, min(H, self.alpha[j]))

                    # Ignore negligible moves.
                    if abs(self.alpha[j] - alpha_j_old) < 1e-5:
                        continue

                    # Move alpha_i by the opposite amount to preserve the
                    # equality constraint sum(alpha * y) = const.
                    self.alpha[i] = alpha_i_old + y[i] * \
                        y[j] * (alpha_j_old - self.alpha[j])

                    # Recompute the threshold b. Whichever updated alpha lies
                    # strictly inside (0, C) has a tight KKT condition, so its
                    # b estimate is exact; otherwise average the two.
                    b1 = self.b - Ei - y[i] * (self.alpha[i] - alpha_i_old) * K[i, i] - \
                        y[j] * (self.alpha[j] - alpha_j_old) * K[i, j]
                    b2 = self.b - Ej - y[i] * (self.alpha[i] - alpha_i_old) * K[i, j] - \
                        y[j] * (self.alpha[j] - alpha_j_old) * K[j, j]

                    if 0 < self.alpha[i] < self.C:
                        self.b = b1
                    elif 0 < self.alpha[j] < self.C:
                        self.b = b2
                    else:
                        self.b = (b1 + b2) / 2

                    alpha_changed += 1

            if alpha_changed == 0:
                # No update in this pass: count toward the stopping criterion.
                iter_count += 1
            else:
                # Progress was made: reset the pass counter.
                iter_count = 0

        # Keep only the support vectors — samples whose alpha is
        # non-negligible are the ones that define the separating hyperplane.
        support_vector_indices = np.where(self.alpha > self.epsilon)[0]
        self.support_vectors = X[support_vector_indices]
        self.support_vector_labels = y[support_vector_indices]
        self.support_vector_indices = support_vector_indices
        self.alpha = self.alpha[support_vector_indices]

        # For the linear kernel the explicit weight vector is available:
        # w = sum_i alpha_i * y_i * x_i (vectorized).
        if self.kernel_type == 'linear':
            self.w = (self.alpha * self.support_vector_labels) @ self.support_vectors

        return self

    def _calculate_error(self, i, X, y, K):
        """Return E_i = f(x_i) - y_i, the prediction error for sample i."""
        output = self._decision_function(i, X, y, K)
        return output - y[i]

    def _decision_function(self, i, X, y, K):
        """Decision value f(x_i) = sum_j alpha_j y_j K(x_j, x_i) + b.

        Used only during training, while ``self.alpha`` still spans the full
        training set.
        """
        return np.sum(self.alpha * y * K[i]) + self.b

    def predict(self, X):
        """Predict class labels for new data.

        :param X: feature matrix of shape (n_samples, n_features)
        :return: array of predicted labels in {-1, +1} (np.sign semantics)
        """
        if self.kernel_type == 'linear' and self.w is not None:
            # Fast path: use the explicit weight vector.
            return np.sign(np.dot(X, self.w) + self.b)
        else:
            # Kernel expansion over the support vectors.
            y_predict = np.zeros(len(X))
            for i in range(len(X)):
                s = 0
                for alpha, sv_y, sv in zip(self.alpha, self.support_vector_labels, self.support_vectors):
                    s += alpha * sv_y * \
                        self.kernels[self.kernel_type](X[i], sv)
                y_predict[i] = s + self.b
            return np.sign(y_predict)

    def score(self, X, y):
        """Return the classification accuracy on (X, y)."""
        y_pred = self.predict(X)
        # Map labels to {-1, +1} so they are comparable with predictions.
        y = np.where(y <= 0, -1, 1)
        return np.mean(y_pred == y)

    def plot_decision_boundary(self, X, y, title='SVM Decision Boundary'):
        """Plot the decision boundary for 2-D data (no-op otherwise)."""
        # Only meaningful for two-dimensional feature spaces.
        if X.shape[1] != 2:
            print("只能绘制二维数据的决策边界")
            return

        plt.figure(figsize=(10, 8))

        # Build an evaluation grid covering the data with a 1-unit margin.
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                             np.arange(y_min, y_max, 0.02))

        # Classify every grid point.
        Z = self.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)

        # Filled regions plus the boundary contour.
        plt.contourf(xx, yy, Z, alpha=0.8, cmap=plt.cm.coolwarm)
        plt.contour(xx, yy, Z, colors='k', linestyles='-', linewidths=2)

        # Training points colored by label.
        plt.scatter(X[:, 0], X[:, 1], c=y,
                    cmap=plt.cm.coolwarm, s=30, edgecolors='k')

        # Highlight the support vectors with hollow circles.
        plt.scatter(self.support_vectors[:, 0], self.support_vectors[:, 1],
                    s=100, facecolors='none', edgecolors='k')

        plt.title(title)
        plt.xlabel('Feature 1')
        plt.ylabel('Feature 2')
        plt.tight_layout()
        plt.show()
