import random
import numpy as np

class SupportVectorMachine:
    """Linear support vector machine trained with a simplified SMO solver."""

    def __init__(self, x_train, y_train, C, toler, max_iter):
        """
        :param x_train: feature matrix of the training set, shape (m, n)
        :param y_train: labels of the training set, values in {-1, +1}
        :param C: regularization constant, C > 0
        :param toler: tolerance allowed on the prediction error (KKT slack)
        :param max_iter: number of consecutive full passes without any
                         alpha update required before the solver stops
        """
        self.x_train = np.mat(x_train)
        # Reshape the labels into an (m, 1) column so that
        # np.multiply(alphas, y_train) broadcasts element-wise even when a
        # 1-D label list/array is passed (np.mat of a 1-D sequence would
        # otherwise produce a (1, m) row and an (m, m) outer product).
        self.y_train = np.mat(y_train).reshape(-1, 1)
        self.C = C
        self.toler = toler
        self.max_iter = max_iter
        m, n = np.shape(self.x_train)
        self.alphas = np.zeros((m, 1))  # dual variables, one per sample
        self.b = 0  # bias of the separating hyperplane

    def rand_select_j(self, i, m):
        """Pick a random index j in [0, m) different from i.

        :param i: index of the first dual variable alpha_i
        :param m: total number of dual variables
        :return: an index j != i
        """
        j = i
        while j == i:
            j = int(random.uniform(0, m))
        return j

    def clip_alpha(self, aj, H, L):
        """Clip the dual variable alpha_j into the box [L, H].

        :param aj: candidate value of alpha_j
        :param H: upper bound
        :param L: lower bound
        :return: clipped value of alpha_j
        """
        if aj > H:
            aj = H
        if aj < L:
            aj = L
        return aj

    def smo(self):
        """Solve the SVM dual problem with the simplified SMO algorithm.

        Terminates after `max_iter` consecutive full passes over the data
        in which no alpha pair was updated.

        :return: (b, alphas) — the learned bias and dual variables
        """
        iters = 0
        # m: number of training samples
        m, n = np.shape(self.x_train)
        while iters < self.max_iter:
            alpha_pairs_changed = 0
            for i in range(m):
                # Current normal vector W = sum_k alpha_k * y_k * x_k.
                W = np.dot(np.multiply(self.alphas, self.y_train).T, self.x_train)
                # Model output and prediction error for sample i.
                f_x_i = float(np.dot(W, self.x_train[i, :].T)) + self.b
                E_i = f_x_i - float(self.y_train[i])
                # Optimize alpha_i only if it violates the KKT conditions.
                if ((self.y_train[i] * E_i < -self.toler) and (self.alphas[i] < self.C)) or \
                        ((self.y_train[i] * E_i > self.toler) and (self.alphas[i] > 0)):
                    # Pick a second, different dual variable alpha_j.
                    j = self.rand_select_j(i, m)
                    # The alphas have not changed since W was computed, so W
                    # can be reused for sample j (the original recomputed it).
                    f_x_j = float(np.dot(W, self.x_train[j, :].T)) + self.b
                    E_j = f_x_j - float(self.y_train[j])
                    # Remember the old values before updating.
                    alpha_iold = float(self.alphas[i])
                    alpha_jold = float(self.alphas[j])
                    y_i = float(self.y_train[i])
                    y_j = float(self.y_train[j])
                    # Box bounds that keep 0 <= alpha <= C together with the
                    # linear constraint sum_k alpha_k * y_k = 0.
                    if y_i != y_j:
                        L = max(0.0, alpha_jold - alpha_iold)
                        H = min(self.C, self.C + alpha_jold - alpha_iold)
                    else:
                        L = max(0.0, alpha_jold + alpha_iold - self.C)
                        H = min(self.C, alpha_jold + alpha_iold)
                    if H == L:
                        continue
                    # eta = 2*K_ij - K_ii - K_jj: second derivative of the
                    # dual objective along the constraint line (must be < 0).
                    # BUG FIX: the original spelled `self.x_trian` here,
                    # which raised AttributeError on every reachable run.
                    k_ii = float(np.dot(self.x_train[i, :], self.x_train[i, :].T))
                    k_jj = float(np.dot(self.x_train[j, :], self.x_train[j, :].T))
                    k_ij = float(np.dot(self.x_train[i, :], self.x_train[j, :].T))
                    eta = 2.0 * k_ij - k_ii - k_jj
                    if eta >= 0:
                        continue
                    # Unconstrained optimum of alpha_j along the constraint.
                    # BUG FIX: the original computed
                    # (alpha_j - y_j*(E_i - E_j)) / eta, dividing alpha_j
                    # itself by eta; only the error term is divided.
                    self.alphas[j] = alpha_jold - y_j * (E_i - E_j) / eta
                    # Clip alpha_j back into the feasible box [L, H].
                    self.alphas[j] = self.clip_alpha(float(self.alphas[j]), H, L)
                    if abs(float(self.alphas[j]) - alpha_jold) < 0.00001:
                        continue
                    # Move alpha_i by the same amount in the opposite
                    # direction so sum_k alpha_k * y_k stays zero.
                    self.alphas[i] = alpha_iold + y_j * y_i * (alpha_jold - float(self.alphas[j]))
                    # Recompute the bias from whichever alpha is interior.
                    b1 = self.b - E_i + y_i * (alpha_iold - float(self.alphas[i])) * k_ii \
                        + y_j * (alpha_jold - float(self.alphas[j])) * k_ij
                    b2 = self.b - E_j + y_i * (alpha_iold - float(self.alphas[i])) * k_ij \
                        + y_j * (alpha_jold - float(self.alphas[j])) * k_jj
                    if 0 < float(self.alphas[i]) < self.C:
                        self.b = b1
                    elif 0 < float(self.alphas[j]) < self.C:
                        self.b = b2
                    else:
                        self.b = (b1 + b2) / 2.0
                    # BUG FIX: count only successful pair updates; the
                    # original incremented this for every sample i, outside
                    # the KKT branch.
                    alpha_pairs_changed += 1
            # BUG FIX: termination bookkeeping belongs after a full pass;
            # the original placed it inside the for-loop, resetting `iters`
            # on nearly every sample so the while-loop never terminated.
            if alpha_pairs_changed == 0:
                iters += 1
            else:
                iters = 0
        return self.b, self.alphas

if __name__ == '__main__':
    import matplotlib.pyplot as plt
    import pandas as pd
    from sklearn.datasets import make_blobs

    # Build a toy binary dataset: 200 samples with 2 features each.
    data, label = make_blobs(n_samples = 200, n_features = 2, centers = 2)
    # Remap class 0 to -1 so the labels are {-1, +1} as the SVM expects.
    label[label == 0] = -1
    df = pd.DataFrame({
        'x1': data[:, 0],
        'x2': data[:, 1],
        'class': label,
    })
    # Scatter-plot the two classes of the training data.
    positive = df[df['class'] == 1]
    negative = df[df['class'] == -1]
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.scatter(positive['x1'], positive['x2'], s = 30, c = 'b', marker = 'o', label = 'class1')
    ax.scatter(negative['x1'], negative['x2'], s = 30, c = 'r', marker = 'x', label = 'class -1')
    ax.legend()
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')