import numpy as np
import scipy.io as scio
from matplotlib import pyplot as plt

# Gaussian (RBF) kernel bandwidth, shared by kernel() and visualize()
sigma = 15.0

def loaddataset():
    """Load the nonlinear dataset and split it by class label.

    Reads 'nonlineardata.mat' (stored as a 3 x 3000 array: two features
    plus a label row), keeps the first 200 samples for training, and
    separates them into positive (label == 1) and negative samples.

    Returns:
        (positive, negative, dataset): positive/negative are the
        label-filtered rows; dataset is the full (200, 3) training array.
    """
    datafile = 'nonlineardata.mat'
    data = scio.loadmat(datafile)

    # Transpose to samples-by-columns and keep 200 samples for training.
    dataset = data['nonlinear'].T[0: 200, :]

    # Vectorized split by the label in column 2. This replaces the old
    # per-row np.row_stack loop, which was O(n^2), needed a dummy zero row
    # to seed each array, and relied on the now-deprecated np.row_stack.
    mask = dataset[:, 2] == 1
    positive = dataset[mask]
    negative = dataset[~mask]

    return positive, negative, dataset

def kernel(xi, xj, sigma=15.0):      # Gaussian (RBF) kernel
    """Gaussian kernel between each row of xi and the single sample xj.

    Args:
        xi: (M, D) array of samples.
        xj: (1, D) array holding one sample.
        sigma: kernel bandwidth; the default mirrors the module-level
            constant so existing two-argument calls are unchanged.

    Returns:
        (M, 1) array with K[i] = exp(-||xi[i] - xj||^2 / (2 * sigma^2)).
    """
    M = xi.shape[0]
    K = np.zeros((M, 1))

    for i in range(M):
        diff = np.array(xi[i]) - xj
        # BUG FIX: sigma^2 must divide the exponent, inside exp(). The old
        # code divided the whole exp() by sigma^2, scaling every kernel
        # value by 1/sigma^2 and breaking K(x, x) == 1; the decision-surface
        # formula in visualize() already uses this corrected form.
        K[i] = np.exp(-0.5 * float(diff.dot(diff.T)) / (sigma ** 2))

    return K

def findnonbound(alpha, C):
    """Return the indices of non-bound multipliers, i.e. 0 < alpha[i] < C."""
    return [k for k, a in enumerate(alpha) if 0 < a < C]

def randomlyselect(i, N):
    """Draw a uniform random index from 0..N-1 that differs from i."""
    while True:
        candidate = int(np.random.uniform(0, N))
        if candidate != i:
            return candidate

class SupportVectorMachine(object):
    """Binary SVM with a Gaussian kernel, trained by the SMO algorithm.

    Holds the training data, the Lagrange multipliers, the bias and an
    error cache; `inner_loop` performs one SMO pair-update attempt.
    """

    def __init__(self, X, Y, C, epsilon):
        self.X = X                  # training samples, N x D (N samples, D features)
        self.Y = Y                  # labels (+1 / -1), length N
        self.N = X.shape[0]         # number of samples
        self.C = C                  # penalty coefficient
        self.epsilon = epsilon      # KKT-violation tolerance
        self.alpha = np.zeros((self.N, 1))      # Lagrange multipliers, N x 1
        self.b = 0                              # bias
        # Error cache, N x 2: column 0 is a validity flag (0: stale,
        # 1: up to date), column 1 is the cached error value.
        self.E = np.zeros((self.N, 2))

    def compute_ek(self, k):
        """Return E_k = g(x_k) - y_k, with g(x) = sum_i alpha_i y_i K(x_i, x) + b."""
        xk = np.array([self.X[k]])
        y = np.array([self.Y]).T

        gxk = float(self.alpha.T.dot(y * kernel(self.X, xk))) + self.b
        return gxk - self.Y[k]

    def update_ek(self, k):
        """Recompute E_k and mark its cache entry as valid."""
        ek = self.compute_ek(k)
        self.E[k] = [1, ek]

    def select_j(self, i, Ei):
        """Choose the second index j maximizing |Ei - Ej| (biggest step)."""
        self.E[i] = [1, Ei]                      # refresh Ei in the cache
        valid_e = np.nonzero(self.E[:, 0])[0]    # indices with valid cache entries
        if len(valid_e) > 1:
            # BUG FIX: start max_delta below any possible |Ei - Ek| so the
            # first candidate is always taken; previously, when every delta
            # was 0 the stale defaults j = 0, Ej = 0 were returned.
            j, Ej, max_delta = -1, 0.0, -1.0
            for k in valid_e:
                if k == i:
                    continue

                Ek = self.compute_ek(k)
                if abs(Ei - Ek) > max_delta:
                    j = k
                    max_delta = abs(Ei - Ek)
                    Ej = Ek
        else:
            # No usable cache yet: pick j at random.
            j = randomlyselect(i, self.N)
            Ej = self.compute_ek(j)
        return j, Ej

    def inner_loop(self, i):
        """Try one SMO update with alpha[i] as the first multiplier.

        Returns True when an (alpha[i], alpha[j]) pair actually changed.
        """
        Ei = self.compute_ek(i)

        # Y[i]*Ei = (g(x[i]) - Y[i]) * Y[i] = Y[i]*g(x[i]) - 1, so these two
        # conditions detect a KKT violation beyond the epsilon tolerance.
        if (self.Y[i] * Ei > self.epsilon and float(self.alpha[i]) > 0) or\
            (self.Y[i] * Ei < -self.epsilon and float(self.alpha[i]) < self.C):

            # Second multiplier chosen to maximize the objective change.
            j, Ej = self.select_j(i, Ei)
            alpha_i_old = float(self.alpha[i])
            alpha_j_old = float(self.alpha[j])

            # Box constraints 0 <= alpha <= C give the clipping bounds [L, H].
            # BUG FIX: labels must be compared with != ("is not" compares
            # numpy scalar identity), and L for the unequal-label case is
            # max(0, alpha_j - alpha_i), not the reverse difference.
            if self.Y[i] != self.Y[j]:
                L = max(0, alpha_j_old - alpha_i_old)
                H = min(self.C, self.C + alpha_j_old - alpha_i_old)
            else:
                # BUG FIX: was alpha_i_old + alpha_i_old (typo).
                L = max(0, alpha_i_old + alpha_j_old - self.C)
                H = min(self.C, alpha_i_old + alpha_j_old)

            if L == H:
                return False

            xi = np.array([self.X[i]])
            xj = np.array([self.X[j]])

            # BUG FIX: the unconstrained step is (Ei - Ej)/eta with
            # eta = K11 + K22 - 2*K12 (Platt's SMO); the division was missing.
            eta = float(kernel(xi, xi)) + float(kernel(xj, xj)) \
                - 2.0 * float(kernel(xi, xj))
            if eta <= 0:
                return False

            alpha_j_new_unc = alpha_j_old + self.Y[j] * (Ei - Ej) / eta
            # Clip to [L, H]. BUG FIX: the lower test was "< H", which made
            # the unclipped branch unreachable.
            if alpha_j_new_unc > H:
                self.alpha[j] = H
            elif alpha_j_new_unc < L:
                self.alpha[j] = L
            else:
                self.alpha[j] = alpha_j_new_unc

            # Refresh Ej after the alpha[j] update.
            self.update_ek(j)

            # BUG FIX: parenthesis was misplaced — abs(x < 0.0001) took the
            # absolute value of a boolean, so this guard never worked.
            if abs(float(self.alpha[j]) - alpha_j_old) < 0.0001:
                return False

            # Update alpha[i] to keep sum_i alpha_i y_i unchanged.
            # BUG FIX: was reading the global Y instead of self.Y.
            self.alpha[i] = alpha_i_old + \
                self.Y[i] * self.Y[j] * (alpha_j_old - float(self.alpha[j]))

            # Refresh Ei after the alpha[i] update.
            self.update_ek(i)

            # Bias candidates from each updated multiplier.
            # BUG FIX: the alpha[i] terms must use alpha_i_old (both formulas
            # previously subtracted alpha_j_old).
            bi = - Ei - self.Y[i] * float(kernel(xi, xi)) * (float(self.alpha[i]) - alpha_i_old) \
                - self.Y[j] * float(kernel(xj, xi)) * (float(self.alpha[j]) - alpha_j_old) + self.b

            bj = - Ej - self.Y[i] * float(kernel(xi, xj)) * (float(self.alpha[i]) - alpha_i_old) \
                - self.Y[j] * float(kernel(xj, xj)) * (float(self.alpha[j]) - alpha_j_old) + self.b

            # Prefer the bias computed from a non-bound multiplier.
            # BUG FIX: parentheses were misplaced (float(alpha < C)).
            if 0 < float(self.alpha[i]) < self.C:
                self.b = bi
            elif 0 < float(self.alpha[j]) < self.C:
                self.b = bj
            else:
                self.b = 0.5 * (bi + bj)

            return True
        else:
            return False

    def visualize(self, positive, negative):
        """Scatter the samples, highlight support vectors, draw the boundary."""
        plt.xlabel('X1')                                                     # horizontal axis
        plt.ylabel('X2')                                                     # vertical axis
        plt.scatter(positive[:, 0], positive[:, 1], c = 'r', marker = 'o')   # positives in red
        plt.scatter(negative[:, 0], negative[:, 1], c = 'g', marker = 'o')   # negatives in green

        non_zero_alpha = self.alpha[np.nonzero(self.alpha)]
        # BUG FIX: use self.X, not the global X, to pick the support vectors.
        support_vector = self.X[np.nonzero(self.alpha)[0]]

        y = np.array([self.Y]).T[np.nonzero(self.alpha)]                     # support-vector labels
        plt.scatter(support_vector[:, 0], support_vector[:, 1], s = 100, c = 'y',\
                    alpha = 0.5, marker = 'o')                               # mark support vectors

        print('支持向量的个数', len(non_zero_alpha))
        X1 = np.arange(-50.0, 50.0, 0.1)
        X2 = np.arange(-50.0, 50.0, 0.1)

        x1, x2 = np.meshgrid(X1, X2)
        g = self.b

        # Decision function over the grid; only support vectors contribute.
        for i in range(len(non_zero_alpha)):
            g += non_zero_alpha[i] * y[i] * np.exp(-0.5 * ((x1 - support_vector[i][0])**2 + \
                    (x2 - support_vector[i][1]) ** 2) / (sigma ** 2))
        plt.contour(x1, x2, g, 0, colors = 'b')                              # g = 0 is the boundary
        plt.title("sigma: %f" % sigma)
        plt.show()

def sequential_minimal_optimization(X, Y, C, epsilon, max_iters):
    """Train a SupportVectorMachine with SMO (Platt's outer loop).

    Passes alternate between scanning the whole sample set and scanning
    only the non-bound points, until no pair changes on a full scan or
    max_iters passes have run.
    """
    model = SupportVectorMachine(X, Y, C, epsilon)
    scan_all = True
    for _ in range(max_iters):
        # Candidate first indices for this pass.
        if scan_all:
            candidates = range(model.N)
        else:
            candidates = findnonbound(model.alpha, model.C)

        changed_pairs = sum(model.inner_loop(i) for i in candidates)

        if scan_all:
            if changed_pairs == 0:
                break              # full scan changed nothing: converged
            scan_all = False       # switch to the non-bound subset
        elif changed_pairs == 0:
            scan_all = True        # non-bound subset exhausted: rescan all
    return model



if __name__ == "__main__":

    # Load the data: per-class sample arrays plus the full training set
    positive, negative, dataset = loaddataset()
    X = dataset[:, 0: 2]
    Y = dataset[:, 2]

    # Train the SVM model via sequential minimal optimization
    # (C = 1, KKT tolerance = 0.001, at most 40 outer-loop passes)
    svm_classifier = sequential_minimal_optimization(X, Y, 1, 0.001, 40)

    # Plot the samples, support vectors and separating surface
    svm_classifier.visualize(positive, negative)

    # TODO: plot the ROC curve
