import numpy as np
from sklearn.svm import SVC
import os


def load_txt(file):
    """Load a tab-separated data file of numeric rows.

    Each line holds the feature values followed by the class label,
    separated by tabs.

    :param file: path to the text file
    :return: tuple (data, labels) where data is an (m, n) float ndarray
        and labels is an (m,) int ndarray
    """
    with open(file, 'r') as f:
        lines = f.readlines()

    data_arr = []
    label_arr = []
    for line in lines:
        # strip() instead of line[:-1]: robust to a missing trailing
        # newline on the last line and to \r\n line endings
        stripped = line.strip()
        if not stripped:
            continue  # ignore blank lines
        nums = [float(num) for num in stripped.split('\t')]

        data_arr.append(nums[:-1])
        label_arr.append(int(nums[-1]))

    return np.array(data_arr), np.array(label_arr)


class SMO(object):
    """SVM trainer based on SMO (Sequential Minimal Optimization).

    Three training entry points are provided:
      * simple_smo        -- simplified SMO with a random second alpha
      * platt_smo         -- full Platt SMO using plain inner products
      * platt_smo_kernel  -- full Platt SMO using the precomputed RBF
                             kernel matrix ``self.k``
    """

    def __init__(self, train_data, train_label, c, toler, max_iter, ktup=None):
        """
        :param train_data: (m, n) ndarray of training samples
        :param train_label: length-m ndarray of labels in {-1, +1}
        :param c: box constraint on the alphas (0 <= alpha <= c)
        :param toler: tolerance used in the KKT-violation test
        :param max_iter: maximum number of sweeps over the data
        :param ktup: kernel description tuple ('rbf', sigma); defaults to
            ('rbf', 1) when omitted
        """
        if ktup:
            self.ktup = ktup
        else:
            self.ktup = ('rbf', 1)
        self.train_data = train_data
        self.train_label = train_label
        self.c = c
        self.toler = toler
        self.max_iter = max_iter

        self.m, self.n = train_data.shape
        # error cache: column 0 is a "valid" flag, column 1 the cached E value
        self.e_cache = np.zeros((self.m, 2))

        # tiny non-zero start so the initial decision values are not all zero
        self.alphas = np.array([0.0001 for _ in range(self.m)])
        self.b = 0

        # precomputed kernel (Gram) matrix; row i holds k(x_j, x_i) for all j
        self.k = np.zeros((self.m, self.m))
        for i in range(self.m):
            # bug fix: pass the normalized self.ktup rather than the raw
            # ktup argument -- the raw value may be None, which made
            # kernel_trans crash on ktup[0] whenever ktup was omitted
            self.k[i] = self.kernel_trans(train_data, train_data[i], self.ktup)

    @staticmethod
    def kernel_trans(train_data, A, ktup):
        """
        Radial basis function kernel:
        k(x, y) = exp(||x - y|| ^ 2 / (-1 * sigma ^ 2))
        (doc fix: this implementation divides by -sigma^2, not the
        textbook -2*sigma^2; sigma is ktup[1])

        :param train_data: (m, n) sample matrix
        :param A: a single length-n sample vector
        :param ktup: kernel description tuple ('rbf', sigma)
        :return: length-m vector of kernel values k(x_j, A)
        :raises NameError: if ktup[0] is not a supported kernel name
        """
        m, n = train_data.shape
        k = np.zeros(m)
        if ktup[0] == 'rbf':
            for j in range(m):
                deta_row = train_data[j] - A
                k[j] = np.dot(deta_row, deta_row.reshape(len(deta_row), 1))
            k = np.exp(k / (-1 * ktup[1] ** 2))
        else:
            raise NameError('ktup参数输入错误：第一个为很函数类型的字符串，其他参数为核函数的可选参数')
        return k

    def select_j_rand(self, i):
        """
        Pick a random alpha index different from i.

        :param i: index of the first alpha
        :return: random index j in [0, m), j != i
        """
        j = i
        while j == i:
            j = int(np.random.uniform(0, self.m))
        return j

    def select_j(self, i, ei):
        """
        Heuristic choice of the second alpha: among all indices with a
        valid cached error, pick the one maximizing |Ei - Ek|; fall back
        to a random choice when the cache is (effectively) empty.

        :param i: index of the first alpha
        :param ei: prediction error for sample i
        :return: (j, ej) -- chosen index and its prediction error
        """
        max_k = -1
        max_delta_e = 0
        ej = 0
        self.e_cache[i] = [1, ei]
        # indices whose cached error is marked valid
        valid_e_cache_list = np.nonzero(self.e_cache[:, 0])[0]
        if (len(valid_e_cache_list)) > 1:
            # loop over all candidates, keep the one with the largest step
            for k in valid_e_cache_list:
                if k == i:
                    continue

                ek = self.calc_ek(k)
                deta_e = abs(ei - ek)
                if deta_e > max_delta_e:
                    max_k = k
                    max_delta_e = deta_e
                    ej = ek
            if max_k == -1:
                # robustness fix: every candidate had |Ei - Ek| == 0, so no
                # index was selected; previously the invalid index -1 was
                # returned (silently addressing the last sample). Fall back
                # to a random j instead.
                j = self.select_j_rand(i)
                return j, self.calc_ek(j)
            return max_k, ej
        else:
            j = self.select_j_rand(i)
            ej = self.calc_ek(j)
            return j, ej

    @staticmethod
    def clip_alpha(aj, h, l):
        """
        Clamp an alpha value into the feasible interval [l, h].

        :param aj: candidate alpha value
        :param h: upper bound
        :param l: lower bound
        :return: clipped alpha
        """
        aj = h if aj > h else aj
        aj = l if aj < l else aj
        return aj

    def calc_ek_kernel(self, k):
        """
        Prediction error E_k = f(x_k) - y_k using the precomputed kernel
        matrix ``self.k``.

        :param k: sample index
        :return: scalar prediction error
        """
        mid_weight = np.multiply(self.alphas, self.train_label)
        mid_x = self.k[:, k]

        fxi = np.dot(mid_weight, mid_x) + self.b
        if isinstance(fxi, np.ndarray):
            fxi = fxi[0]
        return fxi - self.train_label[k]

    def calc_ek(self, k):
        """
        Prediction error E_k = f(x_k) - y_k using plain inner products.

        NOTE(review): select_j/update_ek always call this linear version,
        even from the kernel path (signal_platt_smo_kernel), which looks
        inconsistent with calc_ek_kernel -- confirm intent before changing.

        :param k: sample index
        :return: scalar prediction error
        """
        mid_weight = np.multiply(self.alphas, self.train_label)
        mid_x = np.dot(self.train_data, self.train_data[k].reshape(len(self.train_data[k]), 1))

        fxi = np.dot(mid_weight, mid_x) + self.b
        if isinstance(fxi, np.ndarray):
            fxi = fxi[0]
        return fxi - self.train_label[k]

    def update_ek(self, k):
        """
        Recompute the error for sample k and store it in the cache,
        marking the entry as valid.

        :param k: sample index
        """
        ek = self.calc_ek(k)
        self.e_cache[k] = [1, ek]

    def simple_smo(self):
        """
        Simplified SMO algorithm (random choice of the second alpha).
        SMO: Sequential Minimal Optimization.

        Principle:
        argmax{min(label * (wx + b)) * (1 / ||w||)}

        After applying Lagrange multipliers (a and label are vectors):
        max[sum(a) - 0.5 * sum(label(i) * label(j) * a(i) * a(j) * <x(i), x(j)>)]

        subject to:
        0 <= a <= c and sum(a(i) * label(i)) = 0

        * separating hyperplane (functional margin): y = w*x + b
        * classification: f(x) = sign(w*x + b) (sign is 1 for >0, -1 for <0, 0 for =0)
        * geometric distance of a point to the hyperplane:
          f(x) = (w*x + b) / ||w|| where ||w|| is the 2-norm
        :return: None; self.alphas and self.b are updated in place
        """
        iter_times = 0
        while iter_times < self.max_iter:
            alpha_pairs_changed = 0
            for i in range(self.m):
                # element-wise product of alphas and labels
                mid_weight = np.multiply(self.alphas, self.train_label)
                mid_x = np.dot(self.train_data, self.train_data[i].reshape(len(self.train_data[i]), 1))

                fxi = np.dot(mid_weight, mid_x) + self.b
                if isinstance(fxi, np.ndarray):
                    fxi = fxi[0]
                ei = fxi - self.train_label[i]
                """
                # KKT conditions for training sample (xi, yi):
                yi*f(i) >= 1 and alpha = 0 (outside the boundary)
                yi*f(i) == 1 and 0<alpha< C (on the boundary)
                yi*f(i) <= 1 and alpha = C (between the boundary)
                """
                # when the prediction error ei is too large, optimize alpha and b
                if ((ei * self.train_label[i] < -self.toler) and (self.alphas[i] < self.c)) or \
                        ((ei * self.train_label[i] > self.toler) and (self.alphas[i] > 0)):
                    # choose the second alpha at random
                    j = self.select_j_rand(i)
                    mid_x = np.dot(self.train_data, self.train_data[j].reshape(len(self.train_data[j]), 1))
                    fxj = float(np.dot(mid_weight, mid_x)) + self.b
                    if isinstance(fxj, np.ndarray):
                        fxj = fxj[0]
                    ej = fxj - self.train_label[j]

                    alpha_i_old = self.alphas[i].copy()
                    alpha_j_old = self.alphas[j].copy()

                    # constraint: keep alphas[j] within [0, c]
                    # bug fix: compare label[i] with label[j]; the original
                    # compared train_label[i] with itself (always False), so
                    # the opposite-sign L/H bounds were never used
                    if self.train_label[i] != self.train_label[j]:
                        l = max(0, self.alphas[j] - self.alphas[i])
                        h = min(self.c, self.c + self.alphas[j] - self.alphas[i])
                    else:
                        l = max(0, self.alphas[j] + self.alphas[i] - self.c)
                        h = min(self.c, self.alphas[j] + self.alphas[i])

                    if l == h:
                        print('l == h')
                        continue

                    # optimal step direction for alphas[j]
                    eta = 2.0 * np.dot(self.train_data[i], self.train_data[j].reshape(len(self.train_data[j]), 1)) - \
                          np.dot(self.train_data[i], self.train_data[i].reshape(len(self.train_data[i]), 1)) - \
                          np.dot(self.train_data[j], self.train_data[j].reshape(len(self.train_data[j]), 1))
                    if isinstance(eta, np.ndarray):
                        eta = eta[0]

                    if eta >= 0:
                        print('eta >= 0')
                        continue

                    self.alphas[j] -= self.train_label[j] * (ei - ej) / eta
                    # clamp alphas[j] into its feasible interval
                    self.alphas[j] = self.clip_alpha(self.alphas[j], h, l)

                    if abs(self.alphas[j] - alpha_j_old) < 0.00001:
                        print('j not moving enough')
                        continue

                    # update alphas[i] by the same amount in the opposite direction
                    self.alphas[i] += self.train_label[j] * self.train_label[i] * (alpha_j_old - self.alphas[j])

                    mid_weight_i = self.train_label[i] * (self.alphas[i] - alpha_i_old)
                    mid_weight_j = self.train_label[j] * (self.alphas[j] - alpha_j_old)
                    mid_x_i_i = np.dot(self.train_data[i], self.train_data[i].reshape(len(self.train_data[i]), 1))
                    mid_x_i_j = np.dot(self.train_data[j], self.train_data[i].reshape(len(self.train_data[i]), 1))
                    mid_x_j_j = np.dot(self.train_data[j], self.train_data[j].reshape(len(self.train_data[j]), 1))
                    if isinstance(mid_x_i_i, np.ndarray):
                        mid_x_i_i = mid_x_i_i[0]
                    if isinstance(mid_x_i_j, np.ndarray):
                        mid_x_i_j = mid_x_i_j[0]
                    if isinstance(mid_x_j_j, np.ndarray):
                        mid_x_j_j = mid_x_j_j[0]

                    b1 = self.b - ei - mid_weight_i * mid_x_i_i - mid_weight_j * mid_x_i_j
                    b2 = self.b - ej - mid_weight_i * mid_x_i_j - mid_weight_j * mid_x_j_j

                    # pick b from whichever alpha is strictly inside (0, c)
                    if 0 < self.alphas[i] < self.c:
                        self.b = b1
                    elif 0 < self.alphas[j] < self.c:
                        self.b = b2
                    else:
                        self.b = (b1 + b2) / 2

                    alpha_pairs_changed += 1
                    print('i: {}, pairs changed {}'.format(i, alpha_pairs_changed))

            # iterate until the alphas need no further optimization
            if alpha_pairs_changed == 0:
                iter_times += 1
            else:
                iter_times = 0

            print('iter_times： {}'.format(iter_times))

    def signal_platt_smo_kernel(self, i):
        """
        One Platt-SMO optimization step (kernelized variant).

        :param i: index of the first alpha
        :return: 1 if the pair (alphas[i], alphas[j]) changed, else 0
        """
        ei = self.calc_ek_kernel(i)

        if ((self.train_label[i] * ei < -self.toler) and (self.alphas[i] < self.c)) or \
                ((self.train_label[i] * ei > self.toler) and (self.alphas[i] > 0)):
            j, ej = self.select_j(i, ei)
            alpha_i_old = self.alphas[i].copy()
            alpha_j_old = self.alphas[j].copy()

            # feasible interval [l, h] for alphas[j]
            if self.train_label[i] != self.train_label[j]:
                l = max(0, self.alphas[j] - self.alphas[i])
                h = min(self.c, self.c + self.alphas[j] - self.alphas[i])
            else:
                l = max(0, self.alphas[j] + self.alphas[i] - self.c)
                h = min(self.c, self.alphas[j] + self.alphas[i])

            if l == h:
                print('l == h')
                return 0

            # optimal step direction for alphas[j], via the kernel matrix
            eta = 2.0 * self.k[i, j] - self.k[i, i] - self.k[j, j]
            if isinstance(eta, np.ndarray):
                eta = eta[0]

            if eta >= 0:
                print('eta >= 0')
                return 0

            self.alphas[j] -= self.train_label[j] * (ei - ej) / eta
            self.alphas[j] = self.clip_alpha(self.alphas[j], h, l)
            self.update_ek(j)

            if abs(self.alphas[j] - alpha_j_old) < 0.00001:
                print('j not moving enough')
                return 0
            self.alphas[i] += self.train_label[j] * self.train_label[i] * (alpha_j_old - self.alphas[j])
            self.update_ek(i)

            mid_weight_i = self.train_label[i] * (self.alphas[i] - alpha_i_old)
            mid_weight_j = self.train_label[j] * (self.alphas[j] - alpha_j_old)
            mid_x_i_i = self.k[i, i]
            mid_x_i_j = self.k[i, j]
            mid_x_j_j = self.k[j, j]
            if isinstance(mid_x_i_i, np.ndarray):
                mid_x_i_i = mid_x_i_i[0]
            if isinstance(mid_x_i_j, np.ndarray):
                mid_x_i_j = mid_x_i_j[0]
            if isinstance(mid_x_j_j, np.ndarray):
                mid_x_j_j = mid_x_j_j[0]

            b1 = self.b - ei - mid_weight_i * mid_x_i_i - mid_weight_j * mid_x_i_j
            b2 = self.b - ej - mid_weight_i * mid_x_i_j - mid_weight_j * mid_x_j_j

            # pick b from whichever alpha is strictly inside (0, c)
            if 0 < self.alphas[i] < self.c:
                self.b = b1
            elif 0 < self.alphas[j] < self.c:
                self.b = b2
            else:
                self.b = (b1 + b2) / 2

            return 1
        return 0

    def signal_platt_smo(self, i):
        """
        One Platt-SMO optimization step (linear inner products).

        :param i: index of the first alpha
        :return: 1 if the pair (alphas[i], alphas[j]) changed, else 0
        """
        ei = self.calc_ek(i)

        if ((self.train_label[i] * ei < -self.toler) and (self.alphas[i] < self.c)) or \
                ((self.train_label[i] * ei > self.toler) and (self.alphas[i] > 0)):
            j, ej = self.select_j(i, ei)
            alpha_i_old = self.alphas[i].copy()
            alpha_j_old = self.alphas[j].copy()

            # feasible interval [l, h] for alphas[j]
            if self.train_label[i] != self.train_label[j]:
                l = max(0, self.alphas[j] - self.alphas[i])
                h = min(self.c, self.c + self.alphas[j] - self.alphas[i])
            else:
                l = max(0, self.alphas[j] + self.alphas[i] - self.c)
                h = min(self.c, self.alphas[j] + self.alphas[i])

            if l == h:
                print('l == h')
                return 0

            # optimal step direction for alphas[j]
            eta = 2.0 * np.dot(self.train_data[i], self.train_data[j].reshape(len(self.train_data[j]), 1)) - \
                  np.dot(self.train_data[i], self.train_data[i].reshape(len(self.train_data[i]), 1)) - \
                  np.dot(self.train_data[j], self.train_data[j].reshape(len(self.train_data[j]), 1))
            if isinstance(eta, np.ndarray):
                eta = eta[0]

            if eta >= 0:
                print('eta >= 0')
                return 0

            self.alphas[j] -= self.train_label[j] * (ei - ej) / eta
            self.alphas[j] = self.clip_alpha(self.alphas[j], h, l)
            self.update_ek(j)

            if abs(self.alphas[j] - alpha_j_old) < 0.00001:
                print('j not moving enough')
                return 0
            self.alphas[i] += self.train_label[j] * self.train_label[i] * (alpha_j_old - self.alphas[j])
            self.update_ek(i)

            mid_weight_i = self.train_label[i] * (self.alphas[i] - alpha_i_old)
            mid_weight_j = self.train_label[j] * (self.alphas[j] - alpha_j_old)
            mid_x_i_i = np.dot(self.train_data[i], self.train_data[i].reshape(len(self.train_data[i]), 1))
            mid_x_i_j = np.dot(self.train_data[j], self.train_data[i].reshape(len(self.train_data[i]), 1))
            mid_x_j_j = np.dot(self.train_data[j], self.train_data[j].reshape(len(self.train_data[j]), 1))
            if isinstance(mid_x_i_i, np.ndarray):
                mid_x_i_i = mid_x_i_i[0]
            if isinstance(mid_x_i_j, np.ndarray):
                mid_x_i_j = mid_x_i_j[0]
            if isinstance(mid_x_j_j, np.ndarray):
                mid_x_j_j = mid_x_j_j[0]

            b1 = self.b - ei - mid_weight_i * mid_x_i_i - mid_weight_j * mid_x_i_j
            b2 = self.b - ej - mid_weight_i * mid_x_i_j - mid_weight_j * mid_x_j_j

            # pick b from whichever alpha is strictly inside (0, c)
            if 0 < self.alphas[i] < self.c:
                self.b = b1
            elif 0 < self.alphas[j] < self.c:
                self.b = b2
            else:
                self.b = (b1 + b2) / 2

            return 1
        return 0

    def platt_smo(self):
        """
        Full Platt SMO algorithm (linear inner products): alternate full
        sweeps with sweeps over the non-bound alphas until no pair changes.

        :return: None; self.alphas and self.b are updated in place
        """
        iter_time = 0
        entire_set = True
        alpha_pairs_changed = 0
        while (iter_time < self.max_iter) and (alpha_pairs_changed > 0 or entire_set):
            alpha_pairs_changed = 0
            if entire_set:
                for i in range(self.m):
                    alpha_pairs_changed += self.signal_platt_smo(i)
                    print('full set, iter: {}, i: {}, pairs changed: {}'.format(iter_time, i, alpha_pairs_changed))
                iter_time += 1
            else:
                # indices with 0 < alpha < c (non-bound support vectors)
                non_bound_is = np.nonzero((self.alphas > 0) * (self.alphas < self.c))[0]
                for i in non_bound_is:
                    alpha_pairs_changed += self.signal_platt_smo(i)
                    print('non-bound, iter: {}, i: {}, pairs changed: {}'.format(iter_time, i, alpha_pairs_changed))
                iter_time += 1

            if entire_set:
                entire_set = False
            elif alpha_pairs_changed == 0:
                entire_set = True
            print('iteration number: {}'.format(iter_time))

    def platt_smo_kernel(self):
        """
        Full Platt SMO algorithm using the precomputed kernel matrix.

        :return: None; self.alphas and self.b are updated in place
        """
        iter_time = 0
        entire_set = True
        alpha_pairs_changed = 0
        while (iter_time < self.max_iter) and (alpha_pairs_changed > 0 or entire_set):
            alpha_pairs_changed = 0
            if entire_set:
                for i in range(self.m):
                    alpha_pairs_changed += self.signal_platt_smo_kernel(i)
                    print('full set, iter: {}, i: {}, pairs changed: {}'.format(iter_time, i, alpha_pairs_changed))
                iter_time += 1
            else:
                # indices with 0 < alpha < c (non-bound support vectors)
                non_bound_is = np.nonzero((self.alphas > 0) * (self.alphas < self.c))[0]
                for i in non_bound_is:
                    alpha_pairs_changed += self.signal_platt_smo_kernel(i)
                    print('non-bound, iter: {}, i: {}, pairs changed: {}'.format(iter_time, i, alpha_pairs_changed))
                iter_time += 1

            if entire_set:
                entire_set = False
            elif alpha_pairs_changed == 0:
                entire_set = True
            print('iteration number: {}'.format(iter_time))

    def calc_weight(self):
        """
        Compute the weight vector w of the separating hyperplane:
        w = sum_i alpha_i * y_i * x_i

        :return: (n, 1) weight matrix
        """
        x = np.mat(self.train_data)
        train_label = np.mat(self.train_label).transpose()
        w = np.zeros((self.n, 1))
        for i in range(self.m):
            w += np.multiply(self.alphas[i] * train_label[i], x[i, :].T)
        return w

    def predict(self, test_data):
        """
        Classify one sample with the linear decision function w*x + b.

        :param test_data: length-n feature vector
        :return: predicted label, +1 or -1
        """
        weight = self.calc_weight()
        pre = np.mat(test_data) * np.mat(weight) + self.b
        return 1 if pre > 0 else -1


def run():
    """Train the linear Platt SMO on the chapter-6 demo set and print b,
    the active alphas, the weight vector, the support vectors and one
    sample prediction."""
    data_arr, label_arr = load_txt('./data/B/chp6/testSet.txt')

    np.random.seed(10)
    smo = SMO(train_data=data_arr, train_label=label_arr, c=0.6,
              toler=0.001, max_iter=40)
    smo.platt_smo()

    print('b: {}'.format(smo.b))
    print('有效alpha：{}'.format(smo.alphas[smo.alphas > 0.001]))
    print('weight: {}'.format(smo.calc_weight()))

    # every sample whose alpha is effectively non-zero is a support vector
    support_vector = [list(smo.train_data[idx]) + [smo.train_label[idx]]
                      for idx in range(len(smo.train_data))
                      if smo.alphas[idx] > 0.001]
    print('支持向量/最小间隔的数据点： {}'.format(support_vector))

    # predict the first training sample and show it next to its true label
    pre_label = smo.predict(test_data=data_arr[0])
    print(pre_label)
    print(label_arr[0])


def test_rbf(k1=1.3):
    """Train the kernelized Platt SMO on the RBF demo data, report the
    training and prediction error rates, and compare against sklearn's SVC.

    :param k1: sigma parameter of the RBF kernel
    """
    data_arr, label_arr = load_txt('./data/chp6/testSetRBF.txt')
    test_data_arr, test_label_arr = load_txt('./data/chp6/testSetRBF2.txt')

    smo = SMO(train_data=data_arr, train_label=label_arr, c=200,
              toler=0.0001, max_iter=10000, ktup=('rbf', k1))
    smo.platt_smo_kernel()

    # support-vector matrix: samples whose alpha stayed strictly positive
    sv_ind = np.nonzero(smo.alphas > 0)[0]
    svs = data_arr[sv_ind]
    label_sv = label_arr[sv_ind]

    print('支持向量数：{}'.format(svs.shape[0]))

    # coefficients alpha_i * y_i of the support vectors (loop invariant)
    coeffs = np.multiply(label_sv, smo.alphas[sv_ind])

    def error_rate(samples, targets):
        # fraction of samples whose kernel decision value disagrees in sign
        wrong = 0
        for row, target in zip(samples, targets):
            kernel_value = smo.kernel_trans(svs, row, ('rbf', k1))
            score = np.dot(kernel_value, coeffs.reshape(len(coeffs), 1)) + smo.b
            if np.sign(score[0]) != np.sign(target):
                wrong += 1
        return wrong / len(samples)

    print('训练误差率：{}'.format(error_rate(data_arr, label_arr)))
    print('预测误差率：{}'.format(error_rate(test_data_arr, test_label_arr)))

    # sklearn baseline with matching hyper-parameters
    clf = SVC(C=200, tol=0.0001, max_iter=10000)
    clf.fit(data_arr, label_arr)
    predict = clf.predict(test_data_arr)
    # error rate of the sklearn model
    mismatch_count = np.ones(len(predict))[predict != test_label_arr].sum()
    print('sklearn预测误差率：{}'.format(mismatch_count / len(test_label_arr)))


def svm_write_num(ktup=('rbf', 10)):
    """
    Handwritten-digit recognition posed as a binary SVM problem (digit 9
    vs. every other digit), comparing sklearn's SVC with this file's
    kernelized Platt SMO.

    :param ktup: kernel description tuple ('rbf', sigma)
    :return: None; prints support-vector count and error rates
    """
    def img2vector(file):
        """Flatten a text image of '0'/'1' characters into a flat int list."""
        with open(file, 'r') as f:
            data = f.readlines()

        # bug fix: strip() instead of info[:-1] -- slicing off the last
        # character drops a real digit when the final line has no trailing
        # newline, and leaves '\r' behind on CRLF files
        arr = [list(info.strip()) for info in data]
        return [int(x) for y in arr for x in y]

    def load_datas(file_path):
        """
        Load every image file in a directory.

        File names are expected to look like '<digit>_<index>.txt';
        digit 9 is labeled -1, all other digits +1.
        :param file_path: directory containing the image files
        :return: (data, labels) numpy arrays
        """
        files = os.listdir(file_path)
        data = []
        labels = []
        for file_name in files:
            # class digit is encoded in the file name before '_'
            class_num = int(file_name.split('.')[0].split('_')[0])
            labels.append(-1 if class_num == 9 else 1)
            data.append(img2vector(os.path.join(file_path, file_name)))

        return np.array(data), np.array(labels)

    train_data, train_label = load_datas('./data/chp6/trainingDigits')
    test_data, test_label = load_datas('./data/chp6/testDigits')

    # sklearn baseline
    clf = SVC(C=200, tol=0.0001, max_iter=10000)
    clf.fit(train_data, train_label)
    predict = clf.predict(test_data)
    # error rate of the sklearn model
    error_arr = np.ones(len(predict))
    error_count = error_arr[predict != test_label].sum()
    print('sklearn预测误差率：{}'.format(error_count / len(test_label)))

    # this file's kernel Platt SMO
    smo = SMO(train_data=train_data, train_label=train_label, c=200,
              toler=0.0001, max_iter=10000, ktup=ktup)
    smo.platt_smo_kernel()

    # support vectors: samples whose alpha stayed strictly positive
    sv_ind = np.nonzero(smo.alphas > 0)[0]
    svs = train_data[sv_ind]
    label_sv = train_label[sv_ind]
    print('支持向量数：{}'.format(svs.shape[0]))

    def count_errors(samples, targets):
        """Count sign disagreements of the kernel decision function."""
        # coefficients alpha_i * y_i (loop invariant, hoisted)
        weight = np.multiply(label_sv, smo.alphas[sv_ind])
        wrong = 0
        for i, sample in enumerate(samples):
            kernel_eval = smo.kernel_trans(svs, sample, ktup)
            pred = np.dot(kernel_eval, weight.reshape(len(weight), 1)) + smo.b
            if np.sign(pred[0]) != np.sign(targets[i]):
                wrong += 1
        return wrong

    print('训练错误率为： {}'.format(count_errors(train_data, train_label) / len(train_data)))
    print('预测错误率为： {}'.format(count_errors(test_data, test_label) / len(test_data)))


if __name__ == '__main__':
    # Entry point: runs the RBF-kernel demo. The other experiments are
    # kept commented out for manual switching.
    # run()
    test_rbf()
    # svm_write_num(ktup=('rbf', 100))
