import numpy as np
import time


class SVM(object):
    """Binary SVM classifier trained with a simplified SMO algorithm.

    Supports a linear kernel and an RBF (Gaussian) kernel.  Labels are
    mapped to {-1, +1} inside ``train``; after training only the support
    vectors (samples with alpha > 0) are kept for prediction.
    """

    def __init__(self):
        self.X = None                # training samples; support vectors after train()
        self.y = None                # labels mapped to {-1, +1}
        self.C = None                # soft-margin penalty parameter
        self.tol = None              # tolerance for KKT-condition violation
        self.train_size = 0          # number of rows currently stored in X
        self.alphas = None           # Lagrange multipliers, one per sample
        self.b = 0                   # bias term of the decision function
        self.E_cache = None          # per-sample (valid_flag, error) cache for SMO
        self.K = None                # precomputed Gram (kernel) matrix, train-time only
        self.kTup = ('linear', 0)    # kernel spec: (name, parameter)

    def kernelTrans(self, input_vec, kTup):
        """Return the kernel values between every row of self.X and input_vec.

        kTup is ('linear', ignored) or ('rbf', sigma); any other kernel
        name raises NameError.  Returns a 1-D array of length train_size.
        """
        if kTup[0] == 'linear':
            Kx = np.dot(self.X, input_vec)
        elif kTup[0] == 'rbf':
            # Squared Euclidean distances, then exp(-d^2 / (2 * sigma^2)).
            deltaRow = self.X - input_vec
            Kx = np.sum(deltaRow * deltaRow, axis=1)
            Kx = np.exp(Kx / (-2.0 * kTup[1] ** 2))
        else:
            raise NameError('kernel function name is not support')
        return Kx

    def calcEk(self, k):
        """Return the prediction error E_k = f(x_k) - y_k for sample k."""
        fXk = np.sum(self.alphas * self.y * self.K[:, k]) + self.b
        return fXk - float(self.y[k])

    def updateEk(self, k):
        """Recompute E_k and mark its cache entry as valid."""
        self.E_cache[k] = [1, self.calcEk(k)]

    def select_J(self, i, Ei):
        """Choose the second working index j, maximizing |Ei - Ej|.

        Falls back to a random index when the error cache holds no other
        valid entry, or when every cached error equals Ei (the original
        code returned j = -1 in that case, silently optimizing the last
        sample with a stale Ej of 0).  Returns (j, Ej).
        """
        maxJ = -1
        maxDeltaE = 0
        Ej = 0

        self.E_cache[i] = [1, Ei]
        validIdx = np.nonzero(self.E_cache[:, 0])[0]
        if len(validIdx) > 1:
            for k in validIdx:
                if k == i:
                    continue
                Ek = self.calcEk(k)
                deltaE = np.abs(Ei - Ek)
                if deltaE > maxDeltaE:
                    maxJ = k
                    maxDeltaE = deltaE
                    Ej = Ek
            if maxJ == -1:
                # Every cached error equals Ei; pick a random partner instead
                # of returning the invalid index -1.
                maxJ = self.select_J_rand(i, self.train_size)
                Ej = self.calcEk(maxJ)
            return maxJ, Ej
        else:
            j = self.select_J_rand(i, self.train_size)
            Ej = self.calcEk(j)
            return j, Ej

    def innerL(self, i):
        """Attempt one SMO optimization step starting from sample i.

        Returns 1 if the alpha pair (i, j) was changed, else 0.
        """
        Ei = self.calcEk(i)
        # Proceed only if alpha_i violates the KKT conditions beyond tol.
        if ((self.y[i] * Ei < -self.tol) and (self.alphas[i] < self.C)) or \
                ((self.y[i] * Ei > self.tol) and (self.alphas[i] > 0)):
            j, Ej = self.select_J(i, Ei)
            alpha_i_old = self.alphas[i]
            alpha_j_old = self.alphas[j]
            # Box-constraint bounds L and H for the new alpha_j.
            if self.y[i] != self.y[j]:
                L = max(0, self.alphas[j] - self.alphas[i])
                H = min(self.C, self.C + self.alphas[j] - self.alphas[i])
            else:
                L = max(0, self.alphas[j] + self.alphas[i] - self.C)
                H = min(self.C, self.alphas[j] + self.alphas[i])
            if L == H:
                return 0
            # eta = 2*K_ij - K_ii - K_jj must be negative for a valid step.
            eta = 2.0 * self.K[i, j] - self.K[i, i] - self.K[j, j]
            if eta >= 0:
                return 0

            self.alphas[j] -= self.y[j] * (Ei - Ej) / eta
            self.alphas[j] = self.clipAlpha(self.alphas[j], H, L)
            self.updateEk(j)
            if abs(self.alphas[j] - alpha_j_old) < 0.00001:
                # j did not move enough to count as progress.
                return 0
            # Move alpha_i by the same amount in the opposite direction so
            # that sum(alpha_k * y_k) stays 0.
            self.alphas[i] += self.y[j] * self.y[i] * (alpha_j_old - self.alphas[j])
            self.updateEk(i)

            b1 = self.b - Ei - self.y[i] * (self.alphas[i] - alpha_i_old) * self.K[i, i] \
                 - self.y[j] * (self.alphas[j] - alpha_j_old) * self.K[j, i]

            b2 = self.b - Ej - self.y[i] * (self.alphas[i] - alpha_i_old) * self.K[i, j] \
                 - self.y[j] * (self.alphas[j] - alpha_j_old) * self.K[j, j]

            # Prefer the bias computed from an alpha strictly inside (0, C).
            if (0 < self.alphas[i]) and (self.C > self.alphas[i]):
                self.b = b1
            elif (0 < self.alphas[j]) and (self.C > self.alphas[j]):
                self.b = b2
            else:
                self.b = (b1 + b2) / 2.0
            return 1
        else:
            return 0

    def clipAlpha(self, param, H, L):
        """Clip param into the closed interval [L, H]."""
        return max(min(H, param), L)

    def select_J_rand(self, i, train_size):
        """Return a random index in [0, train_size) different from i."""
        j = i
        while j == i:
            j = int(np.random.uniform(0, train_size))
        return j

    def smo_process(self, max_iter):
        """Run Platt's outer SMO loop until convergence or max_iter.

        Alternates full passes over all samples with passes over the
        non-bound samples (0 < alpha < C).  Returns (alphas, b).
        """
        iteration = 0
        entire_set = True
        alphaPairsChanged = 0
        while (iteration < max_iter) and ((alphaPairsChanged > 0) or entire_set):
            alphaPairsChanged = 0
            if entire_set:
                for i in range(self.train_size):
                    alphaPairsChanged += self.innerL(i)
                print('fullSet,iter: %d, pairs changed %d' % (iteration, alphaPairsChanged))
            else:
                nonBound_i = np.nonzero((self.alphas > 0) * (self.alphas < self.C))[0]
                for i in nonBound_i:
                    alphaPairsChanged += self.innerL(i)
                print('non-bound,iter: %d, pairs changed %d' % (iteration, alphaPairsChanged))
            iteration += 1
            if entire_set:
                entire_set = False
            elif alphaPairsChanged == 0:
                # A non-bound pass made no progress; do a full pass next.
                entire_set = True
            print('iteration number: %d' % iteration)
        return self.alphas, self.b

    def train(self, data_set, labels, C=200, kernel_func=('linear', 0), toler=0.0001, max_iter=10000):
        """Fit the SVM with SMO.

        data_set    : 2-D array-like of training samples.
        labels      : 1-D labels; values > 0 become +1, the rest -1.
        C           : soft-margin penalty, must be > 0.
        kernel_func : ('linear', _) or ('rbf', sigma).
        toler       : KKT violation tolerance, must be > 0.
        max_iter    : cap on SMO outer-loop iterations.

        After training, X / y / alphas hold only the support vectors and
        the train-time caches (E_cache, K) are released.
        """
        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        data_array = np.array(data_set, float)
        assert len(data_array.shape) == 2, 'data_set必须是维数为2的向量'
        labels_array = np.array(labels)
        assert len(labels_array.shape) == 1
        assert data_array.shape[0] == labels_array.shape[0], '样本和标注的长度必须相等'

        # Map labels to the {-1, +1} convention required by SMO.
        labels_array[labels_array > 0] = 1
        labels_array[labels_array <= 0] = -1

        print('SVM start training...')
        t1 = time.time()
        self.X = data_array
        self.y = labels_array
        self.train_size = data_array.shape[0]
        self.alphas = np.zeros((self.train_size,), float)
        self.E_cache = np.zeros((self.train_size, 2), float)

        assert C > 0, 'C must >0'
        self.C = C
        assert toler > 0, 'toler must >0'
        self.tol = toler

        # Precompute the full Gram matrix, one column per sample.
        self.kTup = kernel_func
        self.K = np.zeros((self.train_size, self.train_size), float)
        print('calculate Kernel Value')
        for i in range(self.train_size):
            self.K[:, i] = self.kernelTrans(self.X[i], self.kTup)

        t2 = time.time()
        print('data-set prepared time is %.2fs' % (t2 - t1))
        self.smo_process(max_iter)

        # Keep only the support vectors and drop the train-time caches.
        sv_idx = np.nonzero(self.alphas > 0)[0]
        self.X = self.X[sv_idx]
        self.y = self.y[sv_idx]
        self.alphas = self.alphas[sv_idx]
        self.train_size = self.alphas.shape[0]

        self.E_cache = None
        self.K = None

        t3 = time.time()
        print('SVM training is finished,spend time is %.2f' % (t3 - t1))

    def predict(self, test_x):
        """Return the predicted class of test_x as -1.0, 0.0 or 1.0."""
        kernel_x = self.kernelTrans(test_x, self.kTup)
        pre = np.sum(self.alphas * self.y * kernel_x) + self.b
        return np.sign(pre)


if __name__ == '__main__':
    # Smoke test: two linearly separable 3-D point clouds, one per class.
    svm = SVM()
    base_num = 1000

    train_images = np.random.randint(-1000, 1000 + 1, (2 * base_num, 3))
    # Shift the first half toward (+2, 0, +2) and the second half toward
    # (-2, 0, +2) once scaled down below, making the classes separable.
    train_images[0:base_num] += [2000, 0, 2000]
    train_images[base_num:2 * base_num] += [-2000, 0, 2000]

    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    train_images = train_images.astype(float) / 1000

    # Label 0 for the first cloud, 1 for the second (train() remaps to -1/+1).
    train_labels = np.array([x // base_num for x in range(2 * base_num)])

    # Shuffle, then split half/half into test and train sets.
    perm = np.random.permutation(2 * base_num)
    train_images = train_images[perm]
    train_labels = train_labels[perm]

    test_images = train_images[:base_num]
    test_labels = train_labels[:base_num]

    train_images = train_images[base_num:]
    train_labels = train_labels[base_num:]

    svm.train(train_images, train_labels)
    # svm.train(train_images, train_labels, kernel_func=('rbf', 1.0))

    print('support vectors number is:', svm.alphas.shape[0], svm.alphas)

    error_count = 0
    for vec, label in zip(test_images, test_labels):
        pred = svm.predict(vec)
        # Map label {0, 1} to {-1, +1} the same way train() does.
        label = np.sign(np.sign(label) - 0.01)
        if np.sign(label) != np.sign(pred):
            error_count += 1
    print('the test error rate is: %.2f' % (1.0 * error_count / test_labels.shape[0]))
