import sys

sys.path.append('..')
import iris_data
import numpy as np
from sklearn.model_selection import train_test_split


class SVM():
    """
    Support-vector machine classifier trained with a simplified SMO loop.

    Notation used throughout:
        g(x) : the decision function, g(x) = <w, x> + b
        y_i  : the i-th label, expected to be -1 or +1
        E_i  : prediction error on sample i, E_i = g(x_i) - y_i
    """

    def __init__(self, X, y, C=1.0, sigma=.3, max_iter=500):
        """
        :param X: training samples, array of shape (N, P)
        :param y: training labels in {-1, +1}, length N
        :param C: soft-margin penalty; upper bound for every alpha
        :param sigma: bandwidth kept for a Gaussian-kernel variant
                      (unused by the linear kernel below)
        :param max_iter: maximum number of SMO sweeps over the data
        """
        self.X = X
        self.y = y
        self.C = C
        self.sigma = sigma
        self.max_iter = max_iter

        self.N, self.P = X.shape
        # Precomputed Gram matrix, k[i][j] = k(x_i, x_j).
        self.k = self.kernel_function()
        self.b = 0
        # One Lagrange multiplier per training sample.
        self.alpha = [0] * self.N
        # With every alpha at 0, g(x) = 0, hence E_i = g(x_i) - y_i = -y_i.
        self.E = [-label for label in self.y]
        self.support_vector_index = []

    def kernel_function(self):
        """
        Build the N x N Gram matrix for the linear kernel k(x1, x2) = <x1, x2>.

        The data is assumed linearly separable, so no non-linear kernel
        (e.g. Gaussian) is applied; a Gaussian kernel could be swapped in
        here without touching the rest of the class.
        """
        k = [[0] * self.N for _ in range(self.N)]
        for i in range(self.N):
            xi = self.X[i, :]
            # The matrix is symmetric, so only the upper triangle is computed.
            for j in range(i, self.N):
                res = xi.T.dot(self.X[j, :])
                k[i][j] = res
                k[j][i] = res
        return k

    def kernel_predict(self, x1, x2):
        """
        Linear kernel for unseen samples (prediction time).

        :return: <x1, x2> as a (1, 1) array
        """
        return x1.reshape((1, self.P)).dot(x2.reshape((self.P, 1)))

    def gxi(self, i):
        """
        Decision value for training sample i:
        g(x_i) = sum_j alpha_j * y_j * k(x_j, x_i) + b
        """
        gxi = self.b
        # Terms with alpha_j == 0 contribute nothing, so skip them.
        for j, aj in enumerate(self.alpha):
            if aj != 0:
                gxi += aj * self.y[j] * self.k[i][j]
        return gxi

    def KKT(self, i):
        """
        Check whether alpha_i satisfies the KKT conditions:
            alpha_i == 0     =>  y_i * g(x_i) >= 1
            0 < alpha_i < C  =>  y_i * g(x_i) == 1
            alpha_i == C     =>  y_i * g(x_i) <= 1
        NOTE(review): the middle branch uses exact float equality, which
        almost never holds in practice; a tolerance band would be the
        standard refinement. Kept as-is to preserve convergence behavior.
        """
        yi = self.y[i]
        gxi = self.gxi(i)
        ai = self.alpha[i]
        if ai == 0 and yi * gxi >= 1:
            return True
        if ai < self.C and yi * gxi == 1:
            return True
        if ai == self.C and yi * gxi <= 1:
            return True
        return False

    def get_E2(self, i):
        """
        Second-multiplier heuristic: return the index whose cached error is
        farthest from E_i, to maximise the expected size of the update step.
        """
        distance = -1
        a2_index = -1
        for index, e in enumerate(self.E):
            gap = abs(e - self.E[i])
            if gap > distance:
                distance = gap
                a2_index = index
        return a2_index

    def SMO(self):
        """
        Train the model with a simplified SMO loop.

        Each sweep visits every sample; samples violating the KKT
        conditions get their multiplier pair jointly optimised. Training
        stops after max_iter sweeps or when a full sweep produces no
        significant change.
        """
        step = 0
        change = 1
        while step < self.max_iter and change > 0:
            step += 1
            change = 0
            # Step 1: pick the first multiplier among KKT violators.
            for i in range(self.N):
                if not self.KKT(i):
                    E1 = self.E[i]
                    # Step 2: pick the second multiplier via the max-|E1-E2|
                    # heuristic (fast, but not guaranteed optimal).
                    a2_index = self.get_E2(i)
                    E2 = self.E[a2_index]

                    # Local aliases matching the usual SMO formulas.
                    a1 = self.alpha[i]
                    a2 = self.alpha[a2_index]
                    y1 = self.y[i]
                    y2 = self.y[a2_index]

                    # Box constraints for the new alpha_2.
                    if y1 == y2:
                        upper = min(self.C, a1 + a2)
                        lower = max(0, a1 + a2 - self.C)
                    else:
                        upper = min(self.C, self.C + a2 - a1)
                        lower = max(0, a2 - a1)

                    # Step 3: unconstrained optimum for alpha_2, then clip.
                    k11 = self.k[i][i]
                    k12 = self.k[i][a2_index]
                    k22 = self.k[a2_index][a2_index]
                    eta = k11 + k22 - 2 * k12
                    # BUG FIX: for the linear kernel eta == ||x_i - x_j||^2,
                    # which is 0 when the heuristic selects j == i (or a
                    # duplicate sample); the original divided by zero here.
                    if eta <= 0:
                        continue
                    a2_new = a2 + y2 * (E1 - E2) / eta
                    a2_new = min(max(a2_new, lower), upper)
                    a1_new = a1 + y1 * y2 * (a2 - a2_new)
                    self.alpha[i] = a1_new
                    self.alpha[a2_index] = a2_new

                    # Step 4: update the threshold b and the error cache.
                    b1_new = -E1 - y1 * k11 * (a1_new - a1) - y2 * k12 * (a2_new - a2) + self.b
                    b2_new = -E2 - y1 * k12 * (a1_new - a1) - y2 * k22 * (a2_new - a2) + self.b
                    # If both new alphas were strictly inside (0, C) then
                    # b1 == b2; otherwise average the two estimates.
                    self.b = (b1_new + b2_new) / 2
                    # BUG FIX: the original read the bare global name `y`
                    # here (the full, pre-split label array of the demo
                    # script) instead of self.y — wrong labels after any
                    # shuffled split, NameError in any other caller.
                    self.E[i] = self.gxi(i) - self.y[i]
                    self.E[a2_index] = self.gxi(a2_index) - self.y[a2_index]

                    # Count only meaningful updates; a sweep with none
                    # means the model has converged and the loop exits.
                    if abs(a2_new - a2) >= 1e-6:
                        change += 1
        # Keep the support-vector indices (alpha > 0). Rebuilding the list
        # (instead of appending) keeps repeated SMO() calls from
        # accumulating duplicate entries.
        self.support_vector_index = [i for i in range(self.N) if self.alpha[i] > 0]

    def predict(self, X):
        """
        Predict the class of a single sample.

        :param X: one sample of shape (P,)
        :return: np.sign of the decision value (-1, 0 or +1; a (1, 1)
                 array whenever support vectors exist)
        """
        res = 0
        # Only support vectors (alpha > 0) contribute to the decision value.
        for i in self.support_vector_index:
            res += self.alpha[i] * self.y[i] * self.kernel_predict(X, self.X[i, :])
        res += self.b
        return np.sign(res)


if __name__ == '__main__':
    X, y = iris_data.get_data()
    # The iris labels come in as 0/1; the SVM expects -1/+1.
    y = np.where(y == 0, -1, y)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=10)

    model = SVM(X_train, y_train)
    model.SMO()

    # Score the held-out set and report the error rate.
    pre = []
    error = 0
    for sample, label in zip(X_test, y_test):
        guess = model.predict(sample)
        pre.append(guess)
        error += 1 if guess != label else 0

    print(error / len(y_test))

    pre = np.array(pre)