#!/usr/bin/env python3

__author__ = 'z.Wick.Tone.Burst'
__doc__ = """ Support Vector Machine of ML."""


import random
import numpy as np
import zt.ML.linear as mline
import zt.ML.utils as mltls

from scipy.optimize import minimize



class SVM(object):
    """Base class for the SVM variants below.

    Subclasses are expected to set ``self.W`` (an augmented weight
    vector ``[b, w]``); prediction is the raw linear score ``W . x``.
    """

    def predict(self, x):
        """Return the raw decision value for input vector *x*."""
        score = self.W.dot(x)
        return score



class LinearSVM(SVM):
    """ Linear Hard-Margin Support Vector Machine.
        - Q = [0, 0d; 0d, Id], P = 0[d+1], an = y[1=x0 x], c = 1
        - [b=w0 w] <- QP(Q, P, A, c)
        - return [b, w] as g_svm
    """
    def __init__(self, D):
        # Warm-start the QP from the linear-regression weights.
        W = mline.LinearRegression(D).W
        nd = W.size - 1
        x, y = D.X, D.Y

        # only Q, cauz P is 0.
        # Q = diag(0, I_d): the bias w0 is excluded from the margin
        # objective .5 * ||w||^2.
        Q = np.zeros((nd + 1, nd + 1))
        # BUG FIX: was np.ones((nd, nd)); the docstring formulation
        # requires the identity matrix I_d here.
        Q[1:, 1:] = np.eye(nd)
        # the main function: .5 * u^T Q u  (P term omitted, P = 0)
        fn = lambda u: .5 * u.dot(Q).dot(u) # + P.dot(u)
        cons = {
            # each constraint value must be non-negative
            'type': 'ineq',
            # hard-margin constraints: y_n * (u . x_n) - 1 >= 0 for EVERY n.
            # BUG FIX: the original summed the constraints into a single
            # scalar, which allows individual margins to be violated.
            # SLSQP accepts a vector-valued constraint function.
            'fun' : lambda u, x, y: y * x.dot(u) - 1,
            # other args for the constraint function
            'args': (x, y),
        }
        # The QP function (Quadratic Programming) with a default tolerance
        self.W = minimize(fn, W, method='SLSQP', constraints=cons).x.round(4)



class DualSVM(SVM):
    """ Hard-margin SVM solved in the dual:
        minimize .5 * a^T Q a - 1^T a   s.t.  a >= 0,  y . a = 0,
        with Q[i][j] = y_i * y_j * x_i . x_j, then recover (b, w).
    """
    def __init__(self, D):
        # Warm-start metadata from linear regression (W only sizes nd).
        W = mline.LinearRegression(D).W
        nd = W.size - 1
        sz = D.size
        x, y = D.pure

        # Q is the Gram matrix of the label-signed rows y_i * x_i,
        # built as an outer product (vectorized double loop).
        # FIX: np.conjugate was being (mis)used as an array constructor.
        _Q = np.array([ y[i] * x[i] for i in range(sz) ])
        Q = _Q.dot(_Q.T)
        P = -np.ones(sz)
        a0 = np.ones(sz)
        # dual objective: .5 * a^T Q a - 1^T a
        fn = lambda a: .5 * a.dot(Q).dot(a) + P.dot(a)
        cons = {
            'type': 'eq',
            # dual feasibility: sum_n alpha_n * y_n = 0
            'fun': lambda a, y: y.dot(a),
            'args': (y,),
        }
        alpha = minimize(fn, a0,
                         bounds=[(0, None)] * sz,
                         method='SLSQP',
                         constraints=cons).x
        print("Alpha =", alpha.round(4))
        print("Sum of Alpha =", np.sum(alpha))
        w = (alpha * y).dot(x)
        # Recover b from a support vector.  Pick the largest alpha:
        # deterministic, and robust against near-zero numerical noise.
        # BUG FIX: the original random loop with `alpha[i] > 0` could
        # select a non-support vector whose alpha is only solver noise
        # (giving a wrong b), or loop forever if no alpha was positive.
        i = int(np.argmax(alpha))
        b = y[i] - w.dot(x[i])
        self.W = np.append(np.array(b), w).round(4)



# class KernelSVM(SVM):
#     def __init__(self, kernel, D):
#         W = mline.LinearRegression(D).W
#         nd = W.size - 1
#         sz = D.size
#         x, y = Dx.pure

#         a = np.ones(sz)
#         Q = np.empty((sz, sz))
#         for i in range(sz):
#             for j in range(sz):
#                 Q[i][j] = y[i] * y[j] * kernel(x[i], x[j])
#         P = -np.ones(sz)
#         fn = lambda a: .5 * a.dot(Q).dot(a) + P.dot(a)
#         cons = {
#             'type': 'eq',
#             'fun': lambda a, y: y.dot(a),
#             'args': (y,),
#         }
#         alpha = minimize(fn, a,
#                          bounds=[(0, None)] * sz,
#                          method='SLSQP',
#                          constraints=cons,).x
#         print("Alpha =", alpha.round(4))
#         print("Sum of Alpha =", np.sum(alpha))
#         w = (alpha * y).dot(x)
#         while True:
#             s = random.randint(0, sz - 1)
#             if alpha[s] > 1e-9:
#                 k = np.array([ kernel(x[s], x[i]) for i in range(sz) ])
#                 b = y[s] - (alpha * y).dot(k)
#                 break
#         self.W = np.append(np.array(b), w).round(4)




class KernelSVM(SVM):
    """ Kernel soft-margin SVM solved chunk-wise in the dual.

        The data is split into fixed-size chunks to bound the QP size;
        per-chunk alphas are stitched into ``self.alpha`` and the bias
        ``self.b`` is a size-weighted average over the chunks.
    """
    def __init__(self, kernel, D, C=None, chunk=500):
        """ kernel: callable k(x1, x2) -> float similarity
            D:      dataset exposing .size and .pure -> (X, Y)
            C:      soft-margin upper bound on alpha (None = hard margin)
            chunk:  QP block size (default 500, the former hard-coded value)
        """
        self.kernel = kernel
        W = mline.LinearRegression(D).W
        nd = W.size - 1
        self.sz = D.size
        self.X, self.Y = D.pure
        print("> Data Loaded.")
        self.alpha = np.zeros(self.sz)
        # BUG FIX: b must accumulate across chunks.  It was reset to 0
        # inside the loop, so only the last chunk ever contributed to
        # the final size-weighted average below.
        self.b = 0
        ctr = 0
        while ctr < self.sz:
            x, y = self.X[ctr:ctr+chunk], self.Y[ctr:ctr+chunk]
            sz = len(y)

            a0 = np.ones(sz)
            # Q[i][j] = y_i * y_j * k(x_i, x_j) for this chunk
            Q = np.empty((sz, sz))
            for i in range(sz):
                for j in range(sz):
                    Q[i][j] = y[i] * y[j] * kernel(x[i], x[j])
            P = -np.ones(sz)
            # BUG FIX: the objective must depend on its argument `a`.
            # It previously closed over the fixed start vector, making
            # the objective constant — minimize() optimized nothing.
            fn = lambda a: .5 * a.dot(Q).dot(a) + P.dot(a)
            cons = {
                'type': 'eq',
                # dual feasibility within the chunk: y . a = 0
                'fun': lambda a, y: y.dot(a),
                'args': (y,),
            }
            Alpha = minimize(fn, a0, bounds=[(0, C)] * sz,
                             method='SLSQP', constraints=cons,).x
            self.alpha[ctr:ctr+chunk] = Alpha.copy()
            # b from one support vector of this chunk, weighted by chunk
            # size.  Deterministic pick of the largest alpha above the
            # noise floor; skips the chunk if it has no support vector
            # (the original random loop would spin forever in that case).
            sv = np.flatnonzero(Alpha > 1e-9)
            if sv.size:
                s = int(sv[np.argmax(Alpha[sv])])
                k = np.array([ kernel(x[s], x[i]) for i in range(sz) ])
                self.b += (y[s] - (Alpha * y).dot(k)) * sz
            print(self.b, sz)
            ctr += chunk
        self.b /= self.sz
        print(self.alpha, self.b)


    def predict(self, x):
        """ Kernel decision value: sum_i alpha_i * y_i * k(x_i, x) + b. """
        K = np.array([ self.kernel(self.X[i], x) for i in range(self.X.shape[0]) ])
        p = (self.alpha * self.Y).dot(K) + self.b
        return p







if __name__ == '__main__':
    def test_linear():
        """Smoke test: train the hard-margin LinearSVM on the PLA data file."""
        D = mltls.MLData('./data/pla_train.txt')
        svm = LinearSVM(D)
        print(svm.W)

    # homework
    # Tiny hand-made dataset: each row is [x0=1 (bias), x1, x2].
    X = np.array([[1, 1, 0],
                  [1, 0, 1],
                  [1, 0, -1],
                  [1, -1, 0],
                  [1, 0, 2],
                  [1, 0, -2],
                  [1, -2, 0]])
    Y = np.array([-1, -1, -1, 1, 1, 1, 1])
    # Z: presumably X mapped through the feature transform Phi below.
    # NOTE(review): the last two rows of Z are identical — confirm intended.
    Z = np.array([[1, 1, -2],
                  [1, 4, -5],
                  [1, 4, -1],
                  [1, 5, -2],
                  [1, 7, -7],
                  [1, 7,  1],
                  [1, 7,  1]])
    # Wrap the raw arrays in MLData containers for the SVM constructors.
    Dx = mltls.MLData()
    mltls.MLData.initialize(Dx, X, Y)
    Dz = mltls.MLData()
    mltls.MLData.initialize(Dz, Z, Y)

    def question2():
        """Hard-margin linear SVM in the transformed (Z) space."""
        D = Dz
        def Phi(x1, x2):
            # Explicit feature transform (unused here; kept for reference).
            return x2**2-2*x1+3, x1**2-2*x2-3
        svm = LinearSVM(D)
        print("2:", svm.W)

    def question3():
        """Kernel SVM with a 2nd-degree polynomial kernel on raw X."""
        D = Dx
        def kernel(x1, x2):
            return (1 + x1.dot(x2)) ** 2
        svm = KernelSVM(kernel, D)
        # print("3:", svm.W)
        # Predict on the first sample, bias column stripped.
        svm.predict(D.X[0][1:])

    def question4():
        """Dual-form hard-margin SVM in the transformed (Z) space."""
        D = Dz
        svm = DualSVM(D)
        print("4:", svm.W)

    def question15():
        """Kernel SVM on the digit-features files (one-vs-all on label 0)."""
        def load(path):
            # Parse "label f1 f2 ..." lines; label 0 -> +1, others -> -1.
            D = mltls.MLData()
            _X = []; _Y = []
            with open(path, 'r') as f:
                lines = f.readlines()
            for line in lines:
                l = line.split()
                # x, y = [1, ] + list(map(float, l[:-1])), int(l[-1])
                x = [1,] + list(map(float, l[1:]))
                y = 1 if float(l[0]) == 0 else -1
                _X.append(x)
                _Y.append(y)
            mltls.MLData.initialize(D, _X, _Y)
            return D
        def sign(x):
            # NOTE(review): raises ZeroDivisionError when x == 0.
            return int(x / abs(x))
        def error(A, T):
            # 0/1 classification error rate of model A on dataset T.
            err = 0
            for i in range(T.size):
                x, y = T.choice(i)
                if sign(A.predict(x)) != y:
                    err += 1
            return err / T.size
        D = load("./data/features.train.txt")
        T = load("./data/features.test.txt")
        def kernel(x1, x2):
            # 2nd-degree polynomial kernel (alternatives left commented).
            return (1 + x1.dot(x2)) ** 2
            # return x1.dot(x2)
            # return np.exp(-10000 * (x1 - x2).dot(x1 - x2))
        svm = KernelSVM(kernel, D, 0.1)
        # print(error(svm, T))
        print(svm.predict(D.X[0][1:]))

    question15()