import numpy as np

# An example in that book, the training set and parameters' sizes are fixed.
# Each sample is ([x1, x2], label) with label in {+1, -1}; dtype=object is
# required because each row mixes a 2-vector with a scalar label.
training_set = np.array([[[3, 3], 1], [[4, 3], 1], [[1, 1], -1]], dtype=object)


# Parameters of the dual-form perceptron: one alpha per sample, plus the bias.
a = np.zeros(len(training_set), np.float64)
b = 0.0
Gram = None  # Gram matrix of the training features; filled in before training

# Unpack the samples into a numeric label vector y and an (n, 2) feature
# matrix x (float64, instead of inheriting the slow object dtype).
y = np.array([sample[1] for sample in training_set], np.float64)
x = np.array([sample[0] for sample in training_set], np.float64)

def cal_gram(samples=None):
    """
    Calculate the Gram matrix of pairwise inner products between the
    feature vectors of the samples.

    :param samples: sequence of (feature_vector, label) pairs; defaults to
                    the module-level ``training_set``.
    :return: (n, n) int64 ndarray with g[i][j] = <x_i, x_j>
    """
    if samples is None:
        samples = training_set
    n = len(samples)
    g = np.empty((n, n), np.int64)
    for i in range(n):
        for j in range(n):
            g[i][j] = np.dot(samples[i][0], samples[j][0])
    return g


def update(i):
    """
    Apply one stochastic-gradient step of the dual perceptron for a
    misclassified sample: bump its alpha and shift the bias by its label.

    :param i: index of the misclassified training sample
    :return:
    """
    global a, b
    a[i] = a[i] + 1
    b += y[i]
    print(a, b)


# calculate the judge condition
def cal(i):
    """
    Return y_i * (sum_j a_j * y_j * <x_j, x_i> + b) for sample i — the
    functional margin, positive iff the sample is classified correctly.

    :param i: index of the training sample to evaluate
    :return: signed margin of sample i under the current parameters
    """
    weighted = a * y
    margin = np.dot(weighted, Gram[i]) + b
    return margin * y[i]

# check if the hyperplane can classify the examples correctly
def check():
    """
    Run one pass over the training set, updating on every misclassified
    sample. Return True if any update happened (training must continue),
    False once everything is classified correctly (printing the result).
    """
    updated = False
    for idx in range(len(training_set)):
        if cal(idx) <= 0:
            updated = True
            update(idx)
    if updated:
        return True
    # Converged: recover the primal weight vector w = sum_i a_i * y_i * x_i.
    w = np.dot(a * y, x)
    print("RESULT: w: " + str(w) + " b: " + str(b))
    return False


if __name__ == "__main__":
    Gram = cal_gram()  # precompute all pairwise inner products once
    # At most 1000 passes over the data; stop as soon as check() reports
    # that every sample is classified correctly.
    for _ in range(1000):
        if not check():
            break

