import sys
sys.path.append("..")
import iris_data
import numpy as np
import random
from sklearn.model_selection import train_test_split


def percetron(X, y, a=0.003, max_iter=500):
    """
    Train a binary perceptron with stochastic updates on misclassified points.

    input :
        X : Training set, array of shape (N, P)
        y : Training labels in {-1, +1}, shape (N,) or (N, 1)
        a : Learning rate
        max_iter : Maximum number of update steps
    return:
        w : normal vector, shape (P, 1)
        b : intercept (float)
    """
    N, P = X.shape
    # Force y into a column vector: X.dot(w) + b has shape (N, 1), and
    # comparing it against a 1-D (N,) label array would broadcast into an
    # (N, N) boolean matrix, corrupting the error rate and index selection.
    y = np.asarray(y).reshape(N, 1)

    # initial w & b
    w = np.ones((P, 1))
    b = 0.0

    # vectorization calculation
    y_pre = np.sign(X.dot(w) + b)
    # error rate (note: np.sign(0) == 0 never equals a +/-1 label,
    # so points exactly on the boundary count as errors)
    error_rate = np.sum(y_pre != y) / N

    step = 0
    while error_rate > 0.001 and step < max_iter:
        step += 1
        # indices of currently misclassified points; pick one at random.
        # np.argwhere returns (n_errors, 2) for a 2-D mask, we only need
        # the row index (column 0).
        wrong = np.argwhere(y_pre != y)[:, 0]
        index = random.choice(wrong)
        yi = float(y[index, 0])

        # stochastic gradient step on the perceptron loss for this point;
        # X[index] is 1-D of shape (P,), so reshape it to a (P, 1) column
        w += a * X[index].reshape((P, 1)) * yi
        b += a * yi

        # update predictions and error rate after the step
        y_pre = np.sign(X.dot(w) + b)
        error_rate = np.sum(y_pre != y) / N

    return w, b


if __name__ == '__main__':
    X, y = iris_data.get_data()
    # default value of y belongs to {0, 1}. convert to {-1, 1}
    y = np.where(y == 0, -1, y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25, random_state=40)
    w, b = percetron(X_train, y_train)

    # ravel both sides so an (n, 1) prediction compared with an (n,) label
    # array cannot broadcast into an (n, n) matrix and inflate the error count
    y_pre = np.sign(X_test.dot(w) + b).ravel()
    error_rate = np.sum(y_pre != np.ravel(y_test)) / len(y_test)
    # report the result instead of silently discarding it
    print("test error rate:", error_rate)
