import numpy as np
import matplotlib.pyplot as plt


def data_reader():
    """Load the whitespace-delimited sample file named 'data'.

    Each line holds two feature values followed by an integer class
    label in 1..4.  Returns a dict mapping each label to a list of
    [x, y] feature pairs (empty list for labels with no samples).
    """
    samples = {label: [] for label in (1, 2, 3, 4)}
    with open('data') as handle:
        for record in handle.readlines():
            fields = record.split()
            samples[int(fields[2])].append([float(fields[0]), float(fields[1])])
    return samples


def batch_perception(class1, class2, data, learning_rate, end_iter, max_iter=100000):
    """Train a two-class batch perceptron and plot the decision line.

    Parameters
    ----------
    class1, class2 : int
        Keys into ``data`` selecting the two classes to separate.
    data : dict[int, list[list[float]]]
        Class label -> list of 2-D feature vectors.
    learning_rate : float
        Step size applied to the summed misclassified samples.
    end_iter : float
        Convergence threshold on the squared norm of the update step
        (despite the name, this is a tolerance, not an iteration count).
    max_iter : int, optional
        Safety cap on training epochs; prevents an infinite loop when
        the two classes are not linearly separable.
    """
    # Augment each sample with a bias term; negate class2 samples so a
    # correct classification satisfies a.dot(y) > 0 for every sample.
    data_class = np.array([x + [1] for x in data[class1]]
                          + [[-z for z in x] + [-1] for x in data[class2]])

    # Initialize the augmented weight vector.
    a = np.zeros(data_class.shape[1])
    iter_num = 0

    # Training: one epoch sums all misclassified samples, then updates.
    while iter_num < max_iter:
        iter_num += 1
        diff = np.zeros(data_class.shape[1])
        for smp in data_class:
            # Wrong classification (<= 0 also pushes points off the boundary).
            if a.dot(smp) <= 0:
                diff += smp
        diff *= learning_rate
        a += diff

        # Converged when the update step is (nearly) zero.
        if diff.dot(diff) < end_iter:
            break
    else:
        # Loop exhausted without break: classes are likely not separable.
        print('warning: batch_perception hit max_iter without converging')

    # Output section.
    print('trained weight:', a)
    print('trained round:', iter_num)

    # Visualization: both point clouds plus the learned decision line.
    plt.scatter([x for [x, _] in data[class1]], [y for [_, y] in data[class1]])
    plt.scatter([x for [x, _] in data[class2]], [y for [_, y] in data[class2]])

    # Decision boundary a0*x + a1*y + a2 = 0 solved for y.
    x = np.linspace(-5, 10, 256)
    y = (-a[2] - a[0] * x) / a[1]
    plt.plot(x, y)

    plt.show()


def ho_kashyap(class1, class2, data, learning_rate, end_iter, max_iter=100000):
    """Train a two-class Ho-Kashyap classifier and plot the decision line.

    Parameters
    ----------
    class1, class2 : int
        Keys into ``data`` selecting the two classes to separate.
    data : dict[int, list[list[float]]]
        Class label -> list of 2-D feature vectors.
    learning_rate : float
        Step size for the margin update (note: the update uses
        ``e + |e|`` which is twice the positive part of the error, so
        the effective rate is 2 * learning_rate).
    end_iter : float
        Stop once the largest positive error component falls below this
        tolerance (despite the name, this is not an iteration count).
    max_iter : int, optional
        Safety cap on iterations; guards against the tolerance never
        being reached due to numerical issues.
    """
    # Augment each sample with a bias term; negate class2 samples so a
    # correct classification satisfies a.dot(y) > 0 for every sample.
    data_class = np.array([x + [1] for x in data[class1]]
                          + [[-z for z in x] + [-1] for x in data[class2]])

    # Initialize weight vector a, margin vector b, and the pseudo-inverse
    # of the sample matrix (computed once, reused every iteration).
    a = np.ones(data_class.shape[1])
    b = np.ones(data_class.shape[0])
    y_t = np.linalg.pinv(data_class)
    iter_num = 0
    # Define e before the loop so the post-loop norm is always valid.
    e = data_class.dot(a) - b

    # Training section.
    while iter_num < max_iter:
        iter_num += 1
        e = data_class.dot(a) - b
        # e + |e| == 2 * max(e, 0): only positive errors grow the margin,
        # so b is non-decreasing and stays positive.
        e_plus = e + np.abs(e)
        if np.max(e_plus) < end_iter:
            break
        b += learning_rate * e_plus
        # Closed-form least-squares solve for a given the new margin b.
        a = y_t.dot(b)

    # Output section.
    # print('trained weight:', a)
    # print('trained margin:', b)
    print('trained round:', iter_num)
    print('training error:', np.linalg.norm(e))

    # Visualization: both point clouds plus the learned decision line.
    plt.scatter([x for [x, _] in data[class1]], [y for [_, y] in data[class1]])
    plt.scatter([x for [x, _] in data[class2]], [y for [_, y] in data[class2]])

    # Decision boundary a0*x + a1*y + a2 = 0 solved for y.
    x = np.linspace(-9, 5, 256)
    y = (-a[2] - a[0] * x) / a[1]
    plt.plot(x, y)

    plt.show()


def MSE_multi_class(data, n_train=8):
    """Four-class MSE (pseudo-inverse) classifier with a train/test split.

    The first ``n_train`` samples of each class (labels 1-4) form the
    training set; the remaining samples of each class form the test set.
    Classes must appear in label order with equal test counts, as the
    true test labels are reconstructed from column position.

    Parameters
    ----------
    data : dict[int, list[list[float]]]
        Class label (1-4) -> list of 2-D feature vectors.
    n_train : int, optional
        Number of training samples taken from each class (default 8,
        matching the original 10-samples-per-class dataset).

    Returns
    -------
    float
        Classification accuracy on the held-out test columns.
    """
    n_classes = 4
    # Augmented design matrices: one column per sample, bias row appended.
    X = np.array([x + [1] for x in data[1][:n_train]]).T
    X_test = np.array([x + [1] for x in data[1][n_train:]]).T
    for i in range(2, n_classes + 1):
        X = np.c_[X, np.array([x + [1] for x in data[i][:n_train]]).T]
        X_test = np.c_[X_test, np.array([x + [1] for x in data[i][n_train:]]).T]

    # One-hot targets: class k owns training columns [k*n_train, (k+1)*n_train).
    Y = np.zeros((n_classes, X.shape[1]))
    for col in range(X.shape[1]):
        Y[col // n_train][col] = 1

    # True labels of the test columns, derived from the (equal) per-class
    # test count instead of the previously hard-coded 8 columns / 2 each.
    n_test_per_class = X_test.shape[1] // n_classes
    ans_check = np.array([col // n_test_per_class for col in range(X_test.shape[1])])

    # Least-squares weights via pseudo-inverse: minimizes ||W.T X - Y||_F.
    W = np.linalg.pinv(X).T.dot(Y.T)
    print('weight matrix:\n', W)
    accuracy = np.sum(np.argmax(W.T.dot(X_test), axis=0) == ans_check) / X_test.shape[1]
    print('accuracy:', accuracy)
    return accuracy


if __name__ == '__main__':
    # Load the sample file once and run the selected experiment.
    dataset = data_reader()
    batch_perception(2, 3, dataset, 1, 0.0001)
    # ho_kashyap(2, 4, dataset, 0.1, 0.0001)
    # MSE_multi_class(dataset)