import numpy as np
import matplotlib.pyplot as plt

# Training set: three classes (dict keys 0-2), each holding ten samples
# with three features per sample (shape (10, 3) per class).
data = {
    0: np.array([[1.58, 2.32, 5.8],
                 [0.67, 1.58, 4.78],
                 [1.04, 1.01, 3.63],
                 [1.49, 2.18, 3.39],
                 [0.41, 1.21, 4.73],
                 [1.39, 3.16, 2.87],
                 [1.20, 1.40, 1.89],
                 [0.92, 1.44, 3.22],
                 [0.45, 1.33, 4.38],
                 [0.76, 0.84, 1.96]]),
    1: np.array([[0.21, 0.03, 2.21],
                 [0.37, 0.28, 1.8],
                 [0.18, 1.22, 0.16],
                 [0.24, 0.93, 1.01],
                 [1.18, 0.39, 0.39],
                 [0.74, 0.96, 1.16],
                 [0.38, 1.94, 0.48],
                 [0.02, 0.72, 0.17],
                 [0.44, 1.31, 0.14],
                 [0.46, 1.49, 0.68]]),
    2: np.array([[1.54, 1.17, 0.64],
                 [5.41, 3.45, 1.33],
                 [1.55, 0.99, 2.69],
                 [1.86, 3.19, 1.51],
                 [1.68, 1.79, 0.87],
                 [3.51, 0.22, 1.39],
                 [1.40, 0.44, 0.92],
                 [0.44, 0.83, 1.97],
                 [0.25, 0.68, 0.99],
                 [0.66, 0.45, 0.08]])
}

# One-hot target vectors: row i is the training target for class i.
CLASS = np.array([[1, 0, 0],
                  [0, 1, 0],
                  [0, 0, 1]])

# Test accuracy is recorded once every `times` trained samples; also used
# by the plotting helpers to scale the x axis from recordings to samples.
times = 30


def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + exp(-x)).

    The naive form overflows inside ``np.exp`` for large negative inputs
    (RuntimeWarning, loss of precision). Computing via ``exp(-|x|)`` keeps
    the exponent non-positive, so it never overflows:
    for x >= 0: 1 / (1 + exp(-x)); for x < 0: exp(x) / (1 + exp(x)).

    Accepts a scalar or ndarray; returns an ndarray of the same shape.
    """
    z = np.exp(-np.abs(x))
    return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))


class NN:
    """Minimal 3-layer MLP: tanh hidden layer, sigmoid output layer,
    trained by backpropagation on the squared error J = 0.5 * ||z - t||^2.
    """

    def __init__(self, nodeSize):
        """nodeSize: (input, hidden, output) layer widths."""
        self.inputSize, self.hiddenSize, self.outputSize = nodeSize
        # Weights drawn uniformly from [0, 1); weight_k maps layer k-1 -> k.
        self.weight_1 = np.random.random((self.inputSize, self.hiddenSize))
        self.weight_2 = np.random.random((self.hiddenSize, self.outputSize))

    def forward(self, x):
        """Propagate the input vector x through the network.

        Returns (z, o, y, h): output activation, output pre-activation,
        hidden activation, hidden pre-activation.
        """
        hidden_in = self.weight_1.T.dot(x)  # h
        hidden_out = np.tanh(hidden_in)  # y
        output_in = self.weight_2.T.dot(hidden_out)  # o
        output_out = sigmoid(output_in)  # z
        return output_out, output_in, hidden_out, hidden_in

    def backward(self, t, z, o, y, h, x):
        """Return the weight gradients (dJ/dW1, dJ/dW2) for target t.

        o and h are accepted for interface symmetry with forward() but are
        not needed: sigmoid'/tanh' are expressed via the activations z, y
        (sigmoid' = z(1-z), tanh' = 1-y^2).
        """
        delta_2 = (z - t) * z * (1 - z)                      # output-layer delta
        delta_1 = self.weight_2.dot(delta_2) * (1 - y * y)   # hidden-layer delta
        # Rank-1 gradients: outer(activation_in, delta_out).
        weight_2_fix = np.outer(y, delta_2)
        weight_1_fix = np.outer(x, delta_1)
        return weight_1_fix, weight_2_fix

    def forward_backward(self, sample):
        """Run one forward/backward pass for flat sample index 0..29.

        Returns (dW1, dW2, J) where J is the squared-error loss.
        """
        cls, idx = divmod(sample, 10)  # 10 samples per class
        x, t = data[cls][idx], CLASS[cls]
        z, o, y, h = self.forward(x)
        w1_fix, w2_fix = self.backward(t, z, o, y, h, x)
        J = 0.5 * np.sum((z - t) * (z - t))
        return w1_fix, w2_fix, J


def train_stochastic(hidden_nr, learning_rate):
    """Online SGD: update weights after every single randomly drawn sample.

    hidden_nr: hidden-layer width; learning_rate: SGD step size.
    Plots the loss history and saves/plots the accuracy history.
    """
    net = NN((3, hidden_nr, 3))
    losses, accuracies = [], []
    # Count epochs downward so the accuracy-recording condition matches
    # the original schedule (record when the remaining count hits a
    # multiple of `times`).
    for epoch in range(500000 - 1, -1, -1):
        pick = np.random.randint(30)
        grad_w1, grad_w2, loss = net.forward_backward(pick)
        net.weight_1 -= learning_rate * grad_w1
        net.weight_2 -= learning_rate * grad_w2
        losses.append(loss)
        if epoch % times == 0:
            accuracies.append(test(net))
    print_J(losses)
    print_test(accuracies, 'result/acc_stochastic-h-{}-lr-{}.txt'.format(hidden_nr, learning_rate))


def train_batch(hidden_nr, learning_rate):
    """Full-batch gradient descent: one weight update per pass over all
    30 samples, accumulating the per-sample gradients first.

    hidden_nr: hidden-layer width; learning_rate: step size.
    Plots the (per-sample-averaged) loss and saves/plots the accuracy.
    """
    net = NN((3, hidden_nr, 3))
    losses, accuracies = [], []
    remaining = 5000000  # budget in trained samples; 30 consumed per pass
    while remaining > 0:
        remaining -= 30
        grad_w1_total, grad_w2_total, loss_total = 0, 0, 0
        for idx in range(30):
            grad_w1, grad_w2, loss = net.forward_backward(idx)
            grad_w1_total += grad_w1
            grad_w2_total += grad_w2
            loss_total += loss
        net.weight_1 -= learning_rate * grad_w1_total
        net.weight_2 -= learning_rate * grad_w2_total
        losses.append(loss_total / 30)
        accuracies.append(test(net))
    print_J(losses, batch_num=30)
    print_test(accuracies, 'result/acc_batch-h-{}-lr-{}.txt'.format(hidden_nr, learning_rate))


def test(nn):
    """Return the classification accuracy of `nn` over all 30 samples.

    A sample counts as correct when the argmax of the network output
    matches the argmax of its one-hot target.
    """
    right = 0
    for i in range(30):
        cls, idx = divmod(i, 10)  # 10 samples per class
        x, t = data[cls][idx], CLASS[cls]
        z, _, _, _ = nn.forward(x)
        right += z.argmax() == t.argmax()
    return right / 30


def store(dat, filename):
    """Write a sequence of numbers to *filename* as plain text."""
    np.savetxt(filename, dat)


def print_J(J, batch_num=1, title='Test Error, learning rate = 0.01, hidden node = 10, batch NN'):
    """Plot the training-error history against number of trained samples.

    J: sequence of recorded errors (per step, or per-batch average).
    batch_num: samples consumed per recorded point (30 for batch training).
    title: plot title. The default reproduces the original hard-coded
        caption for backward compatibility, but it claims specific
        hyperparameters (lr = 0.01, hidden = 10, batch) regardless of how
        training was actually run — callers should pass a matching title.
    """
    x = np.arange(1, len(J) + 1) * batch_num
    plt.plot(x, J)
    plt.title(title)
    plt.xlabel('trained samples')
    plt.ylabel('error')
    plt.show()


def print_test(test_store, filename):
    """Save the accuracy history to *filename*, then plot it.

    The x axis is scaled by the module-level `times` so points line up
    with the number of trained samples.
    """
    store(test_store, filename)
    n_points = len(test_store)
    sample_axis = np.linspace(1, n_points, n_points) * times
    plt.plot(sample_axis, test_store)
    plt.title('Test Accuracy')
    plt.xlabel('trained samples')
    plt.ylabel('accuracy')
    plt.show()


def print_stochastic_batch():
    """Overlay the stochastic and batch accuracy curves on a single axis.

    Both result files must contain series of identical length (asserted).
    Saves the figure as a PDF instead of showing it interactively.
    """
    curves = {
        'single': np.loadtxt('result/acc_stochastic.txt'),
        'batch': np.loadtxt('result/acc_batch.txt'),
    }
    assert curves['batch'].shape == curves['single'].shape
    n_points = len(curves['single'])
    sample_axis = np.linspace(1, n_points, n_points) * times
    for label, curve in curves.items():
        plt.plot(sample_axis, curve, label=label)
    plt.title('Test Accuracy (learning rate = 0.01)')
    plt.xlabel('trained samples')
    plt.ylabel('accuracy')
    plt.legend()
    # plt.show()
    plt.savefig('D:/Document/2021/课程/模式识别/Homework/HW4/latex/picture/stochastic_batch.pdf')


def print_hidden(sto_or_bat):
    """Plot accuracy curves for several hidden-layer sizes (lr = 0.001).

    sto_or_bat: 'stochastic' or 'batch' — selects which result files
    to load. Saves the figure as a PDF instead of showing it.

    Replaces seven copy-pasted load/plot lines with a loop (consistent
    with the style already used in print_lr).
    """
    hidden_sizes = [3, 4, 6, 8, 10, 15, 20]
    curves = [np.loadtxt('result/acc_{}-h-{}-lr-0.001.txt'.format(sto_or_bat, h))
              for h in hidden_sizes]
    # All curves are assumed to share one length, as in the saved runs.
    x = np.linspace(1, len(curves[0]), len(curves[0])) * times
    for h, curve in zip(hidden_sizes, curves):
        plt.plot(x, curve, label='hidden node = {}'.format(h))
    plt.title('Test Accuracy (learning rate = 0.001, batch NN)')
    plt.xlabel('trained samples')
    plt.ylabel('accuracy')
    plt.legend()
    # plt.show()
    plt.savefig('D:/Document/2021/课程/模式识别/Homework/HW4/latex/picture/batch_hidden.pdf')


def print_lr(sto_or_bat):
    """Plot accuracy curves for several learning rates (hidden node = 10).

    sto_or_bat: 'stochastic' or 'batch' — selects which result files to
    load. Saves the figure as a PDF instead of showing it.
    """
    rates = ['0.0001', '0.001', '0.01', '0.1', '0.5', '1']
    curves = [np.loadtxt('result/acc_{}-h-10-lr-{}.txt'.format(sto_or_bat, rate))
              for rate in rates]
    n_points = len(curves[0])
    sample_axis = np.linspace(1, n_points, n_points) * times
    for rate, curve in zip(rates, curves):
        plt.plot(sample_axis, curve, label='learning rate = {}'.format(rate))
    plt.title('Test Accuracy (hidden node = 10, batch NN)')
    plt.xlabel('trained samples')
    plt.ylabel('accuracy')
    plt.legend()
    # plt.show()
    plt.savefig('D:/Document/2021/课程/模式识别/Homework/HW4/latex/picture/{}_lr.pdf'.format(sto_or_bat))


def print_large():
    """Plot the accuracy curve for the largest tested hidden layer (30
    nodes, batch training, lr = 0.001). Saves the figure as a PDF."""
    curve = np.loadtxt('result/acc_batch-h-30-lr-0.001.txt')
    n_points = len(curve)
    sample_axis = np.linspace(1, n_points, n_points) * times
    plt.plot(sample_axis, curve, label='hidden node = 30')
    plt.title('Test Accuracy (learning rate = 0.001, batch NN, hidden node = 30)')
    plt.xlabel('trained samples')
    plt.ylabel('accuracy')
    plt.legend()
    # plt.show()
    plt.savefig('D:/Document/2021/课程/模式识别/Homework/HW4/latex/picture/batch_hidden_30.pdf')


if __name__ == '__main__':
    # Fix the RNG seed so weight initialisation (and stochastic sample
    # draws) are reproducible across runs.
    np.random.seed(1)
    # Uncomment the experiment(s) to run. The print_* helpers expect the
    # corresponding accuracy files to already exist under result/.
    # train_stochastic(10, 0.001)
    train_batch(10, 0.001)
    # print_stochastic_batch()
    # print_hidden('batch')
    # print_lr('batch')
    # print_large()
