import numpy as np
import matplotlib.pyplot as plt

# Map class-label strings (as they appear in iris.data) to integer class ids.
NAME = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
data, CLASS = [], []

# Load the Iris dataset: each row is four float features plus a label column.
# Stripping each line (instead of relying on a trailing '\n' in the dict keys
# and dropping the last line with [:-1]) makes this robust to files with or
# without a final blank line and to '\r\n' line endings.
with open('iris.data', 'r') as f:
    for line in f:
        line = line.strip()
        if not line:
            continue  # skip blank lines (the canonical file ends with one)
        fields = line.split(',')
        data.append([float(p) for p in fields[:4]])
        CLASS.append(NAME[fields[4]])
# Transpose so each COLUMN is one sample: shape (4, n_samples).
data = np.array(data).T


def sigmoid(x):
    """Element-wise logistic sigmoid, 1 / (1 + exp(-x)).

    Uses the identity sigmoid(x) = exp(-log(1 + exp(-x))) via
    ``np.logaddexp(0, -x)``, which avoids the overflow warning that the
    naive ``np.exp(-x)`` form raises for large negative ``x`` while
    producing identical values.
    """
    return np.exp(-np.logaddexp(0, -x))


class NN:
    """One-hidden-layer feed-forward network with sigmoid units throughout."""

    def __init__(self, nodeSize):
        # nodeSize is a (input, hidden, output) triple of layer widths.
        self.inputSize, self.hiddenSize, self.outputSize = nodeSize
        # Uniform [0, 1) initial weights, one matrix per layer.
        self.weight_1 = np.random.random((self.inputSize, self.hiddenSize))
        self.weight_2 = np.random.random((self.hiddenSize, self.outputSize))

    def forward(self, x):
        """Propagate one sample through both layers.

        Returns (output activation z, output pre-activation o,
        hidden activation y, hidden pre-activation h).
        """
        pre_hidden = self.weight_1.T.dot(x)
        act_hidden = sigmoid(pre_hidden)
        pre_output = self.weight_2.T.dot(act_hidden)
        act_output = sigmoid(pre_output)
        return act_output, pre_output, act_hidden, pre_hidden

    def backward(self, t, z, o, y, h, x):
        """Gradients of the cross-entropy loss w.r.t. both weight matrices.

        ``o`` and ``h`` (the pre-activations) are accepted for symmetry with
        ``forward`` but are not needed: the sigmoid + cross-entropy output
        delta simplifies to (z - t).
        """
        out_delta = z - t
        hid_delta = self.weight_2.dot(out_delta) * ((1 - y) * y)
        grad_w2 = np.outer(y, out_delta)
        grad_w1 = np.outer(x, hid_delta)
        return grad_w1, grad_w2

    def forward_backward(self, sample):
        """Run one labelled sample end to end; return gradients and loss."""
        x = data[:, sample]
        t = np.eye(3)[CLASS[sample]]  # one-hot target
        z, o, y, h = self.forward(x)
        grad_w1, grad_w2 = self.backward(t, z, o, y, h, x)
        # Mean binary cross-entropy across the output units.
        J = -np.sum(t * np.log(z) + (1 - t) * np.log(1 - z)) / self.outputSize
        return grad_w1, grad_w2, J


def train_test_batch(learning_rate, epoch, hidden_node):
    """Batch-train an NN on the Iris data, test it, and plot the loss curve.

    Within each class block of 50 samples, indices 0-39 (80%) are used for
    training and indices 40-49 (20%) for testing.  Full-batch gradient
    descent runs for ``epoch`` rounds; the per-epoch mean loss is plotted
    and saved to 'Result.pdf'.

    Sample counts are derived from the loaded data instead of being
    hard-coded (150 / 120 / 30 in the original), so the split, the loss
    average, and the accuracy denominator stay consistent.

    Returns the test-set accuracy as a float in [0, 1] (the original
    returned None, so existing callers are unaffected).
    """
    nn = NN((4, hidden_node, 3))
    loss = []
    n_samples = data.shape[1]

    # Training: accumulate gradients over the whole 80% split, then step.
    for _ in range(epoch):
        w1_sum, w2_sum, J_sum, n_train = 0, 0, 0, 0
        for sample in range(n_samples):
            if sample % 50 < 40:  # first 40 of each block of 50 -> training
                w1_fix, w2_fix, J = nn.forward_backward(sample)
                w1_sum += w1_fix
                w2_sum += w2_fix
                J_sum += J
                n_train += 1
        nn.weight_1 -= learning_rate * w1_sum
        nn.weight_2 -= learning_rate * w2_sum
        loss.append(J_sum / n_train)

    # Testing: the remaining 20% of each class block.
    test_correct, n_test = 0, 0
    for sample in range(n_samples):
        if sample % 50 >= 40:
            out, _, _, _ = nn.forward(data[:, sample])
            print(out)
            n_test += 1
            if out.argmax() == CLASS[sample]:
                test_correct += 1

    accuracy = test_correct / n_test

    # Loss curve, titled with the run configuration.
    plt.plot(range(1, epoch + 1), loss)
    plt.title(
        'Results of batch training loss\n'
        'Hidden node={}; Learning rate={}; Epoch={}; Accuracy={}'.format(
            hidden_node, learning_rate, epoch, accuracy))
    plt.xlabel('Rounds')
    plt.ylabel('Loss')
    plt.savefig('Result.pdf')
    return accuracy


if __name__ == '__main__':
    # Hyper-parameters for a single batch-training run.
    hyper_params = {'learning_rate': 0.01, 'epoch': 2000, 'hidden_node': 8}
    train_test_batch(**hyper_params)
