from datasets.MNIST.mnist.mnist import load_mnist
import numpy as np
from numpy import random


def softmax(inputs):
    """Return the softmax of the first row of a (1, n) array as a 1-D array.

    The row maximum is subtracted before exponentiating so that large
    logits do not overflow ``np.exp``; the shift leaves the softmax
    values mathematically unchanged.

    Parameters:
        inputs: array-like of shape (1, n) — a single row of logits.

    Returns:
        1-D ``np.ndarray`` of length n that sums to 1.
    """
    row = np.asarray(inputs, dtype=np.float64)[0]
    exps = np.exp(row - row.max())  # max-shift for numerical stability
    return exps / exps.sum()


def sigmod(inputs):
    """Element-wise logistic sigmoid, 1 / (1 + e^-x), as float64.

    NOTE: the misspelled name 'sigmod' (sic) is kept because other
    functions in this file call it. Callers pass (1, n) row matrices;
    the result has the same shape as the input.

    Parameters:
        inputs: array-like of activations (here a (1, n) matrix).

    Returns:
        ``np.ndarray`` of float64 with values in (0, 1).
    """
    x = np.asarray(inputs, dtype=np.float64)
    return 1.0 / (1.0 + np.exp(-x))


def grad_sigmod(inputs):
    """Derivative of the logistic sigmoid evaluated at `inputs`.

    Uses the identity sigma'(x) = sigma(x) * (1 - sigma(x)); the explicit
    ones-array of the original is unnecessary because NumPy broadcasts
    the scalar 1.0.

    Parameters:
        inputs: array-like of pre-activation values (a (1, n) matrix here).

    Returns:
        ``np.ndarray`` with the same shape as ``sigmod(inputs)``.
    """
    s = sigmod(inputs)
    return s * (1.0 - s)


def predict(inputs, weight1, weight2):
    """Forward pass of the 2-layer net.

    Projects the (1, 784) input through `weight1`, applies the sigmoid
    hidden layer, projects through `weight2`, and returns the softmax
    class distribution as a 1-D array.
    """
    hidden = sigmod(inputs @ weight1)
    scores = hidden @ weight2
    return softmax(scores)


def grad(inputs, target, weight1, weight2):
    """Backpropagate one sample and return (grad1, grad2) for the two layers.

    The output-layer error is `softmax_output - target` (the standard
    softmax + cross-entropy gradient). The hidden activation is computed
    once and reused; the original recomputed ``sigmod(inputs @ weight1)``
    three times (directly, inside ``predict``, and inside ``grad_sigmod``).

    Parameters:
        inputs:  (1, 784) input row.
        target:  1-D one-hot label vector of length 10.
        weight1: (784, 512) first-layer weights.
        weight2: (512, 10) second-layer weights.

    Returns:
        (grad1, grad2) with the same shapes as (weight1, weight2).
    """
    hidden = sigmod(inputs @ weight1)                     # (1, 512), computed once
    output = softmax(hidden @ weight2)                    # forward pass, reusing hidden
    startgrad = (output - target).reshape((1, len(target)))
    grad2 = hidden.T @ startgrad
    # sigma'(x) = s * (1 - s); reuse hidden instead of calling grad_sigmod.
    grad1 = (startgrad @ weight2.T) * (hidden * (1.0 - hidden))
    grad1 = inputs.T @ grad1
    return grad1, grad2

def loss(target, outputs):
    """Cross-entropy between a one-hot `target` and predicted `outputs`.

    NOTE: uses base-10 logarithm, matching the original reporting scale
    (differs from the natural-log cross-entropy by a factor of ln 10).

    Parameters:
        target:  1-D one-hot label vector.
        outputs: 1-D probability vector of the same length.

    Returns:
        Scalar ``np.float64`` loss value.
    """
    target = np.asarray(target)
    outputs = np.asarray(outputs)
    return -np.sum(target * np.log10(outputs))

if __name__ == "__main__":
    (train, train_lable), (test, test_lable) = load_mnist()
    train = list(map(lambda x: np.array([x]), train))
    true_y = np.zeros((len(train_lable), max(train_lable) + 1))
    for i in range(len(train_lable)):
        true_y[i][train_lable[i]] = 1
    train_lable = true_y
    test = list(map(lambda x: np.array([x]), test))
    true_y = np.zeros((len(test_lable), max(test_lable) + 1))
    for i in range(len(test_lable)):
        true_y[i][test_lable[i]] = 1
    test_lable_lable = true_y
    weight1 = 2*np.random.random((28*28, 512)) - 1
    weight2 = 2*np.random.random((512, 10)) - 1

    for i in range(len(train)):
        grad1, grad2 = grad(train[i], train_lable[i], weight1, weight2)
        weight1 -= grad1 * 0.001
        weight2 -= grad2 * 0.001
        if i % 1000 == 0:
            sumV = 0
            sumM = 0
            sumL = 0
            for j in range(len(train)):
                sumL += loss(train_lable[j], predict(train[j], weight1, weight2))
                if predict(train[j], weight1, weight2).argmax() == train_lable[j].argmax():
                    sumV += 1
            for j in range(len(test)):
                if predict(test[j], weight1, weight2).argmax() == test_lable[j].argmax():
                    sumM += 1
            print("train_accuracy:"+str(sumV / len(train))+", test_accuracy:"+str(sumM / len(test)) + ", loss_value:"
                  + str(sumL))
