import numpy as np
from util.datasets import load_mnist
from mindvision.classification.dataset import Mnist


def data_iter(batch_size, features, labels):
    """Yield shuffled minibatches of (features, labels).

    Each batch is indexed with a (1, k) index array, so the yielded
    feature batch carries a leading singleton axis (callers squeeze it)
    and the label batch has shape (1, k). The final batch may be
    smaller than batch_size.

    Args:
        batch_size: number of samples per batch.
        features: array indexable along axis 0, length num_examples.
        labels: array indexable along axis 0, same length.
    """
    num_examples = len(features)
    # np.random.permutation replaces the deprecated np.mat + shuffle
    # combination; slicing already clamps at the end, so no min() needed.
    indices = np.random.permutation(num_examples)
    for start in range(0, num_examples, batch_size):
        # Keep the (1, k) index shape the rest of the pipeline expects.
        batch_indices = indices[start:start + batch_size].reshape(1, -1)
        yield features[batch_indices, :], labels[batch_indices]


def softmax(x):
    """Column-wise softmax for a (num_classes, batch) score matrix.

    Bug fix: the original normalized along axis=1 (across the batch),
    so per-sample probabilities did not sum to 1 — consistent with the
    logged starting loss of ~5.5 instead of ln(10) ~= 2.3. Softmax must
    normalize over the class axis, which is axis=0 in this layout.
    Also avoids mutating the caller's array (the original did `x -= ...`
    in place) and subtracts the per-column max for numerical stability.
    """
    shifted = x - np.max(x, axis=0, keepdims=True)
    exp = np.exp(shifted)
    # After shifting, each column contains exp(0) == 1, so the
    # denominator is >= 1 and no epsilon is required.
    return exp / np.sum(exp, axis=0, keepdims=True)


def softmax_net(x, w, b):
    """Linear scoring layer followed by softmax.

    Args:
        x: (batch, num_input) feature matrix.
        w: (num_input, num_output) weight matrix.
        b: (num_output, 1) bias column, broadcast over the batch.

    Returns:
        softmax-transformed scores of shape (num_output, batch).
    """
    logits = w.T @ x.T + b
    return softmax(logits)


def cross_entropy(y_hat, y):
    """Per-sample cross-entropy loss.

    Args:
        y_hat: (num_classes, batch) predicted probabilities.
        y: (1, batch) integer class labels.

    Returns:
        (batch,) array of negative log-probabilities of the true
        classes, with a small epsilon guarding log(0).
    """
    cols = np.arange(y_hat.shape[1])
    picked = y_hat[y[0], cols]
    return -np.log(picked + 1e-5)


def softmax_sgd(y_hat, x, y, w, b, lr, batch_size):
    """One SGD step using the analytic softmax + cross-entropy gradient.

    Args:
        y_hat: (num_classes, k) predicted probabilities for the batch.
        x: (1, k, num_input) feature batch (leading axis from data_iter).
        y: (1, k) integer labels.
        w: (num_input, num_classes) weights.
        b: (num_classes, 1) bias.
        lr: learning rate.
        batch_size: divisor for averaging the gradient.

    Returns:
        (new_w, new_b) updated parameters. Inputs are left unmodified
        (the original clobbered y_hat in place, which only worked
        because the caller happened to finish using it first).
    """
    # dL/dz = y_hat - one_hot(y); work on a copy to avoid the hidden
    # mutation of the caller's y_hat.
    grad_z = y_hat.copy()
    grad_z[y[0], np.arange(grad_z.shape[1])] -= 1

    # (classes, k) @ (1, k, inputs) -> (1, classes, inputs); squeeze
    # and transpose to match w's (inputs, classes) layout.
    grad_w = np.matmul(grad_z, x).squeeze(axis=0).T
    grad_b = np.sum(grad_z, axis=1, keepdims=True)

    new_w = w - lr * grad_w / batch_size
    new_b = b - lr * grad_b / batch_size

    return new_w, new_b


def evaluate_accuracy(y_hat, y):
    """Count correct predictions.

    Despite the name this returns a COUNT, not a ratio — callers divide
    by the sample total themselves. Interface kept for compatibility.

    Args:
        y_hat: (num_classes, batch) scores; argmax is taken over classes.
        y: labels broadcastable against the (batch,) argmax vector
           (either shape (batch,) or (1, batch)).
    """
    # The original applied np.sum twice; the inner sum already yields
    # the scalar count.
    return np.sum(np.argmax(y_hat, axis=0) == y)


def main():
    """Train softmax regression on MNIST with hand-rolled NumPy SGD.

    Downloads MNIST via the project's Mnist helper, loads it with
    load_mnist, then runs minibatch SGD using the manual gradient in
    softmax_sgd, printing loss and train/valid accuracy per epoch.
    """
    # download datasets
    # NOTE(review): Mnist and load_mnist are project-local helpers not
    # visible in this file; paths assume a shared dataset mount — confirm.
    Mnist(path='/shareData/mindspore-dataset/Mnist',
          split="train", download=True).download_dataset()
    Mnist(path='/shareData/mindspore-dataset/Mnist',
          split="test", download=True).download_dataset()

    # assumes load_mnist returns (images, integer labels) — TODO confirm
    features_t, labels_t = load_mnist('/shareData/mindspore-dataset/Mnist/train')
    features_v, labels_v = load_mnist('/shareData/mindspore-dataset/Mnist/test', split='t10k')

    # scale raw pixel values into [0, 1]
    features_t = features_t.astype(np.float64) / 255
    features_v = features_v.astype(np.float64) / 255

    # hyperparameters
    batch_size = 256
    lr = 0.0001
    epochs = 10

    # for x, y in data_iter(batch_size, features_t, labels_t):
    #     # print(x, '\n', y)
    #     # print(x.shape, y.shape)
    #     break

    num_input = 28 * 28
    num_output = 10

    # small Gaussian init for weights; zero bias column
    w = np.random.normal(0, 0.01, (num_input, num_output))
    b = np.zeros((num_output, 1))

    # y_hat = softmax_net(x.squeeze(axis=0), w, b)
    # print(y_hat.shape, y.shape)
    # l = cross_entropy(y_hat, y.squeeze(axis=0))
    # print(l)

    net = softmax_net
    loss = cross_entropy

    for epoch in range(epochs):
        train_loss, right, total = 0, 0, 0
        for x, y in data_iter(batch_size, features_t, labels_t):
            # data_iter indexes with a (1, k) array, so x carries a
            # leading singleton axis that is squeezed off here
            y_hat_t = net(x.squeeze(axis=0), w, b)
            train_loss += loss(y_hat_t, y).sum()

            # bookkeeping must run BEFORE softmax_sgd, which reuses
            # (and in the original, mutates) y_hat_t
            right += evaluate_accuracy(y_hat_t, y)
            total += y.shape[1]

            w, b = softmax_sgd(y_hat_t, x, y, w, b, lr, batch_size)

        train_acc = right / total
        # validate on the whole test split in a single forward pass
        y_hat_v = net(features_v, w, b)
        valid_acc = evaluate_accuracy(y_hat_v, labels_v) / len(labels_v)

        print(
            f'epoch [{epoch + 1}/{epochs}], loss is {float(train_loss / len(labels_t)):f}, train accuracy is {train_acc}, valid accuracy is {valid_acc}')


if __name__ == '__main__':
    main()


"""
output
In epoch 1, loss is 5.538449, train accuracy is 0.10166666666666667, valid accuracy is 0.133
In epoch 2, loss is 5.513865, train accuracy is 0.18388333333333334, valid accuracy is 0.2307
In epoch 3, loss is 5.489963, train accuracy is 0.2880333333333333, valid accuracy is 0.3373
In epoch 4, loss is 5.466770, train accuracy is 0.38366666666666666, valid accuracy is 0.4284
In epoch 5, loss is 5.444432, train accuracy is 0.46321666666666667, valid accuracy is 0.4989
In epoch 6, loss is 5.422886, train accuracy is 0.5247, valid accuracy is 0.5511
In epoch 7, loss is 5.402007, train accuracy is 0.5696333333333333, valid accuracy is 0.5903
In epoch 8, loss is 5.381965, train accuracy is 0.6034666666666667, valid accuracy is 0.6209
In epoch 9, loss is 5.362727, train accuracy is 0.62905, valid accuracy is 0.6408
In epoch 10, loss is 5.344362, train accuracy is 0.6508333333333334, valid accuracy is 0.6578
"""