import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import OneHotEncoder
import matrixslow as ms

# Load the MNIST dataset and keep only the first 5000 samples.
# fetch_openml downloads the data on first use (network I/O).
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
# Scale pixel values into [0, 1]. `np.int` was removed in NumPy 1.24 —
# use the builtin `int` for the label dtype instead.
X, y = X[:5000] / 255, y.astype(int)[:5000]
# fetch_openml may return pandas objects; extract the raw ndarrays.
X, y = X.values, y.values

# Convert the integer labels (0-9) to one-hot encoded rows of length 10.
# `sparse=` was renamed to `sparse_output=` in scikit-learn 1.2 (removed in 1.4).
oh = OneHotEncoder(sparse_output=False)
one_hot_label = oh.fit_transform(y.reshape(-1, 1))

# Build the computation graph. The input is a 784x1 column vector (a 28x28
# image flattened into a 784-dimensional vector). It is a placeholder, so it
# needs no initialization and does not participate in training.
x = ms.core.Variable(dim=(784, 1), init=False, trainable=False)

# Placeholder for the one-hot encoded label, a 10x1 column vector.
one_hot = ms.core.Variable(dim=(10, 1), init=False, trainable=False)

# First hidden layer: 100 neurons, ReLU activation.
hidden_1 = ms.layer.fc(x, 784, 100, "ReLU")

# Second hidden layer: 20 neurons, ReLU activation.
hidden_2 = ms.layer.fc(hidden_1, 100, 20, "ReLU")

# Output layer: 10 neurons (one per digit class), no activation — raw logits.
output = ms.layer.fc(hidden_2, 20, 10, None)

# Class-probability output via softmax (used for prediction only).
predict = ms.ops.SoftMax(output)

# Cross-entropy loss on the logits (softmax presumably applied inside the op,
# per its name — it takes `output`, not `predict`).
loss = ms.ops.loss.CrossEntropyWithSoftMax(output, one_hot)

# Learning rate
learning_rate = 0.001

# Build the Adam optimizer over the default graph.
optimizer = ms.optimizer.Adam(ms.default_graph, loss, learning_rate)

# Mini-batch size
batch_size = 64

# Train for 5 epochs (the range() bound is the epoch count).
for epoch in range(5):
    # Reset the mini-batch counter at the start of each epoch.
    batch_count = 0

    for i in range(len(X)):
        # Take the i-th sample as a 784x1 column matrix.
        # NOTE(review): np.mat is deprecated in NumPy, but matrixslow's
        # set_value appears to expect np.matrix — confirm before switching
        # to plain ndarrays.
        feature = np.mat(X[i]).T

        # Corresponding one-hot label as a 10x1 column matrix.
        label = np.mat(one_hot_label[i]).T

        # Feed the sample into the graph's placeholder nodes.
        x.set_value(feature)
        one_hot.set_value(label)

        # Forward pass and gradient accumulation for one sample.
        optimizer.one_step()

        batch_count += 1

        # Apply the accumulated gradients once per mini-batch.
        if batch_count >= batch_size:
            # Print current epoch, iteration index and loss value.
            print("epoch: {:d}, iteration: {:d}, loss: {:.3f}".format(epoch + 1, i + 1, loss.value[0, 0]))
            optimizer.update()
            batch_count = 0

    # After each epoch, evaluate accuracy on the same 5000 training samples.
    pred = []

    for i in range(len(X)):
        feature = np.mat(X[i]).T
        x.set_value(feature)

        # Forward pass only; .A converts the np.matrix result to an ndarray.
        predict.forward()
        pred.append(predict.value.A.ravel())

    # Predicted class = index of the largest probability per sample.
    pred = np.array(pred).argmax(axis=1)

    # `np.int` was removed in NumPy 1.24; a boolean array sums directly,
    # so the cast is unnecessary anyway.
    accuracy = (y == pred).sum() / len(X)

    # Fixed format spec: the original `{:3f}` meant min-width 3, not
    # 3 decimal places ({:.3f}), unlike the loss print above.
    print("epoch: {:d}, accuracy: {:.3f}".format(epoch + 1, accuracy))
