# Batch-oriented MNIST neural-network inference example.
import os
import pickle
import sys

import numpy as np

sys.path.append(os.pardir)
from dataset.mnist import load_mnist


def sigmod(x):
    """Logistic sigmoid activation: 1 / (1 + e^(-x)), element-wise."""
    return 1.0 / (1.0 + np.exp(-x))


def softmax1(x):
    """Numerically stable softmax.

    Accepts a 1-D array (one sample) or a 2-D array where each row is one
    sample; probabilities are computed independently per sample and each
    sample's outputs sum to 1.

    Fixes vs. the original: the 2-D branch subtracted the GLOBAL max, so a
    batch mixing one large-valued sample with very negative ones could
    underflow whole rows to zero (0/0 -> NaN). We subtract each row's own
    max instead; this also stops shadowing the builtin `max`.
    """
    if x.ndim == 2:
        # Per-row max as the overflow guard, then normalize per row.
        shifted = x - np.max(x, axis=1, keepdims=True)
        exps = np.exp(shifted)
        return exps / np.sum(exps, axis=1, keepdims=True)

    # 1-D case: single sample.
    shifted = x - np.max(x)  # overflow guard
    exps = np.exp(shifted)
    return exps / np.sum(exps)


def softmax(x):
    """Softmax over the whole array, shifted by the global max for numerical stability."""
    exps = np.exp(x - np.max(x))
    return exps / exps.sum()


# Fetch the dataset.
def get_mnist():
    """Load MNIST and return only the test split (images, labels).

    Images stay as raw 0-255 pixel values (normalize=False), flattened to
    784-element vectors; labels are plain digit values (not one-hot).
    """
    (_, _), (x_test, t_test) = load_mnist(normalize=False, flatten=True, one_hot_label=False)
    return x_test, t_test


# Load the pre-trained network parameters.
def init_network():
    """Deserialize the trained weights/biases dict from sample_weight.pkl."""
    # NOTE: pickle is only safe here because the file ships with the project.
    with open('sample_weight.pkl', 'rb') as f:
        return pickle.load(f)


# Neural-network inference (forward pass).
def predict(network, x):
    """Run a forward pass through the 3-layer network.

    Fix vs. original: removed a stray dead `pass` statement at the top of
    the function body.

    Args:
        network: dict holding weight matrices 'W1'..'W3' and bias vectors
            'b1'..'b3' (as produced by init_network()).
        x: input image(s) — a single 784-vector or a (batch, 784) array.

    Returns:
        Softmax class probabilities, one row per input sample.
    """
    # Unpack weights and biases.
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    # Hidden layer 1: affine transform + sigmoid.
    z1 = sigmod(np.dot(x, W1) + b1)
    # Hidden layer 2: affine transform + sigmoid.
    z2 = sigmod(np.dot(z1, W2) + b2)
    # Output layer: affine transform + batch-aware softmax.
    return softmax1(np.dot(z2, W3) + b3)


# Main entry point: classify the test set in mini-batches and report accuracy.
x, t = get_mnist()        # test images and their ground-truth digit labels
network = init_network()  # pre-trained weights/biases

batch_size = 100
accuracy_count = 0

# Walk the test set batch_size images at a time (fixes: typo `rigth_count`,
# removed commented-out dead code).
for i in range(0, len(x), batch_size):
    x_batch = x[i:i + batch_size]
    y_batch = predict(network, x_batch)          # class probabilities per image
    predict_result = np.argmax(y_batch, axis=1)  # predicted digit per image

    # Count predictions in this batch that match the labels.
    right_count = np.sum(predict_result == t[i:i + batch_size])
    accuracy_count += right_count

print('Accuracy is ' + str(accuracy_count / len(x) * 100) + ' %')
