"""
# -*- coding: utf-8 -*-
# @Time    : 2023/3/6 14:53
# @Author  : 王摇摆
# @FileName: mini_batch_cross_entropy_error监督数据（标签版）.py
# @Software: PyCharm
# @Blog    ：https://blog.csdn.net/weixin_44943389?type=blog
"""
import os
import sys

# This example implements the cross-entropy (not mean-squared) error loss using the mini-batch approach
import numpy    as np

sys.path.append(os.pardir)
from dataset.mnist import load_mnist


# 设计函数
def mini_batch_cross_entropy_error(y, t):
    print('神经网络输出y的维数是： ' + str(y.ndim))
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)

    batch_size = y.shape[0]
    return -np.sum(t * np.log(y + 1e-7)) / batch_size


def get_mini_batch():
    """Draw a random mini-batch of 10 samples from the MNIST training set.

    Returns:
        tuple: (x_batch, t_batch) — images and their integer labels.
    """
    # Load the full dataset; only the training split is used here.
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)

    batch_size = 10
    # Sample `batch_size` random row indices from the training set.
    chosen = np.random.choice(x_train.shape[0], batch_size)

    # Fancy indexing pulls the selected images and matching labels.
    return x_train[chosen], t_train[chosen]


from ch03.neuralnet_mnist_batch import get_data, init_network, predict

# Run the pretrained network on a random mini-batch and report its
# cross-entropy error.
# Fix: the original misspelled x_train/t_train as "x_tarin"/"t_tarin".
x_train, t_train = get_data()  # images and integer labels from the ch03 helper
network = init_network()       # pretrained network weights

batch_size = 10                 # number of samples per mini-batch
train_size = x_train.shape[0]   # total number of samples available
random_data = np.random.choice(train_size, batch_size)  # random row indices

x_batch = x_train[random_data]        # randomly selected mini-batch of inputs
y_batch = predict(network, x_batch)   # network outputs for the mini-batch
t_batch = t_train[random_data]        # labels matching the selected inputs

# Evaluate the cross-entropy error of the predictions on this mini-batch.
error_value = mini_batch_cross_entropy_error(y=y_batch, t=t_batch)
print(error_value)
