import numpy as np
import sys,os
sys.path.append(os.pardir)
from mnist_data import load_mnist
from Two_Layer_Net import *
from optimizer import *
# ---- Load data, build the network, choose the optimizer ----
# normalize=True scales pixel values into [0, 1];
# one_hot_label=True encodes each label as a one-hot vector.
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
optimizer = Adam()
net = TwoLayerNet(input_size=784, hidden_size=100, output_size=10)

# Histories of the metrics collected during training.
train_loss_list = []
train_acc_list = []
test_acc_list = []

# Hyperparameters.
iters_num = 10000                  # total number of SGD iterations
train_size = x_train.shape[0]
batch_size = 100
# Guard with max(..., 1): if train_size < batch_size the floor division
# would yield 0 and `i % iter_per_epoch` below would raise ZeroDivisionError.
iter_per_epoch = max(train_size // batch_size, 1)

for i in range(iters_num):
    # Draw a random mini-batch of batch_size samples
    # (np.random.choice samples with replacement by default).
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]
    # Compute gradients via backprop, then let the optimizer update
    # net.params in place.
    grads = net.gradient(x_batch, t_batch)
    optimizer.update(net.params, grads)
    # Record the post-update loss on the current mini-batch.
    train_loss_list.append(net.loss(x_batch, t_batch))
    # Once per epoch, evaluate accuracy on the full train and test sets
    # to monitor generalization and watch for overfitting.
    if i % iter_per_epoch == 0:
        train_acc = net.accuracy(x_train, t_train)
        test_acc = net.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        epoch = i // iter_per_epoch + 1
        print(f"第{epoch}轮 - 训练正确率: {train_acc:.4f}, 测试正确率: {test_acc:.4f}")