# Import the tb_paddle TensorBoard wrapper and set the directory where the
# scalar plotting data (loss/accuracy curves) will be written.
from tb_paddle import SummaryWriter

# Scalars logged through this writer land under log/data; point TensorBoard
# at that directory to visualize the training curves.
data_writer = SummaryWriter(logdir="log/data")

# Train the MNIST model in dygraph (imperative) mode on `place` (CPU/GPU),
# logging loss/accuracy to TensorBoard every 100 batches and saving the
# final parameters to 'mnist'.
with fluid.dygraph.guard(place):
    model = MNIST()
    model.train()

    # One of four optimizer setups can be tried here one by one;
    # plain SGD with lr=0.01 is the current choice.
    optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.01, parameter_list=model.parameters())

    EPOCH_NUM = 10
    # Global step for TensorBoard x-axis. Renamed from `iter`, which
    # shadowed the Python builtin of the same name.
    log_step = 0
    for epoch_id in range(EPOCH_NUM):
        for batch_id, data in enumerate(train_loader()):
            # Prepare the batch: convert numpy arrays to dygraph variables.
            image_data, label_data = data
            image = fluid.dygraph.to_variable(image_data)
            label = fluid.dygraph.to_variable(label_data)

            # Forward pass: the model returns both predictions and the
            # batch classification accuracy.
            predict, avg_acc = model(image, label)

            # Cross-entropy loss, averaged over the batch.
            loss = fluid.layers.cross_entropy(predict, label)
            avg_loss = fluid.layers.mean(loss)

            # Every 100 batches, print and log the current loss/accuracy.
            if batch_id % 100 == 0:
                print("epoch: {}, batch: {}, loss is: {}, acc is {}".format(epoch_id, batch_id, avg_loss.numpy(),
                                                                            avg_acc.numpy()))
                data_writer.add_scalar("train/loss", avg_loss.numpy(), log_step)
                data_writer.add_scalar("train/accuracy", avg_acc.numpy(), log_step)
                # Advance by 100 so the x-axis reflects batches seen, not
                # the number of logging events.
                log_step += 100

            # Backward pass, parameter update, then reset gradients
            # (dygraph gradients accumulate unless cleared).
            avg_loss.backward()
            optimizer.minimize(avg_loss)
            model.clear_gradients()

    # Save the trained model parameters (written as mnist.pdparams).
    fluid.save_dygraph(model.state_dict(), 'mnist')