# -*- coding: utf-8 -*-
# @Time    : 2020/6/18 11:58 PM
# @Author  : caotian
# @FileName: recoverytesttrain.py
# @Software: PyCharm
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Conv2D,Pool2D,Linear
import numpy as np
import os
import sys
import random
import json
import gzip
from PIL import Image

# Resume training the MNIST classifier from a previously saved checkpoint,
# restoring both the model weights and the optimizer state so that the
# learning-rate schedule continues where it left off.
curpath = os.path.abspath(os.curdir)
sys.path.append(curpath)
import recoverytrain as rt

checkpoint_path = './checkpoint/mnist_epoch0'

with fluid.dygraph.guard():
    # Load the saved parameter dict and optimizer state dict from disk.
    saved_params, saved_opt_state = fluid.load_dygraph(checkpoint_path)
    net = rt.MNIST('mnist')
    net.load_dict(saved_params)

    loader = rt.load_data('train')

    epoch_num = 5
    batch_size = 100
    # Total step count drives the polynomial LR decay (0.01 -> 0.001).
    steps_per_epoch = int(60000 // batch_size) + 1
    lr_schedule = fluid.dygraph.PolynomialDecay(0.01, steps_per_epoch * epoch_num, 0.001)
    optimizer = fluid.optimizer.AdamOptimizer(
        learning_rate=lr_schedule, parameter_list=net.parameters())
    # Restore Adam moments / global step so training resumes seamlessly.
    optimizer.set_dict(saved_opt_state)

    # The checkpoint was taken after epoch 0, so continue from epoch 1.
    for epoch_id in range(1, epoch_num):
        for batch_id, data in enumerate(loader()):
            image_batch, label_batch = data
            image = fluid.dygraph.to_variable(image_batch)
            label = fluid.dygraph.to_variable(label_batch)

            predict, acc = net(image, label)
            avg_acc = fluid.layers.mean(acc)
            avg_loss = fluid.layers.mean(fluid.layers.cross_entropy(predict, label))

            # Report progress every 200 batches.
            if batch_id % 200 == 0:
                print("epoch: {}, batch: {}, loss is: {}, acc is {}".format(epoch_id, batch_id, avg_loss.numpy(),
                                                                            avg_acc.numpy()))

            # Backward pass, parameter update, then reset gradients.
            avg_loss.backward()
            optimizer.minimize(avg_loss)
            net.clear_gradients()

