import layers
import utils
import datasets
import parameters

import os
import time
import paddle
from visualdl import LogWriter

# Build the model
edsr = layers.EDSR(parameters.model_scale)

# Learning-rate schedule: with step_size=1 and gamma=0.5, each call to
# scheduler.step() halves the learning rate.
# Reference: https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/optimizer/lr/StepDecay_cn.html
scheduler = paddle.optimizer.lr.StepDecay(learning_rate=parameters.lr, step_size=1, gamma=0.5, verbose=True)

# Adam optimizer driven by the schedule above.
# Fixed: removed the redundant backslash continuations inside the parentheses
# and the dangling "\" after the closing paren, which would silently join the
# statement with any code placed on the following line.
# NOTE(review): "bate1"/"bate2" look like typos for beta1/beta2, but those are
# the attribute names declared in the `parameters` module, so they stay as-is.
optim = paddle.optimizer.Adam(learning_rate=scheduler,
                              beta1=parameters.bate1,
                              beta2=parameters.bate2,
                              epsilon=parameters.epsilon,
                              parameters=edsr.parameters())
# Resume from the most recent checkpoint, if requested.
if parameters.load_latest_parameter:
    files = os.listdir(parameters.model_folder)
    # Checkpoints are named "<step>_loss_<loss>.pdparams" / ".pdopt".  Pick the
    # pair with the largest step.  endswith() replaces the original fragile
    # substring tests ("pdparams" in i / "opt" in i), which could match
    # unrelated files; max() replaces sort-then-[-1].
    latest_para = max((int(f.split("_")[0]), f) for f in files if f.endswith(".pdparams"))
    latest_opt = max((int(f.split("_")[0]), f) for f in files if f.endswith(".pdopt"))
    # Restore model weights and optimizer state.
    edsr.set_state_dict(paddle.load(os.path.join(parameters.model_folder, latest_para[1])))
    optim.set_state_dict(paddle.load(os.path.join(parameters.model_folder, latest_opt[1])))
    # Continue the global step counter from where the checkpoint left off.
    step = latest_opt[0]
else:
    step = 0
    # For x3/x4 models, initialise the scale-independent layers from the best
    # x2 checkpoint (only parameters whose name contains "base" are copied).
    if parameters.model_scale > 2:
        files = os.listdir(parameters.x2_scale)
        latest = max((int(f.split("_")[0]), f) for f in files if f.endswith(".pdparams"))
        x2_state = paddle.load(os.path.join(parameters.x2_scale, latest[1]))
        base_weights = {k: v for k, v in x2_state.items() if "base" in k}
        edsr.set_state_dict(base_weights)

# L1 loss between the super-resolved output and the ground-truth image.
loss_fn = paddle.nn.loss.L1Loss(reduction='mean', name="L1_loss_function")

# Data loaders for the training and validation splits.
train_loader = paddle.io.DataLoader(
    datasets.AnimeDataset(parameters.train_dataset, scale=parameters.model_scale),
    batch_size=parameters.mini_batch,
    shuffle=True,
)
test_loader = paddle.io.DataLoader(
    datasets.AnimeDataset(parameters.val_dataset, scale=parameters.model_scale),
    batch_size=parameters.mini_batch,
    shuffle=True,
)

# Main training loop.
for epoch in range(parameters.epochs):
    for batch, data in enumerate(train_loader()):
        step += 1
        start = time.time()
        x_data = paddle.to_tensor(data[0], dtype="float32")  # low-res input
        y_data = paddle.to_tensor(data[1], dtype="float32")  # high-res target
        predicts = edsr(x_data)  # super-resolved prediction
        # Compute the loss and take one optimizer step.
        loss = loss_fn(predicts, y_data)
        loss.backward()
        optim.step()
        optim.clear_grad()
        print("\rstep=", step, "loss=", loss.numpy()[0], "cost:", time.time() - start, "s", end="")

        # Periodically compute PSNR/SSIM, dump sample outputs to disk,
        # log scalars to VisualDL and save a checkpoint.
        if (step + 1) % parameters.add_log_step == 0:
            loss = loss.numpy()[-1]
            result = predicts.numpy()
            targets = y_data.numpy()
            # BUG FIX: the last batch of an epoch may hold fewer than
            # parameters.mini_batch samples; iterate over the actual batch
            # size to avoid an IndexError and average over the right count.
            n = result.shape[0]
            pnsr = 0.
            ssim = 0.
            for i in range(n):
                img1 = utils.channel_first_to_last(result[i]) * 255
                img2 = utils.channel_first_to_last(targets[i]) * 255
                pnsr += utils.psnr(img1, img2)
                ssim += utils.ssim(img1, img2)
                utils.save_rgb(img1, "%s/%d.jpg" % (parameters.result_folder, i))
            pnsr = pnsr / n
            ssim = ssim / n

            with LogWriter(logdir="./log/x%d" % (parameters.model_scale)) as writer:
                writer.add_scalar(tag="loss", step=step, value=loss)
                writer.add_scalar(tag="pnsr", step=step, value=pnsr)
                writer.add_scalar(tag="ssim", step=step, value=ssim)

            paddle.save(edsr.state_dict(), parameters.model_folder + "/%d_loss_%f.pdparams" % (step, loss))
            paddle.save(optim.state_dict(), parameters.model_folder + "/%d_loss_%f.pdopt" % (step, loss))
        # Halve the learning rate every `halved_lr_mini_batch` steps
        # (the StepDecay scheduler applies gamma=0.5 per call).
        if step % parameters.halved_lr_mini_batch == 0:
            scheduler.step()
                
         
       







