"""
write by qianqianjun
测试模型代码
"""
from autoencoder import CnnDAE, DenseDAE
from autoencoder.parameter import parameter as parm
from autoencoder.utils import readDatasetList,saveIntermediateImage
from autoencoder.DatasetUtil import DataSet
from torch.utils.data import DataLoader
from autoencoder.utils import batchDataTransform,getBaseGrid
from autoencoder.Loss import *
import numpy as np

# Pick the autoencoder architecture from the configuration:
# dense (fully-connected) layers when parm.useDense is set,
# convolutional layers otherwise.
_arch = DenseDAE if parm.useDense else CnnDAE
encoders = _arch.Encoders(parm)
decoders = _arch.Decoders(parm)

# Load the pretrained weights saved by the training run.
# Call load_state_dict on the module instances themselves instead of
# through the unbound nn.Module method — equivalent but idiomatic.
encoders.load_state_dict(torch.load("{}/encoders.pth".format(parm.dirCheckpoints)))
decoders.load_state_dict(torch.load("{}/decoders.pth".format(parm.dirCheckpoints)))

# This is a test script: switch to evaluation mode so layers such as
# dropout / batch-norm behave deterministically.
encoders.eval()
decoders.eval()

# Only move the models to the GPU when the configuration asks for it.
# The test loop below moves the data under the same parm.useCuda flag,
# so moving the models unconditionally would break CPU-only runs.
if parm.useCuda:
    encoders = encoders.cuda()
    decoders = decoders.cuda()

# Load the list of test image files.
test_set = readDatasetList("./dataset/testSet")
# Build the dataset and its data loader.
dataset = DataSet(files=test_set, resizeTo=64)
test_loader = DataLoader(dataset, batch_size=parm.batchSize,
                         shuffle=True, num_workers=parm.workers)
# Loss functions — used here only for reporting, not for optimization.
ReconLoss = nn.L1Loss()
WarpLoss = TotalVaryLoss(parm)
BiasReduceloss = BiasReduceLoss(parm)

# The base identity sampling grid and the zero-warp regularization target
# are loop invariants — build them once instead of once per batch.
baseGrid = getBaseGrid(parm.imgSize, True, batchSize=parm.batchSize, normalize=True)
zeroWarp = torch.tensor(np.zeros(shape=(1, 2, parm.imgSize, parm.imgSize), dtype=float),
                        dtype=torch.float32)
if parm.useCuda:
    baseGrid = baseGrid.cuda()
    zeroWarp = zeroWarp.cuda()

# Evaluation only: no gradients are needed, which saves memory and time.
with torch.no_grad():
    for step, batch_data in enumerate(test_loader, start=0):

        batch_data = batchDataTransform(batch_data, parm.channel)
        if parm.useCuda:
            batch_data = batch_data.cuda()

        # Forward pass: encode to latent codes, decode to texture I,
        # warp field W and the warped reconstruction `out`.
        # NOTE(review): baseGrid is built for parm.batchSize; if the last
        # batch is smaller this relies on broadcasting or on the loader
        # dropping the partial batch — confirm against DataSet/DataLoader.
        z, zI, zW = encoders(batch_data)
        I, W, out, Wact = decoders(zI, zW, baseGrid)

        # Report the same losses (and weights) used during training.
        recon_loss = ReconLoss(batch_data, out)
        loss_smooth = WarpLoss(W, weight=1e-6)
        loss_biasReduce = BiasReduceloss(W, zeroWarp, weight=1e-2)

        loss_val = recon_loss.item() + loss_biasReduce.item() + loss_smooth.item()
        print("<step: {}> loss-- total:{} recon:{} smooth:{} biasReduce:{}".format(step, loss_val, recon_loss.item(),
                                                                                loss_smooth.item(), loss_biasReduce.item()))

        # Absolute sampling grid = predicted warp offset + base grid.
        # BUG FIX: gy previously added W channel 0 (the x offsets) to the
        # y base grid; the y coordinate must use W channel 1.
        gx = (W.data[:, 0, :, :] + baseGrid.data[:, 0, :, :]).unsqueeze(1).clone()
        gy = (W.data[:, 1, :, :] + baseGrid.data[:, 1, :, :]).unsqueeze(1).clone()

        # Visualization: dump inputs, decoded texture, reconstruction and
        # both warp-coordinate maps (rescaled from [-1, 1] to [0, 1]).
        saveIntermediateImage(batch_data.data.clone(), parm.dirTestOutput,
                              filename="img_step_{}".format(step), n_sample=49, nrow=7, normalize=False)
        saveIntermediateImage(I.data.clone(), parm.dirTestOutput,
                              filename="texture_step_{}".format(step), n_sample=49, nrow=7, normalize=False)
        saveIntermediateImage(out.data.clone(), parm.dirTestOutput,
                              filename="output_step_{}".format(step), n_sample=49, nrow=7, normalize=False)
        saveIntermediateImage((gx + 1) / 2, parm.dirTestOutput,
                              filename="warpx_step_{}".format(step), n_sample=49, nrow=7, normalize=False)
        saveIntermediateImage((gy + 1) / 2, parm.dirTestOutput,
                              filename="warp_step_{}".format(step), n_sample=49, nrow=7, normalize=False)