"""
write by qianqianjun
2020.01.29
"""

from __future__ import print_function
import torch.utils.data as Data
import os
from autoencoder import CnnDAE as CnnDAE, DenseDAE as DenseDAE
from autoencoder.parameter import parameter as parm
from autoencoder.Loss import *
from autoencoder.utils import *
from autoencoder.DatasetUtil import PrepareData
from autoencoder.DatasetUtil import DataSet


# Choose the autoencoder architecture once, then build both halves from it:
# dense (fully-connected) when parm.useDense is set, convolutional otherwise.
architecture = DenseDAE if parm.useDense else CnnDAE
encoders = architecture.Encoders(parm)
decoders = architecture.Decoders(parm)

# Model parameter initialization: either load an existing model (for
# fine-tuning or testing) or apply random weight initialization.
if parm.modelPath != "":
    # A trained model exists — load its weights; it can then be fine-tuned
    # in the training loop below or used directly for evaluation.
    print("正在加载 {} 处的模型".format(parm.modelPath))
    encoders.load_state_dict(torch.load(os.path.join(parm.modelPath, "encoders.pth")))
    decoders.load_state_dict(torch.load(os.path.join(parm.modelPath, "decoders.pth")))
else:
    print("初始化训练参数")
    # weight_init comes from autoencoder.utils (star import); apply() walks
    # every submodule and initializes its parameters.
    encoders.apply(weight_init)
    decoders.apply(weight_init)

# Move both models to the GPU when CUDA is enabled.
if parm.useCuda:
    encoders = encoders.cuda()
    decoders = decoders.cuda()

# Optimizers for the two halves of the model.
# NOTE: the original code called DenseDAE.Encoders.parameters(encoders) even
# when the CNN variant was selected; that only worked by accident because
# parameters() is inherited from nn.Module. Use the bound method instead.
encodersOptimizer = torch.optim.Adam(encoders.parameters(),
                                     lr=parm.learning_rate, betas=(parm.beta1, 0.999))
decodersOptimizer = torch.optim.Adam(decoders.parameters(),
                                     lr=parm.learning_rate, betas=(parm.beta1, 0.999))


# Loss functions used during training.
ReconstructionLoss=nn.L1Loss() # pixel-wise L1 reconstruction loss
WarpLoss=TotalVaryLoss(parm) # total-variation smoothness penalty on the warp field
BiasReduce=BiasReduceLoss(parm) # pulls the mean warp toward zero (see zeroWarp below)
# NOTE(review): SmoothL1 and SmoothL2 are constructed but never used in this
# file — confirm whether they are needed or can be removed.
SmoothL1=TotalVaryLoss(parm)
SmoothL2=SelfSmoothLoss2(parm)

# Dataset preparation: split the raw image files into train/test lists and
# persist the split so the experiment can be reproduced.
preData = PrepareData("/home/qianqianjun/CODE/DataSets/DaeDatasets", random_seed=parm.randomSeed)
total_number = 10000                        # images drawn from the dataset directory
test_number = 2000                          # images held out for testing
train_number = total_number - test_number   # remaining 8000 images for training
steps = train_number // parm.batchSize      # batches per epoch (floor division)
train_files, test_files = preData.train_test_split(total_number, test_number)
preData.save_list("./dataset", train_set=train_files, test_set=test_files)

train_set = DataSet(files=train_files, resizeTo=64)
Step = 0  # global step counter, used to tag the visualization images below
# 开始训练
for epoch in range(parm.epochs):
    train_loader=Data.DataLoader(train_set,batch_size=parm.batchSize,shuffle=True,num_workers=parm.workers)
    # 定义要可视化的变量
    batch_data=None
    I=None
    out=None
    W=None
    baseGrid=None
    # 批训练
    for step,batch_data in enumerate(train_loader,start=0):
        batch_data=batchDataTransform(batch_data,parm.channel)
        baseGrid=getBaseGrid(imgSize=parm.imgSize,Inbatch=True,batchSize=batch_data.size()[0])
        zeroWarp=torch.tensor(
            np.zeros(shape=(1,2,parm.imgSize,parm.imgSize)),dtype=torch.float32,requires_grad=False
        )
        if parm.useCuda:
            batch_data=batch_data.cuda()
            baseGrid=baseGrid.cuda()
            zeroWarp=zeroWarp.cuda()

        encodersOptimizer.zero_grad()
        decodersOptimizer.zero_grad()

        # 前向计算
        z,zI,zW=encoders(batch_data)
        I,W,out,Wact=decoders(zI,zW,baseGrid)
        # 计算损失
        loss_reconstruction=ReconstructionLoss(out,batch_data)
        loss_Smooth=WarpLoss(W,weight=1e-6)
        loss_bias_reduce=BiasReduce(W,zeroWarp,weight=1e-2)
        loss_all=loss_reconstruction+loss_bias_reduce+loss_Smooth
        # 反向传播
        loss_all.backward()
        encodersOptimizer.step()
        decodersOptimizer.step()
        #loss_val=loss_reconstruction.data[0] + loss_Smooth.data[0] + loss_bias_reduce.data[0]
        loss_val = loss_reconstruction.item() + loss_Smooth.item() + loss_bias_reduce.item()
        print("<epoch:{}/{} , {}/{}> loss-- total:{} recon:{} smooth:{} biasReduce:{}".format(
            epoch+1,parm.epochs,step,steps,loss_val,loss_reconstruction.item(),loss_Smooth.item(),
            loss_bias_reduce.item()))
        # print("<epoch:{}/{} , {}/{}> loss-- total:{} recon:{} smooth:{} biasReduce:{}".format(
        #     epoch, parm.epochs, step, parm.batchSize, loss_val, loss_reconstruction.data[0], loss_Smooth.data[0],
        #     loss_bias_reduce.data[0]))
        Step+=1

    # 可视化训练过程
    gx=(W.data[:,0,:,:] + baseGrid.data[:,0,:,:]).unsqueeze(1).clone()
    gy=(W.data[:,1,:,:] + baseGrid.data[:,1,:,:]).unsqueeze(1).clone()

    saveIntermediateImage(img_list=batch_data.data.clone(), output_dir=parm.dirImageOutput,
                          filename="step_{}_img".format(Step), n_sample=49, nrow=7, normalize=False)
    saveIntermediateImage(img_list=I.data.clone(), output_dir=parm.dirImageOutput,
                          filename="step_{}_texture".format(Step), n_sample=49, nrow=7, normalize=False)
    saveIntermediateImage(img_list=out.data.clone(), output_dir=parm.dirImageOutput,
                          filename="step_{}_output".format(Step), n_sample=49, nrow=7, normalize=False)
    saveIntermediateImage(img_list=(gx + 1) / 2, output_dir=parm.dirImageOutput,
                          filename="step_{}_warpx".format(Step), n_sample=49, nrow=7, normalize=False)
    saveIntermediateImage(img_list=(gy + 1) / 2, output_dir=parm.dirImageOutput,
                          filename="step_{}_warpy".format(Step), n_sample=49, nrow=7, normalize=False)

# Persist the trained weights; they can be reloaded later via parm.modelPath.
# Uses bound state_dict() and os.path.join for consistency with the loading code.
torch.save(encoders.state_dict(), os.path.join(parm.dirCheckpoints, "encoders.pth"))
torch.save(decoders.state_dict(), os.path.join(parm.dirCheckpoints, "decoders.pth"))