{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from autoencoder.parameter import Parameter\n",
    "from autoencoder.parameter import parameter as parm\n",
    "from autoencoder import CnnDAE as CnnDAE,DenseDAE as DenseDAE,VggDAE as VggDAE\n",
    "from autoencoder.utils import *\n",
    "from autoencoder.DatasetUtil import PrepareData,DataSet\n",
    "from autoencoder.Loss import *\n",
    "import torch.utils.data as Data\n",
    "\n",
    "# Build the encoder and decoder networks (VGG-style deforming autoencoder).\n",
    "encoders=VggDAE.Encoders(parm)\n",
    "decoders=VggDAE.Decoders(parm)\n",
    "\n",
    "# Custom weight initialization: apply weight_init recursively to every submodule.\n",
    "# (Idiomatic instance call instead of the unbound nn.Module.apply form.)\n",
    "encoders.apply(weight_init)\n",
    "decoders.apply(weight_init)\n",
    "\n",
    "# Optimizers -- one Adam optimizer per sub-network.\n",
    "encodersOptimizer=torch.optim.Adam(encoders.parameters(),lr=parm.learning_rate,\n",
    "                                 betas=(parm.beta1,0.999))\n",
    "\n",
    "decodersOptimizer=torch.optim.Adam(decoders.parameters(),lr=parm.learning_rate,\n",
    "                                 betas=(parm.beta1,0.999))\n",
    "\n",
    "# Loss functions.\n",
    "ReconstructionLoss=nn.L1Loss() # L1 reconstruction loss on the decoder output\n",
    "WarpLoss=TotalVaryLoss(parm)   # total-variation smoothness on the warp field\n",
    "BiasReduce=BiasReduceLoss(parm) # pulls the mean warp toward zero\n",
    "# NOTE(review): SmoothL1/SmoothL2 are created but never used in this notebook --\n",
    "# confirm they are not needed interactively before removing.\n",
    "SmoothL1=TotalVaryLoss(parm)\n",
    "SmoothL2=SelfSmoothLoss2(parm)\n",
    "\n",
    "# NOTE(review): hardcoded absolute dataset path -- make this configurable.\n",
    "preData=PrepareData(\"/home/qianqianjun/CODE/DataSets/DaeDatasets\",random_seed=parm.randomSeed)\n",
    "total_number=1000\n",
    "train_number=800\n",
    "test_number=200\n",
    "steps=int(train_number/parm.batchSize)  # mini-batches per epoch\n",
    "train_files,test_files=preData.train_test_split(total_number,test_number)\n",
    "preData.save_list(\"./dataset\",train_set=train_files,test_set=test_files)\n",
    "\n",
    "train_set=DataSet(files=train_files,resizeTo=224)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Move the models to the GPU.\n",
    "# NOTE(review): this is unconditional while the data transfer below checks\n",
    "# parm.useCuda -- the notebook will fail on a CPU-only machine; confirm intent.\n",
    "encoders=encoders.cuda()\n",
    "decoders=decoders.cuda()\n",
    "\n",
    "# Constant zero-warp target for the bias-reduce loss. It does not depend on\n",
    "# the batch, so create it (and move it to the GPU) once instead of rebuilding\n",
    "# it from a numpy array on every training step.\n",
    "zeroWarp=torch.zeros((1,2,parm.imgSize,parm.imgSize),requires_grad=False)\n",
    "if parm.useCuda:\n",
    "    zeroWarp=zeroWarp.cuda()\n",
    "\n",
    "Step=0  # global step counter across all epochs (drives the logging cadence)\n",
    "# Training loop.\n",
    "for epoch in range(parm.epochs):\n",
    "    train_loader=Data.DataLoader(train_set,batch_size=parm.batchSize,shuffle=True,num_workers=parm.workers)\n",
    "    # Keep the last batch's tensors alive for visualization after the epoch.\n",
    "    batch_data=None\n",
    "    I=None\n",
    "    out=None\n",
    "    W=None\n",
    "    baseGrid=None\n",
    "    # Mini-batch training.\n",
    "    for step,batch_data in enumerate(train_loader,start=0):\n",
    "        batch_data=batchDataTransform(batch_data,parm.channel)\n",
    "        baseGrid=getBaseGrid(imgSize=parm.imgSize,Inbatch=True,batchSize=batch_data.size()[0])\n",
    "        if parm.useCuda:\n",
    "            batch_data=batch_data.cuda()\n",
    "            baseGrid=baseGrid.cuda()\n",
    "\n",
    "        encodersOptimizer.zero_grad()\n",
    "        decodersOptimizer.zero_grad()\n",
    "\n",
    "        # Forward pass: encode, then decode into texture I, warp field W and\n",
    "        # the warped output.\n",
    "        z,zI,zW=encoders(batch_data)\n",
    "        I,W,out,Wact=decoders(zI,zW,baseGrid)\n",
    "        # Losses: reconstruction + warp smoothness + warp bias reduction.\n",
    "        loss_reconstruction=ReconstructionLoss(out,batch_data)\n",
    "        loss_Smooth=WarpLoss(W,weight=1e-6)\n",
    "        loss_bias_reduce=BiasReduce(W,zeroWarp,weight=1e-2)\n",
    "        loss_all=loss_reconstruction+loss_bias_reduce+loss_Smooth\n",
    "        # Backward pass and parameter updates.\n",
    "        loss_all.backward()\n",
    "        encodersOptimizer.step()\n",
    "        decodersOptimizer.step()\n",
    "\n",
    "        # Periodic training log. Print the in-epoch step so 'step/steps' is\n",
    "        # consistent (previously the global Step was printed against the\n",
    "        # per-epoch total and overran it after the first epoch).\n",
    "        if (Step+1) % 100==0:\n",
    "            loss_val = loss_reconstruction.item() + loss_Smooth.item() + loss_bias_reduce.item()\n",
    "            print(\"<epoch:{}/{} step:{}/{}> loss-- total:{} recon:{} smooth:{} biasReduce:{}\".format(\n",
    "                epoch+1,parm.epochs,step+1,steps,loss_val,loss_reconstruction.item(),loss_Smooth.item(),\n",
    "                loss_bias_reduce.item()))\n",
    "        Step+=1\n",
    "\n",
    "    if (epoch+1) % 10==0:\n",
    "        # Visualize the last batch of the epoch: the absolute sampling grid is\n",
    "        # the predicted warp plus the identity base grid, per axis.\n",
    "        gx=(W.data[:,0,:,:] + baseGrid.data[:,0,:,:]).unsqueeze(1).clone()\n",
    "        gy=(W.data[:,1,:,:] + baseGrid.data[:,1,:,:]).unsqueeze(1).clone()\n",
    "\n",
    "        # Filenames follow a single 'epoch_{n}_<what>' pattern (the input-image\n",
    "        # name previously lacked the underscore).\n",
    "        saveIntermediateImage(img_list=batch_data.data.clone(), output_dir=parm.dirImageOutput,\n",
    "                              filename=\"epoch_{}_img\".format(epoch), n_sample=1, nrow=1, normalize=False)\n",
    "        saveIntermediateImage(img_list=I.data.clone(), output_dir=parm.dirImageOutput,\n",
    "                              filename=\"epoch_{}_texture\".format(epoch), n_sample=1, nrow=1, normalize=False)\n",
    "        saveIntermediateImage(img_list=out.data.clone(), output_dir=parm.dirImageOutput,\n",
    "                              filename=\"epoch_{}_output\".format(epoch), n_sample=1, nrow=1, normalize=False)\n",
    "        # Rescale the grids (presumably in [-1,1], per the (g+1)/2 mapping) to\n",
    "        # [0,1] so they can be saved as images.\n",
    "        saveIntermediateImage(img_list=(gx + 1) / 2, output_dir=parm.dirImageOutput,\n",
    "                              filename=\"epoch_{}_warpx\".format(epoch), n_sample=1, nrow=1, normalize=False)\n",
    "        saveIntermediateImage(img_list=(gy + 1) / 2, output_dir=parm.dirImageOutput,\n",
    "                              filename=\"epoch_{}_warpy\".format(epoch), n_sample=1, nrow=1, normalize=False)\n",
    "\n",
    "# Persist the trained weights (idiomatic instance call instead of the\n",
    "# unbound nn.Module.state_dict form).\n",
    "torch.save(encoders.state_dict(),\"{}/encoders.pth\".format(parm.dirCheckpoints))\n",
    "torch.save(decoders.state_dict(),\"{}/decoders.pth\".format(parm.dirCheckpoints))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
