{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "pose_guided_image_generation.ipynb",
      "version": "0.3.2",
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "metadata": {
        "id": "snU0q1BxVYPk",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "# Colab bootstrap: install a CUDA-matched PyTorch 0.4.1 wheel.\n",
        "# NOTE(review): wheel.pep425tags was removed in newer `wheel` releases and\n",
        "# this pinned wheel URL is historical; kept for provenance.\n",
        "# http://pytorch.org/\n",
        "from os.path import exists\n",
        "from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag\n",
        "# Tag of the running CPython (e.g. 'cp36-cp36m'), used to pick the wheel.\n",
        "platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())\n",
        "# Detect the installed CUDA runtime version (e.g. 'cu92') from ldconfig;\n",
        "# fall back to 'cpu' when no NVIDIA device node is present.\n",
        "cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\\.\\([0-9]*\\)\\.\\([0-9]*\\)$/cu\\1\\2/'\n",
        "accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu'\n",
        "\n",
        "!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision\n",
        "  \n",
        "import torch\n",
        "import torch.nn as nn\n",
        "import math"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "d_MvPCMfPOc7",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "from PIL import Image\n",
        "\n",
        "# Workaround for a Pillow/torchvision incompatibility on older Colab images:\n",
        "# Image.register_extension(s) is missing there, so re-implement both hooks.\n",
        "def register_extension(id, extension): Image.EXTENSION[extension.lower()] = id.upper()\n",
        "Image.register_extension = register_extension\n",
        "def register_extensions(id, extensions): \n",
        "  for extension in extensions: register_extension(id, extension)\n",
        "Image.register_extensions = register_extensions"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "nrBXcuASVYPv",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "4c40b45f-0bb8-4e88-e0d4-c43212025ab0"
      },
      "cell_type": "code",
      "source": [
        "# Mount Google Drive; dataset pickles and images are read from gdrive paths below.\n",
        "from google.colab import drive\n",
        "drive.mount('/content/gdrive')"
      ],
      "execution_count": 3,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Mounted at /content/gdrive\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "omHlNiimVYP4",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "import math\n",
        "import os\n",
        "import random\n",
        "import sys\n",
        "\n",
        "import numpy as np\n",
        "import scipy.io\n",
        "import scipy.stats\n",
        "import skimage.morphology\n",
        "from scipy import misc\n",
        "from skimage.morphology import square, dilation, erosion"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "i9RcizGtVYP8",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "import torch\n",
        "import torch.optim as optim\n",
        "import torch.nn as nn\n",
        "\n",
        "from torch.autograd import Variable\n",
        "from torch import cat"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "R3a39hipVYP-",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "def _getSparseKeypoint(r, c, k, height, width, radius=4, var=4, mode='Solid'):\n",
        "    # Return [row, col, channel] indices of a solid disc of the given radius\n",
        "    # centred at keypoint (r, c) for channel k, clipped to the image bounds.\n",
        "    # `var` is unused: only 'Solid' mode is implemented in this notebook.\n",
        "    r = int(r)\n",
        "    c = int(c)\n",
        "    k = int(k)\n",
        "    indices = []\n",
        "    for i in range(-radius, radius+1):\n",
        "        for j in range(-radius, radius+1):\n",
        "            distance = np.sqrt(float(i**2+j**2))\n",
        "            if r+i>=0 and r+i<height and c+j>=0 and c+j<width:\n",
        "                if 'Solid'==mode and distance<=radius:\n",
        "                    indices.append([r+i, c+j, k])\n",
        "\n",
        "    return indices\n",
        "\n",
        "\n",
        "def _getSparsePose(peaks, height, width, channel, radius=4, var=4, mode='Solid'):\n",
        "    # Convert per-keypoint peak lists into sparse [row, col, keypoint] indices;\n",
        "    # each detected keypoint contributes a solid disc of the given radius.\n",
        "    # Returns (indices, [height, width, channel]).\n",
        "    indices = []\n",
        "    values = []\n",
        "    # ^ `values` is unused; kept for byte-compatibility with the original.\n",
        "    for k in range(len(peaks)):\n",
        "        p = peaks[k]\n",
        "        if 0!=len(p):\n",
        "            # p[0] is (x, y, ...): column first, then row.\n",
        "            r = p[0][1]\n",
        "            c = p[0][0]\n",
        "            ind = _getSparseKeypoint(r, c, k, height, width, radius, var, mode)\n",
        "            indices.extend(ind)\n",
        "            \n",
        "    shape = [height, width, channel]\n",
        "    return indices, shape\n",
        "\n",
        "\n",
        "def _oneDimSparsePose(indices, shape):\n",
        "    # Flatten [r, c, k] indices into linear offsets for a C-ordered\n",
        "    # (height, width, channel) volume; returns (flat_indices, total_size).\n",
        "    ind_onedim = []\n",
        "    for ind in indices:\n",
        "        idx = ind[0]*shape[2]*shape[1] + ind[1]*shape[2] + ind[2]\n",
        "        ind_onedim.append(idx)\n",
        "    shape = np.prod(shape)\n",
        "    return ind_onedim, shape\n",
        "\n",
        "\n",
        "def _sparse2dense(indices, shape):\n",
        "    # Scatter 1s at the given [r, c, k] index triples into a zero array of `shape`.\n",
        "    dense = np.zeros(shape)\n",
        "    for i in range(len(indices)):\n",
        "        r = indices[i][0]\n",
        "        c = indices[i][1]\n",
        "        k = indices[i][2]\n",
        "        dense[r,c,k] = 1\n",
        "    return dense\n",
        "\n",
        "\n",
        "def _getPoseMask(peaks, height, width, radius=4, var=4, mode='Solid'):\n",
        "    # Render a single-channel body mask by drawing discs at limb endpoints and\n",
        "    # along each limb, then closing small gaps morphologically.\n",
        "    # Limb pairs are 1-based keypoint ids (OpenPose-style 18-keypoint skeleton).\n",
        "    limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \\\n",
        "                         [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \\\n",
        "                         [1,16], [16,18], [2,17], [2,18], [9,12], [12,6], [9,3], [17,18]] #\n",
        "    indices = []\n",
        "    for limb in limbSeq:\n",
        "        p0 = peaks[limb[0] -1]\n",
        "        p1 = peaks[limb[1] -1]\n",
        "        if 0!=len(p0) and 0!=len(p1):\n",
        "            # Peaks store (x, y, ...): column first, then row.\n",
        "            r0 = p0[0][1]\n",
        "            c0 = p0[0][0]\n",
        "            r1 = p1[0][1]\n",
        "            c1 = p1[0][0]\n",
        "            ind  = _getSparseKeypoint(r0, c0, 0, height, width, radius, var, mode)\n",
        "            indices.extend(ind)\n",
        "            ind = _getSparseKeypoint(r1, c1, 0, height, width, radius, var, mode)\n",
        "            indices.extend(ind)\n",
        "            \n",
        "            # Interpolate extra discs along the limb so the mask is contiguous.\n",
        "            distance = np.sqrt((r0-r1)**2 + (c0-c1)**2)\n",
        "            sampleN = int(distance/radius)\n",
        "            if sampleN>1:\n",
        "                for i in range(1,sampleN):\n",
        "                    r = r0 + (r1-r0)*i/sampleN\n",
        "                    c = c0 + (c1-c0)*i/sampleN\n",
        "                    ind = _getSparseKeypoint(r, c, 0, height, width, radius, var, mode)\n",
        "                    indices.extend(ind)\n",
        "                    \n",
        "    shape = [height, width, 1]\n",
        "    ## Fill body: dilation followed by erosion (a morphological closing).\n",
        "    dense = np.squeeze(_sparse2dense(indices, shape))\n",
        "    dense = dilation(dense, square(5))\n",
        "    dense = erosion(dense, square(5))\n",
        "    return dense\n",
        "\n",
        "\n",
        "def _get_valid_peaks(all_peaks, subsets):\n",
        "    \"\"\"Return `all_peaks` if at least one person subset was detected, else None.\n",
        "\n",
        "    NOTE(review): the best-scoring subset index is computed but only used as an\n",
        "    existence check; all peaks are returned unchanged. Kept as-is.\n",
        "    \"\"\"\n",
        "    try:\n",
        "        subsets = subsets.tolist()\n",
        "        valid_idx = -1\n",
        "        valid_score = -1\n",
        "        for i, subset in enumerate(subsets):\n",
        "            # subset[-2] holds the total confidence score of this candidate.\n",
        "            score = subset[-2]   \n",
        "            if score > valid_score:\n",
        "                valid_idx = i\n",
        "                valid_score = score\n",
        "        if valid_idx>=0:\n",
        "            return all_peaks\n",
        "        else:\n",
        "            return None\n",
        "    except (AttributeError, TypeError, IndexError):\n",
        "        # Malformed or missing subsets: treat as 'no valid pose'. The original\n",
        "        # bare `except:` also swallowed KeyboardInterrupt and genuine bugs.\n",
        "        return None\n",
        "\n",
        "\n",
        "def _format_data(folder_path, pairs, i, all_peaks_dic, subsets_dic):\n",
        "    \"\"\"Build one example [image_0, image_1, pose_1, mask_1] for pair `i`.\n",
        "\n",
        "    Images are normalised to [-1, 1] and returned as CHW float tensors;\n",
        "    pose_1 is an 18-channel keypoint map with values in {-1, 1}; mask_1 is\n",
        "    the single-channel body mask. Returns None when pose annotations are\n",
        "    missing or invalid for this pair.\n",
        "    \"\"\"\n",
        "    # Read the filename:\n",
        "    img_path_0 = os.path.join(folder_path, pairs[i][0])\n",
        "    img_path_1 = os.path.join(folder_path, pairs[i][1])\n",
        "    # NOTE(review): scipy.misc.imread is deprecated (removed in SciPy >= 1.2);\n",
        "    # imageio.imread is the drop-in replacement.\n",
        "    image_raw_0 = misc.imread(img_path_0)\n",
        "    image_raw_1 = misc.imread(img_path_1)\n",
        "    # NOTE(review): shape[0] is rows (height) and shape[1] is cols (width), so\n",
        "    # this swaps them; harmless only for square inputs -- confirm for dataset.\n",
        "    height, width = image_raw_0.shape[1], image_raw_0.shape[0]\n",
        "\n",
        "    ########################## Pose 16x8 & Pose coodinate (for 128x64(Solid) 128x64(Gaussian))##########################\n",
        "    if (all_peaks_dic is not None) and (pairs[i][0] in all_peaks_dic) and (pairs[i][1] in all_peaks_dic):\n",
        "        ## Pose 1\n",
        "        peaks = _get_valid_peaks(all_peaks_dic[pairs[i][1]], subsets_dic[pairs[i][1]])\n",
        "        if peaks is None:\n",
        "            # Bug fix: _get_valid_peaks can return None, which previously\n",
        "            # crashed _getSparsePose (len(None)) inside the DataLoader worker.\n",
        "            return None\n",
        "        \n",
        "        indices_r4_1, shape = _getSparsePose(peaks, height, width, 18, radius=4, mode='Solid')\n",
        "        indices_r4_1, shape_1 = _oneDimSparsePose(indices_r4_1, shape)\n",
        "        \n",
        "        pose_mask_r4_1 = _getPoseMask(peaks, height, width, radius=4, mode='Solid')\n",
        "    else:\n",
        "        return None\n",
        "\n",
        "    image_raw_0 = np.reshape(image_raw_0, (height, width, 3))\n",
        "    image_raw_0 = image_raw_0.astype('float32')\n",
        "    image_raw_1 = np.reshape(image_raw_1, (height, width, 3))\n",
        "    image_raw_1 = image_raw_1.astype('float32')\n",
        "\n",
        "    mask_1 = np.reshape(pose_mask_r4_1, (height, width, 1))\n",
        "    mask_1 = mask_1.astype('float32')\n",
        "\n",
        "    # Scatter the flat sparse indices into a dense (H, W, 18) keypoint volume.\n",
        "    indices_r4_1 = np.array(indices_r4_1).astype(np.int64).flatten().tolist()\n",
        "    indices_r4_1_dense = np.zeros((shape_1))\n",
        "    indices_r4_1_dense[indices_r4_1] = 1\n",
        "    indices_r4_1 = np.reshape(indices_r4_1_dense, (height, width, 18))\n",
        "    pose_1 = indices_r4_1.astype('float32')\n",
        "\n",
        "    # Normalise images to [-1, 1]; map pose occupancy {0,1} to {-1,1}.\n",
        "    image_0 = (image_raw_0 - 127.5) / 127.5\n",
        "    image_1 = (image_raw_1 - 127.5) / 127.5\n",
        "    pose_1 = pose_1 * 2 - 1\n",
        "    \n",
        "    # HWC numpy arrays -> CHW torch tensors.\n",
        "    image_0 = torch.from_numpy(np.transpose(image_0, (2, 0, 1)))\n",
        "    image_1 = torch.from_numpy(np.transpose(image_1, (2, 0, 1)))\n",
        "    mask_1 = torch.from_numpy(np.transpose(mask_1, (2, 0, 1)))\n",
        "    pose_1 = torch.from_numpy(np.transpose(pose_1, (2, 0, 1)))\n",
        "    \n",
        "    \n",
        "    return [image_0, image_1, pose_1, mask_1]"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "Z-19Pv6xVYQC",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "import pickle"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "eOGq-HjOVYQF",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "import pickle\n",
        "\n",
        "def _get_train_all_p_pairs(out_dir, split_name='train'):\n",
        "    \"\"\"Load the pickled list of training image pairs for a split.\n",
        "\n",
        "    Raises FileNotFoundError when the pair pickle is absent (the original\n",
        "    code silently skipped the load and then hit a NameError on `p_pairs`).\n",
        "    \"\"\"\n",
        "    assert split_name in {'train', 'train_flip', 'test', 'test_samples', 'test_seq', 'all'}\n",
        "    if split_name=='train_flip':\n",
        "        p_pairs_path = os.path.join(out_dir, 'p_pairs_train_flip.p')\n",
        "    else:\n",
        "        p_pairs_path = os.path.join(out_dir, 'p_pairs_'+split_name.split('_')[0]+'.p')\n",
        "        \n",
        "    if not os.path.exists(p_pairs_path):\n",
        "        raise FileNotFoundError('pair file not found: %s' % p_pairs_path)\n",
        "    with open(p_pairs_path,'rb') as f:\n",
        "        p_pairs = pickle.load(f)\n",
        "            \n",
        "    print('_get_train_all_pn_pairs finish ......')\n",
        "    print('p_pairs length:%d' % len(p_pairs))\n",
        "    \n",
        "    return p_pairs"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "AHMY6G6jYhqK",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 86
        },
        "outputId": "0564fed5-523f-4339-8948-fd668885c580"
      },
      "cell_type": "code",
      "source": [
        "\n",
        "# Load the training pair lists (normal and horizontally-flipped splits).\n",
        "p_pairs = _get_train_all_p_pairs('/content/gdrive/My Drive/Colab Notebooks/data/DF_train_data/')\n",
        "p_pairs_flip = _get_train_all_p_pairs('/content/gdrive/My Drive/Colab Notebooks/data/DF_train_data/', 'train_flip')"
      ],
      "execution_count": 9,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "_get_train_all_pn_pairs finish ......\n",
            "p_pairs length:97854\n",
            "_get_train_all_pn_pairs finish ......\n",
            "p_pairs length:77538\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "Sj_hDcgOVYQQ",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "import torch.utils.data\n",
        "\n",
        "# Hard-coded sizes of the normal and flipped pair lists (they match the\n",
        "# lengths printed by _get_train_all_p_pairs above: 97854 and 77538).\n",
        "length = 97854 + 77538\n",
        "class PoseDataset(torch.utils.data.Dataset):\n",
        "    \"\"\"Pose dataset over DeepFashion pairs (normal + horizontally flipped).\n",
        "\n",
        "    Relies on the module-level globals `p_pairs` and `p_pairs_flip` loaded\n",
        "    earlier. Indices below 97854 address the normal split; the rest address\n",
        "    the flipped split. Samples with invalid pose data are skipped by\n",
        "    advancing the index until a valid example is found.\n",
        "    \"\"\"    \n",
        "    def __init__(self, pose_peak_path, pose_sub_path, pose_peak_path_flip, pose_sub_path_flip):\n",
        "        self.folder_path = '/content/gdrive/My Drive/Colab Notebooks/data/DF_img_pose/filted_up_train/'\n",
        "        self.folder_path_flip = '/content/gdrive/My Drive/Colab Notebooks/data/DF_img_pose/filted_up_train_flip/'\n",
        "        self.all_peaks_dic = None\n",
        "        self.subsets_dic = None\n",
        "        self.all_peaks_dic_flip = None\n",
        "        self.subsets_dic_flip = None\n",
        "        \n",
        "        # encoding='latin1' is the usual switch for unpickling Python-2-era\n",
        "        # pickles under Python 3 -- presumably how these files were written.\n",
        "        with open(pose_peak_path, 'rb') as f:\n",
        "            self.all_peaks_dic = pickle.load(f, encoding='latin1')\n",
        "        with open(pose_sub_path, 'rb') as f:\n",
        "            self.subsets_dic = pickle.load(f, encoding='latin1')\n",
        "        \n",
        "        with open(pose_peak_path_flip, 'rb') as f:\n",
        "            self.all_peaks_dic_flip = pickle.load(f, encoding='latin1')\n",
        "        with open(pose_sub_path_flip, 'rb') as f:\n",
        "            self.subsets_dic_flip = pickle.load(f, encoding='latin1')\n",
        "        \n",
        "    def __len__(self):\n",
        "        return length\n",
        "\n",
        "    def __getitem__(self, index):\n",
        "        # Keep advancing (wrapping at `length`) until a pair with valid pose\n",
        "        # annotations is found; _format_data returns None for invalid pairs.\n",
        "        while True:\n",
        "            USE_FLIP = index >= 97854\n",
        "            if USE_FLIP:\n",
        "                example = _format_data(self.folder_path_flip, p_pairs_flip, index - 97854, self.all_peaks_dic_flip, self.subsets_dic_flip)\n",
        "                if example:\n",
        "                    \n",
        "                    return example\n",
        "                index = (index + 1) % length\n",
        "            else:\n",
        "                example = _format_data(self.folder_path, p_pairs, index, self.all_peaks_dic, self.subsets_dic)\n",
        "                if example:\n",
        "                    return example\n",
        "                index = (index + 1) % length"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "HClNJTx9VYQS",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "# Build the dataset from the pre-computed pose pickles and wrap it in a\n",
        "# DataLoader (batch size 1, shuffled, 2 worker processes).\n",
        "pose_dataset = PoseDataset('/content/gdrive/My Drive/Colab Notebooks/data/DF_img_pose/PoseFiltered/all_peaks_dic_DeepFashion.p',\n",
        "                           '/content/gdrive/My Drive/Colab Notebooks/data/DF_img_pose/PoseFiltered/subsets_dic_DeepFashion.p',\n",
        "                           '/content/gdrive/My Drive/Colab Notebooks/data/DF_img_pose/PoseFiltered/all_peaks_dic_DeepFashion_Flip.p',\n",
        "                           '/content/gdrive/My Drive/Colab Notebooks/data/DF_img_pose/PoseFiltered/subsets_dic_DeepFashion_Flip.p')\n",
        "pose_loader = torch.utils.data.DataLoader(pose_dataset, batch_size=1, shuffle=True, num_workers=2)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "b8CXroMBVYQV",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "# Training hyper-parameters and model dimensions.\n",
        "img_H = 256\n",
        "img_W = 256\n",
        "channel = 3\n",
        "batch_size = 1\n",
        "max_step = 80000\n",
        "d_lr = 0.00002  # discriminator learning rate\n",
        "g_lr = 0.00002  # generator learning rate\n",
        "lr_update_step = 50000\n",
        "data_format = 'NHWC'  # NOTE(review): tensors here are NCHW; appears unused in visible code\n",
        "\n",
        "beta1 = 0.5\n",
        "beta2 = 0.999\n",
        "gamma = 0.5\n",
        "lambda_k = 0.001\n",
        "z_num = 64  # bottleneck dimension of the first generator\n",
        "conv_hidden_num = 128\n",
        "repeat_num = int(np.log2(img_H)) - 2 # 6\n",
        "log_step = 200\n",
        "keypoint_num = 18  # OpenPose-style keypoints per person"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "BbpEWWxCVYQZ",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "class GeneratorCNN_Pose_UAEAfterResidual_256(nn.Module):\n",
        "    \"\"\"Residual U-net style generator for 256x256 inputs.\n",
        "\n",
        "    Six residual encoder stages (channel widths 128..768, each followed by a\n",
        "    stride-2 downsampling conv), a fully-connected bottleneck of size z_num\n",
        "    at 8x8 resolution, then six decoder stages with skip connections\n",
        "    concatenated from the matching encoder stage. Input has `ch_in` channels;\n",
        "    output is a 3-channel image.\n",
        "    \"\"\"\n",
        "    \n",
        "    def block(self, ch_in, ch_out, kernel, stride=1, padding=1):\n",
        "        # Two conv+ReLU layers; used as the body of a residual stage.\n",
        "        return nn.Sequential(\n",
        "            nn.Conv2d(ch_in, ch_out, kernel, stride, padding),\n",
        "            nn.ReLU(),\n",
        "            nn.Conv2d(ch_out, ch_out, kernel, stride, padding),\n",
        "            nn.ReLU()\n",
        "        )\n",
        "    \n",
        "    def block_one(self, ch_in, ch_out, kernel, stride=1, padding=1):\n",
        "        # Single conv+ReLU; with stride=2 it halves the spatial resolution.\n",
        "        return nn.Sequential(\n",
        "            nn.Conv2d(ch_in, ch_out, kernel, stride, padding),\n",
        "            nn.ReLU()\n",
        "        )\n",
        "    \n",
        "    def conv(self, ch_in, ch_out, kernel, stride=1, padding=1):\n",
        "        return nn.Conv2d(ch_in, ch_out, kernel, stride, padding)\n",
        "        \n",
        "    def fc(self, ch_in, ch_out):\n",
        "        return nn.Linear(ch_in, ch_out)\n",
        "    \n",
        "    def __init__(self, ch_in, z_num, repeat_num, hidden_num=128):\n",
        "        super(GeneratorCNN_Pose_UAEAfterResidual_256, self).__init__()\n",
        "        self.min_fea_map_H = 8  # spatial size at the bottleneck (256 / 2**5)\n",
        "        self.z_num = z_num \n",
        "        self.hidden_num = hidden_num \n",
        "        self.repeat_num = repeat_num\n",
        "        \n",
        "        # Encoder residual-stage bodies.\n",
        "        self.block1 = self.block(self.hidden_num, 128, 3, 1)\n",
        "        self.block2 = self.block(256, 256, 3, 1)\n",
        "        self.block3 = self.block(384, 384, 3, 1)\n",
        "        self.block4 = self.block(512, 512, 3, 1)\n",
        "        self.block5 = self.block(640, 640, 3, 1)\n",
        "        self.block6 = self.block(768, 768, 3, 1)\n",
        "            \n",
        "        # Stride-2 downsampling convs between encoder stages.\n",
        "        self.block_one1 = self.block_one(128, 256, 3, 2)\n",
        "        self.block_one2 = self.block_one(256, 384, 3, 2)\n",
        "        self.block_one3 = self.block_one(384, 512, 3, 2)\n",
        "        self.block_one4 = self.block_one(512, 640, 3, 2)\n",
        "        self.block_one5 = self.block_one(640, 768, 3, 2)\n",
        "        \n",
        "        # Bottleneck: 8*8*768 -> z_num -> 8*8*hidden_num.\n",
        "        self.fc1 = self.fc(self.min_fea_map_H * self.min_fea_map_H * 768, self.z_num)\n",
        "        self.fc2 = self.fc(self.z_num, self.min_fea_map_H * self.min_fea_map_H * self.hidden_num)\n",
        "        \n",
        "        # Decoder residual-stage bodies (input widths include the skip concat).\n",
        "        self.block7 = self.block(896, 896, 3, 1)\n",
        "        self.block8 = self.block(1280, 1280, 3, 1)\n",
        "        self.block9 = self.block(1024, 1024, 3, 1)\n",
        "        self.block10 = self.block(768, 768, 3, 1)\n",
        "        self.block11 = self.block(512, 512, 3, 1)\n",
        "        self.block12 = self.block(256, 256, 3, 1)\n",
        "        \n",
        "        # 1x1 channel-reduction convs applied after each upsample.\n",
        "        self.block_one6 = self.block_one(896, 640, 1, 1, padding=0)\n",
        "        self.block_one7 = self.block_one(1280, 512, 1, 1, padding=0)\n",
        "        self.block_one8 = self.block_one(1024, 384, 1, 1, padding=0)\n",
        "        self.block_one9 = self.block_one(768, 256, 1, 1, padding=0)\n",
        "        self.block_one10 = self.block_one(512, 128, 1, 1, padding=0)\n",
        "        \n",
        "        self.conv_last = self.conv(256, 3, 3, 1) \n",
        "        \n",
        "        # Input stem: lift ch_in channels to hidden_num feature maps.\n",
        "        self.block_1 = nn.Sequential(\n",
        "            nn.Conv2d(ch_in, self.hidden_num, 3, 1, padding=1),\n",
        "            nn.ReLU()\n",
        "        )\n",
        "        \n",
        "        self.upscale = nn.Upsample(scale_factor=2)\n",
        "        \n",
        "    def forward(self, x):\n",
        "        # Skip-connection features saved per encoder stage (shallow -> deep).\n",
        "        encoder_layer_list = []\n",
        "        \n",
        "        x = self.block_1(x) # x: [1, 256, 256, 21]\n",
        "        \n",
        "        # 1\n",
        "        res = x\n",
        "        x = self.block1(x)\n",
        "        x = x + res\n",
        "        encoder_layer_list.append(x)\n",
        "        x = self.block_one1(x)\n",
        "        # 2\n",
        "        res = x\n",
        "        x = self.block2(x)\n",
        "        x = x + res\n",
        "        encoder_layer_list.append(x)\n",
        "        x = self.block_one2(x)\n",
        "        # 3\n",
        "        res = x\n",
        "        x = self.block3(x)\n",
        "        x = x + res\n",
        "        encoder_layer_list.append(x)\n",
        "        x = self.block_one3(x)\n",
        "        # 4\n",
        "        res = x\n",
        "        x = self.block4(x)\n",
        "        x = x + res\n",
        "        encoder_layer_list.append(x)\n",
        "        x = self.block_one4(x)\n",
        "        # 5\n",
        "        res = x\n",
        "        x = self.block5(x)\n",
        "        x = x + res\n",
        "        encoder_layer_list.append(x)\n",
        "        x = self.block_one5(x)\n",
        "        # 6\n",
        "        res = x\n",
        "        x = self.block6(x)\n",
        "        x = x + res\n",
        "        encoder_layer_list.append(x)\n",
        "            \n",
        "        # Bottleneck: flatten -> z -> reshape back to 8x8 feature maps.\n",
        "        x = x.view(-1, self.min_fea_map_H * self.min_fea_map_H * 768)\n",
        "        x = self.fc1(x)\n",
        "        z = x\n",
        "        \n",
        "        x = self.fc2(z)\n",
        "        x = x.view(-1, self.hidden_num, self.min_fea_map_H, self.min_fea_map_H) # x: [1, 8, 8, 128]\n",
        "        \n",
        "        # Decoder: concat deepest-first skip, residual body, upsample, 1x1 conv.\n",
        "        # 1\n",
        "        x = torch.cat([x, encoder_layer_list[5]], dim=1)\n",
        "        res = x\n",
        "        x = self.block7(x)\n",
        "        x = x + res\n",
        "        x = self.upscale(x)\n",
        "        x = self.block_one6(x)\n",
        "        # 2\n",
        "        x = torch.cat([x, encoder_layer_list[4]], dim=1)\n",
        "        res = x\n",
        "        x = self.block8(x)\n",
        "        x = x + res\n",
        "        x = self.upscale(x)\n",
        "        x = self.block_one7(x)\n",
        "        # 3\n",
        "        x = torch.cat([x, encoder_layer_list[3]], dim=1)\n",
        "        res = x\n",
        "        x = self.block9(x)\n",
        "        x = x + res\n",
        "        x = self.upscale(x)\n",
        "        x = self.block_one8(x)\n",
        "        # 4\n",
        "        x = torch.cat([x, encoder_layer_list[2]], dim=1)\n",
        "        res = x\n",
        "        x = self.block10(x)\n",
        "        x = x + res\n",
        "        x = self.upscale(x)\n",
        "        x = self.block_one9(x)\n",
        "        # 5\n",
        "        x = torch.cat([x, encoder_layer_list[1]], dim=1)\n",
        "        res = x\n",
        "        x = self.block11(x)\n",
        "        x = x + res\n",
        "        x = self.upscale(x)\n",
        "        x = self.block_one10(x)\n",
        "        # 6\n",
        "        x = torch.cat([x, encoder_layer_list[0]], dim=1)\n",
        "        res = x\n",
        "        x = self.block12(x)\n",
        "        x = x + res\n",
        "       \n",
        "        output = self.conv_last(x)\n",
        "        return output"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "IhG76gecVYQd",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "class UAE_noFC_AfterNoise(nn.Module):\n",
        "    \"\"\"U-net style encoder/decoder without a fully-connected bottleneck.\n",
        "\n",
        "    Four encoder stages with skip connections and three stride-2\n",
        "    downsamplings, mirrored by four decoder stages that concatenate the\n",
        "    saved encoder features. Input has `ch_in` channels; output is a\n",
        "    3-channel image.\n",
        "    \"\"\"\n",
        "    \n",
        "    def block(self, ch_in, ch_out, kernel, stride=1, padding=1):\n",
        "        # Two conv+ReLU layers.\n",
        "        return nn.Sequential(\n",
        "            nn.Conv2d(ch_in, ch_out, kernel, stride, padding),\n",
        "            nn.ReLU(),\n",
        "            nn.Conv2d(ch_out, ch_out, kernel, stride, padding),\n",
        "            nn.ReLU()\n",
        "        )\n",
        "    \n",
        "    def block_one(self, ch_in, ch_out, kernel, stride=1, padding=1):\n",
        "        # Single conv+ReLU; with stride=2 it halves the spatial resolution.\n",
        "        return nn.Sequential(\n",
        "            nn.Conv2d(ch_in, ch_out, kernel, stride, padding),\n",
        "            nn.ReLU()\n",
        "        )\n",
        "    \n",
        "    def conv(self, ch_in, ch_out, kernel, stride=1, padding=1):\n",
        "        return nn.Conv2d(ch_in, ch_out, kernel, stride, padding)\n",
        "        \n",
        "    def __init__(self, ch_in, repeat_num, hidden_num=128):\n",
        "        super(UAE_noFC_AfterNoise, self).__init__()\n",
        "        self.hidden_num = hidden_num\n",
        "        self.repeat_num = repeat_num\n",
        "        \n",
        "        # Input stem: lift ch_in channels to hidden_num feature maps.\n",
        "        self.block_1 = nn.Sequential(\n",
        "            nn.Conv2d(ch_in, self.hidden_num, 3, 1, padding=1),\n",
        "            nn.ReLU()\n",
        "        )\n",
        "        \n",
        "        self.block1 = self.block(self.hidden_num, 128, 3, 1)\n",
        "        self.block2 = self.block(128, 256, 3, 1)\n",
        "        self.block3 = self.block(256, 384, 3, 1)\n",
        "        self.block4 = self.block(384, 512, 3, 1)\n",
        "            \n",
        "        self.block_one1 = self.block_one(128, 128, 3, 2)\n",
        "        self.block_one2 = self.block_one(256, 256, 3, 2)\n",
        "        self.block_one3 = self.block_one(384, 384, 3, 2)\n",
        "        \n",
        "        # Decoder bodies; input widths include the skip concatenation.\n",
        "        self.block5 = self.block(1024, 128, 3, 1)\n",
        "        self.block6 = self.block(512, 128, 3, 1)\n",
        "        self.block7 = self.block(384, 128, 3, 1)\n",
        "        self.block8 = self.block(256, 128, 3, 1)\n",
        "        \n",
        "        self.conv_last = self.conv(128, 3, 3, 1)\n",
        "        \n",
        "        self.upscale = nn.Upsample(scale_factor=2)\n",
        "        \n",
        "    def forward(self, x):\n",
        "        encoder_layer_list = []\n",
        "        \n",
        "        x = self.block_1(x) # x: [256, 256, 6]\n",
        "        \n",
        "        # 1\n",
        "        x = self.block1(x)\n",
        "        encoder_layer_list.append(x)\n",
        "        x = self.block_one1(x)\n",
        "        # 2\n",
        "        x = self.block2(x)\n",
        "        encoder_layer_list.append(x)\n",
        "        x = self.block_one2(x)\n",
        "        # 3\n",
        "        x = self.block3(x)\n",
        "        encoder_layer_list.append(x)\n",
        "        x = self.block_one3(x)\n",
        "        # 4\n",
        "        x = self.block4(x)\n",
        "        encoder_layer_list.append(x)\n",
        "        \n",
        "        # 1\n",
        "        # NOTE(review): encoder_layer_list[-1] is x itself at this point, so\n",
        "        # this concatenates x with itself (512+512 = 1024 channels -> block5);\n",
        "        # the channel widths confirm this is what the layers expect.\n",
        "        x = torch.cat([x, encoder_layer_list[-1]], dim=1)\n",
        "        x = self.block5(x)\n",
        "        x = self.upscale(x)\n",
        "        # 2\n",
        "        x = torch.cat([x, encoder_layer_list[-2]], dim=1)\n",
        "        x = self.block6(x)\n",
        "        x = self.upscale(x)\n",
        "        # 3\n",
        "        x = torch.cat([x, encoder_layer_list[-3]], dim=1)\n",
        "        x = self.block7(x)\n",
        "        x = self.upscale(x)\n",
        "        # 4\n",
        "        x = torch.cat([x, encoder_layer_list[-4]], dim=1)\n",
        "        x = self.block8(x)\n",
        "        \n",
        "        output = self.conv_last(x)\n",
        "        return output"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "9xNN1T9dVYQi",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "class DCGANDiscriminator_256(nn.Module):\n",
        "    \"\"\"DCGAN-style discriminator for 256x256 inputs.\n",
        "\n",
        "    NOTE(review): conv filters are drawn fresh from a uniform distribution on\n",
        "    every forward() call (plain tensors, not nn.Parameters), so the only\n",
        "    trainable parameters are the BatchNorm layers and the final Linear.\n",
        "    Confirm this is intentional before training.\n",
        "    \"\"\"\n",
        "    def uniform(self, stdev, size):\n",
        "        # Uniform init with matching stdev: U(-stdev*sqrt(3), stdev*sqrt(3)).\n",
        "        return np.random.uniform(\n",
        "            low=-stdev * np.sqrt(3),\n",
        "            high=stdev * np.sqrt(3),\n",
        "            size=size\n",
        "        ).astype('float32')\n",
        "    \n",
        "    def LeakyReLU(self, x, alpha=0.2):\n",
        "        # Functional leaky ReLU: max(alpha*x, x).\n",
        "        return torch.max(alpha*x, x)\n",
        "\n",
        "    def conv2d(self, x, input_dim, filter_size, output_dim, gain=1, stride=1, padding=2):\n",
        "        # Applies a conv with freshly sampled (untrained) filters and zero biases.\n",
        "        filter_values = self.uniform(\n",
        "                self._weights_stdev,\n",
        "                (output_dim, input_dim, filter_size, filter_size)\n",
        "            )\n",
        "        filter_values *= gain\n",
        "        filters = torch.from_numpy(filter_values).cuda()\n",
        "        biases = torch.from_numpy(np.zeros(output_dim, dtype='float32')).cuda()\n",
        "        result = nn.functional.conv2d(x, filters, biases, stride, padding)\n",
        "        return result\n",
        "        \n",
        "    def LayerNorm(self, ch):\n",
        "        # NOTE(review): despite the name, this returns BatchNorm2d.\n",
        "        return nn.BatchNorm2d(ch)\n",
        "        \n",
        "    def __init__(self, bn=True, input_dim=3, dim=64, _weights_stdev=0.02):\n",
        "        super(DCGANDiscriminator_256, self).__init__()\n",
        "        self.bn = bn\n",
        "        self.input_dim = input_dim\n",
        "        self.dim = dim\n",
        "        self._weights_stdev = _weights_stdev\n",
        "        \n",
        "        self.bn1 = self.LayerNorm(2*self.dim)\n",
        "        self.bn2 = self.LayerNorm(4*self.dim)\n",
        "        self.bn3 = self.LayerNorm(8*self.dim)\n",
        "        self.bn4 = self.LayerNorm(8*self.dim)\n",
        "        \n",
        "        # Final score head over the flattened 8x8 x (8*dim) feature map.\n",
        "        self.fc1 = nn.Linear(8*8*8*self.dim, 1)\n",
        "        \n",
        "    def forward(self, x):\n",
        "        output = x\n",
        "        \n",
        "        # Five stride-2 conv stages: 256 -> 128 -> 64 -> 32 -> 16 -> 8.\n",
        "        output = self.conv2d(output, self.input_dim, 5, self.dim, stride=2)\n",
        "        output = self.LeakyReLU(output)\n",
        "        \n",
        "        output = self.conv2d(output, self.dim, 5, 2*self.dim, stride=2)\n",
        "        if self.bn:\n",
        "            output = self.bn1(output)\n",
        "        output = self.LeakyReLU(output)\n",
        "        \n",
        "        output = self.conv2d(output, 2*self.dim, 5, 4*self.dim, stride=2)\n",
        "        if self.bn:\n",
        "            output = self.bn2(output)\n",
        "        output = self.LeakyReLU(output)\n",
        "        \n",
        "        output = self.conv2d(output, 4*self.dim, 5, 8*self.dim, stride=2)\n",
        "        if self.bn:\n",
        "            output = self.bn3(output)\n",
        "        output = self.LeakyReLU(output)\n",
        "        \n",
        "        output = self.conv2d(output, 8*self.dim, 5, 8*self.dim, stride=2)\n",
        "        if self.bn:\n",
        "            output = self.bn4(output)\n",
        "        output = self.LeakyReLU(output)\n",
        "        \n",
        "        # Flatten and score (no sigmoid here; applied by the loss if needed).\n",
        "        output = output.view(-1, 8*8*8*self.dim)\n",
        "        output = self.fc1(output)\n",
        "        return output"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "CqsRpuHcVYQl",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 139
        },
        "outputId": "fed1de69-4f89-46a6-9630-a05a96448896"
      },
      "cell_type": "code",
      "source": [
        "# Build the three networks; z_num and repeat_num are defined in an earlier cell.\n",
        "# G1's 21 input channels: presumably 3 (image) + 18 (pose maps), matching the\n",
        "# torch.cat([x, pose_target]) in train() -- confirm against the data loader.\n",
        "# G2's 6 input channels match torch.cat([G1, x]) in train() (two RGB images).\n",
        "generator_one = GeneratorCNN_Pose_UAEAfterResidual_256(21, z_num, repeat_num)\n",
        "generator_two = UAE_noFC_AfterNoise(6, repeat_num-2)\n",
        "discriminator = DCGANDiscriminator_256()\n",
        "\n",
        "# Move everything to the GPU (notebook assumes a CUDA runtime).\n",
        "generator_one.cuda()\n",
        "generator_two.cuda()\n",
        "discriminator.cuda()"
      ],
      "execution_count": 16,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "DCGANDiscriminator_256(\n",
              "  (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "  (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "  (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "  (bn4): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "  (fc1): Linear(in_features=32768, out_features=1, bias=True)\n",
              ")"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 16
        }
      ]
    },
    {
      "metadata": {
        "id": "l_S9j3r2VYQn",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "# L1 for the reconstruction terms, BCE for the real/fake adversarial terms.\n",
        "L1_criterion = nn.L1Loss()\n",
        "BCE_criterion = nn.BCELoss()\n",
        "\n",
        "# One Adam optimizer per network; lr=2e-5 with beta1=0.5 (common GAN setting).\n",
        "gen_train_op1 = optim.Adam(generator_one.parameters(), lr=2e-5, betas=(0.5, 0.999))\n",
        "gen_train_op2 = optim.Adam(generator_two.parameters(), lr=2e-5, betas=(0.5, 0.999))\n",
        "dis_train_op1 = optim.Adam(discriminator.parameters(), lr=2e-5, betas=(0.5, 0.999))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "icSXhH10VYQq",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "def train():\n",
        "    \"\"\"Two-stage pose-guided image generation training loop (PG2-style).\n",
        "\n",
        "    Stage 1 (steps < 22000 of each epoch): train generator_one alone with an\n",
        "    L1 + pose-masked-L1 reconstruction loss.\n",
        "    Stage 2 (remaining steps): train generator_two adversarially against the\n",
        "    discriminator; g_loss_1 is recomputed for logging only.\n",
        "    Checkpoints are written to Google Drive periodically.\n",
        "    \"\"\"\n",
        "    for epoch in range(10):\n",
        "        for step, example in enumerate(pose_loader):\n",
        "            [x, x_target, pose_target, mask_target] = example\n",
        "            # Legacy PyTorch 0.4 Variable wrappers; also moves tensors to GPU.\n",
        "            x = Variable(x.cuda())\n",
        "            x_target = Variable(x_target.cuda())\n",
        "            pose_target = Variable(pose_target.cuda())\n",
        "            mask_target = Variable(mask_target.cuda())\n",
        "            \n",
        "            # Coarse generation conditioned on source image + target pose maps.\n",
        "            G1 = generator_one(torch.cat([x, pose_target], dim=1))\n",
        "            if step < 22000:\n",
        "                # Stage 1: reconstruction-only training of generator_one.\n",
        "                # The mask term focuses the L1 loss on the target person region.\n",
        "                PoseMaskLoss1 = L1_criterion(G1 * mask_target, x_target * mask_target)\n",
        "                g_loss_1 = L1_criterion(G1, x_target) + PoseMaskLoss1\n",
        "                gen_train_op1.zero_grad()\n",
        "                g_loss_1.backward()\n",
        "                gen_train_op1.step()\n",
        "                print('Epoch: %d, Step: %d, g_loss1: %0.05f' %(epoch+1, step+1, g_loss_1))\n",
        "                if step % 1000 == 999:\n",
        "                    torch.save(generator_one.state_dict(), '/content/gdrive/My Drive/Colab Notebooks/data/train_generator_one')\n",
        "                continue\n",
        "\n",
        "            # Stage 2: generator_two predicts a residual correction on top of G1.\n",
        "            DiffMap = generator_two(torch.cat([G1, x], dim=1))\n",
        "            G2 = G1 + DiffMap\n",
        "            # Score real target, refined fake, and conditioning image in one\n",
        "            # discriminator pass. NOTE(review): the D_z[0..2] indexing below only\n",
        "            # pairs up correctly if the loader's batch size is 1 -- confirm.\n",
        "            triplet = torch.cat([x_target, G2, x], dim=0)\n",
        "            D_z = discriminator(triplet)\n",
        "            # The discriminator emits unbounded scores; clamp into [0, 1] so BCELoss\n",
        "            # accepts them. NOTE(review): clamping zeroes gradients at the bounds --\n",
        "            # a sigmoid (or BCEWithLogitsLoss) would be the usual choice.\n",
        "            D_z = torch.clamp(D_z, 0.0, 1.0)\n",
        "            D_z_pos_x_target, D_z_neg_g2, D_z_neg_x = D_z[0], D_z[1], D_z[2]\n",
        "            D_z_pos = D_z_pos_x_target\n",
        "            D_z_neg = torch.cat([D_z_neg_g2, D_z_neg_x], 0)\n",
        "\n",
        "            # Recomputed for logging only in stage 2 (no backward on g_loss_1 here).\n",
        "            PoseMaskLoss1 = L1_criterion(G1 * mask_target, x_target * mask_target)\n",
        "            g_loss_1 = L1_criterion(G1, x_target) + PoseMaskLoss1\n",
        "\n",
        "            # Generator-2 loss: push the 'negative' scores toward 1 (fool D), plus\n",
        "            # a heavily weighted (x50) masked reconstruction term on G2.\n",
        "            g_loss_2 = BCE_criterion(D_z_neg, torch.ones((2)).cuda())\n",
        "            PoseMaskLoss2 = L1_criterion(G2 * mask_target, x_target * mask_target)\n",
        "            L1Loss2 = L1_criterion(G2, x_target) + PoseMaskLoss2\n",
        "            g_loss_2 += 50*L1Loss2\n",
        "\n",
        "            # retain_graph=True: d_loss.backward() below reuses the same graph.\n",
        "            gen_train_op2.zero_grad()\n",
        "            g_loss_2.backward(retain_graph=True)\n",
        "            gen_train_op2.step()\n",
        "\n",
        "            # Discriminator loss: real target -> 1, (refined fake, source img) -> 0.\n",
        "            d_loss = BCE_criterion(D_z_pos, torch.ones((1)).cuda())\n",
        "            d_loss += BCE_criterion(D_z_neg, torch.zeros((2)).cuda())\n",
        "            d_loss /= 2\n",
        "\n",
        "            dis_train_op1.zero_grad()\n",
        "            d_loss.backward()\n",
        "            dis_train_op1.step()\n",
        "\n",
        "            print('Epoch: %d, Step: %d, g_loss1: %0.05f, g_loss2: %0.05f, d_loss: %0.05f' %(epoch+1, step+1, g_loss_1, g_loss_2, d_loss))\n",
        "            # Periodic checkpointing of all three networks to Google Drive.\n",
        "            if step % 100 == 99:\n",
        "                torch.save(generator_one.state_dict(), '/content/gdrive/My Drive/Colab Notebooks/data/train_generator_one')\n",
        "                torch.save(generator_two.state_dict(), '/content/gdrive/My Drive/Colab Notebooks/data/train_generator_two')\n",
        "                torch.save(discriminator.state_dict(), '/content/gdrive/My Drive/Colab Notebooks/data/train_discriminator')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "NCal6V-_fU2_",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "# Run the full training loop; checkpoints are saved to Drive along the way.\n",
        "train()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "8Bm7W8wC0Rhk",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        ""
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}