{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "mEWl8xxU3sKu"
      },
      "source": [
        "# Paired Style Transfer using Pix2Pix GAN\n",
        "\n",
        "In their work titled [“Image to Image Translation with Conditional Adversarial Networks”](https://arxiv.org/abs/1611.07004), Isola, Zhu, et al. present a conditional GAN network which is able to learn task-specific loss functions and thus work across datasets. As the name suggests, this GAN architecture takes a specific type of image as input and transforms it into a different domain. It is called pair-wise style transfer because the training set needs to have samples from both the source and target domains."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "N6FXnJHk32ND"
      },
      "source": [
        "## Load Libraries"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "id": "9KlERwzUJbNr"
      },
      "outputs": [],
      "source": [
        "import torch\n",
        "import torch.nn as nn\n",
        "import torch.nn.functional as F\n",
        "from torchvision import datasets\n",
        "from torch.autograd import Variable\n",
        "from torch.utils.data import Dataset\n",
        "from torch.utils.data import DataLoader\n",
        "from torchvision.utils import save_image\n",
        "import torchvision.transforms as transforms\n",
        "from torchvision.datasets.utils import download_and_extract_archive\n",
        "\n",
        "import os\n",
        "import glob\n",
        "import random\n",
        "import numpy as np\n",
        "\n",
        "from PIL import Image"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "E5hkGY1V4OG8"
      },
      "source": [
        "## U-Net Generator\n",
        "The U-Net architecture uses skip connections to shuttle important features between the input and outputs. In the case of the pix2pix GAN, skip connections are added between every $i$-th down-sampling and $(n-i)$-th up-sampling layer, where $n$ is the total number of layers in the generator. Each skip connection concatenates all channels from the $i$-th and $(n-i)$-th layers."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 3,
      "metadata": {
        "id": "S4qty9n1Jpxn"
      },
      "outputs": [],
      "source": [
        "class DownSampleBlock(nn.Module):\n",
        "    \"\"\"U-Net encoder block: 4x4 stride-2 conv that halves spatial dims.\n",
        "\n",
        "    Args:\n",
        "        input_channels: channels of the incoming feature map.\n",
        "        output_channels: channels produced by the convolution.\n",
        "        normalize: apply InstanceNorm2d after the conv (the Generator\n",
        "            disables it for the first and innermost encoder blocks).\n",
        "        dropout: dropout probability; 0 disables the Dropout layer.\n",
        "            Default 0.5 matches the previous hard-coded behaviour.\n",
        "    \"\"\"\n",
        "    def __init__(self, input_channels, output_channels, normalize=True, dropout=0.5):\n",
        "        super(DownSampleBlock, self).__init__()\n",
        "        layers = [\n",
        "            nn.Conv2d(\n",
        "                input_channels,\n",
        "                output_channels,\n",
        "                kernel_size=4,\n",
        "                stride=2,\n",
        "                padding=1,\n",
        "                bias=False)  # bias is redundant when followed by a norm layer\n",
        "            ]\n",
        "        if normalize:\n",
        "            layers.append(nn.InstanceNorm2d(output_channels))\n",
        "        layers.append(nn.LeakyReLU(0.2))\n",
        "        if dropout:\n",
        "            layers.append(nn.Dropout(dropout))\n",
        "        self.model = nn.Sequential(*layers)\n",
        "\n",
        "    def forward(self, x):\n",
        "        return self.model(x)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 4,
      "metadata": {
        "id": "X0FhVXlZJpu6"
      },
      "outputs": [],
      "source": [
        "class UpSampleBlock(nn.Module):\n",
        "    \"\"\"U-Net decoder block: 4x4 stride-2 transposed conv that doubles the\n",
        "    spatial dims, then concatenates the matching encoder feature map.\n",
        "\n",
        "    Args:\n",
        "        input_channels: channels of the incoming feature map.\n",
        "        output_channels: channels produced by the transposed convolution\n",
        "            (the skip concatenation doubles this for the next block).\n",
        "        dropout: dropout probability; 0 disables the Dropout layer.\n",
        "            Default 0.5 matches the previous hard-coded behaviour.\n",
        "    \"\"\"\n",
        "    def __init__(self, input_channels, output_channels, dropout=0.5):\n",
        "        super(UpSampleBlock, self).__init__()\n",
        "        layers = [\n",
        "            nn.ConvTranspose2d(\n",
        "                input_channels,\n",
        "                output_channels,\n",
        "                kernel_size=4,\n",
        "                stride=2,\n",
        "                padding=1,\n",
        "                bias=False),\n",
        "        ]\n",
        "        layers.append(nn.InstanceNorm2d(output_channels))\n",
        "        layers.append(nn.ReLU(inplace=True))\n",
        "        if dropout:\n",
        "            layers.append(nn.Dropout(dropout))\n",
        "        self.model = nn.Sequential(*layers)\n",
        "\n",
        "    def forward(self, x, skip_connection):\n",
        "        # upsample, then concatenate the encoder skip along the channel dim\n",
        "        x = self.model(x)\n",
        "        x = torch.cat((x, skip_connection), 1)\n",
        "\n",
        "        return x"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 5,
      "metadata": {
        "id": "nbjEnhYGJpsB"
      },
      "outputs": [],
      "source": [
        "class Generator(nn.Module):\n",
        "    \"\"\"U-Net generator for pix2pix.\n",
        "\n",
        "    Eight stride-2 encoder blocks shrink the input to a bottleneck (1x1 for\n",
        "    256x256 inputs), seven decoder blocks upsample with skip connections,\n",
        "    and a final upsample + conv + Tanh maps to an image in [-1, 1].\n",
        "    Output spatial size equals input size; assumes H and W are divisible\n",
        "    by 2**8 -- TODO confirm for non-256 inputs.\n",
        "    \"\"\"\n",
        "    def __init__(self, input_channels=3,out_channels=3):\n",
        "        super(Generator, self).__init__()\n",
        "\n",
        "        # encoder: each block halves H and W; no norm on first/innermost blocks\n",
        "        self.downsample1 = DownSampleBlock(input_channels,64, normalize=False)\n",
        "        self.downsample2 = DownSampleBlock(64, 128)\n",
        "        self.downsample3 = DownSampleBlock(128, 256)\n",
        "        self.downsample4 = DownSampleBlock(256, 512)\n",
        "        self.downsample5 = DownSampleBlock(512, 512)\n",
        "        self.downsample6 = DownSampleBlock(512, 512)\n",
        "        self.downsample7 = DownSampleBlock(512, 512)\n",
        "        self.downsample8 = DownSampleBlock(512, 512,normalize=False)\n",
        "\n",
        "        # decoder: skip concatenation doubles the input channels of blocks 2-7\n",
        "        self.upsample1 = UpSampleBlock(512, 512)\n",
        "        self.upsample2 = UpSampleBlock(1024, 512)\n",
        "        self.upsample3 = UpSampleBlock(1024, 512)\n",
        "        self.upsample4 = UpSampleBlock(1024, 512)\n",
        "        self.upsample5 = UpSampleBlock(1024, 256)\n",
        "        self.upsample6 = UpSampleBlock(512, 128)\n",
        "        self.upsample7 = UpSampleBlock(256, 64)\n",
        "\n",
        "        # upsample back to the input resolution and map 128 channels\n",
        "        # (64 decoded + 64 skipped) down to out_channels\n",
        "        self.final_layer = nn.Sequential(\n",
        "            nn.Upsample(scale_factor=2),\n",
        "            # padding left, right, top, bottom\n",
        "            nn.ZeroPad2d((1, 0, 1, 0)),\n",
        "            nn.Conv2d(128, out_channels, 4, padding=1),\n",
        "            nn.Tanh(),\n",
        "        )\n",
        "\n",
        "    def forward(self, x):\n",
        "        # downsampling blocks\n",
        "        d1 = self.downsample1(x)\n",
        "        d2 = self.downsample2(d1)\n",
        "        d3 = self.downsample3(d2)\n",
        "        d4 = self.downsample4(d3)\n",
        "        d5 = self.downsample5(d4)\n",
        "        d6 = self.downsample6(d5)\n",
        "        d7 = self.downsample7(d6)\n",
        "        d8 = self.downsample8(d7)\n",
        "        # upsampling blocks with skip connections\n",
        "        u1 = self.upsample1(d8, d7)\n",
        "        u2 = self.upsample2(u1, d6)\n",
        "        u3 = self.upsample3(u2, d5)\n",
        "        u4 = self.upsample4(u3, d4)\n",
        "        u5 = self.upsample5(u4, d3)\n",
        "        u6 = self.upsample6(u5, d2)\n",
        "        u7 = self.upsample7(u6, d1)\n",
        "\n",
        "        return self.final_layer(u7)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "d5gQ7yrG4Uzp"
      },
      "source": [
        "## Patch-GAN Discriminator\n",
        "The authors of pix2pix propose a Patch-GAN setup for the discriminator which takes the required inputs and generates an output of size $N \\times N$. Each element $x_{ij}$ of the $N \\times N$ output signifies whether the corresponding patch $ij$ in the generated image is real or fake. Each output patch can be traced back to its originating input patch based on the effective receptive field of each of the layers."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 6,
      "metadata": {
        "id": "TX2unZt1JppL"
      },
      "outputs": [],
      "source": [
        "class Discriminator(nn.Module):\n",
        "    \"\"\"PatchGAN discriminator: maps a (condition, image) pair to an NxN\n",
        "    grid of scores, one per receptive-field patch of the input.\n",
        "    \"\"\"\n",
        "    def __init__(self, input_channels=3):\n",
        "        super(Discriminator, self).__init__()\n",
        "\n",
        "        def discriminator_block(input_filters, output_filters, normalize=True):\n",
        "            # Conv -> (optional) InstanceNorm -> LeakyReLU, halving H and W.\n",
        "            # NOTE(review): the pix2pix paper skips normalization on the first\n",
        "            # C64 block; pass normalize=False there to match it exactly.\n",
        "            layers = [\n",
        "                nn.Conv2d(\n",
        "                    input_filters,\n",
        "                    output_filters,\n",
        "                    kernel_size=4,\n",
        "                    stride=2,\n",
        "                    padding=1)\n",
        "                ]\n",
        "            if normalize:\n",
        "                layers.append(nn.InstanceNorm2d(output_filters))\n",
        "            layers.append(nn.LeakyReLU(0.2, inplace=True))\n",
        "            return layers\n",
        "\n",
        "        self.model = nn.Sequential(\n",
        "            *discriminator_block(input_channels * 2, output_filters=64),\n",
        "            *discriminator_block(64, 128),\n",
        "            *discriminator_block(128, 256),\n",
        "            *discriminator_block(256, 512),\n",
        "            # padding left, right, top, bottom\n",
        "            nn.ZeroPad2d((1, 0, 1, 0)),\n",
        "            nn.Conv2d(512, 1, 4, padding=1, bias=False)\n",
        "        )\n",
        "\n",
        "    def forward(self, img_A, img_B):\n",
        "        # conditional discriminator: score the pair via channel concatenation\n",
        "        img_input = torch.cat((img_A, img_B), 1)\n",
        "        return self.model(img_input)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "i91ulRNU44HK"
      },
      "source": [
        "## Prepare Dataset Class"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 7,
      "metadata": {
        "id": "Anu2QD74Pw3L"
      },
      "outputs": [],
      "source": [
        "class ImageDataset(Dataset):\n",
        "    \"\"\"Paired dataset where each file holds the source (left half) and\n",
        "    target (right half) side by side, as in the released pix2pix datasets.\n",
        "\n",
        "    Args:\n",
        "        dataset_path: root folder containing train/ and val/ subfolders.\n",
        "        is_train: select the train split instead of val.\n",
        "        image_transformations: list of transforms applied to each half.\n",
        "    \"\"\"\n",
        "    def __init__(self, dataset_path, is_train=False, image_transformations=None):\n",
        "        self.transform = transforms.Compose(image_transformations)\n",
        "        split = 'train' if is_train else 'val'\n",
        "        self.files = sorted(glob.glob(os.path.join(dataset_path, split, '*.*')))\n",
        "\n",
        "    def __getitem__(self, index):\n",
        "        # force RGB so grayscale/palette files cannot break the 3-channel\n",
        "        # flip slicing and Normalize below\n",
        "        img = Image.open(self.files[index % len(self.files)]).convert(\"RGB\")\n",
        "        w, h = img.size\n",
        "        img_A = img.crop((0, 0, w / 2, h))\n",
        "        img_B = img.crop((w / 2, 0, w, h))\n",
        "\n",
        "        # apply the same random horizontal flip to both halves\n",
        "        if np.random.random() < 0.5:\n",
        "            img_A = Image.fromarray(np.array(img_A)[:, ::-1, :], \"RGB\")\n",
        "            img_B = Image.fromarray(np.array(img_B)[:, ::-1, :], \"RGB\")\n",
        "\n",
        "        img_A = self.transform(img_A)\n",
        "        img_B = self.transform(img_B)\n",
        "\n",
        "        return {\"A\": img_A, \"B\": img_B}\n",
        "\n",
        "    def __len__(self):\n",
        "        return len(self.files)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 8,
      "metadata": {
        "id": "Dk0_E8SXV3xB"
      },
      "outputs": [],
      "source": [
        "def sample_images(batches_done):\n",
        "    \"\"\"Save a grid of (condition, generated, target) validation images.\n",
        "\n",
        "    Relies on module-level globals: val_dataloader, Tensor, generator.\n",
        "    \"\"\"\n",
        "    imgs = next(iter(val_dataloader))\n",
        "    # condition\n",
        "    # NOTE(review): A and B look deliberately swapped here, so half B\n",
        "    # conditions the generator and half A is the target -- confirm intent\n",
        "    real_A = Variable(imgs[\"B\"].type(Tensor))\n",
        "    # real\n",
        "    real_B = Variable(imgs[\"A\"].type(Tensor))\n",
        "    # generated\n",
        "    fake_B = generator(real_A)\n",
        "    # stack the three rows along dim -2 (image height)\n",
        "    img_sample = torch.cat((real_A.data, fake_B.data, real_B.data), -2)\n",
        "    save_image(img_sample, f\"images/{batches_done}.png\", nrow=4, normalize=True)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "2bTJhEZG5brD"
      },
      "source": [
        "## Download Dataset"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "_8uS_P5PUDwA",
        "outputId": "a751e1bf-5218-47f3-b670-b1cb2f387379"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--2024-02-04 19:21:47--  http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/maps.tar.gz\n",
            "Resolving efrosgans.eecs.berkeley.edu (efrosgans.eecs.berkeley.edu)... 128.32.244.190\n",
            "Connecting to efrosgans.eecs.berkeley.edu (efrosgans.eecs.berkeley.edu)|128.32.244.190|:80... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 250242400 (239M) [application/x-gzip]\n",
            "Saving to: ‘images/maps.tar.gz.1’\n",
            "\n",
            "maps.tar.gz.1       100%[===================>] 238.65M  2.62MB/s    in 2m 9s   \n",
            "\n",
            "2024-02-04 19:23:56 (1.85 MB/s) - ‘images/maps.tar.gz.1’ saved [250242400/250242400]\n",
            "\n"
          ]
        }
      ],
      "source": [
        "# download the paired maps dataset released with pix2pix and unpack it\n",
        "# under ./images/ (yields ./images/maps with train/ and val/ splits)\n",
        "!wget -P images/ http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/maps.tar.gz\n",
        "!tar -zxf ./images/maps.tar.gz --directory ./images/"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_T7bKz_T5EP4"
      },
      "source": [
        "## Set Parameters"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 9,
      "metadata": {
        "id": "O1qyHFhZOvJz"
      },
      "outputs": [],
      "source": [
        "URL=\"http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/maps.tar.gz\"\n",
        "DATASET_PATH = \"./images/maps\"\n",
        "# the U-Net generator expects 256x256 inputs (8 stride-2 halvings -> 1x1)\n",
        "IMG_WIDTH = 256\n",
        "IMG_HEIGHT = 256\n",
        "NUM_CHANNELS = 3\n",
        "BATCH_SIZE = 64\n",
        "N_EPOCHS = 200\n",
        "# batches between saved sample grids (training logs show 18 batches/epoch)\n",
        "SAMPLE_INTERVAL = 18"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 10,
      "metadata": {
        "id": "RqxpSPgrPw0h"
      },
      "outputs": [],
      "source": [
        "CUDA = True if torch.cuda.is_available() else False"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "YAHrHz84RlJU"
      },
      "outputs": [],
      "source": [
        "# output folders: dataset + generated samples under images/, model\n",
        "# checkpoints under saved_models/\n",
        "os.makedirs(\"images/maps/\", exist_ok=True)\n",
        "os.makedirs(\"saved_models/\", exist_ok=True)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "6Jtbrajj5MlI"
      },
      "source": [
        "## Calculate Receptive Field for Patch"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 11,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "yWfLxI-DRlCe",
        "outputId": "9a834654-93b2-40de-bbc2-cd09e7d601c7"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Patch Shape=(1, 16, 16)\n"
          ]
        }
      ],
      "source": [
        "# The discriminator stacks 4 stride-2 convolutions, so its score grid is\n",
        "# the input resolution divided by 2**4 (256 -> 16x16 patches).\n",
        "patch = IMG_HEIGHT // 2 ** 4\n",
        "patch_gan_shape = (1, patch, patch)\n",
        "print(\"Patch Shape={}\".format(patch_gan_shape))"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "FbXMgH8_5Pyp"
      },
      "source": [
        "## Get Generator and Discriminator Model Objects"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 12,
      "metadata": {
        "id": "CFJbgyHeRlFh"
      },
      "outputs": [],
      "source": [
        "# Initialize generator and discriminator\n",
        "generator = Generator()\n",
        "discriminator = Discriminator()\n",
        "\n",
        "# Loss functions\n",
        "# MSE on the PatchGAN score grid (least-squares GAN objective); L1 keeps\n",
        "# the generated image close to the paired ground truth\n",
        "adversarial_loss = torch.nn.MSELoss()\n",
        "pixelwise_loss = torch.nn.L1Loss()\n",
        "\n",
        "# Loss weight of L1 pixel-wise loss between translated image and real image\n",
        "weight_pixel_wise_identity = 100\n",
        "\n",
        "# Optimizers\n",
        "# lr=0.0002 with betas=(0.5, 0.999), the settings used in the pix2pix paper\n",
        "optimizer_G = torch.optim.Adam(generator.parameters(), lr=0.0002, betas=(0.5, 0.999))\n",
        "optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 13,
      "metadata": {
        "id": "6dr3Jr7ySNJ4"
      },
      "outputs": [],
      "source": [
        "# Move the models and loss modules to the GPU when one is available, and\n",
        "# pick the matching float-tensor constructor used throughout training.\n",
        "if CUDA:\n",
        "    generator = generator.cuda()\n",
        "    discriminator = discriminator.cuda()\n",
        "    adversarial_loss.cuda()\n",
        "    pixelwise_loss.cuda()\n",
        "    Tensor = torch.cuda.FloatTensor\n",
        "else:\n",
        "    Tensor = torch.FloatTensor"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 14,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "RiGJnEmzIUq2",
        "outputId": "0decfbbf-5f04-4bd4-eea5-d5b325f144c1"
      },
      "outputs": [
        {
          "data": {
            "text/plain": [
              "torch.Size([64, 3, 4, 4])"
            ]
          },
          "execution_count": 14,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "# sanity check: the first generator parameter is the downsample1 conv\n",
        "# kernel with shape (out_channels=64, in_channels=3, 4, 4)\n",
        "first_parameter = next(generator.parameters())\n",
        "input_shape = first_parameter.size()\n",
        "input_shape"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "VvRfehAN5o8I"
      },
      "source": [
        "## Define Transformations and Dataloaders"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "DjbEdO81SNHa"
      },
      "outputs": [],
      "source": [
        "image_transformations = [\n",
        "    # use the torchvision enum rather than the deprecated PIL constant\n",
        "    transforms.Resize((IMG_HEIGHT, IMG_WIDTH), interpolation=transforms.InterpolationMode.BICUBIC),\n",
        "    transforms.ToTensor(),\n",
        "    # map [0, 1] tensors to [-1, 1] to match the generator's Tanh output\n",
        "    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n",
        "]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "lv0x9dX_SNEs"
      },
      "outputs": [],
      "source": [
        "# training loader: shuffled paired images from the train/ split\n",
        "train_dataloader = DataLoader(\n",
        "    ImageDataset(DATASET_PATH, is_train=True,image_transformations=image_transformations),\n",
        "    batch_size=BATCH_SIZE,\n",
        "    shuffle=True\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "3cA2eGNXSNBz"
      },
      "outputs": [],
      "source": [
        "# validation loader: smaller batches (BATCH_SIZE//8 = 8) used only for\n",
        "# the periodic sample grids saved by sample_images\n",
        "val_dataloader = DataLoader(\n",
        "    ImageDataset(DATASET_PATH,image_transformations=image_transformations),\n",
        "    batch_size=BATCH_SIZE//8,\n",
        "    shuffle=True\n",
        ")"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "sfCtlMsj5tTV"
      },
      "source": [
        "## Training Begins!"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        },
        "id": "41d5MJCGZoZv",
        "outputId": "f9921743-fde8-4857-d0a8-8b33d1575d52"
      },
      "outputs": [
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "<ipython-input-18-3ceec9520300>:9: UserWarning: The torch.cuda.*DtypeTensor constructors are no longer recommended. It's best to use methods such as torch.tensor(data, dtype=*, device='cuda') to create tensors. (Triggered internally at ../torch/csrc/tensor/python_tensor.cpp:83.)\n",
            "  valid = Variable(Tensor(np.ones((real_A.size(0), *patch_gan_shape))), requires_grad=False)\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Epoch: 0/200-Batch: 0/18--D.loss:0.7406,G.loss:57.0879--Adv.Loss:1.3402\n",
            "Epoch: 0/200-Batch: 1/18--D.loss:0.4748,G.loss:49.7242--Adv.Loss:0.4805\n",
            "Epoch: 0/200-Batch: 2/18--D.loss:0.7498,G.loss:42.0825--Adv.Loss:0.5350\n",
            "Epoch: 0/200-Batch: 3/18--D.loss:0.4612,G.loss:37.5967--Adv.Loss:0.2657\n",
            "Epoch: 0/200-Batch: 4/18--D.loss:0.2983,G.loss:34.0058--Adv.Loss:0.3665\n",
            "Epoch: 0/200-Batch: 5/18--D.loss:0.3036,G.loss:32.1782--Adv.Loss:0.4951\n",
            "Epoch: 0/200-Batch: 6/18--D.loss:0.2849,G.loss:30.7707--Adv.Loss:0.4436\n",
            "Epoch: 0/200-Batch: 7/18--D.loss:0.2765,G.loss:30.4441--Adv.Loss:0.3500\n",
            "Epoch: 0/200-Batch: 8/18--D.loss:0.2622,G.loss:30.6459--Adv.Loss:0.3463\n",
            "Epoch: 0/200-Batch: 9/18--D.loss:0.2508,G.loss:30.2438--Adv.Loss:0.4105\n",
            "Epoch: 0/200-Batch: 10/18--D.loss:0.2253,G.loss:28.3003--Adv.Loss:0.4241\n",
            "Epoch: 0/200-Batch: 11/18--D.loss:0.2179,G.loss:28.3821--Adv.Loss:0.4165\n",
            "Epoch: 0/200-Batch: 12/18--D.loss:0.2142,G.loss:28.2538--Adv.Loss:0.4410\n",
            "Epoch: 0/200-Batch: 13/18--D.loss:0.2162,G.loss:27.8362--Adv.Loss:0.4918\n",
            "Epoch: 0/200-Batch: 14/18--D.loss:0.2295,G.loss:28.5193--Adv.Loss:0.5323\n",
            "Epoch: 0/200-Batch: 15/18--D.loss:0.1802,G.loss:28.8353--Adv.Loss:0.5089\n",
            "Epoch: 0/200-Batch: 16/18--D.loss:0.1607,G.loss:27.9462--Adv.Loss:0.5593\n",
            "Epoch: 0/200-Batch: 17/18--D.loss:0.1408,G.loss:26.2013--Adv.Loss:0.5686\n",
            "Epoch: 1/200-Batch: 0/18--D.loss:0.1320,G.loss:27.5216--Adv.Loss:0.5661\n",
            "Epoch: 1/200-Batch: 1/18--D.loss:0.1134,G.loss:27.7035--Adv.Loss:0.7153\n",
            "Epoch: 1/200-Batch: 2/18--D.loss:0.1004,G.loss:28.5795--Adv.Loss:0.6627\n",
            "Epoch: 1/200-Batch: 3/18--D.loss:0.0869,G.loss:27.1145--Adv.Loss:0.7882\n",
            "Epoch: 1/200-Batch: 4/18--D.loss:0.0811,G.loss:26.9475--Adv.Loss:0.7703\n",
            "Epoch: 1/200-Batch: 5/18--D.loss:0.0661,G.loss:27.4684--Adv.Loss:0.8342\n",
            "Epoch: 1/200-Batch: 6/18--D.loss:0.0598,G.loss:27.9923--Adv.Loss:0.9038\n",
            "Epoch: 1/200-Batch: 7/18--D.loss:0.0661,G.loss:26.2709--Adv.Loss:0.7608\n",
            "Epoch: 1/200-Batch: 8/18--D.loss:0.0937,G.loss:27.4022--Adv.Loss:1.2326\n",
            "Epoch: 1/200-Batch: 9/18--D.loss:0.2633,G.loss:26.6663--Adv.Loss:0.2411\n",
            "Epoch: 1/200-Batch: 10/18--D.loss:0.3303,G.loss:27.6168--Adv.Loss:1.8319\n",
            "Epoch: 1/200-Batch: 11/18--D.loss:0.1295,G.loss:26.2116--Adv.Loss:0.4646\n",
            "Epoch: 1/200-Batch: 12/18--D.loss:0.0697,G.loss:27.2877--Adv.Loss:0.6568\n",
            "Epoch: 1/200-Batch: 13/18--D.loss:0.0787,G.loss:27.3338--Adv.Loss:1.0212\n",
            "Epoch: 1/200-Batch: 14/18--D.loss:0.0691,G.loss:27.1863--Adv.Loss:0.7491\n",
            "Epoch: 1/200-Batch: 15/18--D.loss:0.0824,G.loss:26.8444--Adv.Loss:0.6762\n",
            "Epoch: 1/200-Batch: 16/18--D.loss:0.0821,G.loss:25.2591--Adv.Loss:0.8992\n",
            "Epoch: 1/200-Batch: 17/18--D.loss:0.0855,G.loss:23.4417--Adv.Loss:0.6507\n",
            "Epoch: 2/200-Batch: 0/18--D.loss:0.0876,G.loss:26.1694--Adv.Loss:0.9340\n",
            "Epoch: 2/200-Batch: 1/18--D.loss:0.0747,G.loss:25.9364--Adv.Loss:0.6509\n",
            "Epoch: 2/200-Batch: 2/18--D.loss:0.0584,G.loss:25.7269--Adv.Loss:0.8002\n",
            "Epoch: 2/200-Batch: 3/18--D.loss:0.0609,G.loss:27.2547--Adv.Loss:0.9416\n",
            "Epoch: 2/200-Batch: 4/18--D.loss:0.0817,G.loss:24.6385--Adv.Loss:0.5502\n",
            "Epoch: 2/200-Batch: 5/18--D.loss:0.0884,G.loss:28.5131--Adv.Loss:1.2694\n",
            "Epoch: 2/200-Batch: 6/18--D.loss:0.0945,G.loss:25.8184--Adv.Loss:0.5160\n",
            "Epoch: 2/200-Batch: 7/18--D.loss:0.0808,G.loss:26.3931--Adv.Loss:1.3657\n",
            "Epoch: 2/200-Batch: 8/18--D.loss:0.0642,G.loss:25.0023--Adv.Loss:0.6577\n",
            "Epoch: 2/200-Batch: 9/18--D.loss:0.0440,G.loss:26.3825--Adv.Loss:1.0548\n",
            "Epoch: 2/200-Batch: 10/18--D.loss:0.0365,G.loss:25.2378--Adv.Loss:0.8827\n",
            "Epoch: 2/200-Batch: 11/18--D.loss:0.0370,G.loss:26.1034--Adv.Loss:0.8958\n",
            "Epoch: 2/200-Batch: 12/18--D.loss:0.0364,G.loss:26.8555--Adv.Loss:0.9794\n",
            "Epoch: 2/200-Batch: 13/18--D.loss:0.0341,G.loss:25.2192--Adv.Loss:0.8859\n",
            "Epoch: 2/200-Batch: 14/18--D.loss:0.0375,G.loss:25.6982--Adv.Loss:1.0606\n",
            "Epoch: 2/200-Batch: 15/18--D.loss:0.0407,G.loss:26.2781--Adv.Loss:0.7920\n",
            "Epoch: 2/200-Batch: 16/18--D.loss:0.0424,G.loss:25.9125--Adv.Loss:1.1978\n",
            "Epoch: 2/200-Batch: 17/18--D.loss:0.0499,G.loss:22.4673--Adv.Loss:0.6737\n",
            "Epoch: 3/200-Batch: 0/18--D.loss:0.0484,G.loss:25.7166--Adv.Loss:1.2199\n",
            "Epoch: 3/200-Batch: 1/18--D.loss:0.0454,G.loss:24.9632--Adv.Loss:0.7299\n",
            "Epoch: 3/200-Batch: 2/18--D.loss:0.0413,G.loss:24.2505--Adv.Loss:1.2356\n",
            "Epoch: 3/200-Batch: 3/18--D.loss:0.0470,G.loss:24.6680--Adv.Loss:0.7010\n",
            "Epoch: 3/200-Batch: 4/18--D.loss:0.0435,G.loss:26.2379--Adv.Loss:1.1266\n",
            "Epoch: 3/200-Batch: 5/18--D.loss:0.0343,G.loss:24.1910--Adv.Loss:0.8576\n",
            "Epoch: 3/200-Batch: 6/18--D.loss:0.0355,G.loss:24.5233--Adv.Loss:0.9753\n",
            "Epoch: 3/200-Batch: 7/18--D.loss:0.0343,G.loss:25.7527--Adv.Loss:1.0192\n",
            "Epoch: 3/200-Batch: 8/18--D.loss:0.0319,G.loss:25.7935--Adv.Loss:0.9909\n",
            "Epoch: 3/200-Batch: 9/18--D.loss:0.0304,G.loss:24.4250--Adv.Loss:0.9861\n",
            "Epoch: 3/200-Batch: 10/18--D.loss:0.0274,G.loss:24.9105--Adv.Loss:0.9310\n",
            "Epoch: 3/200-Batch: 11/18--D.loss:0.0257,G.loss:25.8365--Adv.Loss:1.0532\n",
            "Epoch: 3/200-Batch: 12/18--D.loss:0.0316,G.loss:26.1291--Adv.Loss:0.8198\n",
            "Epoch: 3/200-Batch: 13/18--D.loss:0.0337,G.loss:24.8096--Adv.Loss:1.1879\n",
            "Epoch: 3/200-Batch: 14/18--D.loss:0.0397,G.loss:24.6657--Adv.Loss:0.7159\n",
            "Epoch: 3/200-Batch: 15/18--D.loss:0.0502,G.loss:25.6199--Adv.Loss:1.3366\n",
            "Epoch: 3/200-Batch: 16/18--D.loss:0.0923,G.loss:24.9341--Adv.Loss:0.4800\n",
            "Epoch: 3/200-Batch: 17/18--D.loss:0.1539,G.loss:25.1303--Adv.Loss:1.8165\n",
            "Epoch: 4/200-Batch: 0/18--D.loss:0.1932,G.loss:23.1393--Adv.Loss:0.3460\n",
            "Epoch: 4/200-Batch: 1/18--D.loss:0.0863,G.loss:26.0065--Adv.Loss:1.4875\n",
            "Epoch: 4/200-Batch: 2/18--D.loss:0.0376,G.loss:24.9932--Adv.Loss:0.8128\n",
            "Epoch: 4/200-Batch: 3/18--D.loss:0.0400,G.loss:24.7563--Adv.Loss:0.7427\n",
            "Epoch: 4/200-Batch: 4/18--D.loss:0.0548,G.loss:24.0621--Adv.Loss:1.3359\n",
            "Epoch: 4/200-Batch: 5/18--D.loss:0.0497,G.loss:24.2910--Adv.Loss:0.6509\n",
            "Epoch: 4/200-Batch: 6/18--D.loss:0.0299,G.loss:23.9816--Adv.Loss:1.1143\n",
            "Epoch: 4/200-Batch: 7/18--D.loss:0.0220,G.loss:24.0239--Adv.Loss:0.9532\n",
            "Epoch: 4/200-Batch: 8/18--D.loss:0.0255,G.loss:24.0634--Adv.Loss:0.9502\n",
            "Epoch: 4/200-Batch: 9/18--D.loss:0.0239,G.loss:25.3449--Adv.Loss:0.9464\n",
            "Epoch: 4/200-Batch: 10/18--D.loss:0.0227,G.loss:25.0381--Adv.Loss:1.0319\n",
            "Epoch: 4/200-Batch: 11/18--D.loss:0.0229,G.loss:23.4089--Adv.Loss:0.8963\n",
            "Epoch: 4/200-Batch: 12/18--D.loss:0.0211,G.loss:25.2915--Adv.Loss:1.0362\n",
            "Epoch: 4/200-Batch: 13/18--D.loss:0.0218,G.loss:23.9723--Adv.Loss:0.9926\n",
            "Epoch: 4/200-Batch: 14/18--D.loss:0.0214,G.loss:24.4967--Adv.Loss:0.9075\n",
            "Epoch: 4/200-Batch: 15/18--D.loss:0.0224,G.loss:23.9892--Adv.Loss:1.1039\n",
            "Epoch: 4/200-Batch: 16/18--D.loss:0.0248,G.loss:24.4028--Adv.Loss:0.7987\n",
            "Epoch: 4/200-Batch: 17/18--D.loss:0.0402,G.loss:24.2875--Adv.Loss:1.2989\n",
            "Epoch: 5/200-Batch: 0/18--D.loss:0.0650,G.loss:23.0371--Adv.Loss:0.5369\n",
            "Epoch: 5/200-Batch: 1/18--D.loss:0.0738,G.loss:24.8492--Adv.Loss:1.5047\n",
            "Epoch: 5/200-Batch: 2/18--D.loss:0.0708,G.loss:23.8107--Adv.Loss:0.5538\n",
            "Epoch: 5/200-Batch: 3/18--D.loss:0.0401,G.loss:25.8459--Adv.Loss:1.2256\n",
            "Epoch: 5/200-Batch: 4/18--D.loss:0.0256,G.loss:23.7521--Adv.Loss:0.9345\n",
            "Epoch: 5/200-Batch: 5/18--D.loss:0.0300,G.loss:23.6810--Adv.Loss:0.8525\n",
            "Epoch: 5/200-Batch: 6/18--D.loss:0.0297,G.loss:25.0292--Adv.Loss:1.1217\n",
            "Epoch: 5/200-Batch: 7/18--D.loss:0.0236,G.loss:24.3914--Adv.Loss:0.8980\n",
            "Epoch: 5/200-Batch: 8/18--D.loss:0.0187,G.loss:23.9776--Adv.Loss:1.0595\n",
            "Epoch: 5/200-Batch: 9/18--D.loss:0.0194,G.loss:23.5810--Adv.Loss:0.9226\n",
            "Epoch: 5/200-Batch: 10/18--D.loss:0.0206,G.loss:24.0112--Adv.Loss:1.0195\n",
            "Epoch: 5/200-Batch: 11/18--D.loss:0.0198,G.loss:23.8968--Adv.Loss:0.9024\n",
            "Epoch: 5/200-Batch: 12/18--D.loss:0.0207,G.loss:24.1051--Adv.Loss:1.0504\n",
            "Epoch: 5/200-Batch: 13/18--D.loss:0.0182,G.loss:24.3014--Adv.Loss:0.9707\n",
            "Epoch: 5/200-Batch: 14/18--D.loss:0.0183,G.loss:23.7549--Adv.Loss:0.8980\n",
            "Epoch: 5/200-Batch: 15/18--D.loss:0.0210,G.loss:23.5614--Adv.Loss:1.1513\n",
            "Epoch: 5/200-Batch: 16/18--D.loss:0.0290,G.loss:22.4569--Adv.Loss:0.7667\n",
            "Epoch: 5/200-Batch: 17/18--D.loss:0.0297,G.loss:26.5525--Adv.Loss:1.2500\n",
            "Epoch: 6/200-Batch: 0/18--D.loss:0.0297,G.loss:24.1139--Adv.Loss:0.7633\n",
            "Epoch: 6/200-Batch: 1/18--D.loss:0.0258,G.loss:23.7579--Adv.Loss:1.1565\n",
            "Epoch: 6/200-Batch: 2/18--D.loss:0.0248,G.loss:23.5776--Adv.Loss:0.7893\n",
            "Epoch: 6/200-Batch: 3/18--D.loss:0.0214,G.loss:23.8736--Adv.Loss:1.1212\n",
            "Epoch: 6/200-Batch: 4/18--D.loss:0.0196,G.loss:23.7415--Adv.Loss:0.8714\n",
            "Epoch: 6/200-Batch: 5/18--D.loss:0.0178,G.loss:23.5203--Adv.Loss:1.0110\n",
            "Epoch: 6/200-Batch: 6/18--D.loss:0.0169,G.loss:22.7773--Adv.Loss:0.9720\n",
            "Epoch: 6/200-Batch: 7/18--D.loss:0.0180,G.loss:22.6101--Adv.Loss:0.9539\n",
            "Epoch: 6/200-Batch: 8/18--D.loss:0.0169,G.loss:22.3710--Adv.Loss:1.0445\n",
            "Epoch: 6/200-Batch: 9/18--D.loss:0.0187,G.loss:23.4264--Adv.Loss:0.8892\n",
            "Epoch: 6/200-Batch: 10/18--D.loss:0.0208,G.loss:23.5688--Adv.Loss:1.0752\n",
            "Epoch: 6/200-Batch: 11/18--D.loss:0.0216,G.loss:22.7211--Adv.Loss:0.8800\n",
            "Epoch: 6/200-Batch: 12/18--D.loss:0.0217,G.loss:23.2624--Adv.Loss:1.1052\n",
            "Epoch: 6/200-Batch: 13/18--D.loss:0.0252,G.loss:23.1558--Adv.Loss:0.8242\n",
            "Epoch: 6/200-Batch: 14/18--D.loss:0.0313,G.loss:22.1414--Adv.Loss:1.2083\n",
            "Epoch: 6/200-Batch: 15/18--D.loss:0.0409,G.loss:22.3079--Adv.Loss:0.6745\n",
            "Epoch: 6/200-Batch: 16/18--D.loss:0.0552,G.loss:24.0730--Adv.Loss:1.4403\n",
            "Epoch: 6/200-Batch: 17/18--D.loss:0.0773,G.loss:20.4404--Adv.Loss:0.5148\n",
            "Epoch: 7/200-Batch: 0/18--D.loss:0.0966,G.loss:24.3719--Adv.Loss:1.5688\n",
            "Epoch: 7/200-Batch: 1/18--D.loss:0.0958,G.loss:22.4419--Adv.Loss:0.4816\n",
            "Epoch: 7/200-Batch: 2/18--D.loss:0.0667,G.loss:22.6539--Adv.Loss:1.3862\n",
            "Epoch: 7/200-Batch: 3/18--D.loss:0.0402,G.loss:22.7968--Adv.Loss:0.6793\n",
            "Epoch: 7/200-Batch: 4/18--D.loss:0.0220,G.loss:23.1378--Adv.Loss:1.0658\n",
            "Epoch: 7/200-Batch: 5/18--D.loss:0.0158,G.loss:22.9886--Adv.Loss:0.9869\n",
            "Epoch: 7/200-Batch: 6/18--D.loss:0.0164,G.loss:22.8277--Adv.Loss:0.8991\n",
            "Epoch: 7/200-Batch: 7/18--D.loss:0.0174,G.loss:22.1286--Adv.Loss:1.0590\n",
            "Epoch: 7/200-Batch: 8/18--D.loss:0.0156,G.loss:21.9497--Adv.Loss:0.8761\n",
            "Epoch: 7/200-Batch: 9/18--D.loss:0.0182,G.loss:23.3181--Adv.Loss:0.9757\n",
            "Epoch: 7/200-Batch: 10/18--D.loss:0.0140,G.loss:24.0373--Adv.Loss:1.0374\n",
            "Epoch: 7/200-Batch: 11/18--D.loss:0.0145,G.loss:22.4244--Adv.Loss:0.9472\n",
            "Epoch: 7/200-Batch: 12/18--D.loss:0.0131,G.loss:21.6470--Adv.Loss:0.9740\n",
            "Epoch: 7/200-Batch: 13/18--D.loss:0.0143,G.loss:22.9937--Adv.Loss:0.9625\n",
            "Epoch: 7/200-Batch: 14/18--D.loss:0.0157,G.loss:22.9399--Adv.Loss:0.9736\n",
            "Epoch: 7/200-Batch: 15/18--D.loss:0.0132,G.loss:22.2382--Adv.Loss:0.9701\n",
            "Epoch: 7/200-Batch: 16/18--D.loss:0.0148,G.loss:23.2447--Adv.Loss:0.9340\n",
            "Epoch: 7/200-Batch: 17/18--D.loss:0.0175,G.loss:23.2251--Adv.Loss:1.1535\n",
            "Epoch: 8/200-Batch: 0/18--D.loss:0.0301,G.loss:23.3814--Adv.Loss:0.7284\n",
            "Epoch: 8/200-Batch: 1/18--D.loss:0.0402,G.loss:24.4135--Adv.Loss:1.3588\n",
            "Epoch: 8/200-Batch: 2/18--D.loss:0.0454,G.loss:21.2799--Adv.Loss:0.6556\n",
            "Epoch: 8/200-Batch: 3/18--D.loss:0.0386,G.loss:22.0610--Adv.Loss:1.2761\n",
            "Epoch: 8/200-Batch: 4/18--D.loss:0.0386,G.loss:21.1014--Adv.Loss:0.6625\n",
            "Epoch: 8/200-Batch: 5/18--D.loss:0.0381,G.loss:23.6647--Adv.Loss:1.3184\n",
            "Epoch: 8/200-Batch: 6/18--D.loss:0.0306,G.loss:21.2848--Adv.Loss:0.7268\n",
            "Epoch: 8/200-Batch: 7/18--D.loss:0.0216,G.loss:22.8861--Adv.Loss:1.1062\n",
            "Epoch: 8/200-Batch: 8/18--D.loss:0.0181,G.loss:21.3296--Adv.Loss:0.8801\n",
            "Epoch: 8/200-Batch: 9/18--D.loss:0.0168,G.loss:22.2638--Adv.Loss:1.0263\n",
            "Epoch: 8/200-Batch: 10/18--D.loss:0.0142,G.loss:20.8746--Adv.Loss:0.9695\n",
            "Epoch: 8/200-Batch: 11/18--D.loss:0.0166,G.loss:23.1689--Adv.Loss:0.8880\n",
            "Epoch: 8/200-Batch: 12/18--D.loss:0.0204,G.loss:22.5358--Adv.Loss:1.1858\n",
            "Epoch: 8/200-Batch: 13/18--D.loss:0.0277,G.loss:21.4733--Adv.Loss:0.7269\n",
            "Epoch: 8/200-Batch: 14/18--D.loss:0.0321,G.loss:22.3297--Adv.Loss:1.2749\n",
            "Epoch: 8/200-Batch: 15/18--D.loss:0.0410,G.loss:22.1004--Adv.Loss:0.6446\n",
            "Epoch: 8/200-Batch: 16/18--D.loss:0.0536,G.loss:22.9138--Adv.Loss:1.4065\n",
            "Epoch: 8/200-Batch: 17/18--D.loss:0.0846,G.loss:23.2531--Adv.Loss:0.4523\n",
            "Epoch: 9/200-Batch: 0/18--D.loss:0.1635,G.loss:23.0225--Adv.Loss:1.7106\n",
            "Epoch: 9/200-Batch: 1/18--D.loss:0.2047,G.loss:21.4365--Adv.Loss:0.2484\n",
            "Epoch: 9/200-Batch: 2/18--D.loss:0.1148,G.loss:22.6841--Adv.Loss:1.4884\n",
            "Epoch: 9/200-Batch: 3/18--D.loss:0.0392,G.loss:21.6061--Adv.Loss:0.6678\n",
            "Epoch: 9/200-Batch: 4/18--D.loss:0.0220,G.loss:21.7551--Adv.Loss:0.8718\n",
            "Epoch: 9/200-Batch: 5/18--D.loss:0.0203,G.loss:21.2799--Adv.Loss:1.0634\n",
            "Epoch: 9/200-Batch: 6/18--D.loss:0.0159,G.loss:21.1949--Adv.Loss:0.8597\n",
            "Epoch: 9/200-Batch: 7/18--D.loss:0.0137,G.loss:22.8708--Adv.Loss:0.9667\n",
            "Epoch: 9/200-Batch: 8/18--D.loss:0.0127,G.loss:22.1363--Adv.Loss:1.0967\n",
            "Epoch: 9/200-Batch: 9/18--D.loss:0.0134,G.loss:21.5846--Adv.Loss:0.9202\n",
            "Epoch: 9/200-Batch: 10/18--D.loss:0.0123,G.loss:22.0697--Adv.Loss:1.0532\n",
            "Epoch: 9/200-Batch: 11/18--D.loss:0.0122,G.loss:21.2308--Adv.Loss:0.9205\n",
            "Epoch: 9/200-Batch: 12/18--D.loss:0.0104,G.loss:21.7871--Adv.Loss:1.0130\n",
            "Epoch: 9/200-Batch: 13/18--D.loss:0.0110,G.loss:22.4109--Adv.Loss:1.0041\n",
            "Epoch: 9/200-Batch: 14/18--D.loss:0.0111,G.loss:22.6769--Adv.Loss:0.9165\n",
            "Epoch: 9/200-Batch: 15/18--D.loss:0.0112,G.loss:21.8667--Adv.Loss:1.0294\n",
            "Epoch: 9/200-Batch: 16/18--D.loss:0.0104,G.loss:23.2794--Adv.Loss:0.9474\n",
            "Epoch: 9/200-Batch: 17/18--D.loss:0.0164,G.loss:19.2043--Adv.Loss:1.1163\n",
            "Epoch: 10/200-Batch: 0/18--D.loss:0.0354,G.loss:24.4528--Adv.Loss:0.6514\n",
            "Epoch: 10/200-Batch: 1/18--D.loss:0.0469,G.loss:21.9076--Adv.Loss:1.3714\n",
            "Epoch: 10/200-Batch: 2/18--D.loss:0.0244,G.loss:21.7880--Adv.Loss:0.7939\n",
            "Epoch: 10/200-Batch: 3/18--D.loss:0.0152,G.loss:22.7107--Adv.Loss:0.9477\n",
            "Epoch: 10/200-Batch: 4/18--D.loss:0.0202,G.loss:21.3628--Adv.Loss:1.1592\n",
            "Epoch: 10/200-Batch: 5/18--D.loss:0.0130,G.loss:21.8758--Adv.Loss:0.9092\n",
            "Epoch: 10/200-Batch: 6/18--D.loss:0.0092,G.loss:21.2156--Adv.Loss:0.9843\n",
            "Epoch: 10/200-Batch: 7/18--D.loss:0.0116,G.loss:23.0952--Adv.Loss:1.0430\n",
            "Epoch: 10/200-Batch: 8/18--D.loss:0.0106,G.loss:22.0971--Adv.Loss:0.9372\n",
            "Epoch: 10/200-Batch: 9/18--D.loss:0.0096,G.loss:22.2522--Adv.Loss:0.9447\n",
            "Epoch: 10/200-Batch: 10/18--D.loss:0.0123,G.loss:21.6484--Adv.Loss:1.0534\n",
            "Epoch: 10/200-Batch: 11/18--D.loss:0.0111,G.loss:21.1326--Adv.Loss:0.8895\n",
            "Epoch: 10/200-Batch: 12/18--D.loss:0.0114,G.loss:21.2885--Adv.Loss:1.0598\n",
            "Epoch: 10/200-Batch: 13/18--D.loss:0.0107,G.loss:22.3333--Adv.Loss:0.9220\n",
            "Epoch: 10/200-Batch: 14/18--D.loss:0.0102,G.loss:22.3811--Adv.Loss:1.0252\n",
            "Epoch: 10/200-Batch: 15/18--D.loss:0.0090,G.loss:22.7434--Adv.Loss:0.9639\n",
            "Epoch: 10/200-Batch: 16/18--D.loss:0.0100,G.loss:21.7908--Adv.Loss:1.0333\n",
            "Epoch: 10/200-Batch: 17/18--D.loss:0.0090,G.loss:21.5552--Adv.Loss:0.9740\n",
            "Epoch: 11/200-Batch: 0/18--D.loss:0.0095,G.loss:21.4723--Adv.Loss:0.9872\n",
            "Epoch: 11/200-Batch: 1/18--D.loss:0.0093,G.loss:22.3856--Adv.Loss:0.9736\n",
            "Epoch: 11/200-Batch: 2/18--D.loss:0.0096,G.loss:20.2629--Adv.Loss:1.0169\n",
            "Epoch: 11/200-Batch: 3/18--D.loss:0.0105,G.loss:22.5678--Adv.Loss:0.8916\n",
            "Epoch: 11/200-Batch: 4/18--D.loss:0.0104,G.loss:23.0761--Adv.Loss:1.0818\n",
            "Epoch: 11/200-Batch: 5/18--D.loss:0.0086,G.loss:21.1473--Adv.Loss:0.9591\n",
            "Epoch: 11/200-Batch: 6/18--D.loss:0.0102,G.loss:22.7651--Adv.Loss:0.9496\n",
            "Epoch: 11/200-Batch: 7/18--D.loss:0.0106,G.loss:20.6381--Adv.Loss:1.0476\n",
            "Epoch: 11/200-Batch: 8/18--D.loss:0.0099,G.loss:22.3478--Adv.Loss:0.9245\n",
            "Epoch: 11/200-Batch: 9/18--D.loss:0.0099,G.loss:22.1092--Adv.Loss:1.0202\n",
            "Epoch: 11/200-Batch: 10/18--D.loss:0.0092,G.loss:21.1776--Adv.Loss:0.9856\n",
            "Epoch: 11/200-Batch: 11/18--D.loss:0.0110,G.loss:21.7594--Adv.Loss:0.9133\n",
            "Epoch: 11/200-Batch: 12/18--D.loss:0.0115,G.loss:21.1260--Adv.Loss:1.0661\n",
            "Epoch: 11/200-Batch: 13/18--D.loss:0.0097,G.loss:21.5392--Adv.Loss:0.9429\n",
            "Epoch: 11/200-Batch: 14/18--D.loss:0.0097,G.loss:19.9571--Adv.Loss:0.9813\n",
            "Epoch: 11/200-Batch: 15/18--D.loss:0.0108,G.loss:19.5616--Adv.Loss:1.0289\n",
            "Epoch: 11/200-Batch: 16/18--D.loss:0.0131,G.loss:21.3749--Adv.Loss:0.9591\n",
            "Epoch: 11/200-Batch: 17/18--D.loss:0.0152,G.loss:24.7748--Adv.Loss:1.0244\n",
            "Epoch: 12/200-Batch: 0/18--D.loss:0.0162,G.loss:22.4486--Adv.Loss:1.0480\n",
            "Epoch: 12/200-Batch: 1/18--D.loss:0.0150,G.loss:20.8452--Adv.Loss:0.9200\n",
            "Epoch: 12/200-Batch: 2/18--D.loss:0.0141,G.loss:21.1617--Adv.Loss:1.0198\n",
            "Epoch: 12/200-Batch: 3/18--D.loss:0.0111,G.loss:20.9867--Adv.Loss:0.9273\n",
            "Epoch: 12/200-Batch: 4/18--D.loss:0.0126,G.loss:21.9991--Adv.Loss:1.0664\n",
            "Epoch: 12/200-Batch: 5/18--D.loss:0.0159,G.loss:21.5292--Adv.Loss:0.8703\n",
            "Epoch: 12/200-Batch: 6/18--D.loss:0.0178,G.loss:21.4865--Adv.Loss:1.1407\n",
            "Epoch: 12/200-Batch: 7/18--D.loss:0.0173,G.loss:21.8086--Adv.Loss:0.8847\n",
            "Epoch: 12/200-Batch: 8/18--D.loss:0.0150,G.loss:20.9834--Adv.Loss:1.0649\n",
            "Epoch: 12/200-Batch: 9/18--D.loss:0.0113,G.loss:21.6403--Adv.Loss:0.9114\n",
            "Epoch: 12/200-Batch: 10/18--D.loss:0.0105,G.loss:20.4888--Adv.Loss:1.0611\n",
            "Epoch: 12/200-Batch: 11/18--D.loss:0.0092,G.loss:20.5323--Adv.Loss:0.9371\n",
            "Epoch: 12/200-Batch: 12/18--D.loss:0.0074,G.loss:21.8450--Adv.Loss:0.9796\n",
            "Epoch: 12/200-Batch: 13/18--D.loss:0.0099,G.loss:21.0010--Adv.Loss:1.0252\n",
            "Epoch: 12/200-Batch: 14/18--D.loss:0.0110,G.loss:21.8177--Adv.Loss:0.8785\n",
            "Epoch: 12/200-Batch: 15/18--D.loss:0.0116,G.loss:21.7113--Adv.Loss:1.1126\n",
            "Epoch: 12/200-Batch: 16/18--D.loss:0.0104,G.loss:20.2746--Adv.Loss:0.8946\n",
            "Epoch: 12/200-Batch: 17/18--D.loss:0.0075,G.loss:20.4945--Adv.Loss:1.0306\n",
            "Epoch: 13/200-Batch: 0/18--D.loss:0.0126,G.loss:22.0330--Adv.Loss:0.9604\n",
            "Epoch: 13/200-Batch: 1/18--D.loss:0.0115,G.loss:21.1548--Adv.Loss:1.0770\n",
            "Epoch: 13/200-Batch: 2/18--D.loss:0.0179,G.loss:22.1922--Adv.Loss:0.7820\n",
            "Epoch: 13/200-Batch: 3/18--D.loss:0.0313,G.loss:21.0722--Adv.Loss:1.2237\n",
            "Epoch: 13/200-Batch: 4/18--D.loss:0.0500,G.loss:21.2797--Adv.Loss:0.5916\n",
            "Epoch: 13/200-Batch: 5/18--D.loss:0.0824,G.loss:22.3150--Adv.Loss:1.5570\n",
            "Epoch: 13/200-Batch: 6/18--D.loss:0.0668,G.loss:20.8851--Adv.Loss:0.5424\n",
            "Epoch: 13/200-Batch: 7/18--D.loss:0.0246,G.loss:22.5517--Adv.Loss:1.2058\n",
            "Epoch: 13/200-Batch: 8/18--D.loss:0.0113,G.loss:21.9769--Adv.Loss:1.0006\n",
            "Epoch: 13/200-Batch: 9/18--D.loss:0.0142,G.loss:21.4594--Adv.Loss:0.8346\n",
            "Epoch: 13/200-Batch: 10/18--D.loss:0.0098,G.loss:21.2553--Adv.Loss:1.0663\n",
            "Epoch: 13/200-Batch: 11/18--D.loss:0.0080,G.loss:19.9543--Adv.Loss:0.9685\n",
            "Epoch: 13/200-Batch: 12/18--D.loss:0.0071,G.loss:20.4960--Adv.Loss:1.0136\n",
            "Epoch: 13/200-Batch: 13/18--D.loss:0.0066,G.loss:20.1877--Adv.Loss:0.9995\n",
            "Epoch: 13/200-Batch: 14/18--D.loss:0.0065,G.loss:20.4476--Adv.Loss:0.9696\n",
            "Epoch: 13/200-Batch: 15/18--D.loss:0.0067,G.loss:21.3074--Adv.Loss:1.0414\n",
            "Epoch: 13/200-Batch: 16/18--D.loss:0.0066,G.loss:21.7353--Adv.Loss:0.9453\n",
            "Epoch: 13/200-Batch: 17/18--D.loss:0.0066,G.loss:19.7743--Adv.Loss:1.0448\n",
            "Epoch: 14/200-Batch: 0/18--D.loss:0.0084,G.loss:20.2956--Adv.Loss:0.9481\n",
            "Epoch: 14/200-Batch: 1/18--D.loss:0.0071,G.loss:21.9876--Adv.Loss:1.0120\n",
            "Epoch: 14/200-Batch: 2/18--D.loss:0.0064,G.loss:20.7172--Adv.Loss:1.0082\n",
            "Epoch: 14/200-Batch: 3/18--D.loss:0.0066,G.loss:19.9294--Adv.Loss:0.9485\n",
            "Epoch: 14/200-Batch: 4/18--D.loss:0.0061,G.loss:20.8687--Adv.Loss:1.0185\n",
            "Epoch: 14/200-Batch: 5/18--D.loss:0.0065,G.loss:20.3773--Adv.Loss:1.0056\n",
            "Epoch: 14/200-Batch: 6/18--D.loss:0.0062,G.loss:20.9539--Adv.Loss:0.9559\n",
            "Epoch: 14/200-Batch: 7/18--D.loss:0.0062,G.loss:22.1127--Adv.Loss:1.0178\n",
            "Epoch: 14/200-Batch: 8/18--D.loss:0.0057,G.loss:20.8425--Adv.Loss:1.0192\n",
            "Epoch: 14/200-Batch: 9/18--D.loss:0.0059,G.loss:21.2863--Adv.Loss:0.9562\n",
            "Epoch: 14/200-Batch: 10/18--D.loss:0.0065,G.loss:21.3428--Adv.Loss:1.0248\n",
            "Epoch: 14/200-Batch: 11/18--D.loss:0.0059,G.loss:21.4920--Adv.Loss:0.9516\n",
            "Epoch: 14/200-Batch: 12/18--D.loss:0.0061,G.loss:21.8505--Adv.Loss:1.0257\n",
            "Epoch: 14/200-Batch: 13/18--D.loss:0.0051,G.loss:20.6264--Adv.Loss:0.9910\n",
            "Epoch: 14/200-Batch: 14/18--D.loss:0.0058,G.loss:20.3430--Adv.Loss:0.9917\n",
            "Epoch: 14/200-Batch: 15/18--D.loss:0.0057,G.loss:19.6266--Adv.Loss:0.9833\n",
            "Epoch: 14/200-Batch: 16/18--D.loss:0.0068,G.loss:21.6506--Adv.Loss:0.9556\n",
            "Epoch: 14/200-Batch: 17/18--D.loss:0.0058,G.loss:20.8767--Adv.Loss:1.0339\n",
            "Epoch: 15/200-Batch: 0/18--D.loss:0.0089,G.loss:21.4111--Adv.Loss:0.9217\n",
            "Epoch: 15/200-Batch: 1/18--D.loss:0.0079,G.loss:20.4248--Adv.Loss:1.0624\n",
            "Epoch: 15/200-Batch: 2/18--D.loss:0.0074,G.loss:21.2814--Adv.Loss:0.9822\n",
            "Epoch: 15/200-Batch: 3/18--D.loss:0.0077,G.loss:21.1422--Adv.Loss:0.9856\n",
            "Epoch: 15/200-Batch: 4/18--D.loss:0.0074,G.loss:20.6467--Adv.Loss:1.0165\n",
            "Epoch: 15/200-Batch: 5/18--D.loss:0.0076,G.loss:21.7255--Adv.Loss:1.0019\n",
            "Epoch: 15/200-Batch: 6/18--D.loss:0.0067,G.loss:21.7218--Adv.Loss:0.9844\n",
            "Epoch: 15/200-Batch: 7/18--D.loss:0.0058,G.loss:19.9333--Adv.Loss:0.9881\n",
            "Epoch: 15/200-Batch: 8/18--D.loss:0.0052,G.loss:21.2905--Adv.Loss:0.9956\n",
            "Epoch: 15/200-Batch: 9/18--D.loss:0.0049,G.loss:20.7572--Adv.Loss:1.0059\n",
            "Epoch: 15/200-Batch: 10/18--D.loss:0.0052,G.loss:20.4024--Adv.Loss:1.0018\n",
            "Epoch: 15/200-Batch: 11/18--D.loss:0.0060,G.loss:21.5160--Adv.Loss:0.9896\n",
            "Epoch: 15/200-Batch: 12/18--D.loss:0.0055,G.loss:20.7997--Adv.Loss:0.9742\n",
            "Epoch: 15/200-Batch: 13/18--D.loss:0.0062,G.loss:20.9995--Adv.Loss:0.9764\n",
            "Epoch: 15/200-Batch: 14/18--D.loss:0.0066,G.loss:20.7207--Adv.Loss:1.0309\n",
            "Epoch: 15/200-Batch: 15/18--D.loss:0.0068,G.loss:19.8347--Adv.Loss:0.9644\n",
            "Epoch: 15/200-Batch: 16/18--D.loss:0.0064,G.loss:19.8305--Adv.Loss:0.9981\n",
            "Epoch: 15/200-Batch: 17/18--D.loss:0.0071,G.loss:18.3664--Adv.Loss:0.9640\n",
            "Epoch: 16/200-Batch: 0/18--D.loss:0.0086,G.loss:21.0834--Adv.Loss:1.0530\n",
            "Epoch: 16/200-Batch: 1/18--D.loss:0.0087,G.loss:20.2892--Adv.Loss:0.9627\n",
            "Epoch: 16/200-Batch: 2/18--D.loss:0.0086,G.loss:21.0533--Adv.Loss:1.0220\n",
            "Epoch: 16/200-Batch: 3/18--D.loss:0.0089,G.loss:20.9082--Adv.Loss:0.9765\n",
            "Epoch: 16/200-Batch: 4/18--D.loss:0.0084,G.loss:21.5648--Adv.Loss:1.0159\n",
            "Epoch: 16/200-Batch: 5/18--D.loss:0.0080,G.loss:21.0601--Adv.Loss:0.9781\n",
            "Epoch: 16/200-Batch: 6/18--D.loss:0.0071,G.loss:19.9857--Adv.Loss:1.0030\n",
            "Epoch: 16/200-Batch: 7/18--D.loss:0.0066,G.loss:20.9557--Adv.Loss:0.9523\n",
            "Epoch: 16/200-Batch: 8/18--D.loss:0.0065,G.loss:20.7313--Adv.Loss:1.0494\n"
          ]
        },
        {
          "ename": "KeyboardInterrupt",
          "evalue": "",
          "output_type": "error",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
            "\u001b[0;32m<ipython-input-18-3ceec9520300>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     61\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     62\u001b[0m         \u001b[0;31m# Progress Report\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 63\u001b[0;31m         \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf'Epoch: {epoch}/{N_EPOCHS}-Batch: {i}/{len(train_dataloader)}--D.loss:{d_loss.item():.4f},G.loss:{g_loss.item():.4f}--Adv.Loss:{adv_loss.item():.4f}'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     64\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     65\u001b[0m         \u001b[0;31m# If at sample interval save image\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
          ]
        }
      ],
      "source": [
        "# Pix2Pix training loop: alternates one generator step and one\n",
        "# discriminator step per batch, logging losses and periodically\n",
        "# saving sample images.\n",
        "for epoch in range(0, N_EPOCHS):\n",
        "    for i, batch in enumerate(train_dataloader):\n",
        "\n",
        "        # Prepare inputs. torch.autograd.Variable is deprecated since\n",
        "        # PyTorch 0.4 -- tensors carry autograd state directly, so we\n",
        "        # drop the Variable wrapper (behavior is unchanged).\n",
        "        # NOTE(review): the dataset's \"B\" sample feeds the generator and\n",
        "        # \"A\" is the target -- confirm this A/B direction is intended.\n",
        "        real_A = batch[\"B\"].type(Tensor)\n",
        "        real_B = batch[\"A\"].type(Tensor)\n",
        "\n",
        "        # PatchGAN ground-truth labels: ones = real, zeros = fake.\n",
        "        # Freshly created tensors do not require grad by default, so the\n",
        "        # old requires_grad=False flag is redundant.\n",
        "        valid = Tensor(np.ones((real_A.size(0), *patch_gan_shape)))\n",
        "        fake = Tensor(np.zeros((real_A.size(0), *patch_gan_shape)))\n",
        "\n",
        "        #  Train Generator\n",
        "        optimizer_G.zero_grad()\n",
        "\n",
        "        # Adversarial term: make the discriminator score fakes as valid;\n",
        "        # pixel-wise term: keep the output close to the paired target.\n",
        "        fake_B = generator(real_A)\n",
        "        pred_fake = discriminator(fake_B, real_A)\n",
        "        adv_loss = adversarial_loss(pred_fake, valid)\n",
        "        loss_pixel = pixelwise_loss(fake_B, real_B)\n",
        "\n",
        "        # Overall Generator loss (pixel term scaled by the configured weight)\n",
        "        g_loss = adv_loss + weight_pixel_wise_identity * loss_pixel\n",
        "\n",
        "        g_loss.backward()\n",
        "        optimizer_G.step()\n",
        "\n",
        "        #  Train Discriminator\n",
        "        optimizer_D.zero_grad()\n",
        "\n",
        "        # Real pairs should score 'valid'; generated pairs are detached so\n",
        "        # no gradient flows back into the generator on this step.\n",
        "        pred_real = discriminator(real_B, real_A)\n",
        "        loss_real = adversarial_loss(pred_real, valid)\n",
        "        pred_fake = discriminator(fake_B.detach(), real_A)\n",
        "        loss_fake = adversarial_loss(pred_fake, fake)\n",
        "\n",
        "        # Overall Discriminator loss (average of the real and fake terms)\n",
        "        d_loss = 0.5 * (loss_real + loss_fake)\n",
        "\n",
        "        d_loss.backward()\n",
        "        optimizer_D.step()\n",
        "\n",
        "        # Progress Report\n",
        "        batches_done = epoch * len(train_dataloader) + i\n",
        "        print(f'Epoch: {epoch}/{N_EPOCHS}-Batch: {i}/{len(train_dataloader)}--D.loss:{d_loss.item():.4f},G.loss:{g_loss.item():.4f}--Adv.Loss:{adv_loss.item():.4f}')\n",
        "\n",
        "        # Periodically save sample images for visual inspection\n",
        "        if batches_done % SAMPLE_INTERVAL == 0:\n",
        "            sample_images(batches_done)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "sIzQFLMrajAw"
      },
      "outputs": [],
      "source": []
    }
  ],
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
