{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torchvision\n",
    "import torch.nn as nn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load VGG-16 with ImageNet-pretrained weights (downloaded on first use).\n",
    "# NOTE(review): `pretrained=` is deprecated in newer torchvision in favor of\n",
    "# `weights=VGG16_Weights.IMAGENET1K_V1`; kept as-is for this environment.\n",
    "vgg16 = torchvision.models.vgg16(pretrained = True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "VGG(\n",
       "  (features): Sequential(\n",
       "    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (1): ReLU(inplace=True)\n",
       "    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (3): ReLU(inplace=True)\n",
       "    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (6): ReLU(inplace=True)\n",
       "    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (8): ReLU(inplace=True)\n",
       "    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (11): ReLU(inplace=True)\n",
       "    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (13): ReLU(inplace=True)\n",
       "    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (15): ReLU(inplace=True)\n",
       "    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (18): ReLU(inplace=True)\n",
       "    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (20): ReLU(inplace=True)\n",
       "    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (22): ReLU(inplace=True)\n",
       "    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (25): ReLU(inplace=True)\n",
       "    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (27): ReLU(inplace=True)\n",
       "    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (29): ReLU(inplace=True)\n",
       "    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "  )\n",
       "  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))\n",
       "  (classifier): Sequential(\n",
       "    (0): Linear(in_features=25088, out_features=4096, bias=True)\n",
       "    (1): ReLU(inplace=True)\n",
       "    (2): Dropout(p=0.5, inplace=False)\n",
       "    (3): Linear(in_features=4096, out_features=4096, bias=True)\n",
       "    (4): ReLU(inplace=True)\n",
       "    (5): Dropout(p=0.5, inplace=False)\n",
       "    (6): Linear(in_features=4096, out_features=1000, bias=True)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 71,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Display the full VGG-16 architecture (features / avgpool / classifier).\n",
    "vgg16"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Capture the activation of vgg16.features[4] (the first MaxPool2d) via a\n",
    "# forward hook. `tmp` starts as a placeholder int and is overwritten with the\n",
    "# layer's output tensor on the next forward pass through vgg16.\n",
    "tmp = 1\n",
    "def get_features_hook(self, input, output):\n",
    "    # Store the layer output in the module-level `tmp`.\n",
    "    global tmp\n",
    "    tmp = output.data\n",
    "\n",
    "# NOTE(review): the handle is never `.remove()`d, so this hook stays\n",
    "# registered for every later forward pass of vgg16 in this notebook —\n",
    "# the next cell relies on that to populate `tmp`.\n",
    "handle=vgg16.features[4].register_forward_hook(get_features_hook)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dummy forward pass to trigger the registered hook and populate `tmp`\n",
    "# with the features[4] activation for a random 64x64 input.\n",
    "randomTensor = torch.rand(size = (1, 3, 64, 64))\n",
    "output = vgg16(randomTensor)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "inner_tensor = 1\n",
    "\n",
    "def get_inner_tensor_for_style_loss(model, num):\n",
    "    \"\"\"Forward a dummy all-ones image through `model` and return the\n",
    "    activation of model.features[num] as a numpy array (style-loss target).\"\"\"\n",
    "    def get_features_hook(self, input, output):\n",
    "        global inner_tensor\n",
    "        inner_tensor = output.data.cpu().numpy()\n",
    "    handle = model.features[num].register_forward_hook(get_features_hook)\n",
    "    randomTensor = torch.ones(size = (1, 3, 64, 64))\n",
    "    # BUG FIX: forward through the `model` argument, not the global vgg16,\n",
    "    # so the function actually honours its parameter.\n",
    "    output = model(randomTensor)\n",
    "    # BUG FIX: detach the hook; the original left one hook registered per\n",
    "    # call, so hooks accumulated across calls and fired on every later\n",
    "    # forward pass.\n",
    "    handle.remove()\n",
    "    return inner_tensor\n",
    "\n",
    "tmeptemp = get_inner_tensor_for_style_loss(vgg16, 5)\n",
    "tmeptemp2 = get_inner_tensor_for_style_loss(vgg16, 23)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Sequential(\n",
       "  (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "  (1): ReLU(inplace=True)\n",
       "  (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "  (3): ReLU(inplace=True)\n",
       "  (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "  (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "  (6): ReLU(inplace=True)\n",
       "  (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "  (8): ReLU(inplace=True)\n",
       "  (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "  (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "  (11): ReLU(inplace=True)\n",
       "  (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "  (13): ReLU(inplace=True)\n",
       "  (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "  (15): ReLU(inplace=True)\n",
       ")"
      ]
     },
     "execution_count": 75,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Keep the first 16 layers of the feature extractor (conv1_1 ... relu3_3).\n",
    "# vgg16.features is the first child module, and slicing an nn.Sequential\n",
    "# yields a Sequential of those layers — same result as the longhand\n",
    "# nn.Sequential(*list(vgg16.children())[0][:16]).\n",
    "vgg_new = nn.Sequential(*vgg16.features[:16])\n",
    "vgg_new"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "styleTransferNet(\n",
       "  (downSample): Sequential(\n",
       "    (0): Conv2d(3, 32, kernel_size=(9, 9), stride=(2, 2), padding=(4, 4), bias=False)\n",
       "    (1): InstanceNorm2d(32, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "    (2): ReLU(inplace=True)\n",
       "    (3): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "    (4): InstanceNorm2d(64, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "    (5): ReLU(inplace=True)\n",
       "    (6): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "    (7): InstanceNorm2d(128, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "    (8): ReLU(inplace=True)\n",
       "  )\n",
       "  (residualBlocks): Sequential(\n",
       "    (0): residual(\n",
       "      (res): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): ReLU(inplace=True)\n",
       "        (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      )\n",
       "      (RELU): ReLU(inplace=True)\n",
       "    )\n",
       "    (1): residual(\n",
       "      (res): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): ReLU(inplace=True)\n",
       "        (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      )\n",
       "      (RELU): ReLU(inplace=True)\n",
       "    )\n",
       "    (2): residual(\n",
       "      (res): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): ReLU(inplace=True)\n",
       "        (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      )\n",
       "      (RELU): ReLU(inplace=True)\n",
       "    )\n",
       "    (3): residual(\n",
       "      (res): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): ReLU(inplace=True)\n",
       "        (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      )\n",
       "      (RELU): ReLU(inplace=True)\n",
       "    )\n",
       "    (4): residual(\n",
       "      (res): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): ReLU(inplace=True)\n",
       "        (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      )\n",
       "      (RELU): ReLU(inplace=True)\n",
       "    )\n",
       "  )\n",
       "  (deconvBlocks): Sequential(\n",
       "    (0): deconv(\n",
       "      (deconvBlock): Sequential(\n",
       "        (0): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (1): InstanceNorm2d(64, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "        (2): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "    (1): deconv(\n",
       "      (deconvBlock): Sequential(\n",
       "        (0): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (1): InstanceNorm2d(32, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "        (2): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (lastDeconvBlock): Sequential(\n",
       "    (0): ConvTranspose2d(32, 3, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "    (1): InstanceNorm2d(3, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "    (2): Tanh()\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 76,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import model\n",
    "\n",
    "# Instantiate the image-transformation network and show its architecture.\n",
    "stnet = model.styleTransferNet()\n",
    "stnet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([1, 3, 256, 256])"
      ]
     },
     "execution_count": 77,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Smoke test: the network should map a 256x256 image to the same size.\n",
    "a = stnet(torch.ones(size = (1, 3, 256, 256)))\n",
    "a.size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Number of samples:  18432\n",
      "Image Size:  torch.Size([3, 300, 300])\n",
      "1\n"
     ]
    }
   ],
   "source": [
    "import torchvision.datasets as dset\n",
    "import torchvision.transforms as transforms\n",
    "from dataset import CoCoDataset\n",
    "\n",
    "# Build the custom COCO dataset; each item is (image_tensor, target).\n",
    "cap = CoCoDataset(data_dir = r'.\\data', transform=transforms.ToTensor())\n",
    "\n",
    "print('Number of samples: ', len(cap))\n",
    "# Inspect one sample to sanity-check the image shape and target value.\n",
    "img, target = cap[100]\n",
    "\n",
    "print(\"Image Size: \", img.size())\n",
    "print(target)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAA4sAAABtCAYAAAAI5vRhAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAAAClElEQVR4nO3dMQrDMBQFwcjk/keO0gaWpArIfGZagxG8alGhtfd+AAAAwKfr9AEAAAC4H7EIAABAiEUAAABCLAIAABBiEQAAgBCLAAAAxPP358u7Gse81v/+Zcdz7DiDHWew4wx2nMGOM9hxhu87ulkEAAAgxCIAAAAhFgEAAAixCAAAQIhFAAAAQiwCAAAQYhEAAIAQiwAAAIRYBAAAIMQiAAAAIRYBAAAIsQgAAECIRQAAAEIsAgAAEGIRAACAEIsAAACEWAQAACDEIgAAACEWAQAACLEIAABAiEUAAABCLAIAABBiEQAAgBCLAAAAhFgEAAAgxCIAAAAhFgEAAAixCAAAQIhFAAAAQiwCAAAQYhEAAIAQiwAAAIRYBAAAIMQiAAAAIRYBAAAIsQgAAECIRQAAAEIsAgAAEGIRAACAEIsAAACEWAQAACDEIgAAACEWAQAACLEIAABAiEUAAABCLAIAABBiEQAAgBCLAAAAhFgEAAAgxCIAAAAhFgEAAAixCAAAQIhFAAAAQiwCAAAQYhEAAIAQiwAAAIRYBAAAIMQiAAAAIRYBAAAIsQgAAECIRQAAAEIsAgAAEGIRAACAEIsAAACEWAQAACDEIgAAACEWAQAACLEIAABAiEUAAABCLAIAABBiEQAAgBCLAAAAhFgEAAAgxCIAAAAhFgEAAAixCAAAQIhFAAAAQiwCAAAQYhEAAIAQiwAAAIRYBAAAIMQiAAAAIRYBAAAIsQgAAECIRQAAAEIsAgAAEGIRAACAEIsAAACEWAQAACDEIgAAACEWAQAACLEIAABAiEUAAABCLAIAABBr7336DAAAANyMm0UAAABCLAIAABBiEQAAgBCLAAAAhFgEAAAgxCIAAADxBpO3FtU8MeizAAAAAElFTkSuQmCC",
      "text/plain": [
       "<Figure size 1152x144 with 8 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[[255 255   1]\n",
      "  [255 255   1]\n",
      "  [255 255   1]\n",
      "  ...\n",
      "  [255 255   1]\n",
      "  [255 255   1]\n",
      "  [255 255   1]]\n",
      "\n",
      " [[255 255   1]\n",
      "  [255 255   1]\n",
      "  [255 255   1]\n",
      "  ...\n",
      "  [255 255   1]\n",
      "  [255 255   1]\n",
      "  [255 255   1]]\n",
      "\n",
      " [[255 255   1]\n",
      "  [255 255   1]\n",
      "  [255 255   1]\n",
      "  ...\n",
      "  [255 255   1]\n",
      "  [255 255   1]\n",
      "  [255 255   1]]\n",
      "\n",
      " ...\n",
      "\n",
      " [[255 255   1]\n",
      "  [255 255   1]\n",
      "  [255 255   1]\n",
      "  ...\n",
      "  [255 255   1]\n",
      "  [255 255   1]\n",
      "  [255 255   1]]\n",
      "\n",
      " [[255 255   1]\n",
      "  [255 255   1]\n",
      "  [255 255   1]\n",
      "  ...\n",
      "  [255 255   1]\n",
      "  [255 255   1]\n",
      "  [255 255   1]]\n",
      "\n",
      " [[255 255   1]\n",
      "  [255 255   1]\n",
      "  [255 255   1]\n",
      "  ...\n",
      "  [255 255   1]\n",
      "  [255 255   1]\n",
      "  [255 255   1]]]\n"
     ]
    }
   ],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "samples = []\n",
    "def plot_reconstruction(samples, nex=8, zm=2):\n",
    "    \"\"\"Display the first `nex` entries of `samples` in a single row.\n",
    "\n",
    "    samples : list of (1, C, H, W) arrays\n",
    "    nex     : number of panels to draw\n",
    "    zm      : panel size in inches\n",
    "    \"\"\"\n",
    "    fig, axs = plt.subplots(ncols=nex, nrows=1, figsize=(zm * nex, zm))\n",
    "    for axi in range(nex):\n",
    "        # BUG FIX: transpose a local copy instead of writing the transposed\n",
    "        # view back into the caller's list (the original mutated `samples`\n",
    "        # in place, so data printed afterwards was already reshaped and\n",
    "        # re-running the cell double-transposed it).\n",
    "        img = np.moveaxis(samples[axi].squeeze(), 0, 2)  # (C,H,W) -> (H,W,C)\n",
    "        axs[axi].imshow(img)\n",
    "        axs[axi].axis('off')\n",
    "    plt.show()\n",
    "for i in range(10):\n",
    "    # Synthetic image: R and G channels at 255, B at 1 -> solid yellow.\n",
    "    # Renamed from `tmp` so this loop no longer clobbers the hook output\n",
    "    # stored in the global `tmp` earlier in the notebook.\n",
    "    sample = np.ones(shape=(1,3,256,256))\n",
    "    sample[0][0] = np.ones(shape=(256,256)) * 255\n",
    "    sample[0][1] = np.ones(shape=(256,256)) * 255\n",
    "    samples.append(sample.astype('uint8'))\n",
    "\n",
    "plot_reconstruction(samples)\n",
    "print(samples[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[1., 2.],\n",
       "         [2., 2.]],\n",
       "\n",
       "        [[1., 2.],\n",
       "         [2., 2.]]])"
      ]
     },
     "execution_count": 80,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Scratch cell: MSE between two 2x2 tensors, then demonstrate repeat(),\n",
    "# which tiles `input` into shape (2, 2, 2).\n",
    "# NOTE(review): `input` shadows the Python builtin of the same name.\n",
    "loss = nn.MSELoss()\n",
    "input = torch.tensor([[1.0,2.0], [2.0, 2.0]])\n",
    "target = torch.tensor([[0.0,1.0], [0.0,2.0]])\n",
    "output = loss(input, target)\n",
    "input.repeat(2,1,1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.0000, 1.4142],\n",
       "        [1.4142, 1.4142]])"
      ]
     },
     "execution_count": 81,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Element-wise square root of the `input` tensor from the previous cell.\n",
    "a = input.sqrt()\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "styleTransferNet(\n",
       "  (downSample): Sequential(\n",
       "    (0): Conv2d(3, 32, kernel_size=(9, 9), stride=(2, 2), padding=(4, 4), bias=False)\n",
       "    (1): InstanceNorm2d(32, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "    (2): ReLU(inplace=True)\n",
       "    (3): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "    (4): InstanceNorm2d(64, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "    (5): ReLU(inplace=True)\n",
       "    (6): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "    (7): InstanceNorm2d(128, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "    (8): ReLU(inplace=True)\n",
       "  )\n",
       "  (residualBlocks): Sequential(\n",
       "    (0): residual(\n",
       "      (res): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): ReLU(inplace=True)\n",
       "        (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      )\n",
       "      (RELU): ReLU(inplace=True)\n",
       "    )\n",
       "    (1): residual(\n",
       "      (res): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): ReLU(inplace=True)\n",
       "        (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      )\n",
       "      (RELU): ReLU(inplace=True)\n",
       "    )\n",
       "    (2): residual(\n",
       "      (res): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): ReLU(inplace=True)\n",
       "        (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      )\n",
       "      (RELU): ReLU(inplace=True)\n",
       "    )\n",
       "    (3): residual(\n",
       "      (res): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): ReLU(inplace=True)\n",
       "        (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      )\n",
       "      (RELU): ReLU(inplace=True)\n",
       "    )\n",
       "    (4): residual(\n",
       "      (res): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): ReLU(inplace=True)\n",
       "        (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      )\n",
       "      (RELU): ReLU(inplace=True)\n",
       "    )\n",
       "  )\n",
       "  (deconvBlocks): Sequential(\n",
       "    (0): deconv(\n",
       "      (deconvBlock): Sequential(\n",
       "        (0): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (1): InstanceNorm2d(64, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "        (2): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "    (1): deconv(\n",
       "      (deconvBlock): Sequential(\n",
       "        (0): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (1): InstanceNorm2d(32, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "        (2): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (lastDeconvBlock): Sequential(\n",
       "    (0): ConvTranspose2d(32, 3, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "    (1): InstanceNorm2d(3, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "    (2): Tanh()\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 82,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torchvision.utils as vutils\n",
    "def save_img_results(data_img, fake, epoch, image_dir = \"./output/Image\"):\n",
    "    \"\"\"\n",
    "    Save generated image results. For a stage-one result, save both the real\n",
    "    image and the stage-one output; for a stage-two result, save only the\n",
    "    stage-two output. In practice (see the trainer code), stage two calls\n",
    "    this function twice, so the real image, the stage-one output and the\n",
    "    stage-two output all end up saved.\n",
    "    \"\"\"\n",
    "    print(np.shape(fake))\n",
    "    # num = 256\n",
    "    # fake = fake[0:num]\n",
    "    # data_img is changed to [0,1]\n",
    "    if data_img is not None:\n",
    "        # data_img = data_img[0:num]\n",
    "        vutils.save_image(\n",
    "            data_img, '%s/real_samples.png' % image_dir,\n",
    "            normalize=True)\n",
    "        # fake.data is still [-1, 1]\n",
    "        vutils.save_image(\n",
    "            fake.data, '%s/fake_samples_epoch_%03d.png' %\n",
    "            (image_dir, epoch), normalize=True)\n",
    "    else:\n",
    "        vutils.save_image(\n",
    "            fake.data, '%s/lr_fake_samples_epoch_%03d.png' %\n",
    "            (image_dir, epoch), normalize=True)\n",
    "\n",
    "from model import styleTransferNet\n",
    "\n",
    "# Load the trained style-transfer network and switch to inference mode.\n",
    "stnet = styleTransferNet()\n",
    "stnet.load_state_dict(torch.load(\"./output/Model/vgg16_epoch_0.pth\"))\n",
    "stnet.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 3, 256, 256])\n"
     ]
    }
   ],
   "source": [
    "from PIL import Image\n",
    "loader = torchvision.transforms.Compose([\n",
    "torchvision.transforms.ToTensor()])\n",
    "\n",
    "def image_loader(image_name):\n",
    "    \"\"\"Load an image file, resize it to 256x256 and return a\n",
    "    (1, 3, 256, 256) float tensor ready for the style-transfer network.\"\"\"\n",
    "    image = Image.open(image_name).convert('RGB')\n",
    "    image = image.resize(size = (256, 256))\n",
    "    # unsqueeze(0) adds the batch dimension the network expects.\n",
    "    image = loader(image).unsqueeze(0)\n",
    "    return image.to(torch.float)\n",
    "# Stylize the test image and write input + output to disk.\n",
    "image = image_loader(\"./data/test/test.jpg\")\n",
    "output = stnet(image)\n",
    "save_img_results(image, output, 15, image_dir = \"./output/Image\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal autograd check: loss = (12.5*tmp - 12)^2, so d(loss)/d(tmp)\n",
    "# = 2*(12.5*tmp - 12)*12.5 = 12.5 at tmp = 1 (printed in the next cell).\n",
    "# NOTE(review): `b` is computed but never used — candidate for removal.\n",
    "tmp = torch.tensor([1.0], requires_grad=True)\n",
    "b = tmp * 127.5\n",
    "cri = nn.MSELoss()\n",
    "target = torch.tensor([12.0])\n",
    "loss = cri(tmp * 12.5, target)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([12.5000])\n"
     ]
    }
   ],
   "source": [
    "# Backprop the MSE loss from the previous cell; expected grad is 12.5.\n",
    "loss.backward()\n",
    "print(tmp.grad)"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "494899efd6527d56ea7f55c588d0081523a17dc3a9ff1107f3394ad815ff2527"
  },
  "kernelspec": {
   "display_name": "Python 3.7.7 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
