{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "c0e1b490-5d47-4e57-bb5c-213d46205d1a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import os, math, sys\n",
    "import glob, itertools\n",
    "import argparse, random\n",
    "import torchvision\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torch.autograd import Variable\n",
    "from torchvision.models import vgg19\n",
    "import torchvision.transforms as transforms\n",
    "from torch.utils.data import DataLoader, Dataset\n",
    "from torchvision.utils import save_image, make_grid\n",
    "\n",
    "import plotly\n",
    "import plotly.express as px\n",
    "\n",
    "import plotly.graph_objects as go\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "from PIL import Image\n",
    "from tqdm import tqdm_notebook as tqdm\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "random.seed(42)\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "4f6d3fbc-e143-488f-b90a-136484f2aef1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Training configuration ---------------------------------------------\n",
    "# Whether to resume from checkpoints in saved_model/ (set True to resume).\n",
    "# NOTE: the original cell set this to True and then silently overwrote it\n",
    "# with False further down; a single assignment keeps the effective value.\n",
    "load_pretrained_models = False\n",
    "# number of epochs of training\n",
    "n_epochs = 2\n",
    "# path of the dataset directory\n",
    "dataset_path = \"celeba_hq_256\"\n",
    "# size of the training batches\n",
    "batch_size = 16\n",
    "# adam: learning rate\n",
    "lr = 0.00001\n",
    "# adam: decay of first order momentum of gradient\n",
    "b1 = 0.9\n",
    "# adam: decay of second order momentum of gradient\n",
    "b2 = 0.999\n",
    "# epoch from which to start lr decay\n",
    "decay_epoch = 100\n",
    "# number of cpu threads to use during batch generation\n",
    "n_cpu = 8\n",
    "# high res. image height\n",
    "hr_height = 256\n",
    "# high res. image width\n",
    "hr_width = 256\n",
    "# number of image channels\n",
    "channels = 3\n",
    "\n",
    "# Output directories for sample images and model checkpoints.\n",
    "os.makedirs(\"image\", exist_ok=True)\n",
    "os.makedirs(\"saved_model\", exist_ok=True)\n",
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "hr_shape = (hr_height, hr_width)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "fd05d7ea-9628-42c9-b679-e6a8e84e8411",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Normalization parameters for pre-trained PyTorch models (ImageNet stats).\n",
    "mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)\n",
    "std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)\n",
    "\n",
    "class ImageDataset(Dataset):\n",
    "    \"\"\"Paired low-/high-resolution dataset for super-resolution training.\n",
    "\n",
    "    Each item is a dict {\"lr\": tensor, \"hr\": tensor}: the same source image\n",
    "    resized to (hr_height // 4, hr_width // 4) and to (hr_height, hr_width),\n",
    "    both normalized with ImageNet statistics.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, files, hr_shape):\n",
    "        hr_height, hr_width = hr_shape\n",
    "        # Transforms for low resolution and high resolution images.\n",
    "        # BUG FIX: the original used hr_height for BOTH dimensions, which\n",
    "        # only worked for square targets; width now uses hr_width.\n",
    "        self.lr_transform = transforms.Compose(\n",
    "            [\n",
    "                transforms.Resize((hr_height // 4, hr_width // 4), Image.BICUBIC),\n",
    "                transforms.ToTensor(),\n",
    "                transforms.Normalize(mean, std),\n",
    "            ]\n",
    "        )\n",
    "\n",
    "        self.hr_transform = transforms.Compose(\n",
    "            [\n",
    "                transforms.Resize((hr_height, hr_width), Image.BICUBIC),\n",
    "                transforms.ToTensor(),\n",
    "                transforms.Normalize(mean, std),\n",
    "            ]\n",
    "        )\n",
    "        self.files = files\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        # Wrap the index so the dataset tolerates indices past its length.\n",
    "        img = Image.open(self.files[index % len(self.files)]).convert('RGB')\n",
    "        return {\"lr\": self.lr_transform(img), \"hr\": self.hr_transform(img)}\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.files)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "98f7f86d-be98-4334-8eb2-d1382be5426b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 98% / 2% train-test split over all images in the dataset directory.\n",
    "train_paths, test_paths = train_test_split(sorted(glob.glob(dataset_path + \"/*.*\")), test_size=0.02, random_state=42)\n",
    "# Keep a small fixed slice of 200 images for quick experiments.\n",
    "# (The original double slice [0:800] then [200:400] reduces to this.)\n",
    "train_paths = train_paths[200:400]\n",
    "train_dataloader = DataLoader(ImageDataset(train_paths, hr_shape=hr_shape), batch_size=batch_size, shuffle=True)\n",
    "test_dataloader = DataLoader(ImageDataset(test_paths, hr_shape=hr_shape), batch_size=int(batch_size*0.75), shuffle=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "a72e00c6-048d-4a42-b3e7-7baedb1c3ef8",
   "metadata": {},
   "outputs": [],
   "source": [
    "class VGGFeatureExtractor(nn.Module):\n",
    "    \"\"\"Frozen VGG19 trunk used for the perceptual (content) loss.\n",
    "\n",
    "    Exposes the first 18 layers of torchvision's pretrained VGG19\n",
    "    feature stack.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(VGGFeatureExtractor, self).__init__()\n",
    "        vgg19_model = vgg19(pretrained=True)\n",
    "        self.feature_extractor = nn.Sequential(*list(vgg19_model.features.children())[:18])\n",
    "        # This network is only a fixed loss function: freeze its weights so\n",
    "        # backprop through the content loss does not build grads for them.\n",
    "        for param in self.feature_extractor.parameters():\n",
    "            param.requires_grad = False\n",
    "\n",
    "    def forward(self, img):\n",
    "        return self.feature_extractor(img)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "ef326f9d-8d20-4ff5-a70c-6e99c8281914",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import nn as nn\n",
    "\n",
    "class MultiHead_SelfAttention(nn.Module):\n",
    "    \"\"\"Multi-head self-attention over 4-D input (batch, cn, seq_len, input_dim).\n",
    "\n",
    "    input_dim: size of the last axis of x (per-token feature length).\n",
    "    num_heads: number of attention heads; must divide input_dim so the\n",
    "        heads can be concatenated back to input_dim.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self,input_dim,num_heads):\n",
    "        super().__init__()\n",
    "        # Validate before dividing so a bad configuration fails loudly.\n",
    "        assert input_dim%num_heads==0 ,\"Input dimension must be divisible by the number of heads.\"\n",
    "        self.num_heads=num_heads\n",
    "        self.head_dim=input_dim//num_heads  # per-head feature size\n",
    "\n",
    "        # Linear layers for the query, key, and value projections.\n",
    "        self.query=nn.Linear(input_dim,input_dim)\n",
    "        self.key=nn.Linear(input_dim,input_dim)\n",
    "        self.value=nn.Linear(input_dim,input_dim)\n",
    "\n",
    "        self.output_linear=nn.Linear(input_dim,input_dim)\n",
    "\n",
    "    def forward(self,x):\n",
    "        batch_size,cn,seq_len,input_dim=x.size()\n",
    "        # Project each token and split into heads:\n",
    "        # (batch, cn, seq, dim) -> (batch, cn, seq, heads, head_dim).\n",
    "        # BUG FIX: key/value previously reused self.query, leaving the\n",
    "        # dedicated key/value projections as dead parameters.\n",
    "        query=self.query(x).view(batch_size,cn,seq_len,self.num_heads,self.head_dim)\n",
    "        key=self.key(x).view(batch_size,cn ,seq_len, self.num_heads, self.head_dim)\n",
    "        value=self.value(x).view(batch_size,cn ,seq_len, self.num_heads, self.head_dim)\n",
    "        # Move heads before the sequence axis so matmul attends per head:\n",
    "        # (batch, cn, seq, heads, head_dim) -> (batch, cn, heads, seq, head_dim).\n",
    "        query=query.transpose(2,3)\n",
    "        key=key.transpose(2,3)\n",
    "        value=value.transpose(2,3)\n",
    "        # Scaled dot-product attention scores: (..., seq, seq).\n",
    "        attention_scores=torch.matmul(query,key.transpose(-2,-1))/torch.sqrt(torch.tensor(self.head_dim,dtype=torch.float))\n",
    "        attention_weights=torch.softmax(attention_scores,dim=-1)\n",
    "        # Attention-weighted sum of values: (..., seq, head_dim).\n",
    "        attention=torch.matmul(attention_weights,value)\n",
    "        # Re-concatenate heads: back to (batch, cn, seq, input_dim);\n",
    "        # contiguous() materializes the transposed layout before view().\n",
    "        attention=attention.transpose(2,3).contiguous().view(batch_size,cn,seq_len,input_dim)\n",
    "        output=self.output_linear(attention)\n",
    "\n",
    "        return output\n",
    "\n",
    "\n",
    "class MultiHead_SelfAttention_Classifier(nn.Module):\n",
    "    \"\"\"Multi-head self-attention block followed by an ELU nonlinearity.\"\"\"\n",
    "\n",
    "    def __init__(self,input_dim,num_heads):\n",
    "        super().__init__()\n",
    "        self.attention=MultiHead_SelfAttention(input_dim,num_heads)\n",
    "        # Kept the name `relu` for checkpoint compatibility; it is an ELU.\n",
    "        self.relu=nn.ELU()\n",
    "\n",
    "    def forward(self,x):\n",
    "        x=self.attention(x)\n",
    "        x=self.relu(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "90a270d1-62d2-4f73-a3cf-99f39eda455e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# (Removed dead code: this cell was a fully commented-out duplicate of the\n",
    "# MultiHead_SelfAttention / MultiHead_SelfAttention_Classifier classes\n",
    "# defined in the cell above.)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "06c9600b-3885-4b74-81ca-4b01bfe24fc1",
   "metadata": {},
   "outputs": [],
   "source": [
    "import functools\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "\n",
    "class ResidualDenseBlock_5C(nn.Module):\n",
    "    \"\"\"Residual dense block with 5 convolutions (ESRGAN-style).\n",
    "\n",
    "    Each conv receives the block input concatenated with all previous\n",
    "    intermediate feature maps; the output is residual-scaled by 0.2.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, nf=64, gc=32, bias=True):\n",
    "        super(ResidualDenseBlock_5C, self).__init__()\n",
    "        # gc: growth channel, i.e. intermediate channels\n",
    "        # self.conv0 = nn.Conv2d(nf, nf, 3, 1, 1, bias=bias)\n",
    "        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)\n",
    "        # self.mul=MultiHead_SelfAttention_Classifier(64,num_heads=8)\n",
    "        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)\n",
    "        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)\n",
    "        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)\n",
    "        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)\n",
    "        self.bn = nn.BatchNorm2d(nf)\n",
    "        # NOTE(review): named `lrelu` but is actually a PReLU activation.\n",
    "        self.lrelu = nn.PReLU()\n",
    "\n",
    "        # initialization\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x0=self.mul(self.lrelu(self.conv0(x)))\n",
    "        # xm=x+x0\n",
    "        x1 = self.lrelu(self.conv1(x))\n",
    "        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))\n",
    "        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))\n",
    "        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))\n",
    "        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))\n",
    "        x5=self.bn(x5)\n",
    "        # Residual scaling: damp the block output before adding the skip.\n",
    "        return x5 * 0.2+ x\n",
    "\n",
    "\n",
    "class RRDB(nn.Module):\n",
    "    '''Residual in Residual Dense Block'''\n",
    "\n",
    "    def __init__(self, nf, gc=32):\n",
    "        super(RRDB, self).__init__()\n",
    "        self.RDB1 = ResidualDenseBlock_5C(nf, gc)\n",
    "        self.RDB2 = ResidualDenseBlock_5C(nf, gc)\n",
    "        self.RDB3 = ResidualDenseBlock_5C(nf, gc)\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.RDB1(x)\n",
    "        out = self.RDB2(out)\n",
    "        out = self.RDB3(out)\n",
    "        # Outer residual scaling around the three dense blocks.\n",
    "        return out * 0.2 + x\n",
    "\n",
    "\n",
    "class RRDBNet(nn.Module):\n",
    "    \"\"\"Generator: first conv + self-attention branch, RRDB trunk, 4x upsampling.\"\"\"\n",
    "\n",
    "    def __init__(self,RRDB, in_nc, out_nc, nf, nb, gc=32):\n",
    "        super(RRDBNet, self).__init__()\n",
    "        self.conv_first1 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True) # first convolution\n",
    "        self.pr=nn.PReLU()\n",
    "        # NOTE(review): input_dim=64 makes the attention treat the last (width)\n",
    "        # axis as the token dimension -- presumably assumes 64-wide feature\n",
    "        # maps (hr // 4); confirm for other input sizes.\n",
    "        self.mul=MultiHead_SelfAttention_Classifier(64,num_heads=8)\n",
    "        self.conv_first2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) # NOTE(review): never used in forward()\n",
    "        \n",
    "        self.RRDB_trunk = self.make_layer(RRDB,nf,nf, nb) # stack of nb RRDB residual blocks\n",
    "        \n",
    "        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) # conv applied after the trunk\n",
    "        #### upsampling\n",
    "        self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n",
    "        self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n",
    "        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n",
    "        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n",
    "\n",
    "        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x0 = self.conv_first1(x)\n",
    "        x1=self.pr(x0)\n",
    "        x1=self.mul(x1)\n",
    "        # fea = self.conv_first2(x1)\n",
    "        # Residual connection around the attention branch.\n",
    "        fea=x0+x1\n",
    "        trunk = self.trunk_conv(self.RRDB_trunk(fea))\n",
    "        fea = fea + trunk\n",
    "        # Two 2x nearest-neighbor upsamples -> overall 4x super-resolution.\n",
    "        fea = self.lrelu(self.upconv1(F.interpolate(fea, scale_factor=2, mode='nearest')))\n",
    "        fea = self.lrelu(self.upconv2(F.interpolate(fea, scale_factor=2, mode='nearest')))\n",
    "        out = self.conv_last(self.lrelu(self.HRconv(fea)))\n",
    "        return out\n",
    "\n",
    "\n",
    "    \n",
    "    def make_layer(self,block,in_nc,out_nc,nb):\n",
    "        # Build nb copies of `block`; out_nc is accepted but unused.\n",
    "        layers=[]\n",
    "        for _ in range(nb):\n",
    "            layers.append(block(in_nc,gc=32))\n",
    "        return nn.Sequential(*layers)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "4f7ceafe-57f7-4170-a6f0-ed8b11f666de",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Discriminator(nn.Module):\n",
    "    \"\"\"PatchGAN-style discriminator: maps an image to a (1, H/16, W/16) score map.\"\"\"\n",
    "\n",
    "    def __init__(self, input_shape):\n",
    "        super(Discriminator, self).__init__()\n",
    "        self.input_shape = input_shape\n",
    "        in_channels, in_height, in_width = self.input_shape\n",
    "        # Four stride-2 stages halve each spatial dimension 4 times (2**4).\n",
    "        patch_h, patch_w = int(in_height / 2 ** 4), int(in_width / 2 ** 4)\n",
    "        self.output_shape = (1, patch_h, patch_w)\n",
    "\n",
    "        def discriminator_block(in_filters, out_filters, first_block=False):\n",
    "            \"\"\"One stage: 3x3 conv (stride 1) then 3x3 conv (stride 2), with BN+PReLU.\"\"\"\n",
    "            block = [nn.Conv2d(in_filters, out_filters, kernel_size=3, stride=1, padding=1)]\n",
    "            if not first_block:\n",
    "                # BatchNorm is skipped on the very first conv, as in SRGAN.\n",
    "                block.append(nn.BatchNorm2d(out_filters))\n",
    "            block += [\n",
    "                nn.PReLU(),\n",
    "                nn.Conv2d(out_filters, out_filters, kernel_size=3, stride=2, padding=1),\n",
    "                nn.BatchNorm2d(out_filters),\n",
    "                nn.PReLU(),\n",
    "            ]\n",
    "            return block\n",
    "\n",
    "        stages = []\n",
    "        in_filters = in_channels\n",
    "        for i, out_filters in enumerate([64, 128, 256, 512]):\n",
    "            stages.extend(discriminator_block(in_filters, out_filters, first_block=(i == 0)))\n",
    "            in_filters = out_filters\n",
    "        # Final 1-channel conv produces the per-patch realness score map.\n",
    "        stages.append(nn.Conv2d(in_filters, 1, kernel_size=3, stride=1, padding=1))\n",
    "\n",
    "        self.model = nn.Sequential(*stages)\n",
    "\n",
    "    def forward(self, img):\n",
    "        return self.model(img)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "f36b8439-5c21-4995-bb22-f063c5453b9e",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Downloading: \"https://download.pytorch.org/models/vgg19-dcbb9e9d.pth\" to C:\\Users\\dell/.cache\\torch\\hub\\checkpoints\\vgg19-dcbb9e9d.pth\n",
      "100%|███████████████████████████████████████████████████████████████████████████████| 548M/548M [02:47<00:00, 3.42MB/s]\n"
     ]
    }
   ],
   "source": [
    "# Initialize the generator (RRDBNet: 3->3 channels, 64 features, 23 RRDB\n",
    "# blocks), the patch discriminator, and the VGG feature extractor\n",
    "# (downloads pretrained VGG19 weights on first run).\n",
    "generator = RRDBNet(RRDB,3,3,64,23,32)\n",
    "discriminator = Discriminator(input_shape=(channels, *hr_shape))\n",
    "feature_extractor = VGGFeatureExtractor()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "f037e2df-1f1f-44f0-921c-c7524531fb91",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Losses: MSE and BCE-with-logits adversarial terms, L1 content term.\n",
    "criterion_GAN = torch.nn.MSELoss()\n",
    "criterion_GAN1=torch.nn.BCEWithLogitsLoss(reduction='mean')\n",
    "criterion_content = torch.nn.L1Loss()\n",
    "if load_pretrained_models:\n",
    "    # NOTE: torch.load of a fully pickled model executes arbitrary code;\n",
    "    # only load checkpoints from trusted sources.\n",
    "    generator=torch.load(\"saved_model/generator.pth\")\n",
    "    discriminator=torch.load(\"saved_model/discriminator.pth\")\n",
    "# Reuse the feature extractor built in the previous cell (it was\n",
    "# needlessly re-instantiated here, reloading VGG19 weights); keep it in\n",
    "# inference mode since it is a fixed loss network.\n",
    "feature_extractor.eval()\n",
    "if cuda:\n",
    "    generator = generator.cuda()\n",
    "    discriminator = discriminator.cuda()\n",
    "    feature_extractor = feature_extractor.cuda()\n",
    "    criterion_GAN = criterion_GAN.cuda()\n",
    "    criterion_GAN1 = criterion_GAN1.cuda()\n",
    "    criterion_content = criterion_content.cuda()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2b4d3c06-9f9e-4d2e-b809-a7724caba433",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optimizers: Adam with the configured learning rate and betas for both nets.\n",
    "optimizer_G = torch.optim.Adam(generator.parameters(), lr=lr, betas=(b1, b2))\n",
    "optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=lr, betas=(b1, b2))\n",
    "\n",
    "# Tensor factory used to build label tensors on the right device.\n",
    "Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b0bb08e5-902b-4b89-bb77-b73924c66a92",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-batch training loss history plus matching x-axis counters, and\n",
    "# per-epoch test loss history (x positions precomputed for the plots below).\n",
    "train_gen_losses, train_disc_losses, train_counter = [], [], []\n",
    "test_gen_losses, test_disc_losses = [], []\n",
    "test_counter = [idx * len(train_dataloader.dataset) for idx in range(1, n_epochs + 1)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c7b0ca69-67c8-4a54-96aa-527f074dbe7f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Main train/eval loop: one pass over train_dataloader per epoch, then an\n",
    "# evaluation pass over test_dataloader; the generator/discriminator are\n",
    "# checkpointed whenever the test generator loss reaches a new minimum.\n",
    "# BUG FIX: the original cell body was indented one extra level (an\n",
    "# IndentationError on a fresh kernel) and hard-coded range(50) instead of\n",
    "# the configured n_epochs (which test_counter above is sized for).\n",
    "for epoch in range(n_epochs):\n",
    "    ### Training\n",
    "    gen_loss, disc_loss = 0, 0\n",
    "    tqdm_bar = tqdm(train_dataloader, desc=f'Training Epoch {epoch} ', total=int(len(train_dataloader)))\n",
    "    for batch_idx, imgs in enumerate(tqdm_bar):\n",
    "        generator.train()\n",
    "        discriminator.train()\n",
    "        # Configure model input\n",
    "        imgs_lr = Variable(imgs[\"lr\"].type(Tensor))\n",
    "        imgs_hr = Variable(imgs[\"hr\"].type(Tensor))\n",
    "\n",
    "        # Adversarial ground truths (patch-level labels for the discriminator)\n",
    "        valid = Variable(Tensor(np.ones((imgs_lr.size(0), *discriminator.output_shape))), requires_grad=False)\n",
    "        fake = Variable(Tensor(np.zeros((imgs_lr.size(0), *discriminator.output_shape))), requires_grad=False)\n",
    "\n",
    "        ### Train Generator\n",
    "        optimizer_G.zero_grad()\n",
    "        # Generate a high resolution image from low resolution input\n",
    "        gen_hr = generator(imgs_lr)\n",
    "\n",
    "        # Adversarial losses (MSE- and BCE-with-logits-based variants)\n",
    "        loss_GAN = criterion_GAN(discriminator(gen_hr), valid)\n",
    "        loss_GAN1 = criterion_GAN1(discriminator(gen_hr), valid)\n",
    "        # Content (perceptual) loss on VGG features; real features detached\n",
    "        gen_features = feature_extractor(gen_hr)\n",
    "        real_features = feature_extractor(imgs_hr)\n",
    "        loss_content = criterion_content(gen_features, real_features.detach())\n",
    "        # Total generator loss\n",
    "        loss_G = loss_content + 1e-2 * loss_GAN + 1e-2 * loss_GAN1\n",
    "        loss_G.backward()\n",
    "        optimizer_G.step()\n",
    "\n",
    "        ### Train Discriminator\n",
    "        optimizer_D.zero_grad()\n",
    "        # Loss of real and fake images\n",
    "        loss_real = criterion_GAN(discriminator(imgs_hr), valid)\n",
    "        loss_fake = criterion_GAN(discriminator(gen_hr.detach()), fake)\n",
    "        # Total loss\n",
    "        loss_D = (loss_real + loss_fake) / 2\n",
    "        loss_D.backward()\n",
    "        optimizer_D.step()\n",
    "\n",
    "        gen_loss += loss_G.item()\n",
    "        train_gen_losses.append(loss_G.item())\n",
    "        disc_loss += loss_D.item()\n",
    "        train_disc_losses.append(loss_D.item())\n",
    "        train_counter.append(batch_idx * batch_size + imgs_lr.size(0) + epoch * len(train_dataloader.dataset))\n",
    "        tqdm_bar.set_postfix(gen_loss=gen_loss / (batch_idx + 1), disc_loss=disc_loss / (batch_idx + 1))\n",
    "\n",
    "    ### Testing\n",
    "    gen_loss, disc_loss = 0, 0\n",
    "    tqdm_bar = tqdm(test_dataloader, desc=f'Testing Epoch {epoch} ', total=int(len(test_dataloader)))\n",
    "    for batch_idx, imgs in enumerate(tqdm_bar):\n",
    "        generator.eval()\n",
    "        discriminator.eval()\n",
    "        # Configure model input\n",
    "        imgs_lr = Variable(imgs[\"lr\"].type(Tensor))\n",
    "        imgs_hr = Variable(imgs[\"hr\"].type(Tensor))\n",
    "        # Adversarial ground truths\n",
    "        valid = Variable(Tensor(np.ones((imgs_lr.size(0), *discriminator.output_shape))), requires_grad=False)\n",
    "        fake = Variable(Tensor(np.zeros((imgs_lr.size(0), *discriminator.output_shape))), requires_grad=False)\n",
    "\n",
    "        # No gradients needed during evaluation (saves memory; values unchanged).\n",
    "        with torch.no_grad():\n",
    "            ### Eval Generator\n",
    "            gen_hr = generator(imgs_lr)\n",
    "            loss_GAN = criterion_GAN(discriminator(gen_hr), valid)\n",
    "            loss_GAN1 = criterion_GAN1(discriminator(gen_hr), valid)\n",
    "            gen_features = feature_extractor(gen_hr)\n",
    "            real_features = feature_extractor(imgs_hr)\n",
    "            loss_content = criterion_content(gen_features, real_features.detach())\n",
    "            loss_G = loss_content + 1e-2 * loss_GAN + 1e-2 * loss_GAN1\n",
    "\n",
    "            ### Eval Discriminator\n",
    "            loss_real = criterion_GAN(discriminator(imgs_hr), valid)\n",
    "            loss_fake = criterion_GAN(discriminator(gen_hr.detach()), fake)\n",
    "            loss_D = (loss_real + loss_fake) / 2\n",
    "\n",
    "        gen_loss += loss_G.item()\n",
    "        disc_loss += loss_D.item()\n",
    "        tqdm_bar.set_postfix(gen_loss=gen_loss / (batch_idx + 1), disc_loss=disc_loss / (batch_idx + 1))\n",
    "\n",
    "        # Occasionally save a grid of (HR | upsampled LR | SR) for inspection\n",
    "        if random.uniform(0, 1) < 0.1:\n",
    "            imgs_lr = nn.functional.interpolate(imgs_lr, scale_factor=4)\n",
    "            imgs_hr = make_grid(imgs_hr, nrow=1, normalize=True)\n",
    "            gen_hr = make_grid(gen_hr, nrow=1, normalize=True)\n",
    "            imgs_lr = make_grid(imgs_lr, nrow=1, normalize=True)\n",
    "            img_grid = torch.cat((imgs_hr, imgs_lr, gen_hr), -1)\n",
    "            save_image(img_grid, f\"image/{batch_idx}.png\", normalize=False)\n",
    "\n",
    "    test_gen_losses.append(gen_loss / len(test_dataloader))\n",
    "    test_disc_losses.append(disc_loss / len(test_dataloader))\n",
    "\n",
    "    # Checkpoint when this epoch achieved the best (lowest) test generator loss\n",
    "    if np.argmin(test_gen_losses) == len(test_gen_losses) - 1:\n",
    "        torch.save(generator, \"saved_model/generator.pth\")\n",
    "        torch.save(discriminator, \"saved_model/discriminator.pth\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c84563a5-1f7e-4f80-9891-a2d8932a2787",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot per-batch train generator loss vs. per-epoch test generator loss.\n",
    "fig = go.Figure()\n",
    "fig.add_trace(go.Scatter(x=train_counter, y=train_gen_losses, mode='lines', name='Train Generator Loss'))\n",
    "fig.add_trace(go.Scatter(x=test_counter, y=test_gen_losses, marker_symbol='star-diamond', \n",
    "                         marker_color='orange', marker_line_width=1, marker_size=9, mode='markers', name='Test Generator Loss'))\n",
    "# (dropped the stray trailing comma that turned this call into a discarded tuple)\n",
    "fig.update_layout(\n",
    "    width=1000,\n",
    "    height=500,\n",
    "    title=\"Train vs. Test Generator Loss\",\n",
    "    xaxis_title=\"Number of training examples seen\",\n",
    "    yaxis_title=\"Adversarial + Content Loss\")\n",
    "fig.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d42751fc-f8a5-41f7-897a-0d43ec61a6c2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot per-batch train discriminator loss vs. per-epoch test discriminator loss.\n",
    "fig = go.Figure()\n",
    "fig.add_trace(go.Scatter(x=train_counter, y=train_disc_losses, mode='lines', name='Train Discriminator Loss'))\n",
    "fig.add_trace(go.Scatter(x=test_counter, y=test_disc_losses, marker_symbol='star-diamond', \n",
    "                         marker_color='orange', marker_line_width=1, marker_size=9, mode='markers', name='Test Discriminator Loss'))\n",
    "# (dropped the stray trailing comma that turned this call into a discarded tuple)\n",
    "fig.update_layout(\n",
    "    width=1000,\n",
    "    height=500,\n",
    "    title=\"Train vs. Test Discriminator Loss\",\n",
    "    xaxis_title=\"Number of training examples seen\",\n",
    "    yaxis_title=\"Adversarial Loss\")\n",
    "fig.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d1a0bf10-da4f-4807-9fef-c988bb64672e",
   "metadata": {},
   "outputs": [],
   "source": [
    "!nvidia-smi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "956a5648-472a-4de0-b4cf-6af09b61d3b8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Quick qualitative check: super-resolve a single test image.\n",
    "os.makedirs(\"test\", exist_ok=True)\n",
    "# Pick the device so this cell also works on CPU-only machines\n",
    "# (the original crashed without CUDA: .cuda() and no map_location).\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "# NOTE: this checkpoint is a fully pickled module; only load trusted files.\n",
    "test_model = torch.load(\"saved_model/generator.pth\", map_location=device)\n",
    "test_model.eval()\n",
    "img = Image.open(\"test/111.png\").convert(\"RGB\")\n",
    "trans = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])\n",
    "img = trans(img).unsqueeze(0).to(device)  # add batch dim -> (1, 3, 256, 256)\n",
    "with torch.no_grad():\n",
    "    gen_hr = test_model(img)\n",
    "save_image(gen_hr, \"test/test.png\", normalize=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4421ce2a-d9ee-4d57-aeea-8aa50690b5d4",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
