{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "af1c903a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standard library\n",
    "import os\n",
    "import argparse\n",
    "import time\n",
    "\n",
    "# Third-party\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F  # used by split() below\n",
    "import torch.optim as optim\n",
    "import torchvision.utils as utils\n",
    "import pytorch_ssim\n",
    "import cv2\n",
    "import matplotlib.pyplot as plt\n",
    "from torch.autograd import Variable\n",
    "from torch.nn.modules.loss import _Loss\n",
    "from torch.utils.data import DataLoader\n",
    "from torchvision.utils import save_image\n",
    "\n",
    "# Project-local modules. Wildcard imports are kept (downstream cells rely on\n",
    "# names such as Generator that these modules export); their relative order is\n",
    "# preserved so any name shadowing behaves as before.\n",
    "from Trans_unet_Gan import *\n",
    "#from dataset import prepare_data, Dataset\n",
    "from utils import *\n",
    "from utility import plots as plots, ptcolor as ptcolor, ptutils as ptutils, data as data\n",
    "from LAB import *\n",
    "from LCH import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "3f986a75",
   "metadata": {},
   "outputs": [],
   "source": [
    "os.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "24fccf77",
   "metadata": {},
   "outputs": [],
   "source": [
    "def split(img):\n",
    "    output=[]\n",
    "    output.append(F.interpolate(img, scale_factor=0.125))\n",
    "    output.append(F.interpolate(img, scale_factor=0.25))\n",
    "    output.append(F.interpolate(img, scale_factor=0.5))\n",
    "    output.append(img)\n",
    "    return output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "733afcc5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Runtime configuration: 32-bit floats everywhere, and expose only GPU 0.\n",
    "dtype = 'float32'\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n",
    "# NOTE(review): torch.set_default_tensor_type is deprecated in newer PyTorch;\n",
    "# torch.set_default_dtype(torch.float32) is the modern equivalent — confirm\n",
    "# before upgrading the torch version.\n",
    "torch.set_default_tensor_type(torch.FloatTensor)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "52453181",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<All keys matched successfully>"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Initialize the generator and restore trained weights.\n",
    "generator = Generator().cuda()\n",
    "# map_location=\"cpu\" lets the checkpoint load even when it was saved from a\n",
    "# different CUDA device layout; load_state_dict copies the tensors onto the\n",
    "# (CUDA) model parameters afterwards.\n",
    "state_dict = torch.load(\"./saved_models/G/generator_795.pth\", map_location=\"cpu\")\n",
    "generator.load_state_dict(state_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "8314e271",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Generator(\n",
       "  (linear_encoding): Linear(in_features=384, out_features=512, bias=True)\n",
       "  (position_encoding): LearnedPositionalEncoding()\n",
       "  (pe_dropout): Dropout(p=0.0, inplace=False)\n",
       "  (transformer): TransformerModel(\n",
       "    (net): IntermediateSequential(\n",
       "      (0): Residual(\n",
       "        (fn): PreNormDrop(\n",
       "          (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "          (dropout): Dropout(p=0.0, inplace=False)\n",
       "          (fn): SelfAttention(\n",
       "            (qkv): Linear(in_features=512, out_features=1536, bias=False)\n",
       "            (attn_drop): Dropout(p=0.0, inplace=False)\n",
       "            (proj): Linear(in_features=512, out_features=512, bias=True)\n",
       "            (proj_drop): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (1): Residual(\n",
       "        (fn): PreNorm(\n",
       "          (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "          (fn): FeedForward(\n",
       "            (net): Sequential(\n",
       "              (0): Linear(in_features=512, out_features=256, bias=True)\n",
       "              (1): GELU()\n",
       "              (2): Dropout(p=0.0, inplace=False)\n",
       "              (3): Linear(in_features=256, out_features=512, bias=True)\n",
       "              (4): Dropout(p=0.0, inplace=False)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (2): Residual(\n",
       "        (fn): PreNormDrop(\n",
       "          (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "          (dropout): Dropout(p=0.0, inplace=False)\n",
       "          (fn): SelfAttention(\n",
       "            (qkv): Linear(in_features=512, out_features=1536, bias=False)\n",
       "            (attn_drop): Dropout(p=0.0, inplace=False)\n",
       "            (proj): Linear(in_features=512, out_features=512, bias=True)\n",
       "            (proj_drop): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (3): Residual(\n",
       "        (fn): PreNorm(\n",
       "          (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "          (fn): FeedForward(\n",
       "            (net): Sequential(\n",
       "              (0): Linear(in_features=512, out_features=256, bias=True)\n",
       "              (1): GELU()\n",
       "              (2): Dropout(p=0.0, inplace=False)\n",
       "              (3): Linear(in_features=256, out_features=512, bias=True)\n",
       "              (4): Dropout(p=0.0, inplace=False)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (4): Residual(\n",
       "        (fn): PreNormDrop(\n",
       "          (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "          (dropout): Dropout(p=0.0, inplace=False)\n",
       "          (fn): SelfAttention(\n",
       "            (qkv): Linear(in_features=512, out_features=1536, bias=False)\n",
       "            (attn_drop): Dropout(p=0.0, inplace=False)\n",
       "            (proj): Linear(in_features=512, out_features=512, bias=True)\n",
       "            (proj_drop): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (5): Residual(\n",
       "        (fn): PreNorm(\n",
       "          (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "          (fn): FeedForward(\n",
       "            (net): Sequential(\n",
       "              (0): Linear(in_features=512, out_features=256, bias=True)\n",
       "              (1): GELU()\n",
       "              (2): Dropout(p=0.0, inplace=False)\n",
       "              (3): Linear(in_features=256, out_features=512, bias=True)\n",
       "              (4): Dropout(p=0.0, inplace=False)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (6): Residual(\n",
       "        (fn): PreNormDrop(\n",
       "          (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "          (dropout): Dropout(p=0.0, inplace=False)\n",
       "          (fn): SelfAttention(\n",
       "            (qkv): Linear(in_features=512, out_features=1536, bias=False)\n",
       "            (attn_drop): Dropout(p=0.0, inplace=False)\n",
       "            (proj): Linear(in_features=512, out_features=512, bias=True)\n",
       "            (proj_drop): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (7): Residual(\n",
       "        (fn): PreNorm(\n",
       "          (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "          (fn): FeedForward(\n",
       "            (net): Sequential(\n",
       "              (0): Linear(in_features=512, out_features=256, bias=True)\n",
       "              (1): GELU()\n",
       "              (2): Dropout(p=0.0, inplace=False)\n",
       "              (3): Linear(in_features=256, out_features=512, bias=True)\n",
       "              (4): Dropout(p=0.0, inplace=False)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (pre_head_ln): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
       "  (Conv_x): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "  (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "  (relu): ReLU(inplace=True)\n",
       "  (rgb_to_feature): ModuleList(\n",
       "    (0): from_rgb(\n",
       "      (conv_1): _equalized_conv2d(32, 3, 1, 1)\n",
       "      (pixNorm): PixelwiseNorm()\n",
       "      (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "    )\n",
       "    (1): from_rgb(\n",
       "      (conv_1): _equalized_conv2d(64, 3, 1, 1)\n",
       "      (pixNorm): PixelwiseNorm()\n",
       "      (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "    )\n",
       "    (2): from_rgb(\n",
       "      (conv_1): _equalized_conv2d(128, 3, 1, 1)\n",
       "      (pixNorm): PixelwiseNorm()\n",
       "      (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "    )\n",
       "  )\n",
       "  (feature_to_rgb): ModuleList(\n",
       "    (0): to_rgb(\n",
       "      (conv_1): _equalized_conv2d(3, 32, 1, 1)\n",
       "    )\n",
       "    (1): to_rgb(\n",
       "      (conv_1): _equalized_conv2d(3, 64, 1, 1)\n",
       "    )\n",
       "    (2): to_rgb(\n",
       "      (conv_1): _equalized_conv2d(3, 128, 1, 1)\n",
       "    )\n",
       "    (3): to_rgb(\n",
       "      (conv_1): _equalized_conv2d(3, 256, 1, 1)\n",
       "    )\n",
       "  )\n",
       "  (Maxpool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "  (Maxpool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "  (Maxpool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "  (Maxpool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "  (Maxpool4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "  (Conv1): conv_block(\n",
       "    (conv_1): _equalized_conv2d(16, 3, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(16, 16, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(16, 16, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Conv1_1): conv_block(\n",
       "    (conv_1): _equalized_conv2d(32, 16, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(32, 32, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(32, 32, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Conv2): conv_block(\n",
       "    (conv_1): _equalized_conv2d(32, 32, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(32, 32, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(32, 32, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Conv2_1): conv_block(\n",
       "    (conv_1): _equalized_conv2d(64, 32, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(64, 64, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(64, 64, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Conv3): conv_block(\n",
       "    (conv_1): _equalized_conv2d(64, 64, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(64, 64, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(64, 64, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Conv3_1): conv_block(\n",
       "    (conv_1): _equalized_conv2d(128, 64, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(128, 128, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(128, 128, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Conv4): conv_block(\n",
       "    (conv_1): _equalized_conv2d(128, 128, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(128, 128, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(128, 128, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Conv4_1): conv_block(\n",
       "    (conv_1): _equalized_conv2d(256, 128, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(256, 256, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(256, 256, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Conv5): conv_block(\n",
       "    (conv_1): _equalized_conv2d(256, 512, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(256, 256, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(256, 256, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (mtc): ChannelTransformer(\n",
       "    (embeddings_1): Channel_Embeddings(\n",
       "      (patch_embeddings): Conv2d(32, 32, kernel_size=(32, 32), stride=(32, 32))\n",
       "      (dropout): Dropout(p=0.1, inplace=False)\n",
       "    )\n",
       "    (embeddings_2): Channel_Embeddings(\n",
       "      (patch_embeddings): Conv2d(64, 64, kernel_size=(16, 16), stride=(16, 16))\n",
       "      (dropout): Dropout(p=0.1, inplace=False)\n",
       "    )\n",
       "    (embeddings_3): Channel_Embeddings(\n",
       "      (patch_embeddings): Conv2d(128, 128, kernel_size=(8, 8), stride=(8, 8))\n",
       "      (dropout): Dropout(p=0.1, inplace=False)\n",
       "    )\n",
       "    (embeddings_4): Channel_Embeddings(\n",
       "      (patch_embeddings): Conv2d(256, 256, kernel_size=(4, 4), stride=(4, 4))\n",
       "      (dropout): Dropout(p=0.1, inplace=False)\n",
       "    )\n",
       "    (encoder): Encoder(\n",
       "      (layer): ModuleList(\n",
       "        (0): Block_ViT(\n",
       "          (attn_norm1): LayerNorm((32,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm2): LayerNorm((64,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm3): LayerNorm((128,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm4): LayerNorm((256,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm): LayerNorm((480,), eps=1e-06, elementwise_affine=True)\n",
       "          (channel_attn): Attention_org(\n",
       "            (query1): ModuleList(\n",
       "              (0): Linear(in_features=32, out_features=32, bias=False)\n",
       "              (1): Linear(in_features=32, out_features=32, bias=False)\n",
       "              (2): Linear(in_features=32, out_features=32, bias=False)\n",
       "              (3): Linear(in_features=32, out_features=32, bias=False)\n",
       "            )\n",
       "            (query2): ModuleList(\n",
       "              (0): Linear(in_features=64, out_features=64, bias=False)\n",
       "              (1): Linear(in_features=64, out_features=64, bias=False)\n",
       "              (2): Linear(in_features=64, out_features=64, bias=False)\n",
       "              (3): Linear(in_features=64, out_features=64, bias=False)\n",
       "            )\n",
       "            (query3): ModuleList(\n",
       "              (0): Linear(in_features=128, out_features=128, bias=False)\n",
       "              (1): Linear(in_features=128, out_features=128, bias=False)\n",
       "              (2): Linear(in_features=128, out_features=128, bias=False)\n",
       "              (3): Linear(in_features=128, out_features=128, bias=False)\n",
       "            )\n",
       "            (query4): ModuleList(\n",
       "              (0): Linear(in_features=256, out_features=256, bias=False)\n",
       "              (1): Linear(in_features=256, out_features=256, bias=False)\n",
       "              (2): Linear(in_features=256, out_features=256, bias=False)\n",
       "              (3): Linear(in_features=256, out_features=256, bias=False)\n",
       "            )\n",
       "            (key): ModuleList(\n",
       "              (0): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (1): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (2): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (3): Linear(in_features=480, out_features=480, bias=False)\n",
       "            )\n",
       "            (value): ModuleList(\n",
       "              (0): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (1): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (2): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (3): Linear(in_features=480, out_features=480, bias=False)\n",
       "            )\n",
       "            (psi): InstanceNorm2d(4, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "            (softmax): Softmax(dim=3)\n",
       "            (out1): Linear(in_features=32, out_features=32, bias=False)\n",
       "            (out2): Linear(in_features=64, out_features=64, bias=False)\n",
       "            (out3): Linear(in_features=128, out_features=128, bias=False)\n",
       "            (out4): Linear(in_features=256, out_features=256, bias=False)\n",
       "            (attn_dropout): Dropout(p=0.1, inplace=False)\n",
       "            (proj_dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "          (ffn_norm1): LayerNorm((32,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn_norm2): LayerNorm((64,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn_norm3): LayerNorm((128,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn_norm4): LayerNorm((256,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn1): Mlp(\n",
       "            (fc1): Linear(in_features=32, out_features=128, bias=True)\n",
       "            (fc2): Linear(in_features=128, out_features=32, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "          (ffn2): Mlp(\n",
       "            (fc1): Linear(in_features=64, out_features=256, bias=True)\n",
       "            (fc2): Linear(in_features=256, out_features=64, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "          (ffn3): Mlp(\n",
       "            (fc1): Linear(in_features=128, out_features=512, bias=True)\n",
       "            (fc2): Linear(in_features=512, out_features=128, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "          (ffn4): Mlp(\n",
       "            (fc1): Linear(in_features=256, out_features=1024, bias=True)\n",
       "            (fc2): Linear(in_features=1024, out_features=256, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (1): Block_ViT(\n",
       "          (attn_norm1): LayerNorm((32,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm2): LayerNorm((64,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm3): LayerNorm((128,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm4): LayerNorm((256,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm): LayerNorm((480,), eps=1e-06, elementwise_affine=True)\n",
       "          (channel_attn): Attention_org(\n",
       "            (query1): ModuleList(\n",
       "              (0): Linear(in_features=32, out_features=32, bias=False)\n",
       "              (1): Linear(in_features=32, out_features=32, bias=False)\n",
       "              (2): Linear(in_features=32, out_features=32, bias=False)\n",
       "              (3): Linear(in_features=32, out_features=32, bias=False)\n",
       "            )\n",
       "            (query2): ModuleList(\n",
       "              (0): Linear(in_features=64, out_features=64, bias=False)\n",
       "              (1): Linear(in_features=64, out_features=64, bias=False)\n",
       "              (2): Linear(in_features=64, out_features=64, bias=False)\n",
       "              (3): Linear(in_features=64, out_features=64, bias=False)\n",
       "            )\n",
       "            (query3): ModuleList(\n",
       "              (0): Linear(in_features=128, out_features=128, bias=False)\n",
       "              (1): Linear(in_features=128, out_features=128, bias=False)\n",
       "              (2): Linear(in_features=128, out_features=128, bias=False)\n",
       "              (3): Linear(in_features=128, out_features=128, bias=False)\n",
       "            )\n",
       "            (query4): ModuleList(\n",
       "              (0): Linear(in_features=256, out_features=256, bias=False)\n",
       "              (1): Linear(in_features=256, out_features=256, bias=False)\n",
       "              (2): Linear(in_features=256, out_features=256, bias=False)\n",
       "              (3): Linear(in_features=256, out_features=256, bias=False)\n",
       "            )\n",
       "            (key): ModuleList(\n",
       "              (0): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (1): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (2): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (3): Linear(in_features=480, out_features=480, bias=False)\n",
       "            )\n",
       "            (value): ModuleList(\n",
       "              (0): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (1): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (2): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (3): Linear(in_features=480, out_features=480, bias=False)\n",
       "            )\n",
       "            (psi): InstanceNorm2d(4, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "            (softmax): Softmax(dim=3)\n",
       "            (out1): Linear(in_features=32, out_features=32, bias=False)\n",
       "            (out2): Linear(in_features=64, out_features=64, bias=False)\n",
       "            (out3): Linear(in_features=128, out_features=128, bias=False)\n",
       "            (out4): Linear(in_features=256, out_features=256, bias=False)\n",
       "            (attn_dropout): Dropout(p=0.1, inplace=False)\n",
       "            (proj_dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "          (ffn_norm1): LayerNorm((32,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn_norm2): LayerNorm((64,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn_norm3): LayerNorm((128,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn_norm4): LayerNorm((256,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn1): Mlp(\n",
       "            (fc1): Linear(in_features=32, out_features=128, bias=True)\n",
       "            (fc2): Linear(in_features=128, out_features=32, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "          (ffn2): Mlp(\n",
       "            (fc1): Linear(in_features=64, out_features=256, bias=True)\n",
       "            (fc2): Linear(in_features=256, out_features=64, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "          (ffn3): Mlp(\n",
       "            (fc1): Linear(in_features=128, out_features=512, bias=True)\n",
       "            (fc2): Linear(in_features=512, out_features=128, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "          (ffn4): Mlp(\n",
       "            (fc1): Linear(in_features=256, out_features=1024, bias=True)\n",
       "            (fc2): Linear(in_features=1024, out_features=256, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (2): Block_ViT(\n",
       "          (attn_norm1): LayerNorm((32,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm2): LayerNorm((64,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm3): LayerNorm((128,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm4): LayerNorm((256,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm): LayerNorm((480,), eps=1e-06, elementwise_affine=True)\n",
       "          (channel_attn): Attention_org(\n",
       "            (query1): ModuleList(\n",
       "              (0): Linear(in_features=32, out_features=32, bias=False)\n",
       "              (1): Linear(in_features=32, out_features=32, bias=False)\n",
       "              (2): Linear(in_features=32, out_features=32, bias=False)\n",
       "              (3): Linear(in_features=32, out_features=32, bias=False)\n",
       "            )\n",
       "            (query2): ModuleList(\n",
       "              (0): Linear(in_features=64, out_features=64, bias=False)\n",
       "              (1): Linear(in_features=64, out_features=64, bias=False)\n",
       "              (2): Linear(in_features=64, out_features=64, bias=False)\n",
       "              (3): Linear(in_features=64, out_features=64, bias=False)\n",
       "            )\n",
       "            (query3): ModuleList(\n",
       "              (0): Linear(in_features=128, out_features=128, bias=False)\n",
       "              (1): Linear(in_features=128, out_features=128, bias=False)\n",
       "              (2): Linear(in_features=128, out_features=128, bias=False)\n",
       "              (3): Linear(in_features=128, out_features=128, bias=False)\n",
       "            )\n",
       "            (query4): ModuleList(\n",
       "              (0): Linear(in_features=256, out_features=256, bias=False)\n",
       "              (1): Linear(in_features=256, out_features=256, bias=False)\n",
       "              (2): Linear(in_features=256, out_features=256, bias=False)\n",
       "              (3): Linear(in_features=256, out_features=256, bias=False)\n",
       "            )\n",
       "            (key): ModuleList(\n",
       "              (0): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (1): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (2): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (3): Linear(in_features=480, out_features=480, bias=False)\n",
       "            )\n",
       "            (value): ModuleList(\n",
       "              (0): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (1): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (2): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (3): Linear(in_features=480, out_features=480, bias=False)\n",
       "            )\n",
       "            (psi): InstanceNorm2d(4, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "            (softmax): Softmax(dim=3)\n",
       "            (out1): Linear(in_features=32, out_features=32, bias=False)\n",
       "            (out2): Linear(in_features=64, out_features=64, bias=False)\n",
       "            (out3): Linear(in_features=128, out_features=128, bias=False)\n",
       "            (out4): Linear(in_features=256, out_features=256, bias=False)\n",
       "            (attn_dropout): Dropout(p=0.1, inplace=False)\n",
       "            (proj_dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "          (ffn_norm1): LayerNorm((32,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn_norm2): LayerNorm((64,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn_norm3): LayerNorm((128,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn_norm4): LayerNorm((256,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn1): Mlp(\n",
       "            (fc1): Linear(in_features=32, out_features=128, bias=True)\n",
       "            (fc2): Linear(in_features=128, out_features=32, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "          (ffn2): Mlp(\n",
       "            (fc1): Linear(in_features=64, out_features=256, bias=True)\n",
       "            (fc2): Linear(in_features=256, out_features=64, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "          (ffn3): Mlp(\n",
       "            (fc1): Linear(in_features=128, out_features=512, bias=True)\n",
       "            (fc2): Linear(in_features=512, out_features=128, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "          (ffn4): Mlp(\n",
       "            (fc1): Linear(in_features=256, out_features=1024, bias=True)\n",
       "            (fc2): Linear(in_features=1024, out_features=256, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (3): Block_ViT(\n",
       "          (attn_norm1): LayerNorm((32,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm2): LayerNorm((64,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm3): LayerNorm((128,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm4): LayerNorm((256,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn_norm): LayerNorm((480,), eps=1e-06, elementwise_affine=True)\n",
       "          (channel_attn): Attention_org(\n",
       "            (query1): ModuleList(\n",
       "              (0): Linear(in_features=32, out_features=32, bias=False)\n",
       "              (1): Linear(in_features=32, out_features=32, bias=False)\n",
       "              (2): Linear(in_features=32, out_features=32, bias=False)\n",
       "              (3): Linear(in_features=32, out_features=32, bias=False)\n",
       "            )\n",
       "            (query2): ModuleList(\n",
       "              (0): Linear(in_features=64, out_features=64, bias=False)\n",
       "              (1): Linear(in_features=64, out_features=64, bias=False)\n",
       "              (2): Linear(in_features=64, out_features=64, bias=False)\n",
       "              (3): Linear(in_features=64, out_features=64, bias=False)\n",
       "            )\n",
       "            (query3): ModuleList(\n",
       "              (0): Linear(in_features=128, out_features=128, bias=False)\n",
       "              (1): Linear(in_features=128, out_features=128, bias=False)\n",
       "              (2): Linear(in_features=128, out_features=128, bias=False)\n",
       "              (3): Linear(in_features=128, out_features=128, bias=False)\n",
       "            )\n",
       "            (query4): ModuleList(\n",
       "              (0): Linear(in_features=256, out_features=256, bias=False)\n",
       "              (1): Linear(in_features=256, out_features=256, bias=False)\n",
       "              (2): Linear(in_features=256, out_features=256, bias=False)\n",
       "              (3): Linear(in_features=256, out_features=256, bias=False)\n",
       "            )\n",
       "            (key): ModuleList(\n",
       "              (0): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (1): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (2): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (3): Linear(in_features=480, out_features=480, bias=False)\n",
       "            )\n",
       "            (value): ModuleList(\n",
       "              (0): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (1): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (2): Linear(in_features=480, out_features=480, bias=False)\n",
       "              (3): Linear(in_features=480, out_features=480, bias=False)\n",
       "            )\n",
       "            (psi): InstanceNorm2d(4, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)\n",
       "            (softmax): Softmax(dim=3)\n",
       "            (out1): Linear(in_features=32, out_features=32, bias=False)\n",
       "            (out2): Linear(in_features=64, out_features=64, bias=False)\n",
       "            (out3): Linear(in_features=128, out_features=128, bias=False)\n",
       "            (out4): Linear(in_features=256, out_features=256, bias=False)\n",
       "            (attn_dropout): Dropout(p=0.1, inplace=False)\n",
       "            (proj_dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "          (ffn_norm1): LayerNorm((32,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn_norm2): LayerNorm((64,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn_norm3): LayerNorm((128,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn_norm4): LayerNorm((256,), eps=1e-06, elementwise_affine=True)\n",
       "          (ffn1): Mlp(\n",
       "            (fc1): Linear(in_features=32, out_features=128, bias=True)\n",
       "            (fc2): Linear(in_features=128, out_features=32, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "          (ffn2): Mlp(\n",
       "            (fc1): Linear(in_features=64, out_features=256, bias=True)\n",
       "            (fc2): Linear(in_features=256, out_features=64, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "          (ffn3): Mlp(\n",
       "            (fc1): Linear(in_features=128, out_features=512, bias=True)\n",
       "            (fc2): Linear(in_features=512, out_features=128, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "          (ffn4): Mlp(\n",
       "            (fc1): Linear(in_features=256, out_features=1024, bias=True)\n",
       "            (fc2): Linear(in_features=1024, out_features=256, bias=True)\n",
       "            (act_fn): GELU()\n",
       "            (dropout): Dropout(p=0.0, inplace=False)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (encoder_norm1): LayerNorm((32,), eps=1e-06, elementwise_affine=True)\n",
       "      (encoder_norm2): LayerNorm((64,), eps=1e-06, elementwise_affine=True)\n",
       "      (encoder_norm3): LayerNorm((128,), eps=1e-06, elementwise_affine=True)\n",
       "      (encoder_norm4): LayerNorm((256,), eps=1e-06, elementwise_affine=True)\n",
       "    )\n",
       "    (reconstruct_1): Reconstruct(\n",
       "      (conv): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1))\n",
       "      (norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (activation): ReLU(inplace=True)\n",
       "    )\n",
       "    (reconstruct_2): Reconstruct(\n",
       "      (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "      (norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (activation): ReLU(inplace=True)\n",
       "    )\n",
       "    (reconstruct_3): Reconstruct(\n",
       "      (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "      (norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (activation): ReLU(inplace=True)\n",
       "    )\n",
       "    (reconstruct_4): Reconstruct(\n",
       "      (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))\n",
       "      (norm): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (activation): ReLU(inplace=True)\n",
       "    )\n",
       "  )\n",
       "  (Up5): up_conv(\n",
       "    (conv_1): _equalized_conv2d(256, 256, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(256, 256, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(256, 256, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (coatt5): CCA(\n",
       "    (mlp_x): Sequential(\n",
       "      (0): Flatten()\n",
       "      (1): Linear(in_features=256, out_features=256, bias=True)\n",
       "    )\n",
       "    (mlp_g): Sequential(\n",
       "      (0): Flatten()\n",
       "      (1): Linear(in_features=256, out_features=256, bias=True)\n",
       "    )\n",
       "    (relu): ReLU(inplace=True)\n",
       "  )\n",
       "  (Up_conv5): conv_block(\n",
       "    (conv_1): _equalized_conv2d(256, 512, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(256, 256, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(256, 256, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Up_conv5_1): conv_block(\n",
       "    (conv_1): _equalized_conv2d(256, 256, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(256, 256, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(256, 256, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Up4): up_conv(\n",
       "    (conv_1): _equalized_conv2d(128, 256, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(128, 128, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(128, 128, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (coatt4): CCA(\n",
       "    (mlp_x): Sequential(\n",
       "      (0): Flatten()\n",
       "      (1): Linear(in_features=128, out_features=128, bias=True)\n",
       "    )\n",
       "    (mlp_g): Sequential(\n",
       "      (0): Flatten()\n",
       "      (1): Linear(in_features=128, out_features=128, bias=True)\n",
       "    )\n",
       "    (relu): ReLU(inplace=True)\n",
       "  )\n",
       "  (Up_conv4): conv_block(\n",
       "    (conv_1): _equalized_conv2d(128, 256, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(128, 128, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(128, 128, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Up_conv4_1): conv_block(\n",
       "    (conv_1): _equalized_conv2d(128, 128, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(128, 128, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(128, 128, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Up3): up_conv(\n",
       "    (conv_1): _equalized_conv2d(64, 128, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(64, 64, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(64, 64, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (coatt3): CCA(\n",
       "    (mlp_x): Sequential(\n",
       "      (0): Flatten()\n",
       "      (1): Linear(in_features=64, out_features=64, bias=True)\n",
       "    )\n",
       "    (mlp_g): Sequential(\n",
       "      (0): Flatten()\n",
       "      (1): Linear(in_features=64, out_features=64, bias=True)\n",
       "    )\n",
       "    (relu): ReLU(inplace=True)\n",
       "  )\n",
       "  (Up_conv3): conv_block(\n",
       "    (conv_1): _equalized_conv2d(64, 128, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(64, 64, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(64, 64, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Up_conv3_1): conv_block(\n",
       "    (conv_1): _equalized_conv2d(64, 64, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(64, 64, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(64, 64, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Up2): up_conv(\n",
       "    (conv_1): _equalized_conv2d(32, 64, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(32, 32, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(32, 32, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (coatt2): CCA(\n",
       "    (mlp_x): Sequential(\n",
       "      (0): Flatten()\n",
       "      (1): Linear(in_features=32, out_features=32, bias=True)\n",
       "    )\n",
       "    (mlp_g): Sequential(\n",
       "      (0): Flatten()\n",
       "      (1): Linear(in_features=32, out_features=32, bias=True)\n",
       "    )\n",
       "    (relu): ReLU(inplace=True)\n",
       "  )\n",
       "  (Up_conv2): conv_block(\n",
       "    (conv_1): _equalized_conv2d(32, 64, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(32, 32, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(32, 32, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Up_conv2_1): conv_block(\n",
       "    (conv_1): _equalized_conv2d(32, 32, 1, 1)\n",
       "    (conv_2): _equalized_conv2d(32, 32, 3, 3)\n",
       "    (conv_3): _equalized_conv2d(32, 32, 3, 3)\n",
       "    (pixNorm): PixelwiseNorm()\n",
       "    (lrelu): LeakyReLU(negative_slope=0.2)\n",
       "  )\n",
       "  (Conv): Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1))\n",
       ")"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Switch the generator to inference mode (standard nn.Module.eval(): disables\n",
     "# dropout and uses BatchNorm running statistics); the bare last expression makes\n",
     "# Jupyter display the model repr seen in this cell's output.\n",
     "generator.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a238c2c1",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\users\\plt\\.conda\\envs\\py37\\lib\\site-packages\\torch\\nn\\functional.py:3063: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.\n",
      "  \"See the documentation of nn.Upsample for details.\".format(mode))\n"
     ]
    }
   ],
   "source": [
    "path='./TestU_90/input/'#要改\n",
    "path_list = os.listdir(path)\n",
    "path_list.sort(key=lambda x:int(x.split('.')[0]))\n",
    "i=1\n",
    "for item in path_list:\n",
    "    impath=path+item\n",
    "    imgx= cv2.imread(path+item)\n",
    "    imgx=cv2.resize(imgx,(256,256))\n",
    "    imgx = cv2.cvtColor(imgx, cv2.COLOR_BGR2RGB)\n",
    "    imgx = np.array(imgx).astype(dtype)\n",
    "\n",
    "    imgx= torch.from_numpy(imgx)\n",
    "    imgx=imgx.permute(2,0,1).unsqueeze(0)\n",
    "    imgx=imgx/255.0\n",
    "    #plt.imshow(imgx[0,:,:,:])\n",
    "    #plt.show()\n",
    "    imgx = Variable(imgx).cuda()\n",
    "    #print(imgx.shape)\n",
    "    output=generator(imgx)\n",
    "    out=output[3].data\n",
    "    save_image(out, \"./output_U90/%d.jpg\" % (i), nrow=5, normalize=True)\n",
    "    i=i+1\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e3c61990",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f8cc41fc",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9742ccd4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "800615b8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f7c5fff2",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e910be20",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "60ecdcff",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "652135ca",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "16b71a50",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "be831848",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "71ce6611",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
