{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2024-01-19T04:58:10.694373300Z",
     "start_time": "2024-01-19T04:58:10.673439100Z"
    }
   },
   "outputs": [],
   "source": [
    "import os\n",
    "import platform\n",
    "\n",
    "import torch\n",
    "\n",
    "# NOTE: the previous `!BASICSR_JIT=True` / `!set BASICSR_JIT=True` ran in a\n",
    "# throwaway subshell, so the variable never reached this kernel's process.\n",
    "# os.environ sets it in-process on every OS, visible to basicsr when it is\n",
    "# imported in the next cell.\n",
    "os.environ['BASICSR_JIT'] = 'True'\n",
    "\n",
    "os_name = platform.system()  # do not shadow the `platform` module with a str\n",
    "\n",
    "# The checkpoint paths were identical in both branches of the old Linux/Windows\n",
    "# if/else, so no platform branching is needed.\n",
    "pretrained_discriminator = \"../experiments/ridcp_first_train/models/net_d_best_.pth\"\n",
    "pretrained_hqp_path = \"../pretrained_models/pretrained_HQPs_renamed.pth\"\n",
    "pretrained_net_path = \"../pretrained_models/pretrained_RIDCP_renamed.pth\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "outputs": [],
   "source": [
    "# Project imports: RIDCP architecture, discriminator and GAN loss.\n",
    "# NOTE(review): GANLoss is imported but not used in the visible cells --\n",
    "# confirm it is needed, or drop it.\n",
    "from basicsr import GANLoss\n",
    "from PIL import Image\n",
    "from torchvision.transforms import ToTensor\n",
    "from basicsr.archs.ridcp_new_arch import RIDCPNew\n",
    "from basicsr.archs.ridcp.discriminator import UNetDiscriminatorSN"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-01-19T04:58:13.483605100Z",
     "start_time": "2024-01-19T04:58:13.476749400Z"
    }
   },
   "id": "4a1af87d6d9edc5"
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "outputs": [
    {
     "data": {
      "text/plain": "RIDCPNew(\n  (vq_encoder): VQEncoder(\n    (in_conv): Conv2d(3, 64, kernel_size=(4, 4), stride=(1, 1), padding=(1, 1))\n    (blocks): ModuleList(\n      (0): Sequential(\n        (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n        (1): ResBlock(\n          (conv): Sequential(\n            (0): NormLayer(\n              (norm): GroupNorm(32, 128, eps=1e-06, affine=True)\n            )\n            (1): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n            (3): NormLayer(\n              (norm): GroupNorm(32, 128, eps=1e-06, affine=True)\n            )\n            (4): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (5): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n          )\n        )\n        (2): ResBlock(\n          (conv): Sequential(\n            (0): NormLayer(\n              (norm): GroupNorm(32, 128, eps=1e-06, affine=True)\n            )\n            (1): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n            (3): NormLayer(\n              (norm): GroupNorm(32, 128, eps=1e-06, affine=True)\n            )\n            (4): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (5): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n          )\n        )\n      )\n      (1): Sequential(\n        (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n        (1): ResBlock(\n          (conv): Sequential(\n            (0): NormLayer(\n              (norm): GroupNorm(32, 256, eps=1e-06, affine=True)\n            )\n            (1): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), 
padding=(1, 1))\n            (3): NormLayer(\n              (norm): GroupNorm(32, 256, eps=1e-06, affine=True)\n            )\n            (4): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (5): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n          )\n        )\n        (2): ResBlock(\n          (conv): Sequential(\n            (0): NormLayer(\n              (norm): GroupNorm(32, 256, eps=1e-06, affine=True)\n            )\n            (1): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n            (3): NormLayer(\n              (norm): GroupNorm(32, 256, eps=1e-06, affine=True)\n            )\n            (4): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (5): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n          )\n        )\n      )\n    )\n  )\n  (ridcp_encoder): SwinLayers(\n    (swin_blks): ModuleList(\n      (0-3): 4 x RSTB(\n        (residual_group): BasicLayer(\n          dim=256, input_resolution=(32, 32), depth=6\n          (blocks): ModuleList(\n            (0): SwinTransformerBlock(\n              dim=256, input_resolution=(32, 32), num_heads=8, window_size=8, shift_size=0, mlp_ratio=4.0\n              (norm1): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n              (attn): WindowAttention(\n                dim=256, window_size=(8, 8), num_heads=8\n                (qkv): Linear(in_features=256, out_features=768, bias=True)\n                (attn_drop): Dropout(p=0.0, inplace=False)\n                (proj): Linear(in_features=256, out_features=256, bias=True)\n                (proj_drop): Dropout(p=0.0, inplace=False)\n                (softmax): Softmax(dim=-1)\n              )\n              (drop_path): Identity()\n              (norm2): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n              (mlp): 
Mlp(\n                (fc1): Linear(in_features=256, out_features=1024, bias=True)\n                (act): GELU(approximate='none')\n                (fc2): Linear(in_features=1024, out_features=256, bias=True)\n                (drop): Dropout(p=0.0, inplace=False)\n              )\n            )\n            (1): SwinTransformerBlock(\n              dim=256, input_resolution=(32, 32), num_heads=8, window_size=8, shift_size=4, mlp_ratio=4.0\n              (norm1): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n              (attn): WindowAttention(\n                dim=256, window_size=(8, 8), num_heads=8\n                (qkv): Linear(in_features=256, out_features=768, bias=True)\n                (attn_drop): Dropout(p=0.0, inplace=False)\n                (proj): Linear(in_features=256, out_features=256, bias=True)\n                (proj_drop): Dropout(p=0.0, inplace=False)\n                (softmax): Softmax(dim=-1)\n              )\n              (drop_path): Identity()\n              (norm2): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n              (mlp): Mlp(\n                (fc1): Linear(in_features=256, out_features=1024, bias=True)\n                (act): GELU(approximate='none')\n                (fc2): Linear(in_features=1024, out_features=256, bias=True)\n                (drop): Dropout(p=0.0, inplace=False)\n              )\n            )\n            (2): SwinTransformerBlock(\n              dim=256, input_resolution=(32, 32), num_heads=8, window_size=8, shift_size=0, mlp_ratio=4.0\n              (norm1): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n              (attn): WindowAttention(\n                dim=256, window_size=(8, 8), num_heads=8\n                (qkv): Linear(in_features=256, out_features=768, bias=True)\n                (attn_drop): Dropout(p=0.0, inplace=False)\n                (proj): Linear(in_features=256, out_features=256, bias=True)\n                (proj_drop): Dropout(p=0.0, inplace=False)\n         
       (softmax): Softmax(dim=-1)\n              )\n              (drop_path): Identity()\n              (norm2): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n              (mlp): Mlp(\n                (fc1): Linear(in_features=256, out_features=1024, bias=True)\n                (act): GELU(approximate='none')\n                (fc2): Linear(in_features=1024, out_features=256, bias=True)\n                (drop): Dropout(p=0.0, inplace=False)\n              )\n            )\n            (3): SwinTransformerBlock(\n              dim=256, input_resolution=(32, 32), num_heads=8, window_size=8, shift_size=4, mlp_ratio=4.0\n              (norm1): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n              (attn): WindowAttention(\n                dim=256, window_size=(8, 8), num_heads=8\n                (qkv): Linear(in_features=256, out_features=768, bias=True)\n                (attn_drop): Dropout(p=0.0, inplace=False)\n                (proj): Linear(in_features=256, out_features=256, bias=True)\n                (proj_drop): Dropout(p=0.0, inplace=False)\n                (softmax): Softmax(dim=-1)\n              )\n              (drop_path): Identity()\n              (norm2): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n              (mlp): Mlp(\n                (fc1): Linear(in_features=256, out_features=1024, bias=True)\n                (act): GELU(approximate='none')\n                (fc2): Linear(in_features=1024, out_features=256, bias=True)\n                (drop): Dropout(p=0.0, inplace=False)\n              )\n            )\n            (4): SwinTransformerBlock(\n              dim=256, input_resolution=(32, 32), num_heads=8, window_size=8, shift_size=0, mlp_ratio=4.0\n              (norm1): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n              (attn): WindowAttention(\n                dim=256, window_size=(8, 8), num_heads=8\n                (qkv): Linear(in_features=256, out_features=768, bias=True)\n                
(attn_drop): Dropout(p=0.0, inplace=False)\n                (proj): Linear(in_features=256, out_features=256, bias=True)\n                (proj_drop): Dropout(p=0.0, inplace=False)\n                (softmax): Softmax(dim=-1)\n              )\n              (drop_path): Identity()\n              (norm2): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n              (mlp): Mlp(\n                (fc1): Linear(in_features=256, out_features=1024, bias=True)\n                (act): GELU(approximate='none')\n                (fc2): Linear(in_features=1024, out_features=256, bias=True)\n                (drop): Dropout(p=0.0, inplace=False)\n              )\n            )\n            (5): SwinTransformerBlock(\n              dim=256, input_resolution=(32, 32), num_heads=8, window_size=8, shift_size=4, mlp_ratio=4.0\n              (norm1): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n              (attn): WindowAttention(\n                dim=256, window_size=(8, 8), num_heads=8\n                (qkv): Linear(in_features=256, out_features=768, bias=True)\n                (attn_drop): Dropout(p=0.0, inplace=False)\n                (proj): Linear(in_features=256, out_features=256, bias=True)\n                (proj_drop): Dropout(p=0.0, inplace=False)\n                (softmax): Softmax(dim=-1)\n              )\n              (drop_path): Identity()\n              (norm2): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n              (mlp): Mlp(\n                (fc1): Linear(in_features=256, out_features=1024, bias=True)\n                (act): GELU(approximate='none')\n                (fc2): Linear(in_features=1024, out_features=256, bias=True)\n                (drop): Dropout(p=0.0, inplace=False)\n              )\n            )\n          )\n        )\n        (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n        (patch_embed): PatchEmbed()\n        (patch_unembed): PatchUnEmbed()\n      )\n    )\n  )\n  
(ridcp_decoder): RIDCPDecoder(\n    (upsampler): ModuleList(\n      (0): Sequential(\n        (0): Upsample(scale_factor=2.0, mode='nearest')\n        (1): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n        (2): ResBlock(\n          (conv): Sequential(\n            (0): NormLayer(\n              (norm): GroupNorm(32, 128, eps=1e-06, affine=True)\n            )\n            (1): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n            (3): NormLayer(\n              (norm): GroupNorm(32, 128, eps=1e-06, affine=True)\n            )\n            (4): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (5): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n          )\n        )\n        (3): ResBlock(\n          (conv): Sequential(\n            (0): NormLayer(\n              (norm): GroupNorm(32, 128, eps=1e-06, affine=True)\n            )\n            (1): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n            (3): NormLayer(\n              (norm): GroupNorm(32, 128, eps=1e-06, affine=True)\n            )\n            (4): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (5): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n          )\n        )\n      )\n      (1): Sequential(\n        (0): Upsample(scale_factor=2.0, mode='nearest')\n        (1): Conv2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n        (2): ResBlock(\n          (conv): Sequential(\n            (0): NormLayer(\n              (norm): GroupNorm(32, 64, eps=1e-06, affine=True)\n            )\n            (1): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), 
padding=(1, 1))\n            (3): NormLayer(\n              (norm): GroupNorm(32, 64, eps=1e-06, affine=True)\n            )\n            (4): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (5): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n          )\n        )\n        (3): ResBlock(\n          (conv): Sequential(\n            (0): NormLayer(\n              (norm): GroupNorm(32, 64, eps=1e-06, affine=True)\n            )\n            (1): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n            (3): NormLayer(\n              (norm): GroupNorm(32, 64, eps=1e-06, affine=True)\n            )\n            (4): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (5): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n          )\n        )\n      )\n    )\n    (warp): ModuleList(\n      (0): WarpBlock(\n        (offset): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n        (dcn): DCNv2Pack(\n          (conv_offset): Conv2d(128, 108, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n        )\n      )\n      (1): WarpBlock(\n        (offset): Conv2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n        (dcn): DCNv2Pack(\n          (conv_offset): Conv2d(64, 108, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n        )\n      )\n    )\n  )\n  (vq_decoder_group): ModuleList(\n    (0): DecoderBlock(\n      (block): Sequential(\n        (0): Upsample(scale_factor=2.0, mode='nearest')\n        (1): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n        (2): ResBlock(\n          (conv): Sequential(\n            (0): NormLayer(\n              (norm): GroupNorm(32, 128, eps=1e-06, affine=True)\n            )\n            (1): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (2): 
Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n            (3): NormLayer(\n              (norm): GroupNorm(32, 128, eps=1e-06, affine=True)\n            )\n            (4): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (5): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n          )\n        )\n        (3): ResBlock(\n          (conv): Sequential(\n            (0): NormLayer(\n              (norm): GroupNorm(32, 128, eps=1e-06, affine=True)\n            )\n            (1): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n            (3): NormLayer(\n              (norm): GroupNorm(32, 128, eps=1e-06, affine=True)\n            )\n            (4): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (5): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n          )\n        )\n      )\n    )\n    (1): DecoderBlock(\n      (block): Sequential(\n        (0): Upsample(scale_factor=2.0, mode='nearest')\n        (1): Conv2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n        (2): ResBlock(\n          (conv): Sequential(\n            (0): NormLayer(\n              (norm): GroupNorm(32, 64, eps=1e-06, affine=True)\n            )\n            (1): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n            (3): NormLayer(\n              (norm): GroupNorm(32, 64, eps=1e-06, affine=True)\n            )\n            (4): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (5): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n          )\n        )\n        (3): ResBlock(\n          (conv): Sequential(\n            (0): NormLayer(\n              (norm): GroupNorm(32, 64, eps=1e-06, 
affine=True)\n            )\n            (1): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n            (3): NormLayer(\n              (norm): GroupNorm(32, 64, eps=1e-06, affine=True)\n            )\n            (4): ActLayer(\n              (func): SiLU(inplace=True)\n            )\n            (5): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n          )\n        )\n      )\n    )\n  )\n  (out_conv): Conv2d(64, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n  (residual_conv): Conv2d(64, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n  (quantizer): VectorQuantizer(\n    (embedding): Embedding(1024, 512)\n  )\n  (before_quant): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1))\n  (after_quant): CombineQuantBlock(\n    (conv): Conv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n  )\n)"
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def load_pretrained(model, ckpt_path):\n",
    "    \"\"\"Move `model` to the GPU, load its 'params' weights from `ckpt_path`,\n",
    "    and switch it to eval mode. Returns the model for chaining.\"\"\"\n",
    "    model.cuda()\n",
    "    model.load_state_dict(torch.load(ckpt_path)[\"params\"])\n",
    "    return model.eval()\n",
    "\n",
    "# The three models followed identical cuda/load_state_dict/eval boilerplate;\n",
    "# one helper removes the copy-paste.\n",
    "discriminator = load_pretrained(UNetDiscriminatorSN(512), pretrained_discriminator)\n",
    "hqp = load_pretrained(RIDCPNew(LQ_stage=False), pretrained_hqp_path)\n",
    "ridcp = load_pretrained(RIDCPNew(LQ_stage=True), pretrained_net_path)\n",
    "ridcp  # last expression -> rich repr, same display as the old `ridcp.eval()`"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-01-19T04:58:13.966376200Z",
     "start_time": "2024-01-19T04:58:13.481605Z"
    }
   },
   "id": "8d0d179137a369cc"
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "outputs": [
    {
     "data": {
      "text/plain": "torch.Size([1, 3, 413, 550])"
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Read the sample image, convert it to a CHW float tensor in [0, 1],\n",
    "# move it to the GPU and prepend a batch dimension.\n",
    "image = Image.open('../datasets/rgb_500/0001.jpg').convert('RGB')\n",
    "inputs = ToTensor()(image).cuda().unsqueeze(0)\n",
    "inputs.shape"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-01-19T04:58:22.226854300Z",
     "start_time": "2024-01-19T04:58:22.188670800Z"
    }
   },
   "id": "fb9b2bd787f643e2"
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'deform_conv_ext' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[27], line 2\u001B[0m\n\u001B[0;32m      1\u001B[0m inputs \u001B[38;5;241m=\u001B[39m torch\u001B[38;5;241m.\u001B[39mrandn(\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m3\u001B[39m, \u001B[38;5;241m512\u001B[39m, \u001B[38;5;241m512\u001B[39m)\u001B[38;5;241m.\u001B[39mcuda()\n\u001B[1;32m----> 2\u001B[0m out_img, out_img_residual, codebook_loss, feat_to_quant, z_quant, indices \u001B[38;5;241m=\u001B[39m \u001B[43mridcp\u001B[49m\u001B[43m(\u001B[49m\u001B[43minputs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m      3\u001B[0m _, _, _, hqp_feat, _, _ \u001B[38;5;241m=\u001B[39m hqp(inputs)\n",
      "File \u001B[1;32m~\\Programs\\miniconda\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1518\u001B[0m, in \u001B[0;36mModule._wrapped_call_impl\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1516\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_compiled_call_impl(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)  \u001B[38;5;66;03m# type: ignore[misc]\u001B[39;00m\n\u001B[0;32m   1517\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m-> 1518\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_impl\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32m~\\Programs\\miniconda\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1527\u001B[0m, in \u001B[0;36mModule._call_impl\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1522\u001B[0m \u001B[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001B[39;00m\n\u001B[0;32m   1523\u001B[0m \u001B[38;5;66;03m# this function, and just call forward.\u001B[39;00m\n\u001B[0;32m   1524\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m (\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_pre_hooks\n\u001B[0;32m   1525\u001B[0m         \u001B[38;5;129;01mor\u001B[39;00m _global_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_backward_hooks\n\u001B[0;32m   1526\u001B[0m         \u001B[38;5;129;01mor\u001B[39;00m _global_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_forward_pre_hooks):\n\u001B[1;32m-> 1527\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mforward_call\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m   1529\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m   1530\u001B[0m     result \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n",
      "File \u001B[1;32mE:\\DeepLearningCopies\\2023\\RIDCP\\basicsr\\archs\\ridcp_new_arch.py:100\u001B[0m, in \u001B[0;36mRIDCPNew.forward\u001B[1;34m(self, inputs, gt_indices)\u001B[0m\n\u001B[0;32m     99\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mforward\u001B[39m(\u001B[38;5;28mself\u001B[39m, inputs, gt_indices\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mNone\u001B[39;00m):\n\u001B[1;32m--> 100\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mencode_and_decode\u001B[49m\u001B[43m(\u001B[49m\u001B[43minputs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mgt_indices\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32mE:\\DeepLearningCopies\\2023\\RIDCP\\basicsr\\archs\\ridcp_new_arch.py:122\u001B[0m, in \u001B[0;36mRIDCPNew.encode_and_decode\u001B[1;34m(self, inputs, gt_indices)\u001B[0m\n\u001B[0;32m    120\u001B[0m         residual_feature \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mridcp_decoder(enc_feats, code_decoder_output)\n\u001B[0;32m    121\u001B[0m     \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m--> 122\u001B[0m         residual_feature \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mridcp_decoder\u001B[49m\u001B[43m(\u001B[49m\u001B[43menc_feats\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mdetach\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcode_decoder_output\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    123\u001B[0m     out_img_residual \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mresidual_conv(residual_feature)\n\u001B[0;32m    124\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n",
      "File \u001B[1;32m~\\Programs\\miniconda\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1518\u001B[0m, in \u001B[0;36mModule._wrapped_call_impl\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1516\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_compiled_call_impl(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)  \u001B[38;5;66;03m# type: ignore[misc]\u001B[39;00m\n\u001B[0;32m   1517\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m-> 1518\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_impl\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32m~\\Programs\\miniconda\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1527\u001B[0m, in \u001B[0;36mModule._call_impl\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1522\u001B[0m \u001B[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001B[39;00m\n\u001B[0;32m   1523\u001B[0m \u001B[38;5;66;03m# this function, and just call forward.\u001B[39;00m\n\u001B[0;32m   1524\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m (\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_pre_hooks\n\u001B[0;32m   1525\u001B[0m         \u001B[38;5;129;01mor\u001B[39;00m _global_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_backward_hooks\n\u001B[0;32m   1526\u001B[0m         \u001B[38;5;129;01mor\u001B[39;00m _global_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_forward_pre_hooks):\n\u001B[1;32m-> 1527\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mforward_call\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m   1529\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m   1530\u001B[0m     result \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n",
      "File \u001B[1;32mE:\\DeepLearningCopies\\2023\\RIDCP\\basicsr\\archs\\ridcp\\decoder.py:129\u001B[0m, in \u001B[0;36mRIDCPDecoder.forward\u001B[1;34m(self, x, code_decoder_output)\u001B[0m\n\u001B[0;32m    127\u001B[0m x \u001B[38;5;241m=\u001B[39m m(x)\n\u001B[0;32m    128\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39muse_warp:\n\u001B[1;32m--> 129\u001B[0m     x_vq \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mwarp\u001B[49m\u001B[43m[\u001B[49m\u001B[43midx\u001B[49m\u001B[43m]\u001B[49m\u001B[43m(\u001B[49m\u001B[43mcode_decoder_output\u001B[49m\u001B[43m[\u001B[49m\u001B[43midx\u001B[49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mx\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    130\u001B[0m     x \u001B[38;5;241m=\u001B[39m x \u001B[38;5;241m+\u001B[39m x_vq \u001B[38;5;241m*\u001B[39m (x\u001B[38;5;241m.\u001B[39mmean() \u001B[38;5;241m/\u001B[39m x_vq\u001B[38;5;241m.\u001B[39mmean())\n\u001B[0;32m    131\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n",
      "File \u001B[1;32m~\\Programs\\miniconda\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1518\u001B[0m, in \u001B[0;36mModule._wrapped_call_impl\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1516\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_compiled_call_impl(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)  \u001B[38;5;66;03m# type: ignore[misc]\u001B[39;00m\n\u001B[0;32m   1517\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m-> 1518\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_impl\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32m~\\Programs\\miniconda\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1527\u001B[0m, in \u001B[0;36mModule._call_impl\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1522\u001B[0m \u001B[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001B[39;00m\n\u001B[0;32m   1523\u001B[0m \u001B[38;5;66;03m# this function, and just call forward.\u001B[39;00m\n\u001B[0;32m   1524\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m (\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_pre_hooks\n\u001B[0;32m   1525\u001B[0m         \u001B[38;5;129;01mor\u001B[39;00m _global_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_backward_hooks\n\u001B[0;32m   1526\u001B[0m         \u001B[38;5;129;01mor\u001B[39;00m _global_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_forward_pre_hooks):\n\u001B[1;32m-> 1527\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mforward_call\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m   1529\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m   1530\u001B[0m     result \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n",
      "File \u001B[1;32mE:\\DeepLearningCopies\\2023\\RIDCP\\basicsr\\archs\\module\\util_block.py:137\u001B[0m, in \u001B[0;36mWarpBlock.forward\u001B[1;34m(self, x_vq, x_residual)\u001B[0m\n\u001B[0;32m    135\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mforward\u001B[39m(\u001B[38;5;28mself\u001B[39m, x_vq, x_residual):\n\u001B[0;32m    136\u001B[0m     x_residual \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39moffset(torch\u001B[38;5;241m.\u001B[39mcat([x_vq, x_residual], dim\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m1\u001B[39m))\n\u001B[1;32m--> 137\u001B[0m     feat_after_warp \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mdcn\u001B[49m\u001B[43m(\u001B[49m\u001B[43mx_vq\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mx_residual\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    139\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m feat_after_warp\n",
      "File \u001B[1;32m~\\Programs\\miniconda\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1518\u001B[0m, in \u001B[0;36mModule._wrapped_call_impl\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1516\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_compiled_call_impl(\u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)  \u001B[38;5;66;03m# type: ignore[misc]\u001B[39;00m\n\u001B[0;32m   1517\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m-> 1518\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_impl\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32m~\\Programs\\miniconda\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1527\u001B[0m, in \u001B[0;36mModule._call_impl\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1522\u001B[0m \u001B[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001B[39;00m\n\u001B[0;32m   1523\u001B[0m \u001B[38;5;66;03m# this function, and just call forward.\u001B[39;00m\n\u001B[0;32m   1524\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m (\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_pre_hooks\n\u001B[0;32m   1525\u001B[0m         \u001B[38;5;129;01mor\u001B[39;00m _global_backward_pre_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_backward_hooks\n\u001B[0;32m   1526\u001B[0m         \u001B[38;5;129;01mor\u001B[39;00m _global_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m _global_forward_pre_hooks):\n\u001B[1;32m-> 1527\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mforward_call\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m   1529\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m   1530\u001B[0m     result \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n",
      "File \u001B[1;32mE:\\DeepLearningCopies\\2023\\RIDCP\\basicsr\\archs\\module\\util_block.py:125\u001B[0m, in \u001B[0;36mDCNv2Pack.forward\u001B[1;34m(self, x, feat)\u001B[0m\n\u001B[0;32m    122\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m offset_absmean \u001B[38;5;241m>\u001B[39m \u001B[38;5;241m50\u001B[39m:\n\u001B[0;32m    123\u001B[0m     \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124mf\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mOffset abs mean is \u001B[39m\u001B[38;5;132;01m{\u001B[39;00moffset_absmean\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m, larger than 50.\u001B[39m\u001B[38;5;124m'\u001B[39m)\n\u001B[1;32m--> 125\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mmodulated_deform_conv\u001B[49m\u001B[43m(\u001B[49m\u001B[43mx\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43moffset\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mmask\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mweight\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mbias\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mstride\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mpadding\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    126\u001B[0m \u001B[43m                             \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mdilation\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mgroups\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mdeformable_groups\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32m~\\Programs\\miniconda\\Lib\\site-packages\\torch\\autograd\\function.py:539\u001B[0m, in \u001B[0;36mFunction.apply\u001B[1;34m(cls, *args, **kwargs)\u001B[0m\n\u001B[0;32m    536\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m torch\u001B[38;5;241m.\u001B[39m_C\u001B[38;5;241m.\u001B[39m_are_functorch_transforms_active():\n\u001B[0;32m    537\u001B[0m     \u001B[38;5;66;03m# See NOTE: [functorch vjp and autograd interaction]\u001B[39;00m\n\u001B[0;32m    538\u001B[0m     args \u001B[38;5;241m=\u001B[39m _functorch\u001B[38;5;241m.\u001B[39mutils\u001B[38;5;241m.\u001B[39munwrap_dead_wrappers(args)\n\u001B[1;32m--> 539\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43msuper\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mapply\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m  \u001B[38;5;66;03m# type: ignore[misc]\u001B[39;00m\n\u001B[0;32m    541\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mcls\u001B[39m\u001B[38;5;241m.\u001B[39msetup_context \u001B[38;5;241m==\u001B[39m _SingleLevelFunction\u001B[38;5;241m.\u001B[39msetup_context:\n\u001B[0;32m    542\u001B[0m     \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mRuntimeError\u001B[39;00m(\n\u001B[0;32m    543\u001B[0m         \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mIn order to use an autograd.Function with functorch transforms \u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    544\u001B[0m         \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m(vmap, grad, jvp, jacrev, ...), it must override the setup_context \u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    545\u001B[0m         \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mstaticmethod. 
For more details, please see \u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    546\u001B[0m         \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mhttps://pytorch.org/docs/master/notes/extending.func.html\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    547\u001B[0m     )\n",
      "File \u001B[1;32mE:\\DeepLearningCopies\\2023\\RIDCP\\basicsr\\archs\\dcn\\deform_conv.py:151\u001B[0m, in \u001B[0;36mModulatedDeformConvFunction.forward\u001B[1;34m(ctx, input, offset, mask, weight, bias, stride, padding, dilation, groups, deformable_groups)\u001B[0m\n\u001B[0;32m    149\u001B[0m output \u001B[38;5;241m=\u001B[39m \u001B[38;5;28minput\u001B[39m\u001B[38;5;241m.\u001B[39mnew_empty(ModulatedDeformConvFunction\u001B[38;5;241m.\u001B[39m_infer_shape(ctx, \u001B[38;5;28minput\u001B[39m, weight))\n\u001B[0;32m    150\u001B[0m ctx\u001B[38;5;241m.\u001B[39m_bufs \u001B[38;5;241m=\u001B[39m [\u001B[38;5;28minput\u001B[39m\u001B[38;5;241m.\u001B[39mnew_empty(\u001B[38;5;241m0\u001B[39m), \u001B[38;5;28minput\u001B[39m\u001B[38;5;241m.\u001B[39mnew_empty(\u001B[38;5;241m0\u001B[39m)]\n\u001B[1;32m--> 151\u001B[0m \u001B[43mdeform_conv_ext\u001B[49m\u001B[38;5;241m.\u001B[39mmodulated_deform_conv_forward(\u001B[38;5;28minput\u001B[39m, weight, bias, ctx\u001B[38;5;241m.\u001B[39m_bufs[\u001B[38;5;241m0\u001B[39m], offset, mask, output,\n\u001B[0;32m    152\u001B[0m                                               ctx\u001B[38;5;241m.\u001B[39m_bufs[\u001B[38;5;241m1\u001B[39m], weight\u001B[38;5;241m.\u001B[39mshape[\u001B[38;5;241m2\u001B[39m], weight\u001B[38;5;241m.\u001B[39mshape[\u001B[38;5;241m3\u001B[39m], ctx\u001B[38;5;241m.\u001B[39mstride,\n\u001B[0;32m    153\u001B[0m                                               ctx\u001B[38;5;241m.\u001B[39mstride, ctx\u001B[38;5;241m.\u001B[39mpadding, ctx\u001B[38;5;241m.\u001B[39mpadding, ctx\u001B[38;5;241m.\u001B[39mdilation, ctx\u001B[38;5;241m.\u001B[39mdilation,\n\u001B[0;32m    154\u001B[0m                                               ctx\u001B[38;5;241m.\u001B[39mgroups, ctx\u001B[38;5;241m.\u001B[39mdeformable_groups, ctx\u001B[38;5;241m.\u001B[39mwith_bias)\n\u001B[0;32m    155\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m output\n",
      "\u001B[1;31mNameError\u001B[0m: name 'deform_conv_ext' is not defined"
     ]
    }
   ],
   "source": [
    "inputs = torch.randn(1, 3, 512, 512).cuda()\n",
    "out_img, out_img_residual, codebook_loss, feat_to_quant, z_quant, indices = ridcp(inputs)\n",
    "_, _, _, hqp_feat, _, _ = hqp(inputs)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-01-19T04:58:33.891833700Z",
     "start_time": "2024-01-19T04:58:25.718550400Z"
    }
   },
   "id": "ff4d42f7c54bbc9e"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "out = discriminator(feat_to_quant)\n",
    "hqp_out = discriminator(hqp_feat)\n",
    "\n",
    "print(out.mean().item())\n",
    "print(hqp_out.mean().item())\n",
    "\n",
    "gan_loss = GANLoss('hinge')\n",
    "\n",
    "print(gan_loss(out, False, True))\n",
    "print(gan_loss(hqp_out, True, True))"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "f5a153f3a91b7a82"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
