{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import sys\n",
    "import h5py\n",
    "from argparse import ArgumentParser\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/home/AR32500/AR32500/MyPapers/box-prompt-learning-VFM/src\n"
     ]
    }
   ],
   "source": [
    "# Move two directory levels up from the notebook's folder; the recorded\n",
    "# output below shows this lands in the project's src/ directory, which the\n",
    "# package imports in the next cell rely on\n",
    "os.chdir(os.path.dirname(os.path.dirname(os.getcwd())))\n",
    "print(os.getcwd())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from Data.datamodule import SAMDataModule\n",
    "from Models.SAM_WithPromptGenerator import SAMPromptLearning_Ours\n",
    "from Utils.load_utils import get_dict_from_config, update_config_from_args\n",
    "from Utils.utils import find_matching_key"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def parse_args():\n",
    "    \"\"\"Build and parse the command-line arguments for this experiment.\n",
    "\n",
    "    Returns an argparse.Namespace holding data/output paths, config-file\n",
    "    names (relative to src/Config) and training hyper-parameter overrides\n",
    "    used further down the notebook.\n",
    "    \"\"\"\n",
    "    parser = ArgumentParser()\n",
    "    parser.add_argument('--type', type=str, help='train or test',\n",
    "                        default='train')\n",
    "\n",
    "    # These are the paths to the data and output folder\n",
    "    parser.add_argument('--data_dir', default='/home/AR32500/net/data', type=str, help='Directory for data')\n",
    "    parser.add_argument('--output_dir', default='/home/AR32500/AR32500/output_PromptLearningForSAM', type=str, help='Directory for output run')\n",
    "\n",
    "    # These are config files located in src/Config\n",
    "    parser.add_argument('--data_config', type=str,\n",
    "                        default='data_config/ACDC_256.yaml'\n",
    "                        #default='data_config/CAMUS_512.yaml'\n",
    "                        #default='data_config/HC_640.yaml'\n",
    "                        #default='data_config/MSDSpleen_512.yaml'\n",
    "                        #default='data_config/MSDLiver_256.yaml'\n",
    "                        )\n",
    "    parser.add_argument('--model_config', type=str,\n",
    "                        default='model_config/ours_samh_config.yaml'\n",
    "                        )\n",
    "    parser.add_argument('--module_config', type=str, default='model_config/module_hardnet_config.yaml')\n",
    "    parser.add_argument('--train_config', type=str, default='train_config/train_config_200_100_00001.yaml')\n",
    "    parser.add_argument('--logger_config', type=str, default='logger_config.yaml')\n",
    "    parser.add_argument('--prompt_config', type=str,\n",
    "                        default='prompt_config/box_tight.yaml',\n",
    "                        )\n",
    "    parser.add_argument('--loss_config', type=str, nargs='+',\n",
    "                        help='type of loss to apply (does not matter here, just for initializing the model)',\n",
    "                        default=['loss_config/WBCE_Dice/wbcedice_gtpromptedpred.yaml'])\n",
    "    parser.add_argument('--seed', default=0, type=int)\n",
    "    # FIX: add type=int so '--num_gpu 2' from the command line is an int, not a str\n",
    "    parser.add_argument('--num_gpu', default=1, type=int, help='number of GPU devices to use')\n",
    "    parser.add_argument('--gpu_idx', default=[1], type=int, nargs='+', help='otherwise, gpu index, if we want to use a specific gpu')\n",
    "\n",
    "    parser.add_argument('--logger__project_name', type=str, help='name of project in comet',\n",
    "                        default='')\n",
    "\n",
    "    # Training hyper-parameters that we should change according to the dataset\n",
    "    # Arguments of data input and output\n",
    "    # NOTE(review): with action='store_true' and default=True this flag can\n",
    "    # never become False from the command line -- confirm this is intended\n",
    "    parser.add_argument('--data__compute_sam_embeddings', help='whether to use precomputed embeddings',\n",
    "                        action=\"store_true\", default=True)\n",
    "    # FIX: default was (1), which is just the int 1 (parentheses alone do not\n",
    "    # make a tuple); nargs='+' arguments parse to lists, so default must be [1]\n",
    "    parser.add_argument('--data__class_to_segment', type=int, nargs='+', help='class values to segment',\n",
    "                        default=[1])\n",
    "\n",
    "    parser.add_argument('--train__train_indices', type=int, nargs='+', help='indices of training data for Segmentation task',\n",
    "                        default=[])\n",
    "    # FIX: add type=int and nargs='+' for consistency with --train__train_indices\n",
    "    parser.add_argument('--train__val_indices', type=int, nargs='+', help='indices of val data for Segmentation task',\n",
    "                        default=[])\n",
    "    parser.add_argument('--train__clip_gradient_norm_value', type=float, help='value to clip gradient norm (default: 0.0 = No clipping)',\n",
    "                        default=1.0)\n",
    "    args = parser.parse_args()\n",
    "    return args"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# argparse reads sys.argv, which inside Jupyter holds the kernel's own\n",
    "# arguments; temporarily swap in the arguments we want to parse.\n",
    "original_argv = sys.argv\n",
    "sys.argv = ['ipykernel_launcher.py', '--type', 'train']\n",
    "try:\n",
    "    args = parse_args()\n",
    "finally:\n",
    "    # FIX: restore sys.argv even if parsing fails (argparse raises SystemExit\n",
    "    # on bad arguments), so a failed run does not leave the kernel's argv clobbered\n",
    "    sys.argv = original_argv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "gpu_devices [1]\n"
     ]
    }
   ],
   "source": [
    "# Select the GPU devices: an explicit index list takes priority over a count\n",
    "gpu_devices = args.gpu_idx if args.gpu_idx is not None else args.num_gpu\n",
    "print('gpu_devices {}'.format(gpu_devices))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load each configuration dict from its YAML file name\n",
    "train_config = get_dict_from_config(args.train_config)\n",
    "data_config = get_dict_from_config(args.data_config)\n",
    "model_config = get_dict_from_config(args.model_config)\n",
    "module_config = get_dict_from_config(args.module_config)\n",
    "logger_config = get_dict_from_config(args.logger_config)\n",
    "\n",
    "# Gather the loss configs under train_config['loss']; when two losses share\n",
    "# the same type, later ones get a numeric suffix so nothing is overwritten.\n",
    "train_config[\"loss\"] = {}\n",
    "for loss_file in args.loss_config:\n",
    "    loss_cfg = get_dict_from_config(loss_file)\n",
    "    base_name = loss_cfg[\"type\"]\n",
    "    unique_name, suffix = base_name, 1\n",
    "    while unique_name in train_config[\"loss\"]:\n",
    "        unique_name = f\"{base_name}{suffix}\"\n",
    "        suffix += 1\n",
    "    train_config[\"loss\"][unique_name] = loss_cfg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train_config {'train_indices': [], 'num_workers': 16, 'num_epochs': 200, 'batch_size': 4, 'optimizer': {'type': 'Adam', 'lr': 0.0001, 'weight_decay': 0.0001}, 'sched': {'update_interval': 'epoch', 'update_freq': 1, 'MultiStepLR': {'milestones': [100], 'gamma': 0.1}}, 'loss': {'WBCE_Dice': {'type': 'WBCE_Dice', 'weight': 1, 'start_epoch': 0, 'kwargs': {'target_str': 'gt_prompted_pred_masks', 'idc': [1], 'alpha_CE': 0.5, 'reduction': 'mean'}, 'other_kwargs': {'bounds_name': None, 'bounds_params': None, 'fn': None}}}, 'val_indices': [], 'clip_gradient_norm_value': 1.0}\n"
     ]
    }
   ],
   "source": [
    "# Apply the command-line overrides (prefix__key arguments) to each config\n",
    "logger_config = update_config_from_args(logger_config, args, 'logger')\n",
    "train_config = update_config_from_args(train_config, args, 'train')\n",
    "data_config = update_config_from_args(data_config, args, 'data')\n",
    "print('train_config {}'.format(train_config))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Interactive-session overrides: single-sample batches on a fixed GPU\n",
    "device = 'cuda:1'\n",
    "train_config['batch_size'] = 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# At inference time we can attach the prompt configuration, if one was given\n",
    "if args.prompt_config != '':\n",
    "    prompt_config = get_dict_from_config(args.prompt_config)\n",
    "    # dict(...) builds a new dict just like the {**a, **b} merge did\n",
    "    train_config = dict(train_config, prompt=prompt_config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'model_class': 'SAMPromptLearning_Ours',\n",
       " 'model_name': 'vit_h',\n",
       " 'sam_checkpoint': '/home/AR32500/net/models/sam/sam_vit_h_4b8939.pth',\n",
       " 'image_size': 1024,\n",
       " 'in_channels': 3,\n",
       " 'out_channels': 2}"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Display the resolved model configuration (bare last expression renders it)\n",
    "model_config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      ">> 0th list of losses: WBCE_Dice - {'type': 'WBCE_Dice', 'weight': 1, 'start_epoch': 0, 'kwargs': {'target_str': 'gt_prompted_pred_masks', 'idc': [1], 'alpha_CE': 0.5, 'reduction': 'mean'}, 'other_kwargs': {'bounds_name': None, 'bounds_params': None, 'fn': None}}\n",
      "sam_args: Namespace(model_class='SAMPromptLearning_Ours', model_name='vit_h', sam_checkpoint='/home/AR32500/net/models/sam/sam_vit_h_4b8939.pth', image_size=1024, in_channels=3, out_channels=2)\n",
      "ImageNet pretrained weights for HarDNet85 is loaded\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "SAMPromptLearning_Ours(\n",
       "  (activation_fct): Sigmoid()\n",
       "  (sam): Sam(\n",
       "    (image_encoder): ImageEncoderViT(\n",
       "      (patch_embed): PatchEmbed(\n",
       "        (proj): Conv2d(3, 1280, kernel_size=(16, 16), stride=(16, 16))\n",
       "      )\n",
       "      (blocks): ModuleList(\n",
       "        (0-31): 32 x Block(\n",
       "          (norm1): LayerNorm((1280,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attention(\n",
       "            (qkv): Linear(in_features=1280, out_features=3840, bias=True)\n",
       "            (proj): Linear(in_features=1280, out_features=1280, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1280,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLPBlock(\n",
       "            (lin1): Linear(in_features=1280, out_features=5120, bias=True)\n",
       "            (lin2): Linear(in_features=5120, out_features=1280, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (neck): Sequential(\n",
       "        (0): Conv2d(1280, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (1): LayerNorm2d()\n",
       "        (2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (3): LayerNorm2d()\n",
       "      )\n",
       "    )\n",
       "    (prompt_encoder): PromptEncoder(\n",
       "      (pe_layer): PositionEmbeddingRandom()\n",
       "      (point_embeddings): ModuleList(\n",
       "        (0-3): 4 x Embedding(1, 256)\n",
       "      )\n",
       "      (not_a_point_embed): Embedding(1, 256)\n",
       "      (mask_downscaling): Sequential(\n",
       "        (0): Conv2d(1, 4, kernel_size=(2, 2), stride=(2, 2))\n",
       "        (1): LayerNorm2d()\n",
       "        (2): GELU(approximate='none')\n",
       "        (3): Conv2d(4, 16, kernel_size=(2, 2), stride=(2, 2))\n",
       "        (4): LayerNorm2d()\n",
       "        (5): GELU(approximate='none')\n",
       "        (6): Conv2d(16, 256, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (no_mask_embed): Embedding(1, 256)\n",
       "    )\n",
       "    (mask_decoder): MaskDecoder(\n",
       "      (transformer): TwoWayTransformer(\n",
       "        (layers): ModuleList(\n",
       "          (0-1): 2 x TwoWayAttentionBlock(\n",
       "            (self_attn): Attention(\n",
       "              (q_proj): Linear(in_features=256, out_features=256, bias=True)\n",
       "              (k_proj): Linear(in_features=256, out_features=256, bias=True)\n",
       "              (v_proj): Linear(in_features=256, out_features=256, bias=True)\n",
       "              (out_proj): Linear(in_features=256, out_features=256, bias=True)\n",
       "            )\n",
       "            (norm1): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n",
       "            (cross_attn_token_to_image): Attention(\n",
       "              (q_proj): Linear(in_features=256, out_features=128, bias=True)\n",
       "              (k_proj): Linear(in_features=256, out_features=128, bias=True)\n",
       "              (v_proj): Linear(in_features=256, out_features=128, bias=True)\n",
       "              (out_proj): Linear(in_features=128, out_features=256, bias=True)\n",
       "            )\n",
       "            (norm2): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n",
       "            (mlp): MLPBlock(\n",
       "              (lin1): Linear(in_features=256, out_features=2048, bias=True)\n",
       "              (lin2): Linear(in_features=2048, out_features=256, bias=True)\n",
       "              (act): ReLU()\n",
       "            )\n",
       "            (norm3): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n",
       "            (norm4): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n",
       "            (cross_attn_image_to_token): Attention(\n",
       "              (q_proj): Linear(in_features=256, out_features=128, bias=True)\n",
       "              (k_proj): Linear(in_features=256, out_features=128, bias=True)\n",
       "              (v_proj): Linear(in_features=256, out_features=128, bias=True)\n",
       "              (out_proj): Linear(in_features=128, out_features=256, bias=True)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (final_attn_token_to_image): Attention(\n",
       "          (q_proj): Linear(in_features=256, out_features=128, bias=True)\n",
       "          (k_proj): Linear(in_features=256, out_features=128, bias=True)\n",
       "          (v_proj): Linear(in_features=256, out_features=128, bias=True)\n",
       "          (out_proj): Linear(in_features=128, out_features=256, bias=True)\n",
       "        )\n",
       "        (norm_final_attn): LayerNorm((256,), eps=1e-05, elementwise_affine=True)\n",
       "      )\n",
       "      (iou_token): Embedding(1, 256)\n",
       "      (mask_tokens): Embedding(4, 256)\n",
       "      (output_upscaling): Sequential(\n",
       "        (0): ConvTranspose2d(256, 64, kernel_size=(2, 2), stride=(2, 2))\n",
       "        (1): LayerNorm2d()\n",
       "        (2): GELU(approximate='none')\n",
       "        (3): ConvTranspose2d(64, 32, kernel_size=(2, 2), stride=(2, 2))\n",
       "        (4): GELU(approximate='none')\n",
       "      )\n",
       "      (output_hypernetworks_mlps): ModuleList(\n",
       "        (0-3): 4 x MLP(\n",
       "          (layers): ModuleList(\n",
       "            (0-1): 2 x Linear(in_features=256, out_features=256, bias=True)\n",
       "            (2): Linear(in_features=256, out_features=32, bias=True)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (iou_prediction_head): MLP(\n",
       "        (layers): ModuleList(\n",
       "          (0-1): 2 x Linear(in_features=256, out_features=256, bias=True)\n",
       "          (2): Linear(in_features=256, out_features=4, bias=True)\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (module): ModelEmb(\n",
       "    (backbone): HarDNet(\n",
       "      (base): ModuleList(\n",
       "        (0): ConvLayer(\n",
       "          (conv): Conv2d(3, 48, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "          (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU6(inplace=True)\n",
       "        )\n",
       "        (1): ConvLayer(\n",
       "          (conv): Conv2d(48, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "          (norm): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU6(inplace=True)\n",
       "        )\n",
       "        (2): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
       "        (3): HarDBlock(\n",
       "          (layers): ModuleList(\n",
       "            (0): ConvLayer(\n",
       "              (conv): Conv2d(96, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (1): ConvLayer(\n",
       "              (conv): Conv2d(120, 40, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (2): ConvLayer(\n",
       "              (conv): Conv2d(40, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (3): ConvLayer(\n",
       "              (conv): Conv2d(160, 70, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(70, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (4): ConvLayer(\n",
       "              (conv): Conv2d(70, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (5): ConvLayer(\n",
       "              (conv): Conv2d(94, 40, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (6): ConvLayer(\n",
       "              (conv): Conv2d(40, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (7): ConvLayer(\n",
       "              (conv): Conv2d(230, 118, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(118, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (4): ConvLayer(\n",
       "          (conv): Conv2d(214, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "          (norm): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU6(inplace=True)\n",
       "        )\n",
       "        (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "        (6): HarDBlock(\n",
       "          (layers): ModuleList(\n",
       "            (0): ConvLayer(\n",
       "              (conv): Conv2d(192, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (1): ConvLayer(\n",
       "              (conv): Conv2d(216, 40, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (2): ConvLayer(\n",
       "              (conv): Conv2d(40, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (3): ConvLayer(\n",
       "              (conv): Conv2d(256, 70, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(70, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (4): ConvLayer(\n",
       "              (conv): Conv2d(70, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (5): ConvLayer(\n",
       "              (conv): Conv2d(94, 40, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (6): ConvLayer(\n",
       "              (conv): Conv2d(40, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (7): ConvLayer(\n",
       "              (conv): Conv2d(326, 118, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(118, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (8): ConvLayer(\n",
       "              (conv): Conv2d(118, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (9): ConvLayer(\n",
       "              (conv): Conv2d(142, 40, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (10): ConvLayer(\n",
       "              (conv): Conv2d(40, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (11): ConvLayer(\n",
       "              (conv): Conv2d(182, 70, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(70, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (12): ConvLayer(\n",
       "              (conv): Conv2d(70, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (13): ConvLayer(\n",
       "              (conv): Conv2d(94, 40, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (14): ConvLayer(\n",
       "              (conv): Conv2d(40, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (15): ConvLayer(\n",
       "              (conv): Conv2d(444, 200, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(200, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (7): ConvLayer(\n",
       "          (conv): Conv2d(392, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "          (norm): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU6(inplace=True)\n",
       "        )\n",
       "        (8): HarDBlock(\n",
       "          (layers): ModuleList(\n",
       "            (0): ConvLayer(\n",
       "              (conv): Conv2d(256, 28, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(28, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (1): ConvLayer(\n",
       "              (conv): Conv2d(284, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (2): ConvLayer(\n",
       "              (conv): Conv2d(48, 28, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(28, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (3): ConvLayer(\n",
       "              (conv): Conv2d(332, 80, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(80, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (4): ConvLayer(\n",
       "              (conv): Conv2d(80, 28, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(28, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (5): ConvLayer(\n",
       "              (conv): Conv2d(108, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (6): ConvLayer(\n",
       "              (conv): Conv2d(48, 28, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(28, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (7): ConvLayer(\n",
       "              (conv): Conv2d(412, 138, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(138, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (8): ConvLayer(\n",
       "              (conv): Conv2d(138, 28, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(28, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (9): ConvLayer(\n",
       "              (conv): Conv2d(166, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (10): ConvLayer(\n",
       "              (conv): Conv2d(48, 28, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(28, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (11): ConvLayer(\n",
       "              (conv): Conv2d(214, 80, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(80, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (12): ConvLayer(\n",
       "              (conv): Conv2d(80, 28, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(28, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (13): ConvLayer(\n",
       "              (conv): Conv2d(108, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (14): ConvLayer(\n",
       "              (conv): Conv2d(48, 28, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(28, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (15): ConvLayer(\n",
       "              (conv): Conv2d(550, 234, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(234, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (9): ConvLayer(\n",
       "          (conv): Conv2d(458, 320, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "          (norm): BatchNorm2d(320, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU6(inplace=True)\n",
       "        )\n",
       "        (10): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "        (11): HarDBlock(\n",
       "          (layers): ModuleList(\n",
       "            (0): ConvLayer(\n",
       "              (conv): Conv2d(320, 36, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(36, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (1): ConvLayer(\n",
       "              (conv): Conv2d(356, 62, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(62, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (2): ConvLayer(\n",
       "              (conv): Conv2d(62, 36, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(36, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (3): ConvLayer(\n",
       "              (conv): Conv2d(418, 104, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(104, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (4): ConvLayer(\n",
       "              (conv): Conv2d(104, 36, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(36, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (5): ConvLayer(\n",
       "              (conv): Conv2d(140, 62, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(62, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (6): ConvLayer(\n",
       "              (conv): Conv2d(62, 36, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(36, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (7): ConvLayer(\n",
       "              (conv): Conv2d(522, 176, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(176, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (8): ConvLayer(\n",
       "              (conv): Conv2d(176, 36, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(36, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (9): ConvLayer(\n",
       "              (conv): Conv2d(212, 62, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(62, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (10): ConvLayer(\n",
       "              (conv): Conv2d(62, 36, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(36, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (11): ConvLayer(\n",
       "              (conv): Conv2d(274, 104, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(104, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (12): ConvLayer(\n",
       "              (conv): Conv2d(104, 36, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(36, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (13): ConvLayer(\n",
       "              (conv): Conv2d(140, 62, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(62, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (14): ConvLayer(\n",
       "              (conv): Conv2d(62, 36, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(36, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (15): ConvLayer(\n",
       "              (conv): Conv2d(698, 300, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (12): ConvLayer(\n",
       "          (conv): Conv2d(588, 480, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "          (norm): BatchNorm2d(480, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU6(inplace=True)\n",
       "        )\n",
       "        (13): HarDBlock(\n",
       "          (layers): ModuleList(\n",
       "            (0): ConvLayer(\n",
       "              (conv): Conv2d(480, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (1): ConvLayer(\n",
       "              (conv): Conv2d(528, 82, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(82, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (2): ConvLayer(\n",
       "              (conv): Conv2d(82, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (3): ConvLayer(\n",
       "              (conv): Conv2d(610, 138, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(138, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (4): ConvLayer(\n",
       "              (conv): Conv2d(138, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (5): ConvLayer(\n",
       "              (conv): Conv2d(186, 82, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(82, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (6): ConvLayer(\n",
       "              (conv): Conv2d(82, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (7): ConvLayer(\n",
       "              (conv): Conv2d(748, 236, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(236, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (8): ConvLayer(\n",
       "              (conv): Conv2d(236, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (9): ConvLayer(\n",
       "              (conv): Conv2d(284, 82, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(82, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (10): ConvLayer(\n",
       "              (conv): Conv2d(82, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (11): ConvLayer(\n",
       "              (conv): Conv2d(366, 138, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(138, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (12): ConvLayer(\n",
       "              (conv): Conv2d(138, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (13): ConvLayer(\n",
       "              (conv): Conv2d(186, 82, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(82, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (14): ConvLayer(\n",
       "              (conv): Conv2d(82, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (15): ConvLayer(\n",
       "              (conv): Conv2d(984, 400, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(400, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (14): ConvLayer(\n",
       "          (conv): Conv2d(784, 720, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "          (norm): BatchNorm2d(720, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU6(inplace=True)\n",
       "        )\n",
       "        (15): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "        (16): HarDBlock(\n",
       "          (layers): ModuleList(\n",
       "            (0): ConvLayer(\n",
       "              (conv): Conv2d(720, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (1): ConvLayer(\n",
       "              (conv): Conv2d(976, 436, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(436, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (2): ConvLayer(\n",
       "              (conv): Conv2d(436, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "            (3): ConvLayer(\n",
       "              (conv): Conv2d(1412, 740, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "              (norm): BatchNorm2d(740, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "              (relu): ReLU6(inplace=True)\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (17): Dropout(p=0.1, inplace=False)\n",
       "        (18): ConvLayer(\n",
       "          (conv): Conv2d(1252, 1280, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "          (norm): BatchNorm2d(1280, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU6(inplace=True)\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (decoder): SmallDecoder(\n",
       "      (up1): UpBlockSkip(\n",
       "        (Upsample): Upsample(scale_factor=2.0, mode='bilinear')\n",
       "        (conv1): Conv2d(1040, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "        (conv1_drop): Dropout2d(p=0, inplace=False)\n",
       "        (conv2): Conv2d(320, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "        (conv2_drop): Dropout2d(p=0, inplace=False)\n",
       "        (BN): BatchNorm2d(320, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (up2): UpBlockSkip(\n",
       "        (Upsample): Upsample(scale_factor=2.0, mode='bilinear')\n",
       "        (conv1): Conv2d(512, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "        (conv1_drop): Dropout2d(p=0, inplace=False)\n",
       "        (conv2): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "        (conv2_drop): Dropout2d(p=0, inplace=False)\n",
       "        (BN): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (final): CNNBlock(\n",
       "        (conv1): Conv2d(192, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "        (conv1_drop): Dropout2d(p=0, inplace=False)\n",
       "        (conv2_drop): Dropout2d(p=0, inplace=False)\n",
       "        (BN1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (BN2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Instantiate the model class named in model_config['model_class'].\n",
    "# globals().get() returns None for an unknown name, which would only fail\n",
    "# later with a cryptic \"'NoneType' object is not callable\" -- fail fast\n",
    "# with an explicit error message instead.\n",
    "model_cls = globals().get(model_config['model_class'])\n",
    "if model_cls is None:\n",
    "    raise ValueError(f\"Unknown model class: {model_config['model_class']}\")\n",
    "full_model = model_cls(num_devices=1,\n",
    "                       model_config=model_config,\n",
    "                       module_config=module_config,\n",
    "                       train_config=train_config,\n",
    "                       seed=args.seed,\n",
    "                       )\n",
    "full_model.to(device)\n",
    "full_model.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "dataset_kwargs: {'prompt': {'prompt_type': ['box'], 'args': {'perturbation_bound': [0, 1]}}, 'data_shape': [256, 256], 'class_to_segment': 1, 'box_prior_args': None, 'bounds_args_list': [], 'compute_sam_embeddings': True, 'model_image_size': 1024, 'sam_checkpoint': '/home/AR32500/net/models/sam/sam_vit_h_4b8939.pth'}\n",
      "Number of training images 42\n",
      "Number of validation images 0\n",
      "Number of test images 0\n"
     ]
    }
   ],
   "source": [
    "# We create the datamodule  \n",
    "# Dataset kwargs assembled from the data/model/train configs:\n",
    "#  - 'box_prior_args': kwargs of the TightBoxPrior loss entry, if present\n",
    "#  - 'bounds_args_list': one entry per loss whose other_kwargs declares a\n",
    "#    bounds_name; 'C' is set to the model's number of output channels\n",
    "kwargs = {'prompt': train_config['prompt'] if 'prompt' in train_config else None,\n",
    "            'data_shape': data_config['data_shape'],\n",
    "            'class_to_segment': data_config['class_to_segment'],\n",
    "            'box_prior_args': find_matching_key(train_config[\"loss\"], \"TightBoxPrior\", default={}).get('kwargs', None),\n",
    "            'bounds_args_list': [{**_config[\"other_kwargs\"], 'C': model_config[\"out_channels\"]} for _, _config in train_config[\"loss\"].items() if (_config[\"other_kwargs\"] is not None and \"bounds_name\" in _config[\"other_kwargs\"] and _config[\"other_kwargs\"][\"bounds_name\"] is not None)],\n",
    "            'compute_sam_embeddings': data_config['compute_sam_embeddings'],\n",
    "            'model_image_size': model_config.get('image_size', None),\n",
    "            'sam_checkpoint': model_config.get('sam_checkpoint', None)\n",
    "        }\n",
    "\n",
    "# Same batch size for train and val; the loops below assume batch_size == 1\n",
    "data_module = SAMDataModule(data_dir=args.data_dir,\n",
    "                            dataset_name=data_config[\"dataset_name\"],\n",
    "                            batch_size=train_config[\"batch_size\"],\n",
    "                            val_batch_size=train_config[\"batch_size\"],\n",
    "                            num_workers=train_config[\"num_workers\"],\n",
    "                            train_indices=train_config[\"train_indices\"],\n",
    "                            dataset_kwargs=kwargs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<monai.data.dataloader.DataLoader at 0x7f734c164a90>"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build the train/val datasets; the last expression displays the val dataloader.\n",
    "data_module.setup()\n",
    "data_module.train_dataloader()\n",
    "data_module.val_dataloader()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<monai.data.dataloader.DataLoader at 0x7f734c167460>"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build the test dataset; the last expression displays the test dataloader.\n",
    "data_module.setup('test')\n",
    "data_module.test_dataloader()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# We save the embeddings to re-use later\n",
    "# Base folder: <data_dir>/<dataset_name>; per-checkpoint/per-split subfolders\n",
    "# are appended in the loops below.\n",
    "base_savefolder = os.path.join(args.data_dir, \n",
    "                               data_config[\"dataset_name\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'/home/AR32500/net/data/ACDC/preprocessed_sam/image_embeddings/sam_vit_h_4b8939-pth'"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Preview the embeddings folder; '.' in the checkpoint filename is replaced by\n",
    "# '-' so the directory name carries no extension-like suffix.\n",
    "os.path.join(base_savefolder, 'image_embeddings', os.path.basename(model_config['sam_checkpoint']).replace('.', '-'))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Saving SAM embeddings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Request deterministic ops where available; warn_only=True downgrades\n",
    "# errors for ops without a deterministic implementation to warnings.\n",
    "torch.use_deterministic_algorithms(True, warn_only=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 patient005_frame01_slice7\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/AR32500/AR32500/miniconda3/envs/py310/lib/python3.10/site-packages/torch/_tensor.py:1668: UserWarning: Deterministic behavior was enabled with either `torch.use_deterministic_algorithms(True)` or `at::Context::setDeterministicAlgorithms(true)`, but this operation is not deterministic because it uses CuBLAS and you have CUDA >= 10.2. To enable deterministic behavior in this case, you must set an environment variable before running your PyTorch application: CUBLAS_WORKSPACE_CONFIG=:4096:8 or CUBLAS_WORKSPACE_CONFIG=:16:8. For more information, go to https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility (Triggered internally at /pytorch/aten/src/ATen/Context.cpp:233.)\n",
      "  ret = func(*args, **kwargs)\n",
      "/home/AR32500/AR32500/miniconda3/envs/py310/lib/python3.10/site-packages/torch/functional.py:422: UserWarning: Deterministic behavior was enabled with either `torch.use_deterministic_algorithms(True)` or `at::Context::setDeterministicAlgorithms(true)`, but this operation is not deterministic because it uses CuBLAS and you have CUDA >= 10.2. To enable deterministic behavior in this case, you must set an environment variable before running your PyTorch application: CUBLAS_WORKSPACE_CONFIG=:4096:8 or CUBLAS_WORKSPACE_CONFIG=:16:8. For more information, go to https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility (Triggered internally at /pytorch/aten/src/ATen/Context.cpp:233.)\n",
      "  return _VF.einsum(equation, operands)  # type: ignore[attr-defined]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1 patient001_frame01_slice2\n",
      "2 patient002_frame01_slice5\n",
      "3 patient003_frame01_slice2\n",
      "4 patient001_frame01_slice8\n",
      "5 patient003_frame01_slice8\n",
      "6 patient002_frame01_slice3\n",
      "7 patient005_frame01_slice3\n",
      "8 patient005_frame01_slice8\n",
      "9 patient004_frame01_slice3\n",
      "10 patient005_frame01_slice4\n",
      "11 patient005_frame01_slice6\n",
      "12 patient004_frame01_slice6\n",
      "13 patient003_frame01_slice5\n",
      "14 patient004_frame01_slice2\n",
      "15 patient005_frame01_slice5\n",
      "16 patient004_frame01_slice1\n",
      "17 patient003_frame01_slice1\n",
      "18 patient003_frame01_slice7\n",
      "19 patient003_frame01_slice6\n",
      "20 patient002_frame01_slice4\n",
      "21 patient003_frame01_slice4\n",
      "22 patient005_frame01_slice2\n",
      "23 patient002_frame01_slice6\n",
      "24 patient004_frame01_slice0\n",
      "25 patient001_frame01_slice9\n",
      "26 patient001_frame01_slice7\n",
      "27 patient001_frame01_slice5\n",
      "28 patient003_frame01_slice3\n",
      "29 patient002_frame01_slice7\n",
      "30 patient004_frame01_slice4\n",
      "31 patient001_frame01_slice6\n",
      "32 patient002_frame01_slice1\n",
      "33 patient001_frame01_slice1\n",
      "34 patient005_frame01_slice1\n",
      "35 patient002_frame01_slice0\n",
      "36 patient001_frame01_slice3\n",
      "37 patient004_frame01_slice7\n",
      "38 patient002_frame01_slice2\n",
      "39 patient004_frame01_slice5\n",
      "40 patient005_frame01_slice9\n",
      "41 patient001_frame01_slice4\n"
     ]
    }
   ],
   "source": [
    "# Compute and cache SAM image embeddings for the training split:\n",
    "# one .h5 file per image, keyed by the image filename. Already-saved\n",
    "# embeddings are skipped so the cell is safe to re-run.\n",
    "context = 'train_2d_images'\n",
    "\n",
    "for batch_idx, batched_input in enumerate(data_module.train_loader):\n",
    "    B = len(batched_input['filename'])\n",
    "    assert B == 1  # one image per file: the loader must use batch_size == 1\n",
    "    i = 0\n",
    "    filename = batched_input['filename'][i].split('.')[0]\n",
    "    embed_savefolder = os.path.join(base_savefolder, 'image_embeddings', \n",
    "                                    os.path.basename(model_config['sam_checkpoint']).replace('.', '-'), context, filename)\n",
    "    \n",
    "    if not os.path.exists(embed_savefolder + '.h5'):\n",
    "        print(batch_idx, filename)\n",
    "        \n",
    "        # exist_ok=True avoids the check-then-create race of a manual exists test\n",
    "        os.makedirs(os.path.dirname(embed_savefolder), exist_ok=True)\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            batched_input = {k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in batched_input.items()}\n",
    "            image_embeddings = full_model.sam.image_encoder(batched_input['data'])\n",
    "\n",
    "            with h5py.File(embed_savefolder + '.h5', 'w') as f:\n",
    "                f.create_dataset(filename, data=image_embeddings[i].detach().cpu().numpy())\n",
    "\n",
    "    else:\n",
    "        print(f\"Dataset {filename} already exists\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compute and cache SAM image embeddings for the validation split:\n",
    "# one .h5 file per image, keyed by the image filename. Already-saved\n",
    "# embeddings are skipped so the cell is safe to re-run.\n",
    "context = 'val_2d_images'\n",
    "\n",
    "for batch_idx, batched_input in enumerate(data_module.val_loader):\n",
    "    B = len(batched_input['filename'])\n",
    "    assert B == 1  # one image per file: the loader must use batch_size == 1\n",
    "    i = 0\n",
    "    filename = batched_input['filename'][i].split('.')[0]\n",
    "    embed_savefolder = os.path.join(base_savefolder, 'image_embeddings', \n",
    "                                    os.path.basename(model_config['sam_checkpoint']).replace('.', '-'), context, filename)\n",
    "\n",
    "    if not os.path.exists(embed_savefolder + '.h5'):\n",
    "        print(batch_idx, filename)\n",
    "        \n",
    "        # exist_ok=True avoids the check-then-create race of a manual exists test\n",
    "        os.makedirs(os.path.dirname(embed_savefolder), exist_ok=True)\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            batched_input = {k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in batched_input.items()}\n",
    "            image_embeddings = full_model.sam.image_encoder(batched_input['data'])\n",
    "\n",
    "            with h5py.File(embed_savefolder + '.h5', 'w') as f:\n",
    "                f.create_dataset(filename, data=image_embeddings[i].detach().cpu().numpy())\n",
    "    else:\n",
    "        print(f\"Dataset {filename} already exists\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compute and cache SAM image embeddings for the test split:\n",
    "# one .h5 file per image, keyed by the image filename. Already-saved\n",
    "# embeddings are skipped so the cell is safe to re-run.\n",
    "context = 'test_2d_images'\n",
    "\n",
    "for batch_idx, batched_input in enumerate(data_module.test_loader):\n",
    "    B = len(batched_input['filename'])\n",
    "    assert B == 1  # one image per file: the loader must use batch_size == 1\n",
    "    i = 0\n",
    "    filename = batched_input['filename'][i].split('.')[0]\n",
    "    embed_savefolder = os.path.join(base_savefolder, 'image_embeddings', \n",
    "                                    os.path.basename(model_config['sam_checkpoint']).replace('.', '-'), context, filename)\n",
    "\n",
    "    if not os.path.exists(embed_savefolder + '.h5'):\n",
    "        print(batch_idx, filename)\n",
    "        \n",
    "        # exist_ok=True avoids the check-then-create race of a manual exists test\n",
    "        os.makedirs(os.path.dirname(embed_savefolder), exist_ok=True)\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            batched_input = {k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in batched_input.items()}\n",
    "            image_embeddings = full_model.sam.image_encoder(batched_input['data'])\n",
    "\n",
    "            with h5py.File(embed_savefolder + '.h5', 'w') as f:\n",
    "                f.create_dataset(filename, data=image_embeddings[i].detach().cpu().numpy())\n",
    "\n",
    "    else:\n",
    "        print(f\"Dataset {filename} already exists\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py310",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
