{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Setup imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "id": "ZejZZonxwCMH",
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-06-07 10:54:41.089479: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
      "2024-06-07 10:54:41.143951: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
      "To enable the following instructions: AVX2 AVX_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "MONAI version: 1.2.0\n",
      "Numpy version: 1.24.3\n",
      "Pytorch version: 2.0.0+cu118\n",
      "MONAI flags: HAS_EXT = False, USE_COMPILED = False, USE_META_DICT = False\n",
      "MONAI rev id: c33f1ba588ee00229a309000e888f9817b4f1934\n",
      "MONAI __file__: /home/yawei/anaconda3/lib/python3.10/site-packages/monai/__init__.py\n",
      "\n",
      "Optional dependencies:\n",
      "Pytorch Ignite version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "ITK version: 5.3.0\n",
      "Nibabel version: 5.1.0\n",
      "scikit-image version: 0.19.3\n",
      "Pillow version: 9.4.0\n",
      "Tensorboard version: 2.15.0a20230914\n",
      "gdown version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "TorchVision version: 0.15.1+cu118\n",
      "tqdm version: 4.64.1\n",
      "lmdb version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "psutil version: 5.9.0\n",
      "pandas version: 1.5.3\n",
      "einops version: 0.7.0\n",
      "transformers version: 4.36.2\n",
      "mlflow version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "pynrrd version: 1.0.0\n",
      "\n",
      "For details about installing the optional dependencies, please visit:\n",
      "    https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from monai.utils import first, set_determinism\n",
    "from monai.transforms import (\n",
    "    AsDiscrete,\n",
    "    AsDiscreted,\n",
    "    EnsureChannelFirstd,\n",
    "    Compose,\n",
    "    CropForegroundd,\n",
    "    LoadImaged,\n",
    "    Orientationd,\n",
    "    RandCropByPosNegLabeld,\n",
    "    ScaleIntensityRanged,\n",
    "    Spacingd,\n",
    "    Invertd,\n",
    "    SpatialPadd,\n",
    ")\n",
    "from monai.handlers.utils import from_engine\n",
    "# https://docs.monai.io/en/stable/networks.html#nets\n",
    "from monai.networks.nets import UNet,AttentionUnet, DynUNet, SegResNet, VNet, SegResNetVAE, UNETR\n",
    "from monai.networks.layers import Norm\n",
    "from monai.metrics import DiceMetric\n",
    "from monai.losses import DiceLoss\n",
    "from monai.inferers import sliding_window_inference\n",
    "from monai.data import CacheDataset, DataLoader, Dataset, decollate_batch\n",
    "from monai.config import print_config\n",
    "from monai.apps import download_and_extract\n",
    "import aim\n",
    "from aim.pytorch import track_gradients_dists, track_params_dists\n",
    "import torch\n",
    "import matplotlib.pyplot as plt\n",
    "import tempfile\n",
    "import shutil\n",
    "import os\n",
    "import glob\n",
    "\n",
    "print_config()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "w3EPRPqBwCMN"
   },
   "source": [
    "## Set MSD dataset path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "id": "lVZr7-kBwCMO"
   },
   "outputs": [],
   "source": [
    "root_dir = '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data'\n",
    "data_dir = '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/'\n",
    "train_images = sorted(glob.glob(os.path.join(data_dir, \"imagesTr\", \"*.nii.gz\")))\n",
    "# train_labels = sorted(glob.glob(os.path.join(data_dir, \"labelsTr\", \"*.nii.gz\")))\n",
    "train_labels = [i.replace('imagesTr','labelsTr') for i in train_images]\n",
    "\n",
    "data_dicts = [{\"image\": image_name, \"label\": label_name} for image_name, label_name in zip(train_images, train_labels)]\n",
    "train_num = int(len(data_dicts)*0.7)\n",
    "train_files, val_files = data_dicts[:-train_num], data_dicts[-train_num:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'image': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_00.nii.gz',\n",
       "  'label': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/labelsTr/prostate_00.nii.gz'},\n",
       " {'image': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_01.nii.gz',\n",
       "  'label': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/labelsTr/prostate_01.nii.gz'},\n",
       " {'image': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_02.nii.gz',\n",
       "  'label': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/labelsTr/prostate_02.nii.gz'},\n",
       " {'image': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_04.nii.gz',\n",
       "  'label': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/labelsTr/prostate_04.nii.gz'},\n",
       " {'image': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_06.nii.gz',\n",
       "  'label': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/labelsTr/prostate_06.nii.gz'},\n",
       " {'image': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_07.nii.gz',\n",
       "  'label': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/labelsTr/prostate_07.nii.gz'},\n",
       " {'image': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_10.nii.gz',\n",
       "  'label': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/labelsTr/prostate_10.nii.gz'},\n",
       " {'image': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_13.nii.gz',\n",
       "  'label': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/labelsTr/prostate_13.nii.gz'},\n",
       " {'image': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_14.nii.gz',\n",
       "  'label': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/labelsTr/prostate_14.nii.gz'},\n",
       " {'image': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_16.nii.gz',\n",
       "  'label': '/mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/labelsTr/prostate_16.nii.gz'}]"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_files"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(320, 320, 15, 2) (320, 320, 15) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_00.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_01.nii.gz\n",
      "(320, 320, 24, 2) (320, 320, 24) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_02.nii.gz\n",
      "(320, 320, 15, 2) (320, 320, 15) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_04.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_06.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_07.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_10.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_13.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_14.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_16.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_17.nii.gz\n",
      "(256, 256, 18, 2) (256, 256, 18) [0. 1.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_18.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_20.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_21.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_24.nii.gz\n",
      "(320, 320, 19, 2) (320, 320, 19) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_25.nii.gz\n",
      "(384, 384, 11, 2) (384, 384, 11) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_28.nii.gz\n",
      "(320, 320, 15, 2) (320, 320, 15) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_29.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_31.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_32.nii.gz\n",
      "(320, 320, 15, 2) (320, 320, 15) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_34.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_35.nii.gz\n",
      "(256, 256, 15, 2) (256, 256, 15) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_37.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_38.nii.gz\n",
      "(320, 320, 15, 2) (320, 320, 15) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_39.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_40.nii.gz\n",
      "(256, 256, 18, 2) (256, 256, 18) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_41.nii.gz\n",
      "(320, 320, 22, 2) (320, 320, 22) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_42.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_43.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_44.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_46.nii.gz\n",
      "(320, 320, 20, 2) (320, 320, 20) [0. 1. 2.] /mnt/datawow/lyl/models/nnUNet-master/nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task05_Prostate/imagesTr/prostate_47.nii.gz\n"
     ]
    }
   ],
   "source": [
    "import nibabel as nib\n",
    "import numpy as np\n",
    "\n",
    "def get_shape(name):\n",
    "    img = nib.load(name).get_fdata()\n",
    "    label = nib.load(name.replace('_0000','').replace('imagesTr','labelsTr')).get_fdata()\n",
    "    print(img.shape, label.shape, np.unique(label), name)\n",
    "    return 0\n",
    "\n",
    "for name in train_images:\n",
    "    get_shape(name)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Q1Wi6EtAwCMO"
   },
   "source": [
    "## Set deterministic training for reproducibility"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "id": "dr8HRsffwCMO"
   },
   "outputs": [],
   "source": [
    "set_determinism(seed=1645)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "13ZnlKGCwCMO"
   },
   "source": [
    "## Setup transforms for training and validation\n",
    "\n",
    "Here we use several transforms to augment the dataset:\n",
    "1. `LoadImaged` loads the spleen CT images and labels from NIfTI format files.\n",
    "1. `EnsureChannelFirstd` ensures the original data to construct \"channel first\" shape.\n",
    "1. `Spacingd` adjusts the spacing by `pixdim=(1.5, 1.5, 2.)` based on the affine matrix.\n",
    "1. `Orientationd` unifies the data orientation based on the affine matrix.\n",
    "1. `ScaleIntensityRanged` extracts intensity range [-57, 164] and scales to [0, 1].\n",
    "1. `CropForegroundd` removes all zero borders to focus on the valid body area of the images and labels.\n",
    "1. `RandCropByPosNegLabeld` randomly crop patch samples from big image based on pos / neg ratio.  \n",
    "The image centers of negative samples must be in valid body area.\n",
    "1. `RandAffined` efficiently performs `rotate`, `scale`, `shear`, `translate`, etc. together based on PyTorch affine transform.\n",
    "1. `EnsureTyped` converts the numpy array to PyTorch Tensor for further steps."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "id": "jf7siKPOwCMO"
   },
   "outputs": [],
   "source": [
    "roi_size = (160, 160, 32)\n",
    "train_transforms = Compose(\n",
    "    [\n",
    "        LoadImaged(keys=[\"image\", \"label\"]),\n",
    "        EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n",
    "        ScaleIntensityRanged(\n",
    "            keys=[\"image\"],\n",
    "            a_min=-150,\n",
    "            a_max=250,\n",
    "            b_min=0.0,\n",
    "            b_max=1.0,\n",
    "            clip=True,\n",
    "        ),\n",
    "        CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n",
    "        Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n",
    "        Spacingd(keys=[\"image\", \"label\"], pixdim=(1.5, 1.5, 2.0), mode=(\"bilinear\", \"nearest\")),\n",
    "        SpatialPadd(keys=[\"image\", \"label\"], spatial_size=roi_size, mode=\"constant\"),\n",
    "        RandCropByPosNegLabeld(\n",
    "            keys=[\"image\", \"label\"],\n",
    "            label_key=\"label\",\n",
    "            spatial_size=roi_size,\n",
    "            pos=1,\n",
    "            neg=1,\n",
    "            num_samples=4,\n",
    "            image_key=\"image\",\n",
    "            image_threshold=0,\n",
    "        ),\n",
    "        \n",
    "        # user can also add other random transforms\n",
    "        # RandAffined(\n",
    "        #     keys=['image', 'label'],\n",
    "        #     mode=('bilinear', 'nearest'),\n",
    "        #     prob=1.0, spatial_size=(96, 96, 96),\n",
    "        #     rotate_range=(0, 0, np.pi/15),\n",
    "        #     scale_range=(0.1, 0.1, 0.1)),\n",
    "    ]\n",
    ")\n",
    "val_transforms = Compose(\n",
    "    [\n",
    "        LoadImaged(keys=[\"image\", \"label\"]),\n",
    "        EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n",
    "        ScaleIntensityRanged(\n",
    "            keys=[\"image\"],\n",
    "            a_min=-150,\n",
    "            a_max=250,\n",
    "            b_min=0.0,\n",
    "            b_max=1.0,\n",
    "            clip=True,\n",
    "        ),\n",
    "        CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n",
    "        Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n",
    "        Spacingd(keys=[\"image\", \"label\"], pixdim=(1.5, 1.5, 2.0), mode=(\"bilinear\", \"nearest\")),\n",
    "        SpatialPadd(keys=[\"image\", \"label\"], spatial_size=roi_size, mode=\"constant\"),\n",
    "\n",
    "    ]\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "au4rmQfDwCMP"
   },
   "source": [
    "## Check transforms in DataLoader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 408
    },
    "id": "qqcFPuVkwCMP",
    "outputId": "4189428e-4569-4453-e379-df4466208c85",
    "tags": []
   },
   "outputs": [],
   "source": [
    "# check_ds = Dataset(data=val_files, transform=val_transforms)\n",
    "# check_loader = DataLoader(check_ds, batch_size=1)\n",
    "# check_data = first(check_loader)\n",
    "# image, label = (check_data[\"image\"][0][0], check_data[\"label\"][0][0])\n",
    "# print(f\"image shape: {image.shape}, label shape: {label.shape}\")\n",
    "# # plot the slice [:, :, 80]\n",
    "\n",
    "# plt.figure(\"check\", (12, 6))\n",
    "# plt.subplot(1, 2, 1)\n",
    "# plt.title(\"image\")\n",
    "# plt.imshow(image[:, :, 30], cmap=\"gray\")\n",
    "# plt.subplot(1, 2, 2)\n",
    "# plt.title(\"label\")\n",
    "# plt.imshow(label[:, :, 30])\n",
    "# plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "D0_EHJ7FwCMQ"
   },
   "source": [
    "## Define CacheDataset and DataLoader for training and validation\n",
    "\n",
    "Here we use CacheDataset to accelerate training and validation process, it's 10x faster than the regular Dataset.  \n",
    "To achieve best performance, set `cache_rate=1.0` to cache all the data, if memory is not enough, set lower value.  \n",
    "Users can also set `cache_num` instead of `cache_rate`, will use the minimum value of the 2 settings.  \n",
    "And set `num_workers` to enable multi-threads during caching.  \n",
    "If want to to try the regular Dataset, just change to use the commented code below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "kKA4gboPwCMQ",
    "outputId": "2496df99-8445-4c70-a3b1-721f9e552b34",
    "tags": []
   },
   "outputs": [],
   "source": [
    "# train_ds = CacheDataset(data=train_files, transform=train_transforms, cache_rate=1.0, num_workers=4)\n",
    "train_ds = Dataset(data=train_files, transform=train_transforms)\n",
    "\n",
    "# use batch_size=2 to load images and use RandCropByPosNegLabeld\n",
    "# to generate 2 x 4 images for network training\n",
    "train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)\n",
    "\n",
    "# val_ds = CacheDataset(data=val_files, transform=val_transforms, cache_rate=1.0, num_workers=4)\n",
    "val_ds = Dataset(data=val_files, transform=val_transforms)\n",
    "val_loader = DataLoader(val_ds, batch_size=1, num_workers=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([8, 2, 160, 160, 32]) torch.Size([8, 1, 160, 160, 32])\n",
      "torch.Size([8, 2, 160, 160, 32]) torch.Size([8, 1, 160, 160, 32])\n",
      "torch.Size([8, 2, 160, 160, 32]) torch.Size([8, 1, 160, 160, 32])\n",
      "torch.Size([8, 2, 160, 160, 32]) torch.Size([8, 1, 160, 160, 32])\n",
      "torch.Size([8, 2, 160, 160, 32]) torch.Size([8, 1, 160, 160, 32])\n",
      "3\n"
     ]
    }
   ],
   "source": [
    "\n",
    "step=0\n",
    "for batch_data in train_loader:\n",
    "    step+=1\n",
    "    inputs, labels = (\n",
    "        batch_data[\"image\"],\n",
    "        batch_data[\"label\"],\n",
    "    )\n",
    "    print(inputs.shape, labels.shape)\n",
    "    if step==5:\n",
    "        break\n",
    "\n",
    "import numpy as np\n",
    "num_classes = len(np.unique(labels))\n",
    "print(num_classes)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "nOgy1x1BwCMQ"
   },
   "source": [
    "## Create Model, Loss, Optimizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "dataset_name = 'Task005_prostate'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "id": "VM-5g2bmwCMQ"
   },
   "outputs": [],
   "source": [
    "# standard PyTorch program style: create UNet, DiceLoss and Adam optimizer\n",
    "device = torch.device(\"cuda:0\")\n",
    "\n",
    "UNet_meatdata = {\n",
    "    \"spatial_dims\": 3,\n",
    "    \"in_channels\": 2,\n",
    "    \"out_channels\": num_classes,\n",
    "     \"strides\": (2, 2, 2),\n",
    "    \"num_res_units\": 2,\n",
    "    \"channels\":(4, 8, 16, 32),\n",
    "    \"norm\": Norm.BATCH,\n",
    "}\n",
    "\n",
    "model = UNet(**UNet_meatdata).to(device)\n",
    "loss_function = DiceLoss(to_onehot_y=True, softmax=True)\n",
    "loss_type = \"DiceLoss\"\n",
    "optimizer = torch.optim.Adam(model.parameters(), 1e-4)\n",
    "dice_metric = DiceMetric(include_background=False, reduction=\"mean\")\n",
    "\n",
    "Optimizer_metadata = {}\n",
    "for ind, param_group in enumerate(optimizer.param_groups):\n",
    "    optim_meta_keys = list(param_group.keys())\n",
    "    Optimizer_metadata[f\"param_group_{ind}\"] = {\n",
    "        key: value for (key, value) in param_group.items() if \"params\" not in key\n",
    "    }\n",
    "aim_run = aim.Run()\n",
    "aim_run.name = f'{dataset_name}_{model.__class__.__name__}'\n",
    "# log model metadata\n",
    "aim_run[f\"{model.__class__.__name__}_meatdata\"] = UNet_meatdata\n",
    "# log optimizer metadata\n",
    "aim_run[\"Optimizer_metadata\"] = Optimizer_metadata"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "VM-5g2bmwCMQ"
   },
   "outputs": [],
   "source": [
    "# standard PyTorch program style: create UNet, DiceLoss and Adam optimizer\n",
    "device = torch.device(\"cuda:0\")\n",
    "\n",
    "net_metadata = {\n",
    "    \"spatial_dims\": 3,\n",
    "    \"in_channels\": 4,\n",
    "    \"out_channels\": num_classes,\n",
    "    \"img_size\": (96, 96, 32),\n",
    "    \"mlp_dim\": 3072\n",
    "    #  \"strides\": (2, 2, 2, 2),\n",
    "    # \"num_res_units\": 2,\n",
    "    # \"channels\":(4, 8, 16, 32, 64),\n",
    "    # \"norm\": Norm.BATCH,\n",
    "}\n",
    "\n",
    "model = UNETR(**net_metadata).to(device)\n",
    "loss_function = DiceLoss(to_onehot_y=True, softmax=True)\n",
    "loss_type = \"DiceLoss\"\n",
    "optimizer = torch.optim.Adam(model.parameters(), 1e-4)\n",
    "dice_metric = DiceMetric(include_background=False, reduction=\"mean\")\n",
    "\n",
    "Optimizer_metadata = {}\n",
    "for ind, param_group in enumerate(optimizer.param_groups):\n",
    "    optim_meta_keys = list(param_group.keys())\n",
    "    Optimizer_metadata[f\"param_group_{ind}\"] = {\n",
    "        key: value for (key, value) in param_group.items() if \"params\" not in key\n",
    "    }\n",
    "aim_run = aim.Run()\n",
    "aim_run.name = f'{dataset_name}_{model.__class__.__name__}'\n",
    "# log model metadata\n",
    "aim_run[f\"{model.__class__.__name__}_meatdata\"] = net_metadata\n",
    "# log optimizer metadata\n",
    "aim_run[\"Optimizer_metadata\"] = Optimizer_metadata\n",
    "print(aim_run.name)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "4nD1pAY-wCMR"
   },
   "source": [
    "## Execute a typical PyTorch training process"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "KayxFseYwCMR",
    "scrolled": false,
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "----------\n",
      "epoch 1/600\n",
      "1/5, train_loss: 0.8159\n",
      "2/5, train_loss: 0.8326\n",
      "3/5, train_loss: 0.8338\n",
      "4/5, train_loss: 0.8260\n",
      "5/5, train_loss: 0.8343\n",
      "epoch 1 average loss: 0.8285\n",
      "----------\n",
      "epoch 2/600\n",
      "1/5, train_loss: 0.8312\n",
      "2/5, train_loss: 0.8115\n",
      "3/5, train_loss: 0.8305\n",
      "4/5, train_loss: 0.8274\n",
      "5/5, train_loss: 0.8253\n",
      "epoch 2 average loss: 0.8252\n",
      "----------\n",
      "epoch 3/600\n",
      "1/5, train_loss: 0.8213\n",
      "2/5, train_loss: 0.8096\n",
      "3/5, train_loss: 0.8289\n",
      "4/5, train_loss: 0.8230\n",
      "5/5, train_loss: 0.8268\n",
      "epoch 3 average loss: 0.8219\n",
      "----------\n",
      "epoch 4/600\n",
      "1/5, train_loss: 0.8245\n",
      "2/5, train_loss: 0.8033\n",
      "3/5, train_loss: 0.8222\n",
      "4/5, train_loss: 0.8192\n",
      "5/5, train_loss: 0.8245\n",
      "epoch 4 average loss: 0.8188\n",
      "----------\n",
      "epoch 5/600\n",
      "1/5, train_loss: 0.8185\n",
      "2/5, train_loss: 0.8047\n",
      "3/5, train_loss: 0.8189\n",
      "4/5, train_loss: 0.8207\n",
      "5/5, train_loss: 0.8158\n",
      "epoch 5 average loss: 0.8157\n",
      "saved new best metric model at the 5th epoch\n",
      "current epoch: 5 current mean dice: 0.0200 \n",
      "best mean dice: 0.0200  at epoch: 5\n",
      "----------\n",
      "epoch 6/600\n",
      "1/5, train_loss: 0.8209\n",
      "2/5, train_loss: 0.8172\n",
      "3/5, train_loss: 0.8144\n",
      "4/5, train_loss: 0.8139\n",
      "5/5, train_loss: 0.7974\n",
      "epoch 6 average loss: 0.8127\n",
      "----------\n",
      "epoch 7/600\n",
      "1/5, train_loss: 0.7984\n",
      "2/5, train_loss: 0.8131\n",
      "3/5, train_loss: 0.8143\n",
      "4/5, train_loss: 0.8120\n",
      "5/5, train_loss: 0.8135\n",
      "epoch 7 average loss: 0.8103\n",
      "----------\n",
      "epoch 8/600\n",
      "1/5, train_loss: 0.8121\n",
      "2/5, train_loss: 0.8124\n",
      "3/5, train_loss: 0.8075\n",
      "4/5, train_loss: 0.7926\n",
      "5/5, train_loss: 0.8119\n",
      "epoch 8 average loss: 0.8073\n",
      "----------\n",
      "epoch 9/600\n",
      "1/5, train_loss: 0.8111\n",
      "2/5, train_loss: 0.8071\n",
      "3/5, train_loss: 0.7950\n",
      "4/5, train_loss: 0.8051\n",
      "5/5, train_loss: 0.8052\n",
      "epoch 9 average loss: 0.8047\n",
      "----------\n",
      "epoch 10/600\n",
      "1/5, train_loss: 0.8014\n",
      "2/5, train_loss: 0.8090\n",
      "3/5, train_loss: 0.8059\n",
      "4/5, train_loss: 0.7875\n",
      "5/5, train_loss: 0.8068\n",
      "epoch 10 average loss: 0.8021\n",
      "current epoch: 10 current mean dice: 0.0152 \n",
      "best mean dice: 0.0200  at epoch: 5\n",
      "----------\n",
      "epoch 11/600\n",
      "1/5, train_loss: 0.8023\n",
      "2/5, train_loss: 0.7996\n",
      "3/5, train_loss: 0.8067\n",
      "4/5, train_loss: 0.7851\n",
      "5/5, train_loss: 0.8046\n",
      "epoch 11 average loss: 0.7996\n",
      "----------\n",
      "epoch 12/600\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Exception ignored in: <function _after_at_fork_child_reinit_locks at 0x7fb721ee9480>Exception ignored in: <function _releaseLock at 0x7fb721ee92d0>\n",
      "Traceback (most recent call last):\n",
      "  File \"/home/yawei/anaconda3/lib/python3.10/logging/__init__.py\", line 228, in _releaseLock\n",
      "    def _releaseLock():\n",
      "KeyboardInterrupt: \n",
      "\n",
      "Traceback (most recent call last):\n",
      "  File \"/home/yawei/anaconda3/lib/python3.10/logging/__init__.py\", line 255, in _after_at_fork_child_reinit_locks\n",
      "    for handler in _at_fork_reinit_lock_weakset:\n",
      "  File \"/home/yawei/anaconda3/lib/python3.10/_weakrefset.py\", line 66, in __iter__\n",
      "    item = itemref()\n",
      "KeyboardInterrupt: \n"
     ]
    }
   ],
   "source": [
    "max_epochs = 600\n",
    "val_interval = 5\n",
    "best_metric = -1\n",
    "best_metric_epoch = -1\n",
    "epoch_loss_values = []\n",
    "metric_values = []\n",
    "post_pred = Compose([AsDiscrete(argmax=True, to_onehot=num_classes)])\n",
    "post_label = Compose([AsDiscrete(to_onehot=num_classes)])\n",
    "\n",
    "slice_to_track = 15\n",
    "\n",
    "for epoch in range(max_epochs):\n",
    "    print(\"-\" * 10)\n",
    "    print(f\"epoch {epoch + 1}/{max_epochs}\")\n",
    "    model.train()\n",
    "    epoch_loss = 0\n",
    "    step = 0\n",
    "    for batch_data in train_loader:\n",
    "        step += 1\n",
    "        inputs, labels = (\n",
    "            batch_data[\"image\"].to(device),\n",
    "            batch_data[\"label\"].to(device),\n",
    "        )\n",
    "        \n",
    "        optimizer.zero_grad()\n",
    "        outputs = model(inputs)\n",
    "        loss = loss_function(outputs, labels)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        epoch_loss += loss.item()\n",
    "        print(f\"{step}/{len(train_ds) // train_loader.batch_size}, \" f\"train_loss: {loss.item():.4f}\")\n",
    "        # track batch loss metric\n",
    "        aim_run.track(loss.item(), name=\"batch_loss\", context={\"type\": loss_type})\n",
    "\n",
    "    epoch_loss /= step\n",
    "    epoch_loss_values.append(epoch_loss)\n",
    "\n",
    "    # track epoch loss metric\n",
    "    aim_run.track(epoch_loss, name=\"epoch_loss\", context={\"type\": loss_type})\n",
    "\n",
    "    print(f\"epoch {epoch + 1} average loss: {epoch_loss:.4f}\")\n",
    "\n",
    "    if (epoch + 1) % val_interval == 0:\n",
    "        if (epoch + 1) % (val_interval * 2) == 0:\n",
    "            # track model params and gradients\n",
    "            track_params_dists(model, aim_run)\n",
    "            # THIS SEGMENT TAKES RELATIVELY LONG (Advise Against it)\n",
    "            track_gradients_dists(model, aim_run)\n",
    "\n",
    "        model.eval()\n",
    "        with torch.no_grad():\n",
    "            for index, val_data in enumerate(val_loader):\n",
    "                val_inputs, val_labels = (\n",
    "                    val_data[\"image\"].to(device),\n",
    "                    val_data[\"label\"].to(device),\n",
    "                )\n",
    "                roi_size = (160, 160, 160)\n",
    "                sw_batch_size = 4\n",
    "                val_outputs = sliding_window_inference(val_inputs, roi_size, sw_batch_size, model)\n",
    "\n",
    "                # tracking input, label and output images with Aim\n",
    "                output = torch.argmax(val_outputs, dim=1)[0, :, :, slice_to_track].float()\n",
    "\n",
    "                aim_run.track(\n",
    "                    aim.Image(val_inputs[0, 0, :, :, slice_to_track], caption=f\"Input Image: {index}\"),\n",
    "                    name=\"validation\",\n",
    "                    context={\"type\": \"input\"},\n",
    "                )\n",
    "                aim_run.track(\n",
    "                    aim.Image(val_labels[0, 0, :, :, slice_to_track], caption=f\"Label Image: {index}\"),\n",
    "                    name=\"validation\",\n",
    "                    context={\"type\": \"label\"},\n",
    "                )\n",
    "                aim_run.track(\n",
    "                    aim.Image(output, caption=f\"Predicted Label: {index}\"),\n",
    "                    name=\"predictions\",\n",
    "                    context={\"type\": \"labels\"},\n",
    "                )\n",
    "\n",
    "                val_outputs = [post_pred(i) for i in decollate_batch(val_outputs)]\n",
    "                val_labels = [post_label(i) for i in decollate_batch(val_labels)]\n",
    "                # compute metric for current iteration\n",
    "                dice_metric(y_pred=val_outputs, y=val_labels)\n",
    "\n",
    "            # aggregate the final mean dice result\n",
    "            metric = dice_metric.aggregate().item()\n",
    "            # track val metric\n",
    "            aim_run.track(metric, name=\"val_metric\", context={\"type\": loss_type})\n",
    "\n",
    "            # reset the status for next validation round\n",
    "            dice_metric.reset()\n",
    "\n",
    "            metric_values.append(metric)\n",
    "            if metric > best_metric:\n",
    "                best_metric = metric\n",
    "                best_metric_epoch = epoch + 1\n",
    "                torch.save(model.state_dict(), os.path.join(root_dir, f\"{aim_run.name}_best_metric_model.pth\"))\n",
    "\n",
    "                best_model_log_message = f\"saved new best metric model at the {epoch+1}th epoch\"\n",
    "                aim_run.track(aim.Text(best_model_log_message), name=\"best_model_log_message\", epoch=epoch + 1)\n",
    "                print(best_model_log_message)\n",
    "\n",
    "            message1 = f\"current epoch: {epoch + 1} current mean dice: {metric:.4f}\"\n",
    "            message2 = f\"\\nbest mean dice: {best_metric:.4f} \"\n",
    "            message3 = f\"at epoch: {best_metric_epoch}\"\n",
    "\n",
    "            aim_run.track(aim.Text(message1 + \"\\n\" + message2 + message3), name=\"epoch_summary\", epoch=epoch + 1)\n",
    "            print(message1, message2, message3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "d1WMn7DFKkbV"
   },
   "outputs": [],
   "source": [
    "# finalize Aim Run\n",
    "aim_run.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ygo9hrWswCMR",
    "tags": []
   },
   "outputs": [],
   "source": [
    "print(f\"train completed, best_metric: {best_metric:.4f} \" f\"at epoch: {best_metric_epoch}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "run = aim.Run('dc7f4adf500345a78a890961')\n",
    "run.name"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "run.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "jGhCvBg-wCMS"
   },
   "outputs": [],
   "source": [
    "%load_ext aim\n",
    "%aim up"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "zV7fV0CIwCMS"
   },
   "source": [
    "Once the above cell is executed, you will see the Aim UI running in the output cell.\n",
    "\n",
    "![Aim UI](https://user-images.githubusercontent.com/13848158/156644374-ba04963f-4f63-4fb9-b3ef-4d4e1ae521cc.jpg)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "KRw5pgLiwCMS"
   },
   "source": [
    "## Explore the loss and metric"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "MeX1wBjXwCMS"
   },
   "source": [
    "Compare metrics curves with Metrics Explorer - group and aggregate by any hyperparameter to easily compare training runs\n",
    "\n",
    "![Metrics Explorer](https://user-images.githubusercontent.com/13848158/156642623-8cf4911d-bed2-42b8-9f39-374f8d31def8.jpg)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "zGZ5vozGwCMS"
   },
   "source": [
    "## Compare and analyze model outputs"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "mZIUa0aNwCMS"
   },
   "source": [
    "Compare models of different runs with input images and labels\n",
    "\n",
    "![Images Explorer](https://user-images.githubusercontent.com/13848158/156642615-c003fb3c-9f37-40f4-b499-ee6623db59ef.jpg)\n",
    "\n",
    "![Images Explorer](https://user-images.githubusercontent.com/13848158/156642618-0c0c380a-75aa-45b1-b431-149f735b3fde.jpg)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "uZKhs2DFwCMS"
   },
   "source": [
    "## Evaluation on original image spacings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "Ws5wpqPlwCMT"
   },
   "outputs": [],
   "source": [
    "val_org_transforms = Compose(\n",
    "    [\n",
    "        LoadImaged(keys=[\"image\", \"label\"]),\n",
    "        EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n",
    "        Spacingd(keys=[\"image\"], pixdim=(1.5, 1.5, 2.0), mode=\"bilinear\"),\n",
    "        Orientationd(keys=[\"image\"], axcodes=\"RAS\"),\n",
    "        ScaleIntensityRanged(\n",
    "            keys=[\"image\"],\n",
    "            a_min=-57,\n",
    "            a_max=164,\n",
    "            b_min=0.0,\n",
    "            b_max=1.0,\n",
    "            clip=True,\n",
    "        ),\n",
    "        CropForegroundd(keys=[\"image\"], source_key=\"image\"),\n",
    "    ]\n",
    ")\n",
    "\n",
    "val_org_ds = Dataset(data=val_files, transform=val_org_transforms)\n",
    "val_org_loader = DataLoader(val_org_ds, batch_size=1, num_workers=4)\n",
    "\n",
    "post_transforms = Compose(\n",
    "    [\n",
    "        Invertd(\n",
    "            keys=\"pred\",\n",
    "            transform=val_org_transforms,\n",
    "            orig_keys=\"image\",\n",
    "            meta_keys=\"pred_meta_dict\",\n",
    "            orig_meta_keys=\"image_meta_dict\",\n",
    "            meta_key_postfix=\"meta_dict\",\n",
    "            nearest_interp=False,\n",
    "            to_tensor=True,\n",
    "        ),\n",
    "        AsDiscreted(keys=\"pred\", argmax=True, to_onehot=num_classes),\n",
    "        AsDiscreted(keys=\"label\", to_onehot=num_classes),\n",
    "    ]\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "JTkKUwRGwCMT"
   },
   "outputs": [],
   "source": [
    "model.load_state_dict(torch.load(os.path.join(root_dir, f\"{aim_run.name}_best_metric_model.pth\")))\n",
    "model.eval()\n",
    "\n",
    "with torch.no_grad():\n",
    "    for val_data in val_org_loader:\n",
    "        val_data[\"image\"] = val_data[\"image\"].to(device)\n",
    "        val_data[\"label\"] = val_data[\"label\"].to(device)\n",
    "        roi_size = (160, 160, 160)\n",
    "        sw_batch_size = 4\n",
    "        val_data[\"pred\"] = sliding_window_inference(val_data[\"image\"], roi_size, sw_batch_size, model)\n",
    "        val_data = [post_transforms(i) for i in decollate_batch(val_data)]\n",
    "        val_outputs, val_labels = from_engine([\"pred\", \"label\"])(val_data)\n",
    "        # compute metric for current iteration\n",
    "        dice_metric(y_pred=val_outputs, y=val_labels)\n",
    "\n",
    "    # aggregate the final mean dice result\n",
    "    metric_org = dice_metric.aggregate().item()\n",
    "    # reset the status for next validation round\n",
    "    dice_metric.reset()\n",
    "\n",
    "print(\"Metric on original image spacing: \", metric_org)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "chILRaduwCMT"
   },
   "source": [
    "## Cleanup data directory\n",
    "\n",
    "Remove the data directory if a temporary location was used."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "yuCFCxOcwCMT"
   },
   "outputs": [],
   "source": [
    "if directory is None:\n",
    "    shutil.rmtree(root_dir)"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "name": "spleen_segmentation_3d_visualization.ipynb",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  },
  "vscode": {
   "interpreter": {
    "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
