{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "43d13ad5-f188-4b49-be18-e590ad332901",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Model components from the project-local SAM fork (not the upstream\n",
    "# segment_anything package).\n",
    "#from segment_anything import SamPredictor, sam_model_registry\n",
    "from models.sam import SamPredictor, sam_model_registry\n",
    "from models.sam.utils.transforms import ResizeLongestSide\n",
    "from skimage.measure import label\n",
    "from models.sam_LoRa import LoRA_Sam\n",
    "# Scientific computing\n",
    "import numpy as np\n",
    "import os\n",
    "# PyTorch packages\n",
    "import torch\n",
    "from torch import nn\n",
    "import torch.optim as optim\n",
    "import torchvision\n",
    "from torchvision import datasets\n",
    "# Visualization\n",
    "import matplotlib.pyplot as plt\n",
    "from torchvision import transforms\n",
    "from PIL import Image\n",
    "# Others\n",
    "from torch.utils.data import DataLoader, Subset\n",
    "from torch.autograd import Variable\n",
    "import copy\n",
    "from utils.dataset import Public_dataset\n",
    "import torch.nn.functional as F\n",
    "from torch.nn.functional import one_hot\n",
    "from pathlib import Path\n",
    "from tqdm import tqdm\n",
    "from utils.losses import DiceLoss\n",
    "from utils.dsc import dice_coeff\n",
    "import cv2\n",
    "import monai\n",
    "from utils.utils import vis_image,inverse_normalize,torch_percentile\n",
    "from argparse import Namespace\n",
    "import cfg\n",
    "import PIL\n",
    "import torchio as tio\n",
    "import json\n",
    "import glob  # used below for the *.nii.gz file search; was missing before\n",
    "\n",
    "# Restrict this notebook to the first GPU.\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
    "\n",
    "arch=\"vit_t\"  # SAM image-encoder variant; change this value as needed\n",
    "finetune_type=\"adapter\"\n",
    "dataset_name=\"MRI-Prostate\"  # Assuming you set this if it's dynamic\n",
    "\n",
    "# Construct the checkpoint directory argument (must match the training run's\n",
    "# naming convention so args.json and checkpoint_best.pth are found).\n",
    "checkpoint_dir= f\"2D-SAM_{arch}_encoderdecoder_{finetune_type}_{dataset_name}_noprompt\"\n",
    "\n",
    "args_path = f\"{checkpoint_dir}/args.json\"\n",
    "\n",
    "# Reading the args saved at training time from the json file\n",
    "with open(args_path, 'r') as f:\n",
    "    args_dict = json.load(f)\n",
    "\n",
    "# Converting dictionary to Namespace so fields are dot-accessible\n",
    "args = Namespace(**args_dict)\n",
    "\n",
    "# Build the fine-tuned model, load the best checkpoint, and switch to eval mode.\n",
    "sam_fine_tune = sam_model_registry[args.arch](args,checkpoint=os.path.join(args.dir_checkpoint,'checkpoint_best.pth'),num_classes=args.num_cls)\n",
    "sam_fine_tune = sam_fine_tune.to('cuda').eval()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "22f35640-a2e5-4f0d-a59d-0fa891062cf7",
   "metadata": {},
   "source": [
    "## Evaluate an image volume and save predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f1654874-2247-40c0-81f1-1e19dc400c7f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluate_1_volume(image_vol, model, slice_id=None, target_spacing=None, orientation=['L', 'P', 'S']):\n",
    "    \"\"\"\n",
    "    Evaluates a single slice of a volume using the provided model.\n",
    "\n",
    "    Parameters:\n",
    "    - image_vol: The torchio image volume to be evaluated (channels-first tensor).\n",
    "    - model: The model used for evaluation; must expose image_encoder,\n",
    "      prompt_encoder, and mask_decoder (SAM-style interface).\n",
    "    - slice_id: The specific slice to evaluate. If None, the middle slice is\n",
    "      chosen; an out-of-range id falls back to the last slice.\n",
    "    - target_spacing: Desired spacing for resampling the volume. If None, no resampling is performed.\n",
    "    - orientation: Orientation code of the volume; default is ['L', 'P', 'S'].\n",
    "      If the first axis is 'R', the input is flipped left/right before inference.\n",
    "\n",
    "    Returns:\n",
    "    - ori_img: The input image after undoing the mean/std normalization.\n",
    "    - pred: The per-pixel argmax prediction from the mask decoder (on GPU).\n",
    "    - voxel_spacing: The spacing of the voxels in the (pre-resampling) volume.\n",
    "    - Pil_img: The PIL image of the selected slice at its native resolution.\n",
    "    - slice_id: The ID of the evaluated slice (may differ from the input value\n",
    "      if it was out of range or None).\n",
    "    \"\"\"\n",
    "\n",
    "    # Normalize the volume data to [0, 1] range (mutates the subject's tensor)\n",
    "    image_vol.data = image_vol.data / (image_vol.data.max() * 1.0)\n",
    "    voxel_spacing = image_vol.spacing\n",
    "\n",
    "    # Resample the volume to target spacing if specified\n",
    "    if target_spacing and (voxel_spacing != target_spacing):\n",
    "        resample = tio.Resample(target_spacing, image_interpolation='nearest')\n",
    "        image_vol = resample(image_vol)\n",
    "\n",
    "    # Extract the first channel of the volume data\n",
    "    image_vol = image_vol.data[0]\n",
    "    slice_num = image_vol.shape[2]\n",
    "\n",
    "    # Determine the slice to be evaluated\n",
    "    if slice_id is not None:\n",
    "        # Valid indices are 0..slice_num-1, so slice_id == slice_num is already\n",
    "        # out of range: the check must be '>=' (the original '>' let it through\n",
    "        # and caused an IndexError below).\n",
    "        if slice_id >= slice_num:\n",
    "            slice_id = -1  # Use the last slice if the specified slice is out of range\n",
    "    else:\n",
    "        slice_id = slice_num // 2  # Default to the middle slice if not specified\n",
    "\n",
    "    # Get the 2D slice image\n",
    "    img_arr = image_vol[:, :, slice_id]\n",
    "\n",
    "    # Normalize the slice image to [0, 255] range and convert to uint8\n",
    "    # (the 1e-8 guards against division by zero on constant slices)\n",
    "    img_arr = np.array((img_arr - img_arr.min()) / (img_arr.max() - img_arr.min() + 1e-8) * 255, dtype=np.uint8)\n",
    "\n",
    "    # Convert the single channel image to 3 channels by duplicating the single channel\n",
    "    img_3c = np.tile(img_arr[:, :, None], [1, 1, 3])\n",
    "    img = Image.fromarray(img_3c, 'RGB')\n",
    "    Pil_img = img.copy()  # keep an unresized copy for the caller\n",
    "\n",
    "    # Resize the image to the 1024x1024 input size the encoder expects\n",
    "    img = transforms.Resize((1024, 1024))(img)\n",
    "\n",
    "    # Transform the image to a tensor and normalize with ImageNet mean/std\n",
    "    transform_img = transforms.Compose([\n",
    "        transforms.ToTensor(),\n",
    "    ])\n",
    "    img = transform_img(img)\n",
    "    imgs = torch.unsqueeze(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img), 0).cuda()\n",
    "\n",
    "    # Flip the image along the width axis if orientation is 'R'\n",
    "    if orientation[0] == 'R':\n",
    "        imgs = torch.flip(imgs, dims=[3])\n",
    "\n",
    "    # Perform model inference without gradient calculation\n",
    "    with torch.no_grad():\n",
    "        # Get image embeddings from the image encoder\n",
    "        img_emb = model.image_encoder(imgs)\n",
    "\n",
    "        # Get sparse and dense embeddings from the prompt encoder (no prompts)\n",
    "        sparse_emb, dense_emb = model.prompt_encoder(\n",
    "            points=None,\n",
    "            boxes=None,\n",
    "            masks=None,\n",
    "        )\n",
    "\n",
    "        # Get the prediction from the mask decoder\n",
    "        pred, _ = model.mask_decoder(\n",
    "            image_embeddings=img_emb,\n",
    "            image_pe=model.prompt_encoder.get_dense_pe(),\n",
    "            sparse_prompt_embeddings=sparse_emb,\n",
    "            dense_prompt_embeddings=dense_emb,\n",
    "            multimask_output=True,\n",
    "        )\n",
    "\n",
    "        # Take the most likely class per pixel\n",
    "        pred = pred.argmax(dim=1)\n",
    "\n",
    "    # Undo the normalization so the input can be displayed\n",
    "    ori_img = inverse_normalize(imgs.cpu()[0])\n",
    "\n",
    "    return ori_img, pred, voxel_spacing, Pil_img, slice_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9c2bf140-fd2c-48aa-8a84-0a5a1dd278af",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Example target mapping (you need to define by yourself under the format of: label:value; ...\n",
    "target_mapping = {\n",
    "    'tumor': 1\n",
    "}\n",
    "\n",
    "# Example usage\n",
    "test_dir = 'Volumes_to_test/'\n",
    "predict_dir = 'Predict_results/'\n",
    "\n",
    "# Retrieve case IDs from the directory names in test volume dir\n",
    "# Assume you have your image voumes saved in the following architecture\n",
    "# test_dir\n",
    "# |_ case_id1\n",
    "#    |_image.nii.gz\n",
    "#    |_gt.nii.gz (not necessary)\n",
    "# |_ case_id2\n",
    "#    |_image.nii.gz\n",
    "#    |_gt.nii.gz (not necessary)\n",
    "\n",
    "case_ids = [d for d in os.listdir(test_dir) if os.path.isdir(os.path.join(test_dir, d))]\n",
    "\n",
    "\n",
    "for case_id in case_ids:\n",
    "    case_dir = os.path.join(test_dir, case_id)\n",
    "    nii_files = glob.glob(os.path.join(case_dir, '*.nii.gz'))\n",
    "    if len(nii_files) == 0:\n",
    "        print(f\"No .nii.gz files found in {case_dir}\")\n",
    "        continue\n",
    "    \n",
    "    image_file = nii_files[0]  # Assuming there is only one .nii.gz file per case\n",
    "    image1_vol = tio.ScalarImage(image_file)\n",
    "    print(f'Processing {image_file}')\n",
    "    print('Volume shape: %s Volume spacing: %s' % (image1_vol.shape, image1_vol.spacing))\n",
    "\n",
    "\n",
    "    # Define the percentiles\n",
    "    lower_percentile = 0\n",
    "    upper_percentile = 100\n",
    "    image_tensor = image1_vol.data\n",
    "    lower_bound = torch_percentile(image_tensor, lower_percentile)\n",
    "    upper_bound = torch_percentile(image_tensor, upper_percentile)\n",
    "\n",
    "    # Clip and normalize the data\n",
    "    image_tensor = torch.clamp(image_tensor, lower_bound, upper_bound)\n",
    "    image_tensor = (image_tensor - lower_bound) / (upper_bound - lower_bound)\n",
    "    image1_vol.set_data(image_tensor)\n",
    "\n",
    "    mask_vol_numpy = np.zeros(image1_vol.shape)\n",
    "    id_list = list(range(image1_vol.shape[3]))\n",
    "    for id in id_list:\n",
    "        ori_img, pred_1, voxel_spacing1, Pil_img1, slice_id1 = evaluate_1_volume(image1_vol, sam_fine_tune, slice_id=id, orientation=image1_vol.orientation)\n",
    "        img1_size = Pil_img1.size\n",
    "        mask_pred_1 = ((pred_1).cpu()).float()   \n",
    "        pil_mask1 = Image.fromarray(np.array(mask_pred_1[0], dtype=np.uint8), 'L').resize(img1_size, resample=PIL.Image.NEAREST)\n",
    "        mask_vol_numpy[0, :, :, id] = np.asarray(pil_mask1)\n",
    "    \n",
    "    # Convert to tensor and handle orientation\n",
    "    mask_tensor = torch.tensor(mask_vol_numpy, dtype=torch.int)\n",
    "    if image1_vol.orientation[0] == 'R':\n",
    "        mask_tensor = torch.flip(mask_tensor, dims=[2])\n",
    "\n",
    "    # Save each organ mask\n",
    "    mask_save_folder = os.path.join(predict_dir, case_id,'predictions')\n",
    "    Path(mask_save_folder).mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "    # Save the combined mask volume\n",
    "    combined_mask_vol = tio.LabelMap(tensor=mask_tensor, affine=image1_vol.affine)\n",
    "    combined_mask_filename = os.path.join(predict_dir, case_id, 'pred_mask.nii.gz')\n",
    "    Path(os.path.dirname(combined_mask_filename)).mkdir(parents=True, exist_ok=True)\n",
    "    combined_mask_vol.save(combined_mask_filename)\n",
    "\n",
    "    print(f\"Combined mask saved for case: {case_id}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d415e43d-012b-45bf-91c2-55f671830b6d",
   "metadata": {},
   "source": [
    "## visualize results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e9105aa1-da83-4b26-81f0-a445240cea3c",
   "metadata": {},
   "outputs": [],
   "source": [
    "import SimpleITK as sitk\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import os\n",
    "import glob  # required for the *.nii.gz search below; was never imported\n",
    "\n",
    "# Set the paths to the directories containing the image volumes and masks\n",
    "test_path = '../AbdomenAtlasTest'\n",
    "predict_dir = '../AbdomenAtlasPredict'\n",
    "\n",
    "def read_image(image_path):\n",
    "    \"\"\"Read an image using SimpleITK.\"\"\"\n",
    "    return sitk.ReadImage(image_path)\n",
    "\n",
    "def read_mask(mask_path):\n",
    "    \"\"\"Read a mask using SimpleITK.\"\"\"\n",
    "    return sitk.ReadImage(mask_path)\n",
    "\n",
    "def visualize_slice_with_overlay(image_path, mask_path, slice_index):\n",
    "    \"\"\"Visualize a specific slice from a 3D image with its corresponding mask overlay.\"\"\"\n",
    "    image = read_image(image_path)\n",
    "    mask = read_mask(mask_path)\n",
    "\n",
    "    # Convert SimpleITK images to numpy arrays (axis order: z, y, x)\n",
    "    image_array = sitk.GetArrayFromImage(image)\n",
    "    mask_array = sitk.GetArrayFromImage(mask)\n",
    "\n",
    "    # Select the slice to visualize\n",
    "    image_slice = image_array[slice_index, :, :]\n",
    "    mask_slice = mask_array[slice_index, :, :]\n",
    "\n",
    "    # Create a figure to show the two panels side by side\n",
    "    plt.figure(figsize=(10, 5))\n",
    "\n",
    "    # Show the image (flipped in both axes for display)\n",
    "    plt.subplot(1, 2, 1)\n",
    "    plt.imshow(np.flipud(np.fliplr(image_slice)), cmap='gray')\n",
    "    plt.title('Original Image')\n",
    "    plt.axis('off')\n",
    "\n",
    "    # Show the image with the mask overlay\n",
    "    plt.subplot(1, 2, 2)\n",
    "    plt.imshow(np.flipud(np.fliplr(image_slice)), cmap='gray')\n",
    "    plt.imshow(np.flipud(np.fliplr(mask_slice)), alpha=0.4, cmap='jet')  # alpha controls the transparency\n",
    "    plt.title('Image with Mask Overlay')\n",
    "    plt.axis('off')\n",
    "\n",
    "    plt.show()\n",
    "\n",
    "# Retrieve case IDs from the directory names in the test volume dir.\n",
    "# NOTE(review): the original referenced an undefined 'test_dir' here; the\n",
    "# 'test_path' defined above is used instead -- confirm this is the intended dir.\n",
    "case_ids = [d for d in os.listdir(test_path) if os.path.isdir(os.path.join(test_path, d))]\n",
    "\n",
    "i = 0  # index of the case to visualize (the first volume)\n",
    "case_id = case_ids[i]\n",
    "case_dir = os.path.join(test_path, case_id)\n",
    "nii_files = glob.glob(os.path.join(case_dir, '*.nii.gz'))\n",
    "if len(nii_files) == 0:\n",
    "    # 'continue' is only valid inside a loop (SyntaxError at top level),\n",
    "    # so guard with if/else instead.\n",
    "    print(f\"No .nii.gz files found in {case_dir}\")\n",
    "else:\n",
    "    image_path = nii_files[0]  # Assuming there is only one .nii.gz file per case\n",
    "    mask_path = os.path.join(predict_dir, case_id, 'pred_mask.nii.gz')\n",
    "    slice_index = 200  # Example slice index\n",
    "\n",
    "    visualize_slice_with_overlay(image_path, mask_path, slice_index)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
