{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from lightning_model import SegmentationModel \n",
    "from datasets import SACropTypeDataModule\n",
    "from lightning.pytorch.utilities import model_summary\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "import matplotlib.pyplot as plt\n",
    "from torchcam.utils import overlay_mask\n",
    "from torchvision.transforms.functional import to_pil_image\n",
    "from torchcam.methods import GradCAM\n",
    "from comet_ml import Experiment\n",
    "from torchmetrics import JaccardIndex, Precision, Recall, F1Score\n",
    "from torchmetrics.wrappers import ClasswiseWrapper\n",
    "from skimage.filters import threshold_otsu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Comet experiment used below for logging figures/images/metrics.\n",
    "# NOTE(review): avoid hardcoding credentials -- read them from the\n",
    "# environment, falling back to the original placeholders for local edits.\n",
    "experiment = Experiment(\n",
    "    api_key=os.environ.get(\"COMET_API_KEY\", \"your_api_key\"),\n",
    "    project_name=os.environ.get(\"COMET_PROJECT_NAME\", \"your_project_name\"),\n",
    "    workspace=os.environ.get(\"COMET_WORKSPACE\", \"your_workspace\"),\n",
    ")\n",
    "\n",
    "# NOTE(review): torch is imported after comet_ml on purpose -- comet_ml\n",
    "# generally needs to be imported first for auto-logging; keeping order as-is.\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Paths to the trained model checkpoints (placeholders -- fill in locally).\n",
    "CHECKPOINT_UKAN = \"your_checkpoint_path\"\n",
    "CHECKPOINT_UNET = \"your_checkpoint_path\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the trained U-KAN on CPU, print a layer summary, and switch to\n",
    "# eval mode (inference-time behavior for dropout/batch-norm layers).\n",
    "model_ukan = SegmentationModel.load_from_checkpoint(CHECKPOINT_UKAN, map_location=\"cpu\")\n",
    "model_summary.summarize(model_ukan)\n",
    "model_ukan.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the trained U-Net on CPU, print a layer summary, and switch to\n",
    "# eval mode (inference-time behavior for dropout/batch-norm layers).\n",
    "model_unet = SegmentationModel.load_from_checkpoint(CHECKPOINT_UNET, map_location=\"cpu\")\n",
    "model_summary.summarize(model_unet)\n",
    "model_unet.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data module for the SA crop-type dataset; binarize=True collapses the\n",
    "# labels to a 2-class task. Only the test split is prepared here.\n",
    "dm = SACropTypeDataModule(num_workers=0, binarize=True, batch_size=16, path=\"your_data_path\")\n",
    "dm.setup('test')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# U-KAN and U-Net evaluation: Grad-CAM sufficiency, plausibility, and channel relevance"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def compute_Iou_cam(activation_map, y):\n",
    "    \"\"\"Mean IoU between a (thresholded) activation map and a target mask.\n",
    "\n",
    "    Both inputs are cast to bool; IoU is computed per image over the\n",
    "    spatial dims (H, W) and averaged over the batch.\n",
    "\n",
    "    Bug fix: an image where both masks are empty (union == 0) used to\n",
    "    produce 0/0 = NaN and poison the batch mean; identical empty masks\n",
    "    are now scored as a perfect match (IoU = 1.0).\n",
    "    \"\"\"\n",
    "    preds = activation_map.bool()\n",
    "    targets = y.bool()\n",
    "\n",
    "    # Per-image intersection / union over height and width.\n",
    "    intersection = (preds & targets).float().sum(dim=(1, 2))\n",
    "    union = (preds | targets).float().sum(dim=(1, 2))\n",
    "\n",
    "    # Guard the 0/0 case; clamp keeps the unselected branch NaN-free.\n",
    "    iou = torch.where(union > 0, intersection / union.clamp(min=1), torch.ones_like(union))\n",
    "\n",
    "    # Mean IoU over the batch, as a Python float.\n",
    "    return iou.mean().item()\n",
    "\n",
    "def _classwise_metric(metric_cls, y_hat, y):\n",
    "    \"\"\"Compute a torchmetrics classwise summary for the binary (2-class) task.\n",
    "\n",
    "    metric_cls: one of JaccardIndex / Precision / Recall / F1Score.\n",
    "    Returns the dict produced by ClasswiseWrapper.compute(), mapping\n",
    "    per-class names (e.g. 'multiclassjaccardindex_1') to scalar tensors.\n",
    "    \"\"\"\n",
    "    metric = ClasswiseWrapper(\n",
    "        metric_cls(task=\"multiclass\", num_classes=2, average=\"none\")\n",
    "    )\n",
    "    metric.update(y_hat, y)\n",
    "    return metric.compute()\n",
    "\n",
    "def IoU_images(y_hat, y):\n",
    "    \"\"\"Classwise Jaccard index (IoU) of predictions vs. targets.\"\"\"\n",
    "    return _classwise_metric(JaccardIndex, y_hat, y)\n",
    "\n",
    "def Precision_images(y_hat, y):\n",
    "    \"\"\"Classwise precision of predictions vs. targets.\"\"\"\n",
    "    return _classwise_metric(Precision, y_hat, y)\n",
    "\n",
    "def Recall_images(y_hat, y):\n",
    "    \"\"\"Classwise recall of predictions vs. targets.\"\"\"\n",
    "    return _classwise_metric(Recall, y_hat, y)\n",
    "\n",
    "def F1Score_images(y_hat, y):\n",
    "    \"\"\"Classwise F1 score of predictions vs. targets.\"\"\"\n",
    "    return _classwise_metric(F1Score, y_hat, y)\n",
    "\n",
    "def get_grad_cam(model, target_layer, x):\n",
    "    \"\"\"Extract a Grad-CAM activation map for class index 1.\n",
    "\n",
    "    Runs the forward pass inside the GradCAM context so the extractor can\n",
    "    hook the target layer, then resolves the CAM from the model scores.\n",
    "    \"\"\"\n",
    "    CLASS_TO_EXPLAIN = 1  # index of the class being explained\n",
    "\n",
    "    with GradCAM(model, target_layer) as cam_extractor:\n",
    "        scores = model(x)\n",
    "        cam = cam_extractor(class_idx=CLASS_TO_EXPLAIN, scores=scores)\n",
    "    return cam\n",
    "\n",
    "\n",
    "def plot_raw_cam(activation_map):\n",
    "    \"\"\"Display the first raw CAM heatmap of the batch (no image overlay).\"\"\"\n",
    "    plt.imshow(activation_map[0][0].numpy())\n",
    "    plt.axis('off')\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "def image_transf(x, rgb=True, normalize=True):\n",
    "    \"\"\"Prepare a multispectral batch for display.\n",
    "\n",
    "    rgb: select channels (3, 2, 1) as an RGB view of the batch\n",
    "         (assumes that band ordering -- TODO confirm for this dataset).\n",
    "    normalize: min-max scale to [0, 1]. Bug fix: a constant input now maps\n",
    "    to all zeros instead of dividing by zero and producing NaNs.\n",
    "    \"\"\"\n",
    "    out = x\n",
    "    if rgb:\n",
    "        out = out[:, (3, 2, 1), :, :]\n",
    "    if normalize:\n",
    "        span = out.max() - out.min()\n",
    "        if span > 0:\n",
    "            out = (out - out.min()) / span\n",
    "        else:\n",
    "            out = out - out.min()  # constant image -> all zeros\n",
    "    return out\n",
    "\n",
    "def plot_overlay_cam(activation_map, x, idx=0):\n",
    "    \"\"\"Blend one CAM onto the matching RGB view of the input batch.\"\"\"\n",
    "    rgb_batch = image_transf(x)\n",
    "    cam_img = to_pil_image(activation_map[idx].squeeze(0), mode='F')\n",
    "    blended = overlay_mask(to_pil_image(rgb_batch[idx]), cam_img, alpha=0.5)\n",
    "    return Image.fromarray(np.array(blended))\n",
    "\n",
    "def calculate_threshold(activation_map):\n",
    "    \"\"\"Binarize each CAM in a batch with a per-image Otsu threshold.\n",
    "\n",
    "    Returns a stacked bool tensor of masks (True where CAM > threshold).\n",
    "    \"\"\"\n",
    "    masks = []\n",
    "    for cam in activation_map:  # renamed from `map` -- avoid shadowing the builtin\n",
    "        cam_np = cam.clone().detach().cpu().numpy()\n",
    "        # Otsu picks the cut that best separates the two intensity modes.\n",
    "        thr = threshold_otsu(cam_np)\n",
    "        masks.append(torch.tensor(cam_np > thr))\n",
    "    return torch.stack(masks)\n",
    "\n",
    "def apply_mask_original_img(x, mask):\n",
    "    \"\"\"Mask every channel of each image with its per-image spatial mask.\"\"\"\n",
    "    # Per-image multiply; a (H, W) mask broadcasts across the channel axis.\n",
    "    masked = [img * m for img, m in zip(x, mask)]\n",
    "    return torch.stack(masked)\n",
    "\n",
    "\n",
    "def write_metric_results(file, results, type='default'):\n",
    "    \"\"\"Append metric results to an open file handle.\n",
    "\n",
    "    type='default': `results` is a dict with 'iou', 'precision', 'recall'\n",
    "    and 'f1_score' lists; each list is written under its section header.\n",
    "    Any other type: `results` is a tensor of per-channel saliency scores.\n",
    "    \"\"\"\n",
    "    if type == 'default':\n",
    "        sections = [('iou', 'IOU'), ('precision', 'Precision'),\n",
    "                    ('recall', 'Recall'), ('f1_score', 'F1 Score')]\n",
    "        for key, header in sections:\n",
    "            file.write(f\"{header}\\n\")\n",
    "            for el in results[key]:\n",
    "                file.write(f\"{el}\\n\")\n",
    "    else:\n",
    "        file.write(\"Average Saliency per channel, per image, per batch\\n\")\n",
    "        for el in results:\n",
    "            file.write(f\"{el.item()}\\n\")\n",
    "\n",
    "def calculate_sufficiency(model, target_layer, x, y, threshold=None):\n",
    "    \"\"\"Sufficiency: mask the input to its salient region and re-predict.\n",
    "\n",
    "    Grad-CAM is computed for `model`, Otsu-binarized into a mask, the mask\n",
    "    is applied to the input, and the masked input is re-scored with the\n",
    "    SAME model. Returns classwise IoU/precision/recall/F1 summaries.\n",
    "\n",
    "    threshold: accepted for backward compatibility with existing callers;\n",
    "    the binarization cut is always chosen per image by Otsu's method.\n",
    "    \"\"\"\n",
    "    activation_map = get_grad_cam(model, [target_layer], x)\n",
    "\n",
    "    # Binarize the CAMs (per-image Otsu threshold).\n",
    "    thresholded_tensor = calculate_threshold(activation_map[0])\n",
    "\n",
    "    # Keep only the salient pixels of the original image.\n",
    "    masked_original_imgs = apply_mask_original_img(x, thresholded_tensor)\n",
    "\n",
    "    # Re-predict with the model under evaluation. Bug fix: the original\n",
    "    # always used the global `model_ukan` here, even for the U-Net.\n",
    "    y_hat_masked = model(masked_original_imgs)\n",
    "\n",
    "    return {\n",
    "        'iou': IoU_images(y_hat_masked, y),\n",
    "        'precision': Precision_images(y_hat_masked, y),\n",
    "        'recall': Recall_images(y_hat_masked, y),\n",
    "        'f1_score': F1Score_images(y_hat_masked, y),\n",
    "    }\n",
    "\n",
    "def calculate_plausibility(model, target_layer, x, y):\n",
    "    \"\"\"Plausibility: compare the binarized CAM directly to the ground truth.\n",
    "\n",
    "    Returns classwise IoU/precision/recall/F1 summaries of the thresholded\n",
    "    activation map against the target mask `y`.\n",
    "    \"\"\"\n",
    "    activation_map = get_grad_cam(model, [target_layer], x)\n",
    "\n",
    "    # Per-image Otsu binarization of the CAMs.\n",
    "    cam_mask = calculate_threshold(activation_map[0])\n",
    "\n",
    "    return {\n",
    "        'iou': IoU_images(cam_mask, y),\n",
    "        'precision': Precision_images(cam_mask, y),\n",
    "        'recall': Recall_images(cam_mask, y),\n",
    "        'f1_score': F1Score_images(cam_mask, y),\n",
    "    }\n",
    "\n",
    "    \n",
    "def plot_predictions(x, idx, y, y_hat, i, name=\"ukan\"):\n",
    "    \"\"\"Log one prediction figure to Comet.\n",
    "\n",
    "    name: figure-name prefix. Defaults to 'ukan' for backward compatibility;\n",
    "    pass e.g. 'unet' so the two models' figures do not share a name (the\n",
    "    original hardcoded 'ukan' for both models).\n",
    "    \"\"\"\n",
    "    fig, _ = dm.dataset_test.plot({\"image\": x[idx], \"mask\": y[idx], \"prediction\": y_hat[idx, 1].detach().numpy()})\n",
    "    experiment.log_figure(figure=fig, figure_name=f\"{name}_{i}\")\n",
    "\n",
    "def save_overlay_cam(name, model, target_layer, activation_map, x, idx=0):\n",
    "    # Log an overlay of the model's Grad-CAM on the input image to Comet.\n",
    "    # NOTE(review): the `activation_map` argument is ignored -- it is\n",
    "    # recomputed from `model`/`target_layer` on the next line; callers can\n",
    "    # pass None. Also, the call sites below pass a model NAME (string) as\n",
    "    # `model` and omit `x`; they should pass the model object -- TODO fix.\n",
    "    # NOTE(review): other call sites index the extractor result with [0]\n",
    "    # before plotting; here the raw list is passed -- verify idx semantics.\n",
    "    activation_map = get_grad_cam(model, [target_layer], x)\n",
    "    cam = plot_overlay_cam(activation_map, x, idx=idx)\n",
    "    experiment.log_image(cam, name=f\"{name}_cam_{idx}\")\n",
    "\n",
    "def compute_saliency_per_channel(x, activation_map_original, n, model=None, target_layer=None, tag=\"unet\"):\n",
    "    \"\"\"Channel relevance: zero one input channel at a time and measure the\n",
    "    IoU between the resulting Grad-CAM and the unmasked reference CAM.\n",
    "\n",
    "    x: input batch with 12 channels. activation_map_original: per-image\n",
    "    reference CAMs. n: batch index (used only in the logged image name).\n",
    "    model / target_layer: model under test; default to the global U-KAN and\n",
    "    its final layer for backward compatibility (the original hardcoded\n",
    "    U-KAN even when analysing the U-Net -- pass the U-Net explicitly).\n",
    "    tag: label in the logged image name; defaults to 'unet' to keep the\n",
    "    original logging names unchanged.\n",
    "\n",
    "    Returns a (12, len(x)) tensor of IoU scores (NaN replaced by 0).\n",
    "    \"\"\"\n",
    "    if model is None:\n",
    "        model = model_ukan\n",
    "    if target_layer is None:\n",
    "        target_layer = model_ukan.model.final\n",
    "\n",
    "    running_saliency = torch.empty(12, len(x))\n",
    "\n",
    "    for num_img, img in enumerate(x):\n",
    "        act_map = activation_map_original[num_img]\n",
    "        for c in range(12):\n",
    "            image_to_mask = img.clone()\n",
    "            image_to_mask[c, :, :] = 0  # mask one channel at a time\n",
    "\n",
    "            # Recompute the saliency map with the masked image.\n",
    "            masked_cam = get_grad_cam(model, [target_layer], image_to_mask.unsqueeze(0))\n",
    "            # Bug fix: overlay onto the image actually being explained\n",
    "            # (the original always overlaid onto x[0]).\n",
    "            overlay_cam = plot_overlay_cam(masked_cam[0], image_to_mask.unsqueeze(0), idx=0)\n",
    "\n",
    "            res = compute_Iou_cam(masked_cam[0], act_map)\n",
    "            # Bug fix: `res == float('nan')` is always False; NaN != NaN.\n",
    "            if res != res:\n",
    "                res = 0.0\n",
    "            running_saliency[c, num_img] = res\n",
    "\n",
    "            experiment.log_image(overlay_cam, name=f\"img_{num_img}_{tag}_channel_masked_{c}_batch_{n}\")\n",
    "\n",
    "    return running_saliency"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Open the per-model result files; they stay open across the evaluation\n",
    "# loop in the next cell and must be closed once all results are written.\n",
    "model_name_ukan = 'UKAN'\n",
    "task = 'channel_relevance'\n",
    "f_ukan = open(f\"results_{model_name_ukan}_{task}.csv\", \"w\")\n",
    "model_name_unet = 'UNET'\n",
    "f_unet = open(f\"results_{model_name_unet}_{task}.csv\", \"w\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ================ EVALUATION LOOP =================\n",
    "idx = 0  # index of the image within each batch to visualize\n",
    "target_layer_ukan = model_ukan.model.final  # layer explained for U-KAN\n",
    "target_layer_unet = model_unet.model.up4  # layer explained for U-Net\n",
    "\n",
    "# Per-batch metric accumulators (class 1 = positive/crop class).\n",
    "iou_ukan_suff, precision_ukan_suff, recall_ukan_suff, f1_score_ukan_suff = [], [], [], []\n",
    "iou_unet_suff, precision_unet_suff, recall_unet_suff, f1_score_unet_suff = [], [], [], []\n",
    "iou_ukan_plaus, precision_ukan_plaus, recall_ukan_plaus, f1_score_ukan_plaus = [], [], [], []\n",
    "iou_unet_plaus, precision_unet_plaus, recall_unet_plaus, f1_score_unet_plaus = [], [], [], []\n",
    "\n",
    "saliency_ukan = []\n",
    "saliency_unet = []\n",
    "\n",
    "for i, batch in enumerate(dm.test_dataloader()):\n",
    "\n",
    "    x, y = batch\n",
    "    y_hat = model_ukan(x)\n",
    "    y_hat_unet = model_unet(x)\n",
    "\n",
    "    # ================ COMPUTE GRAD CAM =================\n",
    "    activation_map_ukan = get_grad_cam(model_ukan, [target_layer_ukan], x)\n",
    "    activation_map_unet = get_grad_cam(model_unet, [target_layer_unet], x)\n",
    "\n",
    "    # ================ SALIENCY PER CHANNEL =================\n",
    "    # NOTE(review): compute_saliency_per_channel internally defaults to the\n",
    "    # U-KAN model; the second call should pass the U-Net explicitly if the\n",
    "    # function supports it -- verify.\n",
    "    saliency_ukan.append(compute_saliency_per_channel(x, activation_map_ukan[0], i))\n",
    "    saliency_unet.append(compute_saliency_per_channel(x, activation_map_unet[0], i))\n",
    "\n",
    "    # ================ IOU (class 1) =================\n",
    "    iou_ukan = IoU_images(y_hat, y)\n",
    "    f_ukan.write(f\"{iou_ukan['multiclassjaccardindex_1'].detach()}\\n\")\n",
    "\n",
    "    iou_unet = IoU_images(y_hat_unet, y)\n",
    "    f_unet.write(f\"{iou_unet['multiclassjaccardindex_1'].detach()}\\n\")\n",
    "\n",
    "    # ================ PLOT PREDICTIONS =================\n",
    "    plot_predictions(x, idx, y, y_hat, i)\n",
    "    plot_predictions(x, idx, y, y_hat_unet, i)\n",
    "\n",
    "    # ================ SUFFICIENCY =================\n",
    "    # Bug fix: calculate_sufficiency takes no required `threshold` argument\n",
    "    # (the cut is chosen per image by Otsu's method); the old calls raised\n",
    "    # a TypeError.\n",
    "    dict_sufficiency_metrics = calculate_sufficiency(model_ukan, target_layer_ukan, x, y)\n",
    "    iou_ukan_suff.append(dict_sufficiency_metrics['iou']['multiclassjaccardindex_1'].item())\n",
    "    precision_ukan_suff.append(dict_sufficiency_metrics['precision']['multiclassprecision_1'].item())\n",
    "    recall_ukan_suff.append(dict_sufficiency_metrics['recall']['multiclassrecall_1'].item())\n",
    "    f1_score_ukan_suff.append(dict_sufficiency_metrics['f1_score']['multiclassf1score_1'].item())\n",
    "\n",
    "    dict_sufficiency_metrics = calculate_sufficiency(model_unet, target_layer_unet, x, y)\n",
    "    iou_unet_suff.append(dict_sufficiency_metrics['iou']['multiclassjaccardindex_1'].item())\n",
    "    precision_unet_suff.append(dict_sufficiency_metrics['precision']['multiclassprecision_1'].item())\n",
    "    recall_unet_suff.append(dict_sufficiency_metrics['recall']['multiclassrecall_1'].item())\n",
    "    f1_score_unet_suff.append(dict_sufficiency_metrics['f1_score']['multiclassf1score_1'].item())\n",
    "\n",
    "    # ================ PLAUSIBILITY =================\n",
    "    dict_plausibility_metrics = calculate_plausibility(model_ukan, target_layer_ukan, x, y)\n",
    "    iou_ukan_plaus.append(dict_plausibility_metrics['iou']['multiclassjaccardindex_1'].item())\n",
    "    precision_ukan_plaus.append(dict_plausibility_metrics['precision']['multiclassprecision_1'].item())\n",
    "    recall_ukan_plaus.append(dict_plausibility_metrics['recall']['multiclassrecall_1'].item())\n",
    "    f1_score_ukan_plaus.append(dict_plausibility_metrics['f1_score']['multiclassf1score_1'].item())\n",
    "\n",
    "    dict_plausibility_metrics = calculate_plausibility(model_unet, target_layer_unet, x, y)\n",
    "    iou_unet_plaus.append(dict_plausibility_metrics['iou']['multiclassjaccardindex_1'].item())\n",
    "    precision_unet_plaus.append(dict_plausibility_metrics['precision']['multiclassprecision_1'].item())\n",
    "    recall_unet_plaus.append(dict_plausibility_metrics['recall']['multiclassrecall_1'].item())\n",
    "    f1_score_unet_plaus.append(dict_plausibility_metrics['f1_score']['multiclassf1score_1'].item())\n",
    "\n",
    "    # ================ SAVE THE OVERLAY CAM =================\n",
    "    # Bug fix: the old calls passed the model NAME strings as the model and\n",
    "    # omitted the required `x` argument (TypeError); pass the model objects\n",
    "    # and every positional argument.\n",
    "    save_overlay_cam(model_name_ukan, model_ukan, target_layer_ukan, activation_map_ukan, x, idx=idx)\n",
    "    save_overlay_cam(model_name_unet, model_unet, target_layer_unet, activation_map_unet, x, idx=idx)\n",
    "\n",
    "# Concatenate the per-batch channel-saliency scores along the image axis.\n",
    "final_salience_ukan = torch.cat(saliency_ukan, dim=-1)\n",
    "final_salience_unet = torch.cat(saliency_unet, dim=-1)\n",
    "\n",
    "# Write sufficiency then plausibility results.\n",
    "write_metric_results(f_ukan, results={'iou': iou_ukan_suff, 'precision': precision_ukan_suff, 'recall': recall_ukan_suff, 'f1_score': f1_score_ukan_suff})\n",
    "write_metric_results(f_unet, results={'iou': iou_unet_suff, 'precision': precision_unet_suff, 'recall': recall_unet_suff, 'f1_score': f1_score_unet_suff})\n",
    "\n",
    "write_metric_results(f_ukan, results={'iou': iou_ukan_plaus, 'precision': precision_ukan_plaus, 'recall': recall_ukan_plaus, 'f1_score': f1_score_ukan_plaus})\n",
    "write_metric_results(f_unet, results={'iou': iou_unet_plaus, 'precision': precision_unet_plaus, 'recall': recall_unet_plaus, 'f1_score': f1_score_unet_plaus})\n",
    "\n",
    "# Mean saliency per channel across all images.\n",
    "mean_salience_ukan = final_salience_ukan.mean(dim=-1)\n",
    "mean_salience_unet = final_salience_unet.mean(dim=-1)\n",
    "\n",
    "write_metric_results(f_ukan, results=mean_salience_ukan, type='saliency')\n",
    "write_metric_results(f_unet, results=mean_salience_unet, type='saliency')\n",
    "\n",
    "# Bug fix: close the result files opened in the earlier cell (they were\n",
    "# never closed, risking unflushed output).\n",
    "f_ukan.close()\n",
    "f_unet.close()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
