{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "30c278eb",
   "metadata": {},
   "source": [
    "# Which3D-源码\n",
    "\n",
    "调源码，请自强，出问题欢迎讨论【你先通过了张老师的代码考试！】"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "846159c9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Environment setup and third-party imports for the 3D segmentation pipeline.\n",
    "import os\n",
    "# Work around duplicate OpenMP runtime crashes (common with MKL + PyTorch on Windows).\n",
    "os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n",
    "from monai.utils import first, set_determinism\n",
    "from monai.transforms import (\n",
    "    AsDiscrete,\n",
    "    AsDiscreted,\n",
    "    EnsureChannelFirstd,\n",
    "    Compose,\n",
    "    CropForegroundd,\n",
    "    LoadImaged,\n",
    "    Orientationd,\n",
    "    RandCropByPosNegLabeld,\n",
    "    ScaleIntensityRanged,\n",
    "    Spacingd,\n",
    "    EnsureTyped,\n",
    "    EnsureType,\n",
    "    Invertd,\n",
    ")\n",
    "from monai.handlers.utils import from_engine\n",
    "from monai.networks.nets import UNet, SegResNet, VNet, UNETR\n",
    "from monai.networks.layers import Norm\n",
    "from monai.inferers import sliding_window_inference\n",
    "from monai.data import CacheDataset, DataLoader, Dataset, decollate_batch\n",
    "from monai.config import print_config\n",
    "from monai.apps import download_and_extract\n",
    "import torch\n",
    "import matplotlib.pyplot as plt\n",
    "import tempfile\n",
    "import shutil\n",
    "import glob\n",
    "import sys"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4b0eedd8",
   "metadata": {},
   "source": [
    "### 数据集划分\n",
    "\n",
    "默认进行随机划分，使用最后的8个作为测试集合。\n",
    "\n",
    "`seg_idx = 1`可以通过修改seg_idx的值选择训练哪个模型\n",
    "\n",
    "  1. `1`代表训练T1\n",
    "  2. `2`代表训练T2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d326fc2d",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import random\n",
    "import os\n",
    "import nibabel as nib\n",
    "import numpy as np\n",
    "import json\n",
    "from onekey_algo.custom.components.Radiology import diagnose_3d_image_mask_settings\n",
    "\n",
    "# Task name tag; arbitrary, only used in the saved model filenames.\n",
    "sel_modal = 'CLS2'\n",
    "# Root directory of the dataset (expects images/ and masks/ subfolders).\n",
    "root_dir = r'E:\\MedicalSeg\\Task10_Colon'\n",
    "model_root = os.path.join(root_dir, 'models')\n",
    "os.makedirs(model_root, exist_ok=True)\n",
    "# Patch size is tunable; powers of two are typical. Use a smaller patch for small\n",
    "# ROIs and a larger one for big ROIs. Tune together with the Spacingd resampling below.\n",
    "roi_size = (48, 48, 48)\n",
    "\n",
    "# Build the sample list: each image is paired with the mask of the same filename.\n",
    "train_files = []\n",
    "for i in os.listdir(os.path.join(root_dir, 'images')):\n",
    "    train_files.append({'image': os.path.join(root_dir, 'images', i),\n",
    "                        'label': os.path.join(root_dir, 'masks', i)})\n",
    "train_files"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5250bc86",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hold out the last 20% (after a seeded shuffle) as the validation split.\n",
    "val_ratio = 0.2\n",
    "\n",
    "random.seed(0)\n",
    "random.shuffle(train_files)\n",
    "val_idx = int(len(train_files) * (1-val_ratio))\n",
    "val_files = train_files[val_idx:]\n",
    "train_files = train_files[:val_idx]\n",
    "print(f\"一共获取到{len(train_files)}训练样本, {len(val_files)}测试样本\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "554e33ab",
   "metadata": {},
   "source": [
    "## Setup transforms for training and validation\n",
    "\n",
    "Here we use several transforms to augment the dataset:\n",
    "1. `LoadImaged` loads the spleen CT images and labels from NIfTI format files.\n",
    "1. `EnsureChannelFirstd` as the original data doesn't have channel dim, add 1 dim to construct \"channel first\" shape.\n",
    "1. `Orientationd` unifies the data orientation based on the affine matrix.\n",
    "1. `Spacingd` adjusts the spacing by `pixdim=(1, 1, 1)` based on the affine matrix.\n",
    "1. `ScaleIntensityRanged` extracts intensity range [-160, 240] and scales to [0, 1].\n",
    "1. `CropForegroundd` removes all zero borders to focus on the valid body area of the images and labels.\n",
    "1. `RandCropByPosNegLabeld` randomly crop patch samples from big image based on pos / neg ratio.  \n",
    "The image centers of negative samples must be in valid body area.\n",
    "1. `RandAffined` efficiently performs `rotate`, `scale`, `shear`, `translate`, etc. together based on PyTorch affine transform.\n",
    "1. `EnsureTyped` converts the numpy array to PyTorch Tensor for further steps."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "920baab6",
   "metadata": {},
   "outputs": [],
   "source": [
    "set_determinism(seed=0)\n",
    "# Training pipeline: load -> channel-first -> resample -> window -> crop -> patch sample.\n",
    "train_transforms = Compose(\n",
    "    [\n",
    "        LoadImaged(keys=[\"image\", \"label\"]),\n",
    "        EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n",
    "#         Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n",
    "        # Resample voxel spacing; 1 x 1 x 1 is the usual default.\n",
    "        Spacingd(keys=[\"image\", \"label\"], pixdim=(1, 1, 1), mode=(\"bilinear\", \"nearest\")),\n",
    "        # Intensity windowing -- the most impactful preprocessing parameters here.\n",
    "        ScaleIntensityRanged(\n",
    "            keys=[\"image\"], a_min=-160, a_max=240,\n",
    "            b_min=0.0, b_max=1.0, clip=True,\n",
    "        ),\n",
    "        CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n",
    "        # pos/neg sets the ratio of foreground (ROI) to background patches:\n",
    "        # sample more negatives when the ROI is small, closer to 1:1 when it is large.\n",
    "        RandCropByPosNegLabeld(\n",
    "            keys=[\"image\", \"label\"],\n",
    "            label_key=\"label\",\n",
    "            spatial_size=roi_size,\n",
    "            pos=1,\n",
    "            neg=3,\n",
    "            num_samples=32,\n",
    "        ),\n",
    "        EnsureTyped(keys=[\"image\", \"label\"]),\n",
    "    ]\n",
    ")\n",
    "# Validation preprocessing matches training (minus the random patch sampling).\n",
    "val_transforms = Compose(\n",
    "    [\n",
    "        LoadImaged(keys=[\"image\", \"label\"]),\n",
    "        EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n",
    "#         Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n",
    "        Spacingd(keys=[\"image\", \"label\"], pixdim=(1, 1, 1), mode=(\"bilinear\", \"nearest\")),\n",
    "        ScaleIntensityRanged(\n",
    "            keys=[\"image\"], a_min=-160, a_max=240,\n",
    "            b_min=0.0, b_max=1.0, clip=True,\n",
    "        ),\n",
    "        CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n",
    "        EnsureTyped(keys=[\"image\", \"label\"]),\n",
    "    ]\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "136419a6",
   "metadata": {},
   "source": [
    "### Dataloader\n",
    "\n",
    "检查Transform以及相应的Dataloader。\n",
    "\n",
    "Here we use CacheDataset to accelerate training and validation process, it's 10x faster than the regular Dataset.  To achieve best performance, set `cache_rate=1.0` to cache all the data, if memory is not enough, set lower value.  Users can also set `cache_num` instead of `cache_rate`, will use the minimum value of the 2 settings.  And set `num_workers` to enable multi-threads during caching.  If you want to try the regular Dataset, just change to use the commented code below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d3bf2827",
   "metadata": {},
   "outputs": [],
   "source": [
    "# With ample RAM, CacheDataset keeps all preprocessed volumes in memory (~10x faster):\n",
    "# train_ds = CacheDataset(data=train_files, transform=train_transforms, cache_rate=1, num_workers=4)\n",
    "# Regular Dataset for ordinary machines (transforms re-run on every access).\n",
    "train_ds = Dataset(data=train_files, transform=train_transforms)\n",
    "train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=8)\n",
    "\n",
    "# The validation set is small and can usually be cached in memory:\n",
    "# val_ds = CacheDataset(data=val_files, transform=val_transforms, cache_rate=1, num_workers=4)\n",
    "val_ds = Dataset(data=val_files, transform=val_transforms)\n",
    "val_loader = DataLoader(val_ds, batch_size=1, num_workers=8)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1cd95e14",
   "metadata": {},
   "source": [
    "#### 数据可视化\n",
    "报错了也无所谓~"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "99ddef87",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import nibabel as nib\n",
    "import numpy as np\n",
    "# Visual sanity check: raw image, raw label, then the transformed pair from val_ds.\n",
    "# NOTE(review): hardcodes case index 2 and axial slice 65 -- this cell fails when the\n",
    "# validation split has fewer than 3 cases or the volume has fewer than 66 slices.\n",
    "print(val_files[2]['image'])\n",
    "val_data_example = np.array(nib.load(val_files[2]['image']).dataobj)\n",
    "print(f\"image shape: {val_data_example.shape}\")\n",
    "plt.figure(\"image\", (24, 6))\n",
    "plt.imshow(val_data_example[:, :, 65].T, cmap=\"gray\")\n",
    "plt.show()\n",
    "\n",
    "# Raw label volume for the same case.\n",
    "val_data_example = np.array(nib.load(val_files[2]['label']).dataobj)\n",
    "print(f\"image shape: {val_data_example.shape}\")\n",
    "plt.figure(\"image\", (24, 6))\n",
    "plt.imshow(val_data_example[:, :, 65].T, cmap=\"gray\")\n",
    "plt.show()\n",
    "\n",
    "\n",
    "# The same case after val_transforms (channel-first tensor, resampled and windowed).\n",
    "val_data_example = val_ds[2]\n",
    "print(f\"image shape: {val_data_example['image'].shape}\")\n",
    "plt.figure(\"image\", (24, 6))\n",
    "plt.imshow(val_data_example[\"image\"][0, :, :, 65].T.detach().cpu(), cmap=\"gray\")\n",
    "plt.show()\n",
    "\n",
    "# Transformed label for the same case.\n",
    "print(f\"label shape: {val_data_example['label'].shape}\")\n",
    "plt.figure(\"label\", (24, 6))\n",
    "plt.imshow(val_data_example[\"label\"][0, :, :, 65].T.detach().cpu())\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "746817a6",
   "metadata": {},
   "outputs": [],
   "source": [
    "np.unique(val_data_example['label'])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a0004113",
   "metadata": {},
   "source": [
    "## 生成 Model, Loss, Optimizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cf5d360e",
   "metadata": {},
   "outputs": [],
   "source": [
    "from monai.metrics import DiceMetric\n",
    "from monai.losses import DiceLoss, DiceFocalLoss, DiceCELoss\n",
    "\n",
    "device = torch.device(f\"cuda:0\")\n",
    "# Model architecture name. Each model has its own hyper-parameter set; if a change\n",
    "# breaks it, consult the paper or, when necessary, the implementation source at:\n",
    "# onekey_envs\\Lib\\site-packages\\onekey_core\\models\\segmentation3d\n",
    "mtype = 'unet'\n",
    "# Number of distinct ROI regions drawn in the masks, plus 1 for background, i.e.\n",
    "# num_classes = max label value in your ROI masks + 1.\n",
    "num_classes = 2\n",
    "if mtype.lower() == 'unet':\n",
    "    # UNet\n",
    "    model = UNet(\n",
    "        spatial_dims=3,\n",
    "        in_channels=1,\n",
    "        out_channels=num_classes,\n",
    "        channels=(16, 32, 64, 128, 256),\n",
    "        strides=(2, 2, 2, 2),\n",
    "        num_res_units=2,\n",
    "        norm=Norm.BATCH,\n",
    "    ).to(device)\n",
    "elif mtype.lower() == 'segresnet':\n",
    "    # SegResNet\n",
    "    model = SegResNet(\n",
    "        blocks_down=[1, 2, 2, 4],\n",
    "        blocks_up=[1, 1, 1],\n",
    "        init_filters=16,\n",
    "        in_channels=1,\n",
    "        out_channels=num_classes,\n",
    "        dropout_prob=0.2,\n",
    "    ).to(device)\n",
    "elif mtype.lower() == 'unetr':\n",
    "    # UNETR\n",
    "    # NOTE(review): img_size=(96, 96, 96) does not match roi_size=(48, 48, 48) used for\n",
    "    # patch sampling and sliding-window inference -- confirm before selecting 'unetr'.\n",
    "    model = UNETR(\n",
    "        in_channels=1,\n",
    "        out_channels=num_classes,\n",
    "        img_size=(96, 96, 96),\n",
    "        feature_size=16,\n",
    "        hidden_size=768,\n",
    "        mlp_dim=3072,\n",
    "        num_heads=12,\n",
    "        pos_embed=\"perceptron\",\n",
    "        norm_name=\"instance\",\n",
    "        res_block=True,\n",
    "        dropout_rate=0.0,\n",
    "    ).to(device)\n",
    "elif mtype.lower() == 'vnet':\n",
    "    # VNet\n",
    "    model = VNet(spatial_dims=3, \n",
    "                 in_channels=1, \n",
    "                 out_channels=num_classes,\n",
    "                 dropout_prob=0.2, \n",
    "                 dropout_dim=3, \n",
    "                 bias=False).to(device)\n",
    "else:\n",
    "    raise ValueError(f'{mtype} not found!')\n",
    "\n",
    "print(f\"使用{mtype.upper()}进行训练！\")\n",
    "loss_function = DiceCELoss(to_onehot_y=True, softmax=True)\n",
    "optimizer = torch.optim.Adam(model.parameters(), 1e-3)\n",
    "dice_metric = DiceMetric(include_background=False, reduction=\"mean\")\n",
    "# Resume from an existing checkpoint for this model/tag, if present.\n",
    "if os.path.exists(os.path.join(model_root, f\"{mtype}_{sel_modal}.pth\")):\n",
    "    print('加载预训练模型...')\n",
    "    model.load_state_dict(torch.load(os.path.join(model_root, f\"{mtype}_{sel_modal}.pth\"), map_location=device))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "add6812c",
   "metadata": {},
   "source": [
    "### 模型训练\n",
    "\n",
    "`max_epochs`最大迭代次数，int类型，默认： 600\n",
    "\n",
    "`val_interval` 多少次训练进行一次validation，默认： 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "877e388c",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Maximum number of training epochs.\n",
    "max_epochs = 600\n",
    "# Run validation every `val_interval` epochs.\n",
    "val_interval = 2\n",
    "best_metric = -1\n",
    "best_metric_epoch = -1\n",
    "epoch_loss_values = []\n",
    "metric_values = []\n",
    "# Post-transforms for DiceMetric: argmax the logits, then one-hot pred and label.\n",
    "post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=num_classes)])\n",
    "post_label = Compose([EnsureType(), AsDiscrete(to_onehot=num_classes)])\n",
    "# Early stopping: abort when the best Dice has not improved for this many epochs.\n",
    "early_stopping_epoch = 128\n",
    "for epoch in range(max_epochs):\n",
    "    print(\"-\" * 10)\n",
    "    print(f\"epoch {epoch + 1}/{max_epochs}\")\n",
    "    model.train()\n",
    "    epoch_loss = 0\n",
    "    step = 0\n",
    "    for batch_data in train_loader:\n",
    "        step += 1\n",
    "        inputs, labels = (\n",
    "            batch_data[\"image\"].to(device),\n",
    "            batch_data[\"label\"].to(device),\n",
    "        )\n",
    "        optimizer.zero_grad()\n",
    "#         print(inputs.size())\n",
    "        outputs = model(inputs)\n",
    "        loss = loss_function(outputs, labels)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        epoch_loss += loss.item()\n",
    "        if step % 2 == 0:\n",
    "            # NOTE(review): the printed step total ignores the 32 patches per case from\n",
    "            # RandCropByPosNegLabeld, so it understates the true steps per epoch.\n",
    "            print(f\"{step}/{len(train_ds) // train_loader.batch_size}, train_loss: {loss.item():.4f}\")\n",
    "    epoch_loss /= step\n",
    "    epoch_loss_values.append(epoch_loss)\n",
    "    print(f\"epoch {epoch + 1} average loss: {epoch_loss:.4f}\")\n",
    "\n",
    "    if (epoch + 1) % val_interval == 0:\n",
    "        model.eval()\n",
    "        with torch.no_grad():\n",
    "            # Validate on full volumes via sliding-window inference over roi_size patches.\n",
    "            for val_data in val_loader:\n",
    "                val_inputs, val_labels = (\n",
    "                    val_data[\"image\"].to(device),\n",
    "                    val_data[\"label\"].to(device),\n",
    "                )\n",
    "                sw_batch_size = 4\n",
    "                val_outputs = sliding_window_inference(val_inputs, roi_size, sw_batch_size, model)\n",
    "                val_outputs = [post_pred(i) for i in decollate_batch(val_outputs)]\n",
    "                val_labels = [post_label(i) for i in decollate_batch(val_labels)]\n",
    "                # compute metric for current iteration\n",
    "                dice_metric(y_pred=val_outputs, y=val_labels)\n",
    "\n",
    "            # aggregate the final mean dice result\n",
    "            metric = dice_metric.aggregate().item()\n",
    "            # reset the status for next validation round\n",
    "            dice_metric.reset()\n",
    "\n",
    "            metric_values.append(metric)\n",
    "            # Checkpoint every validation round under a per-epoch filename, and keep the\n",
    "            # best-so-far weights under the stable {mtype}_{sel_modal}.pth filename.\n",
    "            torch.save(model.state_dict(), os.path.join(model_root, f\"{mtype}_{sel_modal}-Epoch{epoch+1}.pth\"))\n",
    "            if metric > best_metric:\n",
    "                best_metric = metric\n",
    "                best_metric_epoch = epoch + 1\n",
    "                torch.save(model.state_dict(), \n",
    "                           os.path.join(model_root, f\"{mtype}_{sel_modal}.pth\"))\n",
    "                print(\"saved new best metric model\")\n",
    "            # NOTE(review): `epoch` is 0-based while best_metric_epoch is 1-based, and the\n",
    "            # break skips the metric print below -- confirm the intended patience window.\n",
    "            if epoch - best_metric_epoch > early_stopping_epoch:\n",
    "                print(f'Early Stop @{epoch+1}')\n",
    "                break\n",
    "            print(\n",
    "                f\"current epoch: {epoch + 1} current mean dice: {metric:.4f}\"\n",
    "                f\"\\nbest mean dice: {best_metric:.4f} \"\n",
    "                f\"at epoch: {best_metric_epoch}\"\n",
    "            )\n",
    "print(f\"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1641c3fc",
   "metadata": {},
   "source": [
    "### 打印训练过程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0ff1829d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the training-loss and validation-Dice curves side by side and save as SVG.\n",
    "os.makedirs('img', exist_ok=True)\n",
    "plt.figure(\"train\", (12, 6))\n",
    "plt.subplot(1, 2, 1)\n",
    "plt.title(\"Epoch Average Loss\")\n",
    "x = [i + 1 for i in range(len(epoch_loss_values))]\n",
    "y = epoch_loss_values\n",
    "plt.xlabel(\"epoch\")\n",
    "plt.plot(x, y)\n",
    "plt.subplot(1, 2, 2)\n",
    "plt.title(\"Val Mean Dice\")\n",
    "# Validation runs every `val_interval` epochs, so scale the x axis accordingly.\n",
    "x = [val_interval * (i + 1) for i in range(len(metric_values))]\n",
    "y = metric_values\n",
    "plt.xlabel(\"epoch\")\n",
    "plt.plot(x, y)\n",
    "# Fix: the savefig keyword is `bbox_inches`; the previous `bbox_inch` was not a valid\n",
    "# parameter, so the tight-crop request had no effect.\n",
    "plt.savefig(f'img/{mtype}_{sel_modal}_train_process.svg', bbox_inches='tight')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "13b576a5",
   "metadata": {},
   "outputs": [],
   "source": [
    "from monai.transforms import KeepLargestConnectedComponentd, RemoveSmallObjectsd\n",
    "import SimpleITK as sitk\n",
    "import numpy as np\n",
    "from monai.config import KeysCollection\n",
    "from monai.transforms import MapTransform\n",
    "\n",
    "\n",
    "# Inference-time preprocessing: image only (no label exists at prediction time);\n",
    "# mirrors val_transforms so the model sees identically prepared input.\n",
    "val_t = Compose(\n",
    "    [\n",
    "        LoadImaged(keys=[\"image\"]),\n",
    "        EnsureChannelFirstd(keys=[\"image\"]),\n",
    "#         Orientationd(keys=[\"image\"], axcodes=\"RAS\"),\n",
    "        Spacingd(keys=[\"image\"], pixdim=(1, 1, 1), mode=(\"bilinear\")),\n",
    "        ScaleIntensityRanged(\n",
    "            keys=[\"image\"], a_min=-160, a_max=240,\n",
    "            b_min=0.0, b_max=1.0, clip=True,\n",
    "        ),\n",
    "        CropForegroundd(keys=[\"image\"], source_key=\"image\"),\n",
    "        EnsureTyped(keys=[\"image\"]),\n",
    "    ]\n",
    ")\n",
    "\n",
    "# Post-processing: invert the preprocessing so the prediction maps back onto the\n",
    "# original image grid, then argmax to class indices and keep the 2 largest\n",
    "# connected components.\n",
    "post_ori_t = Compose(\n",
    "    [\n",
    "        Invertd(\n",
    "            keys=\"pred\",\n",
    "            transform=val_t,\n",
    "            orig_keys=\"image\",\n",
    "            meta_keys=\"pred_meta_dict\",\n",
    "            orig_meta_keys=\"image_meta_dict\",\n",
    "            meta_key_postfix=\"meta_dict\",\n",
    "            nearest_interp=False,\n",
    "            to_tensor=True,\n",
    "            device=\"cpu\",\n",
    "        ),\n",
    "        AsDiscreted(keys=\"pred\", argmax=True),\n",
    "        KeepLargestConnectedComponentd(keys='pred', num_components=2)\n",
    "    ]\n",
    ")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "42e9dd76",
   "metadata": {},
   "source": [
    "### 预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a5e29f42",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "from glob import glob\n",
    "\n",
    "import os\n",
    "from onekey_algo import OnekeyDS\n",
    "from onekey_algo.segmentation3D.modelzoo.eval_3dsegmentation import init as init3d\n",
    "from onekey_algo.segmentation3D.modelzoo.eval_3dsegmentation import inference as inference3d\n",
    "\n",
    "# These must match the training cells above (model_root, sel_modal, num_classes, roi_size).\n",
    "model_root = r'E:/MedicalSeg/Task10_Colon/models/'\n",
    "data_dir=  r'E:\\MedicalSeg\\Task10_Colon\\imagesTs'\n",
    "sel_modal = 'CLS2'\n",
    "\n",
    "# NOTE(review): training used mtype = 'unet' (lowercase) for the checkpoint name; this\n",
    "# only resolves to the same file on case-insensitive filesystems (Windows) -- confirm.\n",
    "mtype = 'Unet'\n",
    "model_path = os.path.join(model_root, f'{mtype}_{sel_modal}.pth')\n",
    "os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n",
    "\n",
    "num_classes = 2\n",
    "roi_size = (48, 48, 48)\n",
    "m, t, d = init3d('Unet', model_path=model_path, num_classes=num_classes, roi_size=roi_size)\n",
    "data = glob(os.path.join(data_dir, '*.nii.gz'))\n",
    "m = m.to('cuda:0')\n",
    "# Run sliding-window inference per case; predictions are written next to each input.\n",
    "# NOTE(review): save_name receives a full path, not a bare filename -- verify that\n",
    "# inference3d combines save_dir and save_name as intended.\n",
    "for data_ in data:\n",
    "    inference3d([data_], m, (val_t, post_ori_t), 'cuda:0', \n",
    "                roi_size=roi_size, save_dir=os.path.dirname(data_), save_name=data_.replace('.nii.gz', '.infer.nii.gz'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "078fb447",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from monai.metrics import compute_average_surface_distance, compute_hausdorff_distance\n",
    "\n",
    "def calc_dice(p_cls, l_cls):\n",
    "    \"\"\"Dice = 2*|P inter L| / (|P| + |L|) for binary 0/1 masks; None when both masks are empty.\"\"\"\n",
    "    s = p_cls + l_cls\n",
    "    inter = len(np.where(s >= 2)[0])\n",
    "    # |P| + |L| == |union| + |intersection|\n",
    "    conv = len(np.where(s >= 1)[0]) + inter\n",
    "    try:\n",
    "        dice = 2.0 * inter / conv\n",
    "    except ZeroDivisionError:\n",
    "        print(\"conv is zeros when dice = 2.0 * inter / conv\")\n",
    "        dice = None\n",
    "    return dice\n",
    "\n",
    "def calc_iou(p_cls, l_cls):\n",
    "    \"\"\"IoU = |P inter L| / |P union L| for binary 0/1 masks; None when the union is empty.\"\"\"\n",
    "    s = p_cls + l_cls\n",
    "    inter = len(np.where(s >= 2)[0])\n",
    "    conv = len(np.where(s >= 1)[0])\n",
    "    try:\n",
    "        iou = inter / conv\n",
    "    except ZeroDivisionError:\n",
    "        # Fixed: the message previously referenced the Dice formula (copy-paste error).\n",
    "        print(\"conv is zeros when iou = inter / conv\")\n",
    "        iou = None\n",
    "    return iou\n",
    "    \n",
    "def calc_sa(p_cls, l_cls):\n",
    "    \"\"\"Segmentation accuracy: 1 - missed label voxels / label voxels; None when the label is empty.\"\"\"\n",
    "    # Label voxels the prediction missed (false negatives).\n",
    "    error = np.bitwise_xor(p_cls, l_cls) & l_cls\n",
    "    denom = np.sum(l_cls)\n",
    "    if denom == 0:\n",
    "        # numpy scalar division by zero warns and yields nan/inf instead of raising,\n",
    "        # so the original try/except could never fire; check explicitly instead.\n",
    "        print(\"SA segmentation is error!\")\n",
    "        return None\n",
    "    return 1 - np.sum(error) / denom\n",
    "\n",
    "def calc_os(p_cls, l_cls):\n",
    "    \"\"\"Over-segmentation rate: false-positive voxels / (|L| + |P|); None when both masks are empty.\"\"\"\n",
    "    # Predicted voxels outside the label (false positives).\n",
    "    error = np.bitwise_xor(p_cls, l_cls) & p_cls\n",
    "    denom = np.sum(l_cls) + np.sum(p_cls)\n",
    "    if denom == 0:\n",
    "        print(\"Over segmentation is error!\")\n",
    "        return None\n",
    "    return np.sum(error) / denom\n",
    "\n",
    "def calc_us(p_cls, l_cls):\n",
    "    \"\"\"Under-segmentation rate: false negatives / (|L| + false positives); None when that sum is zero.\"\"\"\n",
    "    # Label voxels the prediction missed (false negatives).\n",
    "    error = np.bitwise_xor(p_cls & l_cls, l_cls)\n",
    "    denom = np.sum(l_cls) + np.sum(np.bitwise_xor(p_cls, l_cls) & p_cls)\n",
    "    if denom == 0:\n",
    "        print(\"Under segmentation is error!\")\n",
    "        return None\n",
    "    return np.sum(error) / denom\n",
    "\n",
    "def calc_asd(p_cls, l_cls):\n",
    "    \"\"\"Average surface distance between the masks (MONAI; batch/channel dims prepended).\"\"\"\n",
    "    asd = compute_average_surface_distance(p_cls[np.newaxis, np.newaxis, :], l_cls[np.newaxis, np.newaxis, :])\n",
    "    return float(asd)\n",
    "\n",
    "def calc_hausdorff_distance(p_cls, l_cls): \n",
    "    \"\"\"Hausdorff distance between the masks (MONAI; batch/channel dims prepended).\"\"\"\n",
    "    hd = compute_hausdorff_distance(p_cls[np.newaxis, np.newaxis, :], l_cls[np.newaxis, np.newaxis, :])\n",
    "    return float(hd)\n",
    "\n",
    "def seg_eval(pred, label, clss=[0, 1]):\n",
    "    \"\"\"\n",
    "    Calculate per-class segmentation metrics between prediction and ground truth.\n",
    "    input:\n",
    "        pred: predicted mask (3D array of class indices)\n",
    "        label: ground truth mask, same shape as pred\n",
    "        clss: class indices to evaluate, e.g. [0, 1] for binary class\n",
    "    returns one [Dice, IoU, SA, OS, US, ASD, HD] list per class in clss.\n",
    "    \"\"\"\n",
    "    Ncls = len(clss)\n",
    "    eval_matric = [None] * Ncls\n",
    "    [depth, height, width] = pred.shape\n",
    "    for idx, cls in enumerate(clss):\n",
    "        # Binarize each class into its own 0/1 map before computing metrics.\n",
    "        pred_cls = np.zeros([depth, height, width], dtype=np.uint8)\n",
    "        pred_cls[np.where(pred == cls)] = 1\n",
    "        label_cls = np.zeros([depth, height, width], dtype=np.uint8)\n",
    "        label_cls[np.where(label == cls)] = 1\n",
    "\n",
    "        metric = [calc_dice(pred_cls, label_cls), calc_iou(pred_cls, label_cls), \n",
    "                  calc_sa(pred_cls, label_cls), calc_os(pred_cls, label_cls), calc_us(pred_cls, label_cls), \n",
    "                  calc_asd(pred_cls, label_cls), calc_hausdorff_distance(pred_cls, label_cls)]\n",
    "        eval_matric[idx] = metric\n",
    "\n",
    "    return eval_matric"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "eb98a001",
   "metadata": {},
   "source": [
    "### 后处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "95684e37",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "import SimpleITK as sitk\n",
    "import os\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "# mtype='unetr'\n",
    "# Evaluate raw vs. post-processed predictions against the ground-truth masks.\n",
    "metric_names = ['Dice', 'mIOU', 'SA', 'OS', 'US', 'ASD', 'HD']\n",
    "data = [vf['image'] for vf in val_files] + [vf['image'] for vf in train_files]\n",
    "all_metrics = []\n",
    "for mtype in ['Unet_']:\n",
    "    metrics = []\n",
    "    for data_ in data:\n",
    "        # Expects <name>.infer.nii.gz (prediction) and <name>.mask.nii.gz (ground\n",
    "        # truth) to live next to each image file.\n",
    "        mask_path = data_.replace('.nii.gz', '.infer.nii.gz')\n",
    "        gt_mask = sitk.GetArrayFromImage(sitk.ReadImage(data_.replace('.nii.gz', '.mask.nii.gz')))\n",
    "        itk_mask = sitk.ReadImage(mask_path)\n",
    "        # Label connected components of the predicted mask.\n",
    "        cc_filter = sitk.ConnectedComponentImageFilter()\n",
    "        cc_filter.SetFullyConnected(True)\n",
    "        omask_arry = sitk.GetArrayFromImage(cc_filter.Execute(itk_mask))\n",
    "        unique_labels = np.unique(omask_arry)\n",
    "        # Count voxels per component label (includes label 0).\n",
    "        mask_label_voxels = {}\n",
    "        for ul in unique_labels:\n",
    "            mask_label_voxels[ul] = np.sum(omask_arry == ul)\n",
    "        mask_label_voxels = sorted(mask_label_voxels.items(), key=lambda x: x[1], reverse=True)\n",
    "        # Keep the 2nd and 3rd largest regions -- assumes the largest region (idx 0)\n",
    "        # is background label 0; TODO confirm this holds for very large foregrounds.\n",
    "        mask_postprocess = np.zeros_like(omask_arry)\n",
    "        for idx, (ul, cnt) in enumerate(mask_label_voxels):\n",
    "            if 0 < idx < 3:\n",
    "                mask_postprocess[omask_arry == ul] = 1\n",
    "        print(mask_path, mask_label_voxels[:4])\n",
    "        itk_mask = sitk.GetImageFromArray(mask_postprocess)\n",
    "        sitk.WriteImage(itk_mask, data_.replace('.nii.gz', '.post.nii.gz'))\n",
    "        metrics.append([seg_eval(sitk.GetArrayFromImage(sitk.ReadImage(mask_path)), gt_mask), seg_eval(mask_postprocess, gt_mask)])\n",
    "#     print(metrics[-1])\n",
    "    # Average the class-1 (foreground) metrics over all cases: rows = origin/post.\n",
    "    mm = pd.DataFrame(np.mean(np.array(metrics)[:, :, 1, ...], axis=0), columns= metric_names, index=['Origin', 'PostProcess'])\n",
    "    mm['ModelName'] = mtype[:-1]\n",
    "    all_metrics.append(mm)\n",
    "    \n",
    "pd.concat(all_metrics, axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "722dd88f",
   "metadata": {},
   "outputs": [],
   "source": [
    "pd.DataFrame(np.mean(np.array(metrics)[:, :, 1, ...], axis=0), columns= metric_names, index=['Origin', 'Post'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fb827ea5",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
