{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 3D Segmentation with UNet\n",
    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Project-MONAI/tutorials/blob/main/3d_segmentation/unet_segmentation_3d_ignite.ipynb)\n",
    "\n",
    "## Setup environment\n",
    "- Step 1: 创建新环境，`conda create --name monai_env python=3.10`\n",
    "- Step 2: 安装torch gpu，`conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia`（请根据电脑的CUDA版本在PyTorch官网获取相应的命令）；若不使用GPU版本的PyTorch则直接到Step 3\n",
    "- Step 3: 安装Monai, `pip install \"monai-weekly[ignite, nibabel, tensorboard, mlflow]\"`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "MONAI version: 1.4.dev2426\n",
      "Numpy version: 1.26.4\n",
      "Pytorch version: 2.3.1+cu121\n",
      "MONAI flags: HAS_EXT = False, USE_COMPILED = False, USE_META_DICT = False\n",
      "MONAI rev id: d622a16f927841fdd7d057b7553805405f0805e4\n",
      "MONAI __file__: /home/<username>/anaconda3/envs/monai/lib/python3.9/site-packages/monai/__init__.py\n",
      "\n",
      "Optional dependencies:\n",
      "Pytorch Ignite version: 0.4.11\n",
      "ITK version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "Nibabel version: 5.2.1\n",
      "scikit-image version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "scipy version: 1.13.1\n",
      "Pillow version: 10.4.0\n",
      "Tensorboard version: 2.17.0\n",
      "gdown version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "TorchVision version: 0.18.1+cu121\n",
      "tqdm version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "lmdb version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "psutil version: 6.0.0\n",
      "pandas version: 2.2.2\n",
      "einops version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "transformers version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "mlflow version: 2.14.2\n",
      "pynrrd version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "clearml version: NOT INSTALLED or UNKNOWN VERSION.\n",
      "\n",
      "For details about installing the optional dependencies, please visit:\n",
      "    https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import glob\n",
    "import logging\n",
    "import os\n",
    "from pathlib import Path\n",
    "import shutil\n",
    "import sys\n",
    "import tempfile\n",
    "\n",
    "import nibabel as nib\n",
    "import numpy as np\n",
    "from monai.config import print_config\n",
    "from monai.data import ArrayDataset, create_test_image_3d, decollate_batch, DataLoader\n",
    "from monai.handlers import (\n",
    "    MeanDice,\n",
    "    MLFlowHandler,\n",
    "    StatsHandler,\n",
    "    TensorBoardImageHandler,\n",
    "    TensorBoardStatsHandler,\n",
    ")\n",
    "from monai.losses import DiceLoss\n",
    "from monai.networks.nets import UNet\n",
    "from monai.transforms import (\n",
    "    Activations,\n",
    "    EnsureChannelFirst,\n",
    "    AsDiscrete,\n",
    "    Compose,\n",
    "    LoadImage,\n",
    "    RandSpatialCrop,\n",
    "    Resize,\n",
    "    ScaleIntensity,\n",
    ")\n",
    "from monai.utils import first\n",
    "\n",
    "import ignite\n",
    "import torch\n",
    "\n",
    "print_config()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Setup data directory\n",
    "\n",
    "You can specify a directory with the `MONAI_DATA_DIRECTORY` environment variable.  \n",
    "This allows you to save results and reuse downloads.  \n",
    "If not specified a temporary directory will be used."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/home/chyang/Documents/monai_tutorials_gitee/data\n"
     ]
    }
   ],
   "source": [
    "# Read the data directory from the MONAI_DATA_DIRECTORY environment variable;\n",
    "# if it is unset, fall back to a fresh temporary directory (as the markdown\n",
    "# above promises). Do NOT hardcode a machine-specific absolute path here.\n",
    "directory = os.environ.get(\"MONAI_DATA_DIRECTORY\")\n",
    "if directory is not None:\n",
    "    os.makedirs(directory, exist_ok=True)\n",
    "root_dir = tempfile.mkdtemp() if directory is None else directory\n",
    "print(root_dir)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Setup logging"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Send INFO-level log records (e.g. from the ignite engine and MONAI\n",
    "# handlers) to stdout so they appear in the notebook cell output.\n",
    "logging.basicConfig(stream=sys.stdout, level=logging.INFO)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Setup demo data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate 40 synthetic 3D image/segmentation pairs and save them as NIfTI\n",
    "# files under root_dir (re-running overwrites the same files).\n",
    "for i in range(40):\n",
    "    im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)\n",
    "\n",
    "    # identity affine: voxel indices map directly to world coordinates\n",
    "    n = nib.Nifti1Image(im, np.eye(4))\n",
    "    nib.save(n, os.path.join(root_dir, f\"im{i}.nii.gz\"))\n",
    "\n",
    "    n = nib.Nifti1Image(seg, np.eye(4))\n",
    "    nib.save(n, os.path.join(root_dir, f\"seg{i}.nii.gz\"))\n",
    "\n",
    "# both globs sort lexicographically over the same index pattern, so\n",
    "# images[i] and segs[i] refer to the same generated volume\n",
    "images = sorted(glob.glob(os.path.join(root_dir, \"im*.nii.gz\")))\n",
    "segs = sorted(glob.glob(os.path.join(root_dir, \"seg*.nii.gz\")))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Setup transforms, dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([10, 1, 96, 96, 96]) torch.Size([10, 1, 96, 96, 96])\n"
     ]
    }
   ],
   "source": [
    "# Define transforms for image and segmentation\n",
    "imtrans = Compose(\n",
    "    [\n",
    "        LoadImage(image_only=True),\n",
    "        ScaleIntensity(),\n",
    "        EnsureChannelFirst(),\n",
    "        RandSpatialCrop((96, 96, 96), random_size=False),\n",
    "    ]\n",
    ")\n",
    "# NOTE: the label pipeline deliberately skips ScaleIntensity so segmentation\n",
    "# values stay categorical. ArrayDataset is expected to synchronize the random\n",
    "# state of the paired RandSpatialCrop transforms so the image and seg crops\n",
    "# cover the same region -- per MONAI ArrayDataset docs; verify if modified.\n",
    "segtrans = Compose(\n",
    "    [\n",
    "        LoadImage(image_only=True),\n",
    "        EnsureChannelFirst(),\n",
    "        RandSpatialCrop((96, 96, 96), random_size=False),\n",
    "    ]\n",
    ")\n",
    "\n",
    "# Define nifti dataset, dataloader\n",
    "ds = ArrayDataset(images, imtrans, segs, segtrans)\n",
    "loader = DataLoader(ds, batch_size=10, num_workers=2, pin_memory=torch.cuda.is_available())\n",
    "# sanity check: one batch should be (batch, channel, 96, 96, 96) for both\n",
    "im, seg = first(loader)\n",
    "print(im.shape, seg.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Create Model, Loss, Optimizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create UNet, DiceLoss and Adam optimizer\n",
    "# Fall back to CPU when CUDA is unavailable so the notebook still runs on a\n",
    "# CPU-only install (the setup instructions above explicitly allow one).\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "net = UNet(\n",
    "    spatial_dims=3,  # 3D volumes\n",
    "    in_channels=1,  # single-channel input image\n",
    "    out_channels=1,  # single foreground class (binary segmentation)\n",
    "    channels=(16, 32, 64, 128, 256),  # feature channels per encoder level\n",
    "    strides=(2, 2, 2, 2),  # down-sampling factor between levels\n",
    "    num_res_units=2,\n",
    ").to(device)\n",
    "\n",
    "# sigmoid=True: DiceLoss applies a sigmoid to the raw network logits itself\n",
    "loss = DiceLoss(sigmoid=True)\n",
    "lr = 1e-3\n",
    "opt = torch.optim.Adam(net.parameters(), lr)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Create supervised_trainer using ignite"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create trainer\n",
    "# positional args: model, optimizer, loss_fn, device, non_blocking=False\n",
    "trainer = ignite.engine.create_supervised_trainer(net, opt, loss, device, False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Setup event handlers for checkpointing and logging"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# optional section for checkpoint and tensorboard logging\n",
    "# adding checkpoint handler to save models (network\n",
    "# params and optimizer stats) during training\n",
    "log_dir = os.path.join(root_dir, \"logs\")\n",
    "# keep up to 10 checkpoints; require_empty=False allows reusing an existing\n",
    "# (non-empty) log directory across notebook runs\n",
    "checkpoint_handler = ignite.handlers.ModelCheckpoint(log_dir, \"net\", n_saved=10, require_empty=False)\n",
    "trainer.add_event_handler(\n",
    "    event_name=ignite.engine.Events.EPOCH_COMPLETED,\n",
    "    handler=checkpoint_handler,\n",
    "    to_save={\"net\": net, \"opt\": opt},\n",
    ")\n",
    "\n",
    "# StatsHandler prints loss at every iteration\n",
    "# user can also customize print functions and can use output_transform to convert\n",
    "# engine.state.output if it's not a loss value\n",
    "train_stats_handler = StatsHandler(name=\"trainer\", output_transform=lambda x: x)\n",
    "train_stats_handler.attach(trainer)\n",
    "\n",
    "# TensorBoardStatsHandler plots loss at every iteration\n",
    "train_tensorboard_stats_handler = TensorBoardStatsHandler(log_dir=log_dir, output_transform=lambda x: x)\n",
    "train_tensorboard_stats_handler.attach(trainer)\n",
    "\n",
    "# MLFlowHandler plots loss at every iteration on MLFlow web UI\n",
    "# runs are tracked in a local file store under the log directory (file:// URI)\n",
    "mlflow_dir = os.path.join(log_dir, \"mlruns\")\n",
    "train_mlflow_handler = MLFlowHandler(tracking_uri=Path(mlflow_dir).as_uri(), output_transform=lambda x: x)\n",
    "train_mlflow_handler.attach(trainer)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Add Validation every N epochs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<ignite.engine.events.RemovableEventHandle at 0x7a1d7fa61e80>"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# optional section for model validation during training\n",
    "validation_every_n_epochs = 1\n",
    "# Set parameters for validation\n",
    "metric_name = \"Mean_Dice\"\n",
    "# add evaluation metric to the evaluator engine\n",
    "val_metrics = {metric_name: MeanDice()}\n",
    "post_pred = Compose([Activations(sigmoid=True), AsDiscrete(threshold=0.5)])\n",
    "post_label = Compose([AsDiscrete(threshold=0.5)])\n",
    "# Ignite evaluator expects batch=(img, seg) and\n",
    "# returns output=(y_pred, y) at every iteration,\n",
    "# user can add output_transform to return other values\n",
    "evaluator = ignite.engine.create_supervised_evaluator(\n",
    "    net,\n",
    "    val_metrics,\n",
    "    device,\n",
    "    True,\n",
    "    output_transform=lambda x, y, y_pred: (\n",
    "        [post_pred(i) for i in decollate_batch(y_pred)],\n",
    "        [post_label(i) for i in decollate_batch(y)],\n",
    "    ),\n",
    ")\n",
    "\n",
    "# create a validation data loader\n",
    "val_imtrans = Compose(\n",
    "    [\n",
    "        LoadImage(image_only=True),\n",
    "        ScaleIntensity(),\n",
    "        EnsureChannelFirst(),\n",
    "        Resize((96, 96, 96)),\n",
    "    ]\n",
    ")\n",
    "val_segtrans = Compose(\n",
    "    [\n",
    "        LoadImage(image_only=True),\n",
    "        EnsureChannelFirst(),\n",
    "        Resize((96, 96, 96)),\n",
    "    ]\n",
    ")\n",
    "# validation uses the held-out volumes; training uses images[:20], so start\n",
    "# this slice at index 20 (starting at 21 would silently skip sample 20)\n",
    "val_ds = ArrayDataset(images[20:], val_imtrans, segs[20:], val_segtrans)\n",
    "val_loader = DataLoader(val_ds, batch_size=5, num_workers=8, pin_memory=torch.cuda.is_available())\n",
    "\n",
    "\n",
    "@trainer.on(ignite.engine.Events.EPOCH_COMPLETED(every=validation_every_n_epochs))\n",
    "def run_validation(engine):\n",
    "    evaluator.run(val_loader)\n",
    "\n",
    "\n",
    "# Add stats event handler to print validation stats via evaluator\n",
    "val_stats_handler = StatsHandler(\n",
    "    name=\"evaluator\",\n",
    "    # no need to print loss value, so disable per iteration output\n",
    "    output_transform=lambda x: None,\n",
    "    # fetch global epoch number from trainer\n",
    "    global_epoch_transform=lambda x: trainer.state.epoch,\n",
    ")\n",
    "val_stats_handler.attach(evaluator)\n",
    "\n",
    "# add handler to record metrics to TensorBoard at every validation epoch\n",
    "val_tensorboard_stats_handler = TensorBoardStatsHandler(\n",
    "    log_dir=log_dir,\n",
    "    # no need to plot loss value, so disable per iteration output\n",
    "    output_transform=lambda x: None,\n",
    "    # fetch global epoch number from trainer\n",
    "    global_epoch_transform=lambda x: trainer.state.epoch,\n",
    ")\n",
    "val_tensorboard_stats_handler.attach(evaluator)\n",
    "\n",
    "# add handler to record metrics to MLFlow at every validation epoch\n",
    "val_mlflow_handler = MLFlowHandler(\n",
    "    tracking_uri=Path(mlflow_dir).as_uri(),\n",
    "    # no need to plot loss value, so disable per iteration output\n",
    "    output_transform=lambda x: None,\n",
    "    # fetch global epoch number from trainer\n",
    "    global_epoch_transform=lambda x: trainer.state.epoch,\n",
    ")\n",
    "val_mlflow_handler.attach(evaluator)\n",
    "\n",
    "# add handler to draw the first image and the corresponding\n",
    "# label and model output in the last batch\n",
    "# here we draw the 3D output as GIF format along Depth\n",
    "# axis, at every validation epoch\n",
    "val_tensorboard_image_handler = TensorBoardImageHandler(\n",
    "    log_dir=log_dir,\n",
    "    batch_transform=lambda batch: (batch[0], batch[1]),\n",
    "    output_transform=lambda output: output[0],\n",
    "    global_iter_transform=lambda x: trainer.state.epoch,\n",
    ")\n",
    "evaluator.add_event_handler(\n",
    "    event_name=ignite.engine.Events.EPOCH_COMPLETED,\n",
    "    handler=val_tensorboard_image_handler,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Run training loop"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=10.\n",
      "2024-07-04 16:28:15,168 - INFO - Epoch: 1/10, Iter: 1/4 -- Loss: 0.6247 \n",
      "2024-07-04 16:28:15,226 - INFO - Epoch: 1/10, Iter: 2/4 -- Loss: 0.5973 \n",
      "2024-07-04 16:28:15,282 - INFO - Epoch: 1/10, Iter: 3/4 -- Loss: 0.5732 \n",
      "2024-07-04 16:28:15,334 - INFO - Epoch: 1/10, Iter: 4/4 -- Loss: 0.4740 \n",
      "INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.\n",
      "2024-07-04 16:28:16,220 - INFO - Epoch[1] Metrics -- Mean_Dice: 0.3574 \n",
      "INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00.780\n",
      "INFO:ignite.engine.engine.Engine:Engine run complete. Time taken: 00:00:00.866\n",
      "INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:02.016\n",
      "2024-07-04 16:28:16,707 - INFO - Epoch: 2/10, Iter: 1/4 -- Loss: 0.5163 \n",
      "2024-07-04 16:28:16,759 - INFO - Epoch: 2/10, Iter: 2/4 -- Loss: 0.4984 \n",
      "2024-07-04 16:28:16,809 - INFO - Epoch: 2/10, Iter: 3/4 -- Loss: 0.4929 \n",
      "2024-07-04 16:28:16,859 - INFO - Epoch: 2/10, Iter: 4/4 -- Loss: 0.4502 \n",
      "INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.\n",
      "2024-07-04 16:28:17,427 - INFO - Epoch[2] Metrics -- Mean_Dice: 0.4460 \n",
      "INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00.459\n",
      "INFO:ignite.engine.engine.Engine:Engine run complete. Time taken: 00:00:00.547\n",
      "INFO:ignite.engine.engine.Engine:Epoch[2] Complete. Time taken: 00:00:01.202\n",
      "2024-07-04 16:28:17,881 - INFO - Epoch: 3/10, Iter: 1/4 -- Loss: 0.4668 \n",
      "2024-07-04 16:28:17,933 - INFO - Epoch: 3/10, Iter: 2/4 -- Loss: 0.4624 \n",
      "2024-07-04 16:28:17,984 - INFO - Epoch: 3/10, Iter: 3/4 -- Loss: 0.4757 \n",
      "2024-07-04 16:28:18,034 - INFO - Epoch: 3/10, Iter: 4/4 -- Loss: 0.4498 \n",
      "INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.\n",
      "2024-07-04 16:28:18,581 - INFO - Epoch[3] Metrics -- Mean_Dice: 0.4772 \n",
      "INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00.443\n",
      "INFO:ignite.engine.engine.Engine:Engine run complete. Time taken: 00:00:00.532\n",
      "INFO:ignite.engine.engine.Engine:Epoch[3] Complete. Time taken: 00:00:01.154\n",
      "2024-07-04 16:28:19,058 - INFO - Epoch: 4/10, Iter: 1/4 -- Loss: 0.4355 \n",
      "2024-07-04 16:28:19,109 - INFO - Epoch: 4/10, Iter: 2/4 -- Loss: 0.4690 \n",
      "2024-07-04 16:28:19,160 - INFO - Epoch: 4/10, Iter: 3/4 -- Loss: 0.4273 \n",
      "2024-07-04 16:28:19,210 - INFO - Epoch: 4/10, Iter: 4/4 -- Loss: 0.5115 \n",
      "INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.\n",
      "2024-07-04 16:28:19,765 - INFO - Epoch[4] Metrics -- Mean_Dice: 0.5584 \n",
      "INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00.448\n",
      "INFO:ignite.engine.engine.Engine:Engine run complete. Time taken: 00:00:00.537\n",
      "INFO:ignite.engine.engine.Engine:Epoch[4] Complete. Time taken: 00:00:01.183\n",
      "2024-07-04 16:28:20,256 - INFO - Epoch: 5/10, Iter: 1/4 -- Loss: 0.4083 \n",
      "2024-07-04 16:28:20,307 - INFO - Epoch: 5/10, Iter: 2/4 -- Loss: 0.4431 \n",
      "2024-07-04 16:28:20,357 - INFO - Epoch: 5/10, Iter: 3/4 -- Loss: 0.4727 \n",
      "2024-07-04 16:28:20,408 - INFO - Epoch: 5/10, Iter: 4/4 -- Loss: 0.4387 \n",
      "INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.\n",
      "2024-07-04 16:28:20,944 - INFO - Epoch[5] Metrics -- Mean_Dice: 0.6871 \n",
      "INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00.432\n",
      "INFO:ignite.engine.engine.Engine:Engine run complete. Time taken: 00:00:00.521\n",
      "INFO:ignite.engine.engine.Engine:Epoch[5] Complete. Time taken: 00:00:01.181\n",
      "2024-07-04 16:28:21,421 - INFO - Epoch: 6/10, Iter: 1/4 -- Loss: 0.4100 \n",
      "2024-07-04 16:28:21,472 - INFO - Epoch: 6/10, Iter: 2/4 -- Loss: 0.4335 \n",
      "2024-07-04 16:28:21,522 - INFO - Epoch: 6/10, Iter: 3/4 -- Loss: 0.4558 \n",
      "2024-07-04 16:28:21,573 - INFO - Epoch: 6/10, Iter: 4/4 -- Loss: 0.4715 \n",
      "INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.\n",
      "2024-07-04 16:28:22,100 - INFO - Epoch[6] Metrics -- Mean_Dice: 0.7094 \n",
      "INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00.423\n",
      "INFO:ignite.engine.engine.Engine:Engine run complete. Time taken: 00:00:00.510\n",
      "INFO:ignite.engine.engine.Engine:Epoch[6] Complete. Time taken: 00:00:01.155\n",
      "2024-07-04 16:28:22,584 - INFO - Epoch: 7/10, Iter: 1/4 -- Loss: 0.3925 \n",
      "2024-07-04 16:28:22,635 - INFO - Epoch: 7/10, Iter: 2/4 -- Loss: 0.4677 \n",
      "2024-07-04 16:28:22,685 - INFO - Epoch: 7/10, Iter: 3/4 -- Loss: 0.4457 \n",
      "2024-07-04 16:28:22,736 - INFO - Epoch: 7/10, Iter: 4/4 -- Loss: 0.4211 \n",
      "INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.\n",
      "2024-07-04 16:28:23,274 - INFO - Epoch[7] Metrics -- Mean_Dice: 0.6853 \n",
      "INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00.433\n",
      "INFO:ignite.engine.engine.Engine:Engine run complete. Time taken: 00:00:00.521\n",
      "INFO:ignite.engine.engine.Engine:Epoch[7] Complete. Time taken: 00:00:01.174\n",
      "2024-07-04 16:28:23,769 - INFO - Epoch: 8/10, Iter: 1/4 -- Loss: 0.4805 \n",
      "2024-07-04 16:28:23,820 - INFO - Epoch: 8/10, Iter: 2/4 -- Loss: 0.4151 \n",
      "2024-07-04 16:28:23,871 - INFO - Epoch: 8/10, Iter: 3/4 -- Loss: 0.4194 \n",
      "2024-07-04 16:28:23,921 - INFO - Epoch: 8/10, Iter: 4/4 -- Loss: 0.3469 \n",
      "INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.\n",
      "2024-07-04 16:28:24,512 - INFO - Epoch[8] Metrics -- Mean_Dice: 0.7316 \n",
      "INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00.485\n",
      "INFO:ignite.engine.engine.Engine:Engine run complete. Time taken: 00:00:00.572\n",
      "INFO:ignite.engine.engine.Engine:Epoch[8] Complete. Time taken: 00:00:01.236\n",
      "2024-07-04 16:28:24,989 - INFO - Epoch: 9/10, Iter: 1/4 -- Loss: 0.3700 \n",
      "2024-07-04 16:28:25,040 - INFO - Epoch: 9/10, Iter: 2/4 -- Loss: 0.4328 \n",
      "2024-07-04 16:28:25,091 - INFO - Epoch: 9/10, Iter: 3/4 -- Loss: 0.4159 \n",
      "2024-07-04 16:28:25,141 - INFO - Epoch: 9/10, Iter: 4/4 -- Loss: 0.4258 \n",
      "INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.\n",
      "2024-07-04 16:28:25,673 - INFO - Epoch[9] Metrics -- Mean_Dice: 0.7584 \n",
      "INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00.426\n",
      "INFO:ignite.engine.engine.Engine:Engine run complete. Time taken: 00:00:00.514\n",
      "INFO:ignite.engine.engine.Engine:Epoch[9] Complete. Time taken: 00:00:01.159\n",
      "2024-07-04 16:28:26,172 - INFO - Epoch: 10/10, Iter: 1/4 -- Loss: 0.3832 \n",
      "2024-07-04 16:28:26,223 - INFO - Epoch: 10/10, Iter: 2/4 -- Loss: 0.3761 \n",
      "2024-07-04 16:28:26,274 - INFO - Epoch: 10/10, Iter: 3/4 -- Loss: 0.3946 \n",
      "2024-07-04 16:28:26,324 - INFO - Epoch: 10/10, Iter: 4/4 -- Loss: 0.4441 \n",
      "INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.\n",
      "2024-07-04 16:28:26,878 - INFO - Epoch[10] Metrics -- Mean_Dice: 0.7701 \n",
      "INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00.447\n",
      "INFO:ignite.engine.engine.Engine:Engine run complete. Time taken: 00:00:00.534\n",
      "INFO:ignite.engine.engine.Engine:Epoch[10] Complete. Time taken: 00:00:01.205\n",
      "INFO:ignite.engine.engine.Engine:Engine run complete. Time taken: 00:00:12.747\n"
     ]
    }
   ],
   "source": [
    "# create a training data loader\n",
    "# train on the first 20 generated volumes; validation uses a disjoint,\n",
    "# held-out slice of the same file list (see the validation cell above)\n",
    "train_ds = ArrayDataset(images[:20], imtrans, segs[:20], segtrans)\n",
    "train_loader = DataLoader(\n",
    "    train_ds,\n",
    "    batch_size=5,\n",
    "    shuffle=True,\n",
    "    num_workers=8,\n",
    "    pin_memory=torch.cuda.is_available(),\n",
    ")\n",
    "\n",
    "max_epochs = 10\n",
    "state = trainer.run(train_loader, max_epochs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Visualizing Tensorboard logs"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "monai",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
