{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Logging using the MLFlow Logger\n",
    "\n",
    "This notebook demonstrates how to use the logging functionality for MLFlow"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Installation"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "You can install everything from the command line using the following commands."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Installing Anomalib\n",
    "\n",
    "The easiest way to install anomalib is to use pip."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "%pip install -qU anomalib"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Install Anomalib and MLFlow\n",
    "\n",
    "Install anomalib with MLFlow using pip."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "%pip install -qU anomalib[loggers]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Install MLFlow\n",
    "\n",
    "Install MLFlow using pip."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "%pip install -qU mlflow"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Run MLFlow Server"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "You can execute the following command in a separate terminal to access the MLFlow UI.\n",
    "\n",
    "```bash\n",
    "mlflow server --backend-store-uri ./notebooks/600_loggers/mlruns/\n",
    "```\n",
    "\n",
     "Alternatively, you can uncomment the following cell and then execute it.\n",
    "\n",
    "ATTENTION: This cell runs indefinitely and must be interrupted manually to continue!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "# !mlflow server"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Dataset Directory"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This cell is to ensure we change the directory to have access to the datasets.\n",
    "\n",
     "This part is borrowed from the MVTec datamodule notebook."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pathlib import Path\n",
    "\n",
    "# NOTE: Provide the path to the dataset root directory.\n",
     "#   If the dataset is not downloaded, it will be downloaded\n",
    "#   to this directory.\n",
    "dataset_root = Path.cwd().parent / \"datasets\" / \"MVTec\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [],
   "source": [
    "import warnings\n",
    "\n",
    "from lightning.pytorch.callbacks import EarlyStopping\n",
    "\n",
    "from anomalib import TaskType\n",
    "from anomalib.callbacks.checkpoint import ModelCheckpoint\n",
    "from anomalib.data import MVTec\n",
    "from anomalib.engine import Engine\n",
    "from anomalib.loggers import AnomalibMLFlowLogger\n",
    "from anomalib.models import Fastflow\n",
    "\n",
    "warnings.filterwarnings(\"ignore\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Data Module"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Using the data module to load the MVTec dataset. But first let's print the docstring."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Help on class MVTec in module anomalib.data.image.mvtec:\n",
      "\n",
      "class MVTec(anomalib.data.base.datamodule.AnomalibDataModule)\n",
      " |  MVTec(root: pathlib.Path | str = './datasets/MVTec', category: str = 'bottle', train_batch_size: int = 32, eval_batch_size: int = 32, num_workers: int = 8, task: anomalib.TaskType | str = <TaskType.SEGMENTATION: 'segmentation'>, image_size: tuple[int, int] | None = None, transform: torchvision.transforms.v2._transform.Transform | None = None, train_transform: torchvision.transforms.v2._transform.Transform | None = None, eval_transform: torchvision.transforms.v2._transform.Transform | None = None, test_split_mode: anomalib.data.utils.split.TestSplitMode | str = <TestSplitMode.FROM_DIR: 'from_dir'>, test_split_ratio: float = 0.2, val_split_mode: anomalib.data.utils.split.ValSplitMode | str = <ValSplitMode.SAME_AS_TEST: 'same_as_test'>, val_split_ratio: float = 0.5, seed: int | None = None) -> None\n",
      " |  \n",
      " |  MVTec Datamodule.\n",
      " |  \n",
      " |  Args:\n",
      " |      root (Path | str): Path to the root of the dataset.\n",
      " |          Defaults to ``\"./datasets/MVTec\"``.\n",
      " |      category (str): Category of the MVTec dataset (e.g. \"bottle\" or \"cable\").\n",
      " |          Defaults to ``\"bottle\"``.\n",
      " |      train_batch_size (int, optional): Training batch size.\n",
      " |          Defaults to ``32``.\n",
      " |      eval_batch_size (int, optional): Test batch size.\n",
      " |          Defaults to ``32``.\n",
      " |      num_workers (int, optional): Number of workers.\n",
      " |          Defaults to ``8``.\n",
      " |      task TaskType): Task type, 'classification', 'detection' or 'segmentation'\n",
      " |          Defaults to ``TaskType.SEGMENTATION``.\n",
      " |      image_size (tuple[int, int], optional): Size to which input images should be resized.\n",
      " |          Defaults to ``None``.\n",
      " |      transform (Transform, optional): Transforms that should be applied to the input images.\n",
      " |          Defaults to ``None``.\n",
      " |      train_transform (Transform, optional): Transforms that should be applied to the input images during training.\n",
      " |          Defaults to ``None``.\n",
      " |      eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation.\n",
      " |          Defaults to ``None``.\n",
      " |      test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained.\n",
      " |          Defaults to ``TestSplitMode.FROM_DIR``.\n",
      " |      test_split_ratio (float): Fraction of images from the train set that will be reserved for testing.\n",
      " |          Defaults to ``0.2``.\n",
      " |      val_split_mode (ValSplitMode): Setting that determines how the validation subset is obtained.\n",
      " |          Defaults to ``ValSplitMode.SAME_AS_TEST``.\n",
      " |      val_split_ratio (float): Fraction of train or test images that will be reserved for validation.\n",
      " |          Defaults to ``0.5``.\n",
      " |      seed (int | None, optional): Seed which may be set to a fixed value for reproducibility.\n",
      " |          Defualts to ``None``.\n",
      " |  \n",
      " |  Examples:\n",
      " |      To create an MVTec AD datamodule with default settings:\n",
      " |  \n",
      " |      >>> datamodule = MVTec()\n",
      " |      >>> datamodule.setup()\n",
      " |      >>> i, data = next(enumerate(datamodule.train_dataloader()))\n",
      " |      >>> data.keys()\n",
      " |      dict_keys(['image_path', 'label', 'image', 'mask_path', 'mask'])\n",
      " |  \n",
      " |      >>> data[\"image\"].shape\n",
      " |      torch.Size([32, 3, 256, 256])\n",
      " |  \n",
      " |      To change the category of the dataset:\n",
      " |  \n",
      " |      >>> datamodule = MVTec(category=\"cable\")\n",
      " |  \n",
      " |      To change the image and batch size:\n",
      " |  \n",
      " |      >>> datamodule = MVTec(image_size=(512, 512), train_batch_size=16, eval_batch_size=8)\n",
      " |  \n",
      " |      MVTec AD dataset does not provide a validation set. If you would like\n",
      " |      to use a separate validation set, you can use the ``val_split_mode`` and\n",
      " |      ``val_split_ratio`` arguments to create a validation set.\n",
      " |  \n",
      " |      >>> datamodule = MVTec(val_split_mode=ValSplitMode.FROM_TEST, val_split_ratio=0.1)\n",
      " |  \n",
      " |      This will subsample the test set by 10% and use it as the validation set.\n",
      " |      If you would like to create a validation set synthetically that would\n",
      " |      not change the test set, you can use the ``ValSplitMode.SYNTHETIC`` option.\n",
      " |  \n",
      " |      >>> datamodule = MVTec(val_split_mode=ValSplitMode.SYNTHETIC, val_split_ratio=0.2)\n",
      " |  \n",
      " |  Method resolution order:\n",
      " |      MVTec\n",
      " |      anomalib.data.base.datamodule.AnomalibDataModule\n",
      " |      lightning.pytorch.core.datamodule.LightningDataModule\n",
      " |      lightning.pytorch.core.hooks.DataHooks\n",
      " |      lightning.pytorch.core.mixins.hparams_mixin.HyperparametersMixin\n",
      " |      abc.ABC\n",
      " |      builtins.object\n",
      " |  \n",
      " |  Methods defined here:\n",
      " |  \n",
      " |  __init__(self, root: pathlib.Path | str = './datasets/MVTec', category: str = 'bottle', train_batch_size: int = 32, eval_batch_size: int = 32, num_workers: int = 8, task: anomalib.TaskType | str = <TaskType.SEGMENTATION: 'segmentation'>, image_size: tuple[int, int] | None = None, transform: torchvision.transforms.v2._transform.Transform | None = None, train_transform: torchvision.transforms.v2._transform.Transform | None = None, eval_transform: torchvision.transforms.v2._transform.Transform | None = None, test_split_mode: anomalib.data.utils.split.TestSplitMode | str = <TestSplitMode.FROM_DIR: 'from_dir'>, test_split_ratio: float = 0.2, val_split_mode: anomalib.data.utils.split.ValSplitMode | str = <ValSplitMode.SAME_AS_TEST: 'same_as_test'>, val_split_ratio: float = 0.5, seed: int | None = None) -> None\n",
      " |      Attributes:\n",
      " |          prepare_data_per_node:\n",
      " |              If True, each LOCAL_RANK=0 will call prepare data.\n",
      " |              Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data.\n",
      " |          allow_zero_length_dataloader_with_multiple_devices:\n",
      " |              If True, dataloader with zero length within local rank is allowed.\n",
      " |              Default value is False.\n",
      " |  \n",
      " |  prepare_data(self) -> None\n",
      " |      Download the dataset if not available.\n",
      " |      \n",
      " |      This method checks if the specified dataset is available in the file system.\n",
      " |      If not, it downloads and extracts the dataset into the appropriate directory.\n",
      " |      \n",
      " |      Example:\n",
      " |          Assume the dataset is not available on the file system.\n",
      " |          Here's how the directory structure looks before and after calling the\n",
      " |          `prepare_data` method:\n",
      " |      \n",
      " |          Before:\n",
      " |      \n",
      " |          .. code-block:: bash\n",
      " |      \n",
      " |              $ tree datasets\n",
      " |              datasets\n",
      " |              ├── dataset1\n",
      " |              └── dataset2\n",
      " |      \n",
      " |          Calling the method:\n",
      " |      \n",
      " |          .. code-block:: python\n",
      " |      \n",
      " |              >> datamodule = MVTec(root=\"./datasets/MVTec\", category=\"bottle\")\n",
      " |              >> datamodule.prepare_data()\n",
      " |      \n",
      " |          After:\n",
      " |      \n",
      " |          .. code-block:: bash\n",
      " |      \n",
      " |              $ tree datasets\n",
      " |              datasets\n",
      " |              ├── dataset1\n",
      " |              ├── dataset2\n",
      " |              └── MVTec\n",
      " |                  ├── bottle\n",
      " |                  ├── ...\n",
      " |                  └── zipper\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data and other attributes defined here:\n",
      " |  \n",
      " |  __abstractmethods__ = frozenset()\n",
      " |  \n",
      " |  __annotations__ = {}\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Methods inherited from anomalib.data.base.datamodule.AnomalibDataModule:\n",
      " |  \n",
      " |  predict_dataloader(self) -> Any\n",
      " |      Use the test dataloader for inference unless overridden.\n",
      " |  \n",
      " |  setup(self, stage: str | None = None) -> None\n",
      " |      Set up train, validation and test data.\n",
      " |      \n",
      " |      Args:\n",
      " |          stage: str | None:  Train/Val/Test stages.\n",
      " |              Defaults to ``None``.\n",
      " |  \n",
      " |  test_dataloader(self) -> Any\n",
      " |      Get test dataloader.\n",
      " |  \n",
      " |  train_dataloader(self) -> Any\n",
      " |      Get train dataloader.\n",
      " |  \n",
      " |  val_dataloader(self) -> Any\n",
      " |      Get validation dataloader.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Readonly properties inherited from anomalib.data.base.datamodule.AnomalibDataModule:\n",
      " |  \n",
      " |  eval_transform\n",
      " |      Get the transform that will be passed to the val/test/predict datasets.\n",
      " |      \n",
      " |      If the eval_transform is not set, the engine will request the transform from the model.\n",
      " |  \n",
      " |  name\n",
      " |      Name of the datamodule.\n",
      " |  \n",
      " |  train_transform\n",
      " |      Get the transforms that will be passed to the train dataset.\n",
      " |      \n",
      " |      If the train_transform is not set, the engine will request the transform from the model.\n",
      " |  \n",
      " |  transform\n",
      " |      Property that returns the user-specified transform for the datamodule, if any.\n",
      " |      \n",
      " |      This property is accessed by the engine to set the transform for the model. The eval_transform takes precedence\n",
      " |      over the train_transform, because the transform that we store in the model is the one that should be used during\n",
      " |      inference.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data descriptors inherited from anomalib.data.base.datamodule.AnomalibDataModule:\n",
      " |  \n",
      " |  category\n",
      " |      Get the category of the datamodule.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Methods inherited from lightning.pytorch.core.datamodule.LightningDataModule:\n",
      " |  \n",
      " |  load_from_checkpoint(cls, checkpoint_path: Union[str, pathlib.Path, IO], map_location: Union[torch.device, str, int, Callable[[torch.storage.UntypedStorage, str], Optional[torch.storage.UntypedStorage]], Dict[Union[torch.device, str, int], Union[torch.device, str, int]], NoneType] = None, hparams_file: Union[str, pathlib.Path, NoneType] = None, **kwargs: Any) -> typing_extensions.Self\n",
      " |      Primary way of loading a datamodule from a checkpoint. When Lightning saves a checkpoint it stores the\n",
      " |      arguments passed to ``__init__``  in the checkpoint under ``\"datamodule_hyper_parameters\"``.\n",
      " |      \n",
      " |      Any arguments specified through \\*\\*kwargs will override args stored in ``\"datamodule_hyper_parameters\"``.\n",
      " |      \n",
      " |      Args:\n",
      " |          checkpoint_path: Path to checkpoint. This can also be a URL, or file-like object\n",
      " |          map_location:\n",
      " |              If your checkpoint saved a GPU model and you now load on CPUs\n",
      " |              or a different number of GPUs, use this to map to the new setup.\n",
      " |              The behaviour is the same as in :func:`torch.load`.\n",
      " |          hparams_file: Optional path to a ``.yaml`` or ``.csv`` file with hierarchical structure\n",
      " |              as in this example::\n",
      " |      \n",
      " |                  dataloader:\n",
      " |                      batch_size: 32\n",
      " |      \n",
      " |              You most likely won't need this since Lightning will always save the hyperparameters\n",
      " |              to the checkpoint.\n",
      " |              However, if your checkpoint weights don't have the hyperparameters saved,\n",
      " |              use this method to pass in a ``.yaml`` file with the hparams you'd like to use.\n",
      " |              These will be converted into a :class:`~dict` and passed into your\n",
      " |              :class:`LightningDataModule` for use.\n",
      " |      \n",
      " |              If your datamodule's ``hparams`` argument is :class:`~argparse.Namespace`\n",
      " |              and ``.yaml`` file has hierarchical structure, you need to refactor your datamodule to treat\n",
      " |              ``hparams`` as :class:`~dict`.\n",
      " |          \\**kwargs: Any extra keyword args needed to init the datamodule. Can also be used to override saved\n",
      " |              hyperparameter values.\n",
      " |      \n",
      " |      Return:\n",
      " |          :class:`LightningDataModule` instance with loaded weights and hyperparameters (if available).\n",
      " |      \n",
      " |      Note:\n",
      " |          ``load_from_checkpoint`` is a **class** method. You must use your :class:`LightningDataModule`\n",
      " |          **class** to call it instead of the :class:`LightningDataModule` instance, or a\n",
      " |          ``TypeError`` will be raised.\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          # load weights without mapping ...\n",
      " |          datamodule = MyLightningDataModule.load_from_checkpoint('path/to/checkpoint.ckpt')\n",
      " |      \n",
      " |          # or load weights and hyperparameters from separate files.\n",
      " |          datamodule = MyLightningDataModule.load_from_checkpoint(\n",
      " |              'path/to/checkpoint.ckpt',\n",
      " |              hparams_file='/path/to/hparams_file.yaml'\n",
      " |          )\n",
      " |      \n",
      " |          # override some of the params with new values\n",
      " |          datamodule = MyLightningDataModule.load_from_checkpoint(\n",
      " |              PATH,\n",
      " |              batch_size=32,\n",
      " |              num_workers=10,\n",
      " |          )\n",
      " |  \n",
      " |  load_state_dict(self, state_dict: Dict[str, Any]) -> None\n",
      " |      Called when loading a checkpoint, implement to reload datamodule state given datamodule state_dict.\n",
      " |      \n",
      " |      Args:\n",
      " |          state_dict: the datamodule state returned by ``state_dict``.\n",
      " |  \n",
      " |  state_dict(self) -> Dict[str, Any]\n",
      " |      Called when saving a checkpoint, implement to generate and save datamodule state.\n",
      " |      \n",
      " |      Returns:\n",
      " |          A dictionary containing datamodule state.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Class methods inherited from lightning.pytorch.core.datamodule.LightningDataModule:\n",
      " |  \n",
      " |  from_datasets(train_dataset: Union[torch.utils.data.dataset.Dataset, Iterable[torch.utils.data.dataset.Dataset], NoneType] = None, val_dataset: Union[torch.utils.data.dataset.Dataset, Iterable[torch.utils.data.dataset.Dataset], NoneType] = None, test_dataset: Union[torch.utils.data.dataset.Dataset, Iterable[torch.utils.data.dataset.Dataset], NoneType] = None, predict_dataset: Union[torch.utils.data.dataset.Dataset, Iterable[torch.utils.data.dataset.Dataset], NoneType] = None, batch_size: int = 1, num_workers: int = 0, **datamodule_kwargs: Any) -> 'LightningDataModule' from abc.ABCMeta\n",
      " |      Create an instance from torch.utils.data.Dataset.\n",
      " |      \n",
      " |      Args:\n",
      " |          train_dataset: Optional dataset or iterable of datasets to be used for train_dataloader()\n",
      " |          val_dataset: Optional dataset or iterable of datasets to be used for val_dataloader()\n",
      " |          test_dataset: Optional dataset or iterable of datasets to be used for test_dataloader()\n",
      " |          predict_dataset: Optional dataset or iterable of datasets to be used for predict_dataloader()\n",
      " |          batch_size: Batch size to use for each dataloader. Default is 1. This parameter gets forwarded to the\n",
      " |              ``__init__`` if the datamodule has such a name defined in its signature.\n",
      " |          num_workers: Number of subprocesses to use for data loading. 0 means that the\n",
      " |              data will be loaded in the main process. Number of CPUs available. This parameter gets forwarded to the\n",
      " |              ``__init__`` if the datamodule has such a name defined in its signature.\n",
      " |          **datamodule_kwargs: Additional parameters that get passed down to the datamodule's ``__init__``.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data and other attributes inherited from lightning.pytorch.core.datamodule.LightningDataModule:\n",
      " |  \n",
      " |  CHECKPOINT_HYPER_PARAMS_KEY = 'datamodule_hyper_parameters'\n",
      " |  \n",
      " |  CHECKPOINT_HYPER_PARAMS_NAME = 'datamodule_hparams_name'\n",
      " |  \n",
      " |  CHECKPOINT_HYPER_PARAMS_TYPE = 'datamodule_hparams_type'\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Methods inherited from lightning.pytorch.core.hooks.DataHooks:\n",
      " |  \n",
      " |  on_after_batch_transfer(self, batch: Any, dataloader_idx: int) -> Any\n",
      " |      Override to alter or apply batch augmentations to your batch after it is transferred to the device.\n",
      " |      \n",
      " |      Note:\n",
      " |          To check the current state of execution of this hook you can use\n",
      " |          ``self.trainer.training/testing/validating/predicting`` so that you can\n",
      " |          add different logic as per your requirement.\n",
      " |      \n",
      " |      Args:\n",
      " |          batch: A batch of data that needs to be altered or augmented.\n",
      " |          dataloader_idx: The index of the dataloader to which the batch belongs.\n",
      " |      \n",
      " |      Returns:\n",
      " |          A batch of data\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          def on_after_batch_transfer(self, batch, dataloader_idx):\n",
      " |              batch['x'] = gpu_transforms(batch['x'])\n",
      " |              return batch\n",
      " |      \n",
      " |      Raises:\n",
      " |          MisconfigurationException:\n",
      " |              If using IPUs, ``Trainer(accelerator='ipu')``.\n",
      " |      \n",
      " |      See Also:\n",
      " |          - :meth:`on_before_batch_transfer`\n",
      " |          - :meth:`transfer_batch_to_device`\n",
      " |  \n",
      " |  on_before_batch_transfer(self, batch: Any, dataloader_idx: int) -> Any\n",
      " |      Override to alter or apply batch augmentations to your batch before it is transferred to the device.\n",
      " |      \n",
      " |      Note:\n",
      " |          To check the current state of execution of this hook you can use\n",
      " |          ``self.trainer.training/testing/validating/predicting`` so that you can\n",
      " |          add different logic as per your requirement.\n",
      " |      \n",
      " |      Args:\n",
      " |          batch: A batch of data that needs to be altered or augmented.\n",
      " |          dataloader_idx: The index of the dataloader to which the batch belongs.\n",
      " |      \n",
      " |      Returns:\n",
      " |          A batch of data\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          def on_before_batch_transfer(self, batch, dataloader_idx):\n",
      " |              batch['x'] = transforms(batch['x'])\n",
      " |              return batch\n",
      " |      \n",
      " |      See Also:\n",
      " |          - :meth:`on_after_batch_transfer`\n",
      " |          - :meth:`transfer_batch_to_device`\n",
      " |  \n",
      " |  teardown(self, stage: str) -> None\n",
      " |      Called at the end of fit (train + validate), validate, test, or predict.\n",
      " |      \n",
      " |      Args:\n",
      " |          stage: either ``'fit'``, ``'validate'``, ``'test'``, or ``'predict'``\n",
      " |  \n",
      " |  transfer_batch_to_device(self, batch: Any, device: torch.device, dataloader_idx: int) -> Any\n",
      " |      Override this hook if your :class:`~torch.utils.data.DataLoader` returns tensors wrapped in a custom data\n",
      " |      structure.\n",
      " |      \n",
      " |      The data types listed below (and any arbitrary nesting of them) are supported out of the box:\n",
      " |      \n",
      " |      - :class:`torch.Tensor` or anything that implements `.to(...)`\n",
      " |      - :class:`list`\n",
      " |      - :class:`dict`\n",
      " |      - :class:`tuple`\n",
      " |      \n",
      " |      For anything else, you need to define how the data is moved to the target device (CPU, GPU, TPU, ...).\n",
      " |      \n",
      " |      Note:\n",
      " |          This hook should only transfer the data and not modify it, nor should it move the data to\n",
      " |          any other device than the one passed in as argument (unless you know what you are doing).\n",
      " |          To check the current state of execution of this hook you can use\n",
      " |          ``self.trainer.training/testing/validating/predicting`` so that you can\n",
      " |          add different logic as per your requirement.\n",
      " |      \n",
      " |      Args:\n",
      " |          batch: A batch of data that needs to be transferred to a new device.\n",
      " |          device: The target device as defined in PyTorch.\n",
      " |          dataloader_idx: The index of the dataloader to which the batch belongs.\n",
      " |      \n",
      " |      Returns:\n",
      " |          A reference to the data on the new device.\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          def transfer_batch_to_device(self, batch, device, dataloader_idx):\n",
      " |              if isinstance(batch, CustomBatch):\n",
      " |                  # move all tensors in your custom data structure to the device\n",
      " |                  batch.samples = batch.samples.to(device)\n",
      " |                  batch.targets = batch.targets.to(device)\n",
      " |              elif dataloader_idx == 0:\n",
      " |                  # skip device transfer for the first dataloader or anything you wish\n",
      " |                  pass\n",
      " |              else:\n",
      " |                  batch = super().transfer_batch_to_device(batch, device, dataloader_idx)\n",
      " |              return batch\n",
      " |      \n",
      " |      Raises:\n",
      " |          MisconfigurationException:\n",
      " |              If using IPUs, ``Trainer(accelerator='ipu')``.\n",
      " |      \n",
      " |      See Also:\n",
      " |          - :meth:`move_data_to_device`\n",
      " |          - :meth:`apply_to_collection`\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data descriptors inherited from lightning.pytorch.core.hooks.DataHooks:\n",
      " |  \n",
      " |  __dict__\n",
      " |      dictionary for instance variables (if defined)\n",
      " |  \n",
      " |  __weakref__\n",
      " |      list of weak references to the object (if defined)\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Methods inherited from lightning.pytorch.core.mixins.hparams_mixin.HyperparametersMixin:\n",
      " |  \n",
      " |  save_hyperparameters(self, *args: Any, ignore: Union[Sequence[str], str, NoneType] = None, frame: Optional[frame] = None, logger: bool = True) -> None\n",
      " |      Save arguments to ``hparams`` attribute.\n",
      " |      \n",
      " |      Args:\n",
      " |          args: single object of `dict`, `NameSpace` or `OmegaConf`\n",
      " |              or string names or arguments from class ``__init__``\n",
      " |          ignore: an argument name or a list of argument names from\n",
      " |              class ``__init__`` to be ignored\n",
      " |          frame: a frame object. Default is None\n",
      " |          logger: Whether to send the hyperparameters to the logger. Default: True\n",
      " |      \n",
      " |      Example::\n",
      " |          >>> from lightning.pytorch.core.mixins import HyperparametersMixin\n",
      " |          >>> class ManuallyArgsModel(HyperparametersMixin):\n",
      " |          ...     def __init__(self, arg1, arg2, arg3):\n",
      " |          ...         super().__init__()\n",
      " |          ...         # manually assign arguments\n",
      " |          ...         self.save_hyperparameters('arg1', 'arg3')\n",
      " |          ...     def forward(self, *args, **kwargs):\n",
      " |          ...         ...\n",
      " |          >>> model = ManuallyArgsModel(1, 'abc', 3.14)\n",
      " |          >>> model.hparams\n",
      " |          \"arg1\": 1\n",
      " |          \"arg3\": 3.14\n",
      " |      \n",
      " |          >>> from lightning.pytorch.core.mixins import HyperparametersMixin\n",
      " |          >>> class AutomaticArgsModel(HyperparametersMixin):\n",
      " |          ...     def __init__(self, arg1, arg2, arg3):\n",
      " |          ...         super().__init__()\n",
      " |          ...         # equivalent automatic\n",
      " |          ...         self.save_hyperparameters()\n",
      " |          ...     def forward(self, *args, **kwargs):\n",
      " |          ...         ...\n",
      " |          >>> model = AutomaticArgsModel(1, 'abc', 3.14)\n",
      " |          >>> model.hparams\n",
      " |          \"arg1\": 1\n",
      " |          \"arg2\": abc\n",
      " |          \"arg3\": 3.14\n",
      " |      \n",
      " |          >>> from lightning.pytorch.core.mixins import HyperparametersMixin\n",
      " |          >>> class SingleArgModel(HyperparametersMixin):\n",
      " |          ...     def __init__(self, params):\n",
      " |          ...         super().__init__()\n",
      " |          ...         # manually assign single argument\n",
      " |          ...         self.save_hyperparameters(params)\n",
      " |          ...     def forward(self, *args, **kwargs):\n",
      " |          ...         ...\n",
      " |          >>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14))\n",
      " |          >>> model.hparams\n",
      " |          \"p1\": 1\n",
      " |          \"p2\": abc\n",
      " |          \"p3\": 3.14\n",
      " |      \n",
      " |          >>> from lightning.pytorch.core.mixins import HyperparametersMixin\n",
      " |          >>> class ManuallyArgsModel(HyperparametersMixin):\n",
      " |          ...     def __init__(self, arg1, arg2, arg3):\n",
      " |          ...         super().__init__()\n",
      " |          ...         # pass argument(s) to ignore as a string or in a list\n",
      " |          ...         self.save_hyperparameters(ignore='arg2')\n",
      " |          ...     def forward(self, *args, **kwargs):\n",
      " |          ...         ...\n",
      " |          >>> model = ManuallyArgsModel(1, 'abc', 3.14)\n",
      " |          >>> model.hparams\n",
      " |          \"arg1\": 1\n",
      " |          \"arg3\": 3.14\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Readonly properties inherited from lightning.pytorch.core.mixins.hparams_mixin.HyperparametersMixin:\n",
      " |  \n",
      " |  hparams\n",
      " |      The collection of hyperparameters saved with :meth:`save_hyperparameters`. It is mutable by the user. For\n",
      " |      the frozen set of initial hyperparameters, use :attr:`hparams_initial`.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Mutable hyperparameters dictionary\n",
      " |  \n",
      " |  hparams_initial\n",
      " |      The collection of hyperparameters saved with :meth:`save_hyperparameters`. These contents are read-only.\n",
      " |      Manual updates to the saved hyperparameters can instead be performed through :attr:`hparams`.\n",
      " |      \n",
      " |      Returns:\n",
      " |          AttributeDict: immutable initial hyperparameters\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data and other attributes inherited from lightning.pytorch.core.mixins.hparams_mixin.HyperparametersMixin:\n",
      " |  \n",
      " |  __jit_unused_properties__ = ['hparams', 'hparams_initial']\n",
      "\n"
     ]
    }
   ],
   "source": [
    "help(MVTec)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "datamodule = MVTec(\n",
    "    root=dataset_root,\n",
    "    category=\"bottle\",\n",
    "    image_size=256,\n",
    "    train_batch_size=32,\n",
    "    eval_batch_size=32,\n",
    "    num_workers=24,\n",
    "    task=TaskType.SEGMENTATION,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Set up Fastflow as an example model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = Fastflow(backbone=\"resnet18\", flow_steps=8)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## MLFlow Logger"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Set up the MLFlow logger. But first, let's print its docstring to see the available options."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Help on class AnomalibMLFlowLogger in module anomalib.loggers.mlflow:\n",
      "\n",
      "class AnomalibMLFlowLogger(anomalib.loggers.base.ImageLoggerBase, lightning.pytorch.loggers.mlflow.MLFlowLogger)\n",
      " |  AnomalibMLFlowLogger(experiment_name: str | None = 'anomalib_logs', run_name: str | None = None, tracking_uri: str | None = None, save_dir: str | None = './mlruns', log_model: Optional[Literal[True, False, 'all']] = False, prefix: str | None = '', **kwargs) -> None\n",
      " |  \n",
      " |  Logger for MLFlow.\n",
      " |  \n",
      " |  Adds interface for ``add_image`` in the logger rather than calling the\n",
      " |  experiment object.\n",
      " |  \n",
      " |  .. note::\n",
      " |      Same as the MLFlowLogger provided by PyTorch Lightning and the doc string is reproduced below.\n",
      " |  \n",
      " |  Track your parameters, metrics, source code and more using\n",
      " |  `MLFlow <https://mlflow.org/#core-concepts>`_.\n",
      " |  \n",
      " |  Install it with pip:\n",
      " |  \n",
      " |  .. code-block:: bash\n",
      " |  \n",
      " |      pip install mlflow\n",
      " |  \n",
      " |  Args:\n",
      " |      experiment_name: The name of the experiment.\n",
      " |      run_name: Name of the new run.\n",
      " |          The `run_name` is internally stored as a ``mlflow.runName`` tag.\n",
      " |          If the ``mlflow.runName`` tag has already been set in `tags`, the value is overridden by the `run_name`.\n",
      " |      tracking_uri: Address of local or remote tracking server.\n",
      " |          If not provided, defaults to `MLFLOW_TRACKING_URI` environment variable if set, otherwise it falls\n",
      " |          back to `file:<save_dir>`.\n",
      " |      save_dir: A path to a local directory where the MLflow runs get saved.\n",
      " |          Defaults to `./mlruns` if `tracking_uri` is not provided.\n",
      " |          Has no effect if `tracking_uri` is provided.\n",
      " |      log_model: Log checkpoints created by `ModelCheckpoint` as MLFlow artifacts.\n",
      " |  \n",
      " |          - if ``log_model == 'all'``, checkpoints are logged during training.\n",
      " |          - if ``log_model == True``, checkpoints are logged at the end of training,                 except when `save_top_k == -1` which also logs every checkpoint during training.\n",
      " |          - if ``log_model == False`` (default), no checkpoint is logged.\n",
      " |  \n",
      " |      prefix: A string to put at the beginning of metric keys. Defaults to ``''``.\n",
      " |      kwargs: Additional arguments like `tags`, `artifact_location` etc. used by\n",
      " |          `MLFlowExperiment` can be passed as keyword arguments in this logger.\n",
      " |  \n",
      " |  Example:\n",
      " |      >>> from anomalib.loggers import AnomalibMLFlowLogger\n",
      " |      >>> from anomalib.engine import Engine\n",
      " |      ...\n",
      " |      >>> mlflow_logger = AnomalibMLFlowLogger()\n",
      " |      >>> engine = Engine(logger=mlflow_logger)\n",
      " |  \n",
      " |  See Also:\n",
      " |      - `MLFlow Documentation <https://mlflow.org/docs/latest/>`_.\n",
      " |  \n",
      " |  Method resolution order:\n",
      " |      AnomalibMLFlowLogger\n",
      " |      anomalib.loggers.base.ImageLoggerBase\n",
      " |      lightning.pytorch.loggers.mlflow.MLFlowLogger\n",
      " |      lightning.pytorch.loggers.logger.Logger\n",
      " |      lightning.fabric.loggers.logger.Logger\n",
      " |      abc.ABC\n",
      " |      builtins.object\n",
      " |  \n",
      " |  Methods defined here:\n",
      " |  \n",
      " |  __init__(self, experiment_name: str | None = 'anomalib_logs', run_name: str | None = None, tracking_uri: str | None = None, save_dir: str | None = './mlruns', log_model: Optional[Literal[True, False, 'all']] = False, prefix: str | None = '', **kwargs) -> None\n",
      " |      Initialize self.  See help(type(self)) for accurate signature.\n",
      " |  \n",
      " |  add_image(self, image: numpy.ndarray | matplotlib.figure.Figure, name: str | None = None, **kwargs) -> None\n",
      " |      Interface to log images in the mlflow loggers.\n",
      " |      \n",
      " |      Args:\n",
      " |          image (np.ndarray | Figure): Image to log.\n",
      " |          name (str | None): The tag of the image defaults to ``None``.\n",
      " |          kwargs: Additional keyword arguments that are only used if `image` is of type Figure.\n",
      " |              These arguments are passed directly to the method that saves the figure.\n",
      " |              If `image` is a NumPy array, `kwargs` has no effect.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data and other attributes defined here:\n",
      " |  \n",
      " |  __abstractmethods__ = frozenset()\n",
      " |  \n",
      " |  __annotations__ = {}\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data descriptors inherited from anomalib.loggers.base.ImageLoggerBase:\n",
      " |  \n",
      " |  __dict__\n",
      " |      dictionary for instance variables (if defined)\n",
      " |  \n",
      " |  __weakref__\n",
      " |      list of weak references to the object (if defined)\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Methods inherited from lightning.pytorch.loggers.mlflow.MLFlowLogger:\n",
      " |  \n",
      " |  after_save_checkpoint(self, checkpoint_callback: lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint) -> None\n",
      " |      Called after model checkpoint callback saves a new checkpoint.\n",
      " |      \n",
      " |      Args:\n",
      " |          checkpoint_callback: the model checkpoint callback instance\n",
      " |  \n",
      " |  finalize(self, status: str = 'success') -> None\n",
      " |      Do any processing that is necessary to finalize an experiment.\n",
      " |      \n",
      " |      Args:\n",
      " |          status: Status that the experiment finished with (e.g. success, failed, aborted)\n",
      " |  \n",
      " |  log_hyperparams(self, params: Union[Dict[str, Any], argparse.Namespace]) -> None\n",
      " |      Record hyperparameters.\n",
      " |      \n",
      " |      Args:\n",
      " |          params: :class:`~argparse.Namespace` or `Dict` containing the hyperparameters\n",
      " |          args: Optional positional arguments, depends on the specific logger being used\n",
      " |          kwargs: Optional keyword arguments, depends on the specific logger being used\n",
      " |  \n",
      " |  log_metrics(self, metrics: Mapping[str, float], step: Optional[int] = None) -> None\n",
      " |      Records metrics. This method logs metrics as soon as it received them.\n",
      " |      \n",
      " |      Args:\n",
      " |          metrics: Dictionary with metric names as keys and measured quantities as values\n",
      " |          step: Step number at which the metrics should be recorded\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Readonly properties inherited from lightning.pytorch.loggers.mlflow.MLFlowLogger:\n",
      " |  \n",
      " |  experiment\n",
      " |      Actual MLflow object. To use MLflow features in your :class:`~lightning.pytorch.core.LightningModule` do the\n",
      " |      following.\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          self.logger.experiment.some_mlflow_function()\n",
      " |  \n",
      " |  experiment_id\n",
      " |      Create the experiment if it does not exist to get the experiment id.\n",
      " |      \n",
      " |      Returns:\n",
      " |          The experiment id.\n",
      " |  \n",
      " |  name\n",
      " |      Get the experiment id.\n",
      " |      \n",
      " |      Returns:\n",
      " |          The experiment id.\n",
      " |  \n",
      " |  run_id\n",
      " |      Create the experiment if it does not exist to get the run id.\n",
      " |      \n",
      " |      Returns:\n",
      " |          The run id.\n",
      " |  \n",
      " |  save_dir\n",
      " |      The root file directory in which MLflow experiments are saved.\n",
      " |      \n",
      " |      Return:\n",
      " |          Local path to the root experiment directory if the tracking uri is local.\n",
      " |          Otherwise returns `None`.\n",
      " |  \n",
      " |  version\n",
      " |      Get the run id.\n",
      " |      \n",
      " |      Returns:\n",
      " |          The run id.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data and other attributes inherited from lightning.pytorch.loggers.mlflow.MLFlowLogger:\n",
      " |  \n",
      " |  LOGGER_JOIN_CHAR = '-'\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Methods inherited from lightning.fabric.loggers.logger.Logger:\n",
      " |  \n",
      " |  log_graph(self, model: torch.nn.modules.module.Module, input_array: Optional[torch.Tensor] = None) -> None\n",
      " |      Record model graph.\n",
      " |      \n",
      " |      Args:\n",
      " |          model: the model with an implementation of ``forward``.\n",
      " |          input_array: input passes to `model.forward`\n",
      " |  \n",
      " |  save(self) -> None\n",
      " |      Save log data.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Readonly properties inherited from lightning.fabric.loggers.logger.Logger:\n",
      " |  \n",
      " |  group_separator\n",
      " |      Return the default separator used by the logger to group the data into subfolders.\n",
      " |  \n",
      " |  log_dir\n",
      " |      Return directory the current version of the experiment gets saved, or `None` if the logger does not save\n",
      " |      data locally.\n",
      " |  \n",
      " |  root_dir\n",
      " |      Return the root directory where all versions of an experiment get saved, or `None` if the logger does not\n",
      " |      save data locally.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "help(AnomalibMLFlowLogger)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "mlflow_logger = AnomalibMLFlowLogger()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Training"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Callbacks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_checkpoint = ModelCheckpoint(mode=\"max\", monitor=\"pixel_AUROC\")\n",
    "\n",
    "early_stopping = EarlyStopping(monitor=\"pixel_AUROC\", mode=\"max\", patience=3)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Set up the Engine"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [],
   "source": [
    "callbacks = [\n",
    "    model_checkpoint,\n",
    "    early_stopping,\n",
    "]\n",
    "\n",
    "kwargs = {\"log_every_n_steps\": 3}\n",
    "\n",
    "engine = Engine(\n",
    "    callbacks=callbacks,\n",
    "    pixel_metrics=\"AUROC\",\n",
    "    accelerator=\"auto\",\n",
    "    devices=1,\n",
    "    logger=mlflow_logger,  # Logger is set here\n",
    "    **kwargs,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Fit the Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:pytorch_lightning.utilities.rank_zero:GPU available: True (cuda), used: True\n",
      "INFO:pytorch_lightning.utilities.rank_zero:TPU available: False, using: 0 TPU cores\n",
      "INFO:pytorch_lightning.utilities.rank_zero:IPU available: False, using: 0 IPUs\n",
      "INFO:pytorch_lightning.utilities.rank_zero:HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n",
      "\n",
      "  | Name                  | Type                     | Params\n",
      "-------------------------------------------------------------------\n",
      "0 | loss                  | FastflowLoss             | 0     \n",
      "1 | _transform            | Compose                  | 0     \n",
      "2 | normalization_metrics | MinMax                   | 0     \n",
      "3 | image_threshold       | F1AdaptiveThreshold      | 0     \n",
      "4 | pixel_threshold       | F1AdaptiveThreshold      | 0     \n",
      "5 | image_metrics         | AnomalibMetricCollection | 0     \n",
      "6 | pixel_metrics         | AnomalibMetricCollection | 0     \n",
      "7 | model                 | FastflowModel            | 7.7 M \n",
      "-------------------------------------------------------------------\n",
      "3.5 M     Trainable params\n",
      "4.2 M     Non-trainable params\n",
      "7.7 M     Total params\n",
      "30.678    Total estimated model params size (MB)\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "aad5e6a5204a440eb9afdadd6634ec50",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Training: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "ecb11fee03014910947909b1215a8b52",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "27bc3cfe8fef48749c5e04a125af0078",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "292ee531a3574c98a5ef92d4a5b5370f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "a3bea1377bd64cb2bd2874617d4ad010",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "9ab5886097e74dcf9b3e9edaff926b2d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7a751040314f4d81a84d93917e4c3ee1",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "df6f24c397044035a50c6040918d4a44",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "22ccf278524c40ed84da1f8d5042137d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "1e3c71c112fd472fba36a769b25c73f2",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7cd3d7db74e14a67bbe5787d8b22e6ac",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "4befe32b77824fa1bffab626d103994b",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c878d34f0d5f490e8528ae9502c8395b",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "064e1c164aed49818f6b3b1604fbd662",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Validation: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "engine.fit(model=model, datamodule=datamodule)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Testing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "48057ef698d64dcdb2a7c2527b2305af",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Testing: |          | 0/? [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n",
       "┃<span style=\"font-weight: bold\">        Test metric        </span>┃<span style=\"font-weight: bold\">       DataLoader 0        </span>┃\n",
       "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n",
       "│<span style=\"color: #008080; text-decoration-color: #008080\">        image_AUROC        </span>│<span style=\"color: #800080; text-decoration-color: #800080\">            1.0            </span>│\n",
       "│<span style=\"color: #008080; text-decoration-color: #008080\">       image_F1Score       </span>│<span style=\"color: #800080; text-decoration-color: #800080\">    0.9919999837875366     </span>│\n",
       "│<span style=\"color: #008080; text-decoration-color: #008080\">        pixel_AUROC        </span>│<span style=\"color: #800080; text-decoration-color: #800080\">     0.973434567451477     </span>│\n",
       "└───────────────────────────┴───────────────────────────┘\n",
       "</pre>\n"
      ],
      "text/plain": [
       "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n",
       "┃\u001b[1m \u001b[0m\u001b[1m       Test metric       \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m      DataLoader 0       \u001b[0m\u001b[1m \u001b[0m┃\n",
       "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n",
       "│\u001b[36m \u001b[0m\u001b[36m       image_AUROC       \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m           1.0           \u001b[0m\u001b[35m \u001b[0m│\n",
       "│\u001b[36m \u001b[0m\u001b[36m      image_F1Score      \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m   0.9919999837875366    \u001b[0m\u001b[35m \u001b[0m│\n",
       "│\u001b[36m \u001b[0m\u001b[36m       pixel_AUROC       \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m    0.973434567451477    \u001b[0m\u001b[35m \u001b[0m│\n",
       "└───────────────────────────┴───────────────────────────┘\n"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "[{'pixel_AUROC': 0.973434567451477,\n",
       "  'image_AUROC': 1.0,\n",
       "  'image_F1Score': 0.9919999837875366}]"
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "engine.test(model=model, dataloaders=datamodule.test_dataloader())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Demo: Log a Figure to MLFlow"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "fig, ax = plt.subplots()\n",
    "ax.plot([0, 0], [2, 3])\n",
    "\n",
    "mlflow_logger.add_image(fig, \"figure_demo.png\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Save Model to MLFlow"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [],
   "source": [
    "import mlflow\n",
    "\n",
    "with mlflow.start_run(run_id=mlflow_logger.run_id):\n",
    "    mlflow.pytorch.log_model(engine.model.model, \"Fastflow\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load Model from MLFlow"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "FastflowModel(\n",
       "  (feature_extractor): FeatureListNet(\n",
       "    (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n",
       "    (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (act1): ReLU(inplace=True)\n",
       "    (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
       "    (layer1): Sequential(\n",
       "      (0): BasicBlock(\n",
       "        (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (drop_block): Identity()\n",
       "        (act1): ReLU(inplace=True)\n",
       "        (aa): Identity()\n",
       "        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (act2): ReLU(inplace=True)\n",
       "      )\n",
       "      (1): BasicBlock(\n",
       "        (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (drop_block): Identity()\n",
       "        (act1): ReLU(inplace=True)\n",
       "        (aa): Identity()\n",
       "        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (act2): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "    (layer2): Sequential(\n",
       "      (0): BasicBlock(\n",
       "        (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (drop_block): Identity()\n",
       "        (act1): ReLU(inplace=True)\n",
       "        (aa): Identity()\n",
       "        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (act2): ReLU(inplace=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "          (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): BasicBlock(\n",
       "        (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (drop_block): Identity()\n",
       "        (act1): ReLU(inplace=True)\n",
       "        (aa): Identity()\n",
       "        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (act2): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "    (layer3): Sequential(\n",
       "      (0): BasicBlock(\n",
       "        (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (drop_block): Identity()\n",
       "        (act1): ReLU(inplace=True)\n",
       "        (aa): Identity()\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (act2): ReLU(inplace=True)\n",
       "        (downsample): Sequential(\n",
       "          (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "          (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        )\n",
       "      )\n",
       "      (1): BasicBlock(\n",
       "        (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (drop_block): Identity()\n",
       "        (act1): ReLU(inplace=True)\n",
       "        (aa): Identity()\n",
       "        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (act2): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (norms): ModuleList(\n",
       "    (0): LayerNorm((64, 64, 64), eps=1e-05, elementwise_affine=True)\n",
       "    (1): LayerNorm((128, 32, 32), eps=1e-05, elementwise_affine=True)\n",
       "    (2): LayerNorm((256, 16, 16), eps=1e-05, elementwise_affine=True)\n",
       "  )\n",
       "  (fast_flow_blocks): ModuleList(\n",
       "    (0): SequenceINN(\n",
       "      (module_list): ModuleList(\n",
       "        (0): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((1, 1, 1, 1))\n",
       "            (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((1, 1, 1, 1))\n",
       "            (4): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (1): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((0, 0, 0, 0))\n",
       "            (1): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((0, 0, 0, 0))\n",
       "            (4): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (2): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((1, 1, 1, 1))\n",
       "            (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((1, 1, 1, 1))\n",
       "            (4): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (3): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((0, 0, 0, 0))\n",
       "            (1): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((0, 0, 0, 0))\n",
       "            (4): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (4): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((1, 1, 1, 1))\n",
       "            (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((1, 1, 1, 1))\n",
       "            (4): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (5): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((0, 0, 0, 0))\n",
       "            (1): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((0, 0, 0, 0))\n",
       "            (4): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (6): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((1, 1, 1, 1))\n",
       "            (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((1, 1, 1, 1))\n",
       "            (4): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (7): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((0, 0, 0, 0))\n",
       "            (1): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((0, 0, 0, 0))\n",
       "            (4): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (1): SequenceINN(\n",
       "      (module_list): ModuleList(\n",
       "        (0): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((1, 1, 1, 1))\n",
       "            (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((1, 1, 1, 1))\n",
       "            (4): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (1): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((0, 0, 0, 0))\n",
       "            (1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((0, 0, 0, 0))\n",
       "            (4): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (2): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((1, 1, 1, 1))\n",
       "            (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((1, 1, 1, 1))\n",
       "            (4): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (3): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((0, 0, 0, 0))\n",
       "            (1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((0, 0, 0, 0))\n",
       "            (4): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (4): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((1, 1, 1, 1))\n",
       "            (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((1, 1, 1, 1))\n",
       "            (4): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (5): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((0, 0, 0, 0))\n",
       "            (1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((0, 0, 0, 0))\n",
       "            (4): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (6): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((1, 1, 1, 1))\n",
       "            (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((1, 1, 1, 1))\n",
       "            (4): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (7): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((0, 0, 0, 0))\n",
       "            (1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((0, 0, 0, 0))\n",
       "            (4): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (2): SequenceINN(\n",
       "      (module_list): ModuleList(\n",
       "        (0): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((1, 1, 1, 1))\n",
       "            (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((1, 1, 1, 1))\n",
       "            (4): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (1): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((0, 0, 0, 0))\n",
       "            (1): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((0, 0, 0, 0))\n",
       "            (4): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (2): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((1, 1, 1, 1))\n",
       "            (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((1, 1, 1, 1))\n",
       "            (4): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (3): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((0, 0, 0, 0))\n",
       "            (1): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((0, 0, 0, 0))\n",
       "            (4): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (4): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((1, 1, 1, 1))\n",
       "            (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((1, 1, 1, 1))\n",
       "            (4): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (5): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((0, 0, 0, 0))\n",
       "            (1): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((0, 0, 0, 0))\n",
       "            (4): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (6): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((1, 1, 1, 1))\n",
       "            (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((1, 1, 1, 1))\n",
       "            (4): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "        (7): AllInOneBlock(\n",
       "          (subnet): Sequential(\n",
       "            (0): ZeroPad2d((0, 0, 0, 0))\n",
       "            (1): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "            (2): ReLU()\n",
       "            (3): ZeroPad2d((0, 0, 0, 0))\n",
       "            (4): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (anomaly_map_generator): AnomalyMapGenerator()\n",
       ")"
      ]
     },
     "execution_count": 54,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Build the artifact URI for this run and load the trained Fastflow model back from MLflow.\n",
     "model_uri = f\"runs:/{mlflow_logger.run_id}/Fastflow\"\n",
     "mlflow.pytorch.load_model(model_uri)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
