{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "b09bf163",
   "metadata": {},
   "source": [
    "# 1. Environment Setup\n",
    "Seed the random generators, import core dependencies, and detect the training device."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "60cdb64f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: openpyxl in /home/logic/miniforge3/envs/pyhealth/lib/python3.12/site-packages (3.1.5)\n",
      "Requirement already satisfied: et-xmlfile in /home/logic/miniforge3/envs/pyhealth/lib/python3.12/site-packages (from openpyxl) (2.0.0)\n"
     ]
    }
   ],
   "source": [
    "%pip install openpyxl"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "e89065ea",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Running on device: cuda\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import random\n",
    "from pathlib import Path\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch\n",
    "from IPython.display import display\n",
    "\n",
    "from pyhealth.datasets import COVID19CXRDataset\n",
    "from pyhealth.datasets.splitter import split_by_sample\n",
    "from pyhealth.datasets.utils import get_dataloader\n",
    "from pyhealth.tasks.covid19_cxr_classification import COVID19CXRClassification\n",
    "\n",
    "SEED = 42\n",
    "random.seed(SEED)\n",
    "np.random.seed(SEED)\n",
    "torch.manual_seed(SEED)\n",
    "if torch.cuda.is_available():\n",
    "    torch.cuda.manual_seed_all(SEED)\n",
    "\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "print(f\"Running on device: {device}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8ef621a0",
   "metadata": {},
   "source": [
    "# 2. Load COVID-19 CXR Metadata\n",
    "Point to the processed COVID-19 Radiography dataset root and trigger metadata preparation if necessary."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "f41b13c6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "No config path provided, using default config\n",
      "Initializing covid19_cxr dataset from /home/logic/Github/cxr (dev mode: False)\n",
      "Scanning table: covid19_cxr from /home/logic/Github/cxr/covid19_cxr-metadata-pyhealth.csv\n",
      "Collecting global event dataframe...\n",
      "Collected dataframe with shape: (21165, 6)\n",
      "Dataset: covid19_cxr\n",
      "Dev mode: False\n",
      "Number of patients: 21165\n",
      "Number of events: 21165\n"
     ]
    }
   ],
   "source": [
    "dataset = COVID19CXRDataset(\n",
     "    root=os.environ.get(\"COVID19_CXR_ROOT\", \"/home/logic/Github/cxr\"),\n",
    ")\n",
    "dataset.stats()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "61b498db",
   "metadata": {},
   "source": [
    "# 3. Prepare PyHealth Dataset\n",
    "Instantiate the COVID-19 classification task, convert raw samples into PyHealth format, and confirm schema details."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "ab69520f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Setting task COVID19CXRClassification for covid19_cxr base dataset...\n",
      "Generating samples with 1 worker(s)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Generating samples for COVID19CXRClassification with 1 worker: 100%|██████████| 21165/21165 [00:05<00:00, 3703.62it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Label disease vocab: {'COVID': 0, 'Lung Opacity': 1, 'Normal': 2, 'Viral Pneumonia': 3}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "Processing samples: 100%|██████████| 21165/21165 [00:22<00:00, 945.33it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Generated 21165 samples for task COVID19CXRClassification\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total task samples: 21165\n",
      "Input schema: {'image': 'image'}\n",
      "Output schema: {'disease': 'multiclass'}\n",
      "Label mapping (index -> name): {0: 'COVID', 1: 'Lung Opacity', 2: 'Normal', 3: 'Viral Pneumonia'}\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>count</th>\n",
       "      <th>proportion</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>COVID</th>\n",
       "      <td>3616</td>\n",
       "      <td>0.170848</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Lung Opacity</th>\n",
       "      <td>6012</td>\n",
       "      <td>0.284054</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Normal</th>\n",
       "      <td>10192</td>\n",
       "      <td>0.481550</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Viral Pneumonia</th>\n",
       "      <td>1345</td>\n",
       "      <td>0.063548</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                 count  proportion\n",
       "COVID             3616    0.170848\n",
       "Lung Opacity      6012    0.284054\n",
       "Normal           10192    0.481550\n",
       "Viral Pneumonia   1345    0.063548"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "task = COVID19CXRClassification()\n",
    "sample_dataset = dataset.set_task(task)\n",
    "\n",
    "print(f\"Total task samples: {len(sample_dataset)}\")\n",
    "print(f\"Input schema: {sample_dataset.input_schema}\")\n",
    "print(f\"Output schema: {sample_dataset.output_schema}\")\n",
    "\n",
    "if len(sample_dataset) == 0:\n",
    "    raise RuntimeError(\"The task did not produce any samples. Verify the dataset root or disable dev mode.\")\n",
    "\n",
    "label_processor = sample_dataset.output_processors[\"disease\"]\n",
    "IDX_TO_LABEL = {index: label for label, index in label_processor.label_vocab.items()}\n",
    "print(f\"Label mapping (index -> name): {IDX_TO_LABEL}\")\n",
    "\n",
    "# Build label histogram to confirm class balance\n",
    "label_indices = [sample_dataset[i][\"disease\"].item() for i in range(len(sample_dataset))]\n",
    "label_distribution = (\n",
    "    pd.Series(label_indices)\n",
    "    .map(IDX_TO_LABEL)\n",
    "    .value_counts()\n",
    "    .sort_index()\n",
    "    .to_frame(name=\"count\")\n",
    ")\n",
    "label_distribution[\"proportion\"] = label_distribution[\"count\"] / label_distribution[\"count\"].sum()\n",
    "display(label_distribution)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "55e012cb",
   "metadata": {},
   "source": [
    "# 4. Split Dataset\n",
    "Divide the processed samples into training, validation, and test subsets before building dataloaders."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "a4d1d102",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train/Val/Test sizes: 14815, 2117, 4233\n"
     ]
    }
   ],
   "source": [
    "BATCH_SIZE = 32\n",
    "\n",
    "train_ds, val_ds, test_ds = split_by_sample(sample_dataset, [0.7, 0.1, 0.2], seed=SEED)\n",
    "print(f\"Train/Val/Test sizes: {len(train_ds)}, {len(val_ds)}, {len(test_ds)}\")\n",
    "\n",
    "train_loader = get_dataloader(train_ds, batch_size=BATCH_SIZE, shuffle=True)\n",
    "val_loader = get_dataloader(val_ds, batch_size=BATCH_SIZE) if len(val_ds) else None\n",
    "test_loader = get_dataloader(test_ds, batch_size=BATCH_SIZE) if len(test_ds) else None\n",
    "\n",
    "if len(train_loader) == 0:\n",
    "    raise RuntimeError(\"The training loader is empty. Increase the dataset size or adjust the split ratios.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a8fdadab",
   "metadata": {},
   "source": [
    "# 5. Inspect Batch Structure\n",
    "Peek at the first training batch to understand feature shapes and label encodings."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "bc0bdd4a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'image': 'Tensor(shape=(32, 1, 299, 299))', 'disease': 'Tensor(shape=(32,))'}\n",
      "Sample disease labels: [(2, 'Normal'), (2, 'Normal'), (2, 'Normal'), (2, 'Normal'), (0, 'COVID')]\n"
     ]
    }
   ],
   "source": [
    "first_batch = next(iter(train_loader))\n",
    "\n",
    "def describe(value):\n",
    "    if hasattr(value, \"shape\"):\n",
    "        return f\"{type(value).__name__}(shape={tuple(value.shape)})\"\n",
    "    if isinstance(value, (list, tuple)):\n",
    "        return f\"{type(value).__name__}(len={len(value)})\"\n",
    "    return type(value).__name__\n",
    "\n",
    "batch_summary = {key: describe(value) for key, value in first_batch.items()}\n",
    "print(batch_summary)\n",
    "\n",
    "disease_targets = first_batch[\"disease\"]\n",
    "preview_indices = disease_targets[:5].cpu().tolist()\n",
    "preview_labels = [IDX_TO_LABEL[idx] for idx in preview_indices]\n",
    "print(f\"Sample disease labels: {list(zip(preview_indices, preview_labels))}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6a0fd406",
   "metadata": {},
   "source": [
    "# 6. Instantiate CNN Model\n",
    "Create the PyHealth CNN with image embeddings and review its parameter footprint."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "61b0a206",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/logic/miniforge3/envs/pyhealth/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: No embedding created for field due to lack of compatible processor: image\n",
      "Feature keys: ['image']\n",
      "Label key: disease\n",
      "Model mode: multiclass\n",
      "Total parameters: 112,452\n"
     ]
    }
   ],
   "source": [
    "from pyhealth.models import CNN\n",
    "\n",
    "model = CNN(\n",
    "    dataset=sample_dataset,\n",
    "    embedding_dim=64,\n",
    "    hidden_dim=64,\n",
    "    num_layers=2,\n",
    ").to(device)\n",
    "\n",
    "total_params = sum(p.numel() for p in model.parameters())\n",
    "print(f\"Feature keys: {model.feature_keys}\")\n",
    "print(f\"Label key: {model.label_key}\")\n",
    "print(f\"Model mode: {model.mode}\")\n",
    "print(f\"Total parameters: {total_params:,}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f43d4a34",
   "metadata": {},
   "source": [
    "# 7. Configure Trainer\n",
    "Wrap the model with the PyHealth Trainer and define optimisation hyperparameters and metrics."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e242f28e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CNN(\n",
      "  (embedding_model): EmbeddingModel(embedding_layers=ModuleDict())\n",
      "  (cnn): ModuleDict(\n",
      "    (image): CNNLayer(\n",
      "      (cnn): ModuleList(\n",
      "        (0): CNNBlock(\n",
      "          (conv1): Sequential(\n",
      "            (0): Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "            (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU()\n",
      "          )\n",
      "          (conv2): Sequential(\n",
      "            (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "            (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          )\n",
      "          (downsample): Sequential(\n",
      "            (0): Conv2d(1, 64, kernel_size=(1, 1), stride=(1, 1))\n",
      "            (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          )\n",
      "          (relu): ReLU()\n",
      "        )\n",
      "        (1): CNNBlock(\n",
      "          (conv1): Sequential(\n",
      "            (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "            (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "            (2): ReLU()\n",
      "          )\n",
      "          (conv2): Sequential(\n",
      "            (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "            (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "          )\n",
      "          (relu): ReLU()\n",
      "        )\n",
      "      )\n",
      "      (pooling): AdaptiveAvgPool2d(output_size=1)\n",
      "    )\n",
      "  )\n",
      "  (fc): Linear(in_features=64, out_features=4, bias=True)\n",
      ")\n",
      "Metrics: ['accuracy', 'f1_macro', 'f1_micro']\n",
      "Device: cuda\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyhealth.trainer import Trainer\n",
    "\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    metrics=[\"accuracy\", \"f1_macro\", \"f1_micro\"],\n",
    "    device=str(device),\n",
    "    enable_logging=False,\n",
     ")\n",
    "\n",
    "training_config = {\n",
    "    \"epochs\": 3,\n",
    "    \"optimizer_params\": {\"lr\": 1e-3},\n",
    "    \"max_grad_norm\": 5.0,\n",
    "    \"monitor\": \"accuracy\",\n",
    "    \"monitor_criterion\": \"max\",\n",
    "}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18480f33",
   "metadata": {},
   "source": [
    "# 8. Train the Model\n",
    "Launch the training loop with optional validation monitoring for early diagnostics."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "793ae009",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training:\n",
      "Batch size: 32\n",
      "Optimizer: <class 'torch.optim.adam.Adam'>\n",
      "Optimizer params: {'lr': 0.001}\n",
      "Weight decay: 0.0\n",
      "Max grad norm: 5.0\n",
      "Val dataloader: <torch.utils.data.dataloader.DataLoader object at 0x7fac7a919a90>\n",
      "Monitor: accuracy\n",
      "Monitor criterion: max\n",
      "Epochs: 3\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0 / 3: 100%|██████████| 463/463 [02:12<00:00,  3.50it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- Train epoch-0, step-463 ---\n",
      "loss: 0.9918\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "Evaluation: 100%|██████████| 67/67 [00:03<00:00, 18.46it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- Eval epoch-0, step-463 ---\n",
      "accuracy: 0.4766\n",
      "f1_macro: 0.1628\n",
      "f1_micro: 0.4766\n",
      "loss: 1.4306\n",
      "New best accuracy score (0.4766) at epoch-0, step-463\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "Epoch 1 / 3: 100%|██████████| 463/463 [02:12<00:00,  3.49it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- Train epoch-1, step-926 ---\n",
      "loss: 0.8374\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "Evaluation: 100%|██████████| 67/67 [00:03<00:00, 18.75it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- Eval epoch-1, step-926 ---\n",
      "accuracy: 0.7171\n",
      "f1_macro: 0.6673\n",
      "f1_micro: 0.7171\n",
      "loss: 0.7513\n",
      "New best accuracy score (0.7171) at epoch-1, step-926\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "Epoch 2 / 3: 100%|██████████| 463/463 [02:12<00:00,  3.49it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- Train epoch-2, step-1389 ---\n",
      "loss: 0.7259\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "Evaluation: 100%|██████████| 67/67 [00:03<00:00, 18.65it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- Eval epoch-2, step-1389 ---\n",
      "accuracy: 0.6802\n",
      "f1_macro: 0.6627\n",
      "f1_micro: 0.6802\n",
      "loss: 0.8749\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "train_kwargs = dict(training_config)\n",
    "if val_loader is None:\n",
    "    train_kwargs.pop(\"monitor\", None)\n",
    "    train_kwargs.pop(\"monitor_criterion\", None)\n",
    "\n",
    "trainer.train(\n",
    "    train_dataloader=train_loader,\n",
    "    val_dataloader=val_loader,\n",
    "    **train_kwargs,\n",
     ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7272fad1",
   "metadata": {},
   "source": [
    "# 9. Evaluate on Validation/Test Splits\n",
    "Compute accuracy and F1 scores on the held-out loaders to assess generalisation."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "48ce1ee1",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluation: 100%|██████████| 67/67 [00:03<00:00, 18.60it/s]\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation metrics: accuracy=0.6802, f1_macro=0.6627, f1_micro=0.6802, loss=0.8749\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluation: 100%|██████████| 133/133 [00:08<00:00, 14.79it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test metrics: accuracy=0.6674, f1_macro=0.6568, f1_micro=0.6674, loss=0.9159\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "evaluation_results = {}\n",
    "for split_name, loader in {\"validation\": val_loader, \"test\": test_loader}.items():\n",
    "    if loader is None:\n",
    "        continue\n",
    "    metrics = trainer.evaluate(loader)\n",
    "    evaluation_results[split_name] = metrics\n",
    "    formatted = \", \".join(f\"{k}={v:.4f}\" for k, v in metrics.items())\n",
    "    print(f\"{split_name.title()} metrics: {formatted}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "17dbfae0",
   "metadata": {},
   "source": [
    "# 10. Inspect Sample Predictions\n",
    "Run an inference pass and preview top predictions alongside their probabilities."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "79fc4483",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluation: 100%|██████████| 67/67 [00:03<00:00, 18.60it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Mean loss: 0.8749\n",
      "{'true_index': 2, 'true_label': 'Normal', 'pred_index': 1, 'pred_label': 'Lung Opacity', 'pred_prob': 0.8432581424713135}\n",
      "{'true_index': 0, 'true_label': 'COVID', 'pred_index': 1, 'pred_label': 'Lung Opacity', 'pred_prob': 0.7460941672325134}\n",
      "{'true_index': 2, 'true_label': 'Normal', 'pred_index': 1, 'pred_label': 'Lung Opacity', 'pred_prob': 0.49037978053092957}\n",
      "{'true_index': 0, 'true_label': 'COVID', 'pred_index': 1, 'pred_label': 'Lung Opacity', 'pred_prob': 0.6506214141845703}\n",
      "{'true_index': 2, 'true_label': 'Normal', 'pred_index': 1, 'pred_label': 'Lung Opacity', 'pred_prob': 0.9451408386230469}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "target_loader = val_loader if val_loader is not None else train_loader\n",
    "\n",
    "y_true, y_prob, mean_loss = trainer.inference(target_loader)\n",
    "top_indices = y_prob.argmax(axis=-1)\n",
    "preview = []\n",
    "for i, (true_idx, pred_idx) in enumerate(zip(y_true[:5], top_indices[:5])):\n",
    "    prob = float(y_prob[i, pred_idx])\n",
    "    preview.append({\n",
    "        \"true_index\": int(true_idx),\n",
    "        \"true_label\": IDX_TO_LABEL[int(true_idx)],\n",
    "        \"pred_index\": int(pred_idx),\n",
    "        \"pred_label\": IDX_TO_LABEL[int(pred_idx)],\n",
    "        \"pred_prob\": prob,\n",
    "    })\n",
    "\n",
    "print(f\"Mean loss: {mean_loss:.4f}\")\n",
    "for sample in preview:\n",
    "    print(sample)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pyhealth",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
