{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "# MoleRec Model Training on MIMIC-III Dataset\n",
        "\n",
        "Train the MoleRec model for medication recommendation on the MIMIC-III dataset.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {},
      "outputs": [
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "/usr/local/Caskroom/miniforge/base/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
            "  from .autonotebook import tqdm as notebook_tqdm\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "No config path provided, using default config\n",
            "Initializing mimic3 dataset from https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III (dev mode: True)\n",
            "Scanning table: patients from https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/PATIENTS.csv.gz\n",
            "Original path does not exist. Using alternative: https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/PATIENTS.csv\n",
            "Some column names were converted to lowercase\n",
            "Scanning table: admissions from https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/ADMISSIONS.csv.gz\n",
            "Original path does not exist. Using alternative: https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/ADMISSIONS.csv\n",
            "Some column names were converted to lowercase\n",
            "Scanning table: icustays from https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/ICUSTAYS.csv.gz\n",
            "Original path does not exist. Using alternative: https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/ICUSTAYS.csv\n",
            "Some column names were converted to lowercase\n",
            "Scanning table: diagnoses_icd from https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/DIAGNOSES_ICD.csv.gz\n",
            "Original path does not exist. Using alternative: https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/DIAGNOSES_ICD.csv\n",
            "Some column names were converted to lowercase\n",
            "Joining with table: https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/ADMISSIONS.csv.gz\n",
            "Original path does not exist. Using alternative: https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/ADMISSIONS.csv\n",
            "Scanning table: procedures_icd from https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/PROCEDURES_ICD.csv.gz\n",
            "Original path does not exist. Using alternative: https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/PROCEDURES_ICD.csv\n",
            "Some column names were converted to lowercase\n",
            "Joining with table: https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/ADMISSIONS.csv.gz\n",
            "Original path does not exist. Using alternative: https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/ADMISSIONS.csv\n",
            "Scanning table: prescriptions from https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/PRESCRIPTIONS.csv.gz\n",
            "Original path does not exist. Using alternative: https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/PRESCRIPTIONS.csv\n",
            "Some column names were converted to lowercase\n",
            "Joining with table: https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/ADMISSIONS.csv.gz\n",
            "Original path does not exist. Using alternative: https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III/ADMISSIONS.csv\n",
            "Collecting global event dataframe...\n",
            "Dev mode enabled: limiting to 1000 patients\n",
            "Collected dataframe with shape: (43082, 49)\n",
            "Dataset: mimic3\n",
            "Dev mode: True\n",
            "Number of patients: 1000\n",
            "Number of events: 43082\n"
          ]
        }
      ],
      "source": [
        "from pyhealth.datasets import MIMIC3Dataset\n",
        "\n",
        "# Load the synthetic MIMIC-III dataset hosted on Google Cloud Storage.\n",
        "# dev=True limits loading to 1000 patients for fast iteration (see stats\n",
        "# output below); drop it for a full run.\n",
        "dataset = MIMIC3Dataset(\n",
        "    root=\"https://storage.googleapis.com/pyhealth/Synthetic_MIMIC-III\",\n",
        "    tables=[\"DIAGNOSES_ICD\", \"PROCEDURES_ICD\", \"PRESCRIPTIONS\"],\n",
        "    dev=True,\n",
        ")\n",
        "dataset.stats()\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Set Drug Recommendation Task\n",
        "\n",
        "Use the `DrugRecommendationMIMIC3` task, which creates samples whose inputs are conditions and procedures and whose prediction target is the set of prescribed drugs (ATC level-3 codes).\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 2,
      "metadata": {},
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Setting task DrugRecommendationMIMIC3 for mimic3 base dataset...\n",
            "Generating samples with 4 worker(s)...\n",
            "Generating samples for DrugRecommendationMIMIC3 with 4 workers\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "Collecting samples for DrugRecommendationMIMIC3 from 4 workers: 100%|██████████| 1000/1000 [00:00<00:00, 2908.84it/s]"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Label drugs vocab: {'*NF*': 0, '1/2 ': 1, 'Acet': 2, 'Acyc': 3, 'Albu': 4, 'Alem': 5, 'Allo': 6, 'Alte': 7, 'Alum': 8, 'Ambi': 9, 'Amio': 10, 'Amlo': 11, 'Amox': 12, 'Amph': 13, 'Arip': 14, 'Arti': 15, 'Asco': 16, 'Aspi': 17, 'Aten': 18, 'Ator': 19, 'Atov': 20, 'Atro': 21, 'Azat': 22, 'Azit': 23, 'Bisa': 24, 'BuPR': 25, 'Bupi': 26, 'Calc': 27, 'Capt': 28, 'Carv': 29, 'Casp': 30, 'Cefa': 31, 'Cefe': 32, 'Ceft': 33, 'Cele': 34, 'Cepa': 35, 'Ceph': 36, 'Chlo': 37, 'Cilo': 38, 'Cipr': 39, 'Cisa': 40, 'Cita': 41, 'Clin': 42, 'Clon': 43, 'Clop': 44, 'Colc': 45, 'Cyan': 46, 'Cycl': 47, 'D10W': 48, 'D5 1': 49, 'D5NS': 50, 'D5W': 51, 'D5W ': 52, 'DOBU': 53, 'Daki': 54, 'Desm': 55, 'Dexa': 56, 'Dexm': 57, 'Dext': 58, 'Diaz': 59, 'Digo': 60, 'Dilt': 61, 'Diph': 62, 'Dipy': 63, 'Docu': 64, 'Dola': 65, 'Done': 66, 'DopA': 67, 'Dorz': 68, 'Doxy': 69, 'Drop': 70, 'Emtr': 71, 'Enal': 72, 'Epin': 73, 'Epoe': 74, 'Epti': 75, 'Eryt': 76, 'Famo': 77, 'Fent': 78, 'Ferr': 79, 'Fexo': 80, 'Fish': 81, 'Flec': 82, 'Fluc': 83, 'Fluo': 84, 'Flut': 85, 'FoLI': 86, 'Foli': 87, 'Furo': 88, 'Gaba': 89, 'Gluc': 90, 'GlyB': 91, 'Glyb': 92, 'Glyc': 93, 'Guai': 94, 'HEPA': 95, 'HYDR': 96, 'Halo': 97, 'Hepa': 98, 'Hesp': 99, 'Humu': 100, 'Hydr': 101, 'Ibup': 102, 'Infl': 103, 'Insu': 104, 'Ipra': 105, 'Isos': 106, 'Keto': 107, 'LR': 108, 'Labe': 109, 'Lact': 110, 'Lami': 111, 'Lans': 112, 'Lepi': 113, 'Leve': 114, 'Levo': 115, 'Lido': 116, 'Line': 117, 'Lisi': 118, 'Lora': 119, 'Magn': 120, 'Mann': 121, 'Mepe': 122, 'Mero': 123, 'MetR': 124, 'Meth': 125, 'Meto': 126, 'Metr': 127, 'Mico': 128, 'Mida': 129, 'Milk': 130, 'Milr': 131, 'Mira': 132, 'Mirt': 133, 'Mont': 134, 'Morp': 135, 'Mult': 136, 'Mupi': 137, 'NS': 138, 'NS (': 139, 'Nado': 140, 'Nafc': 141, 'Nalo': 142, 'Neos': 143, 'Neph': 144, 'Neut': 145, 'Nico': 146, 'Nitr': 147, 'Nore': 148, 'Nyst': 149, 'Olan': 150, 'Omep': 151, 'Onda': 152, 'Oxac': 153, 'Oxaz': 154, 'Oxyc': 155, 'Pant': 156, 'Papa': 157, 'Phen': 158, 'Phyt': 159, 
'Piog': 160, 'Pipe': 161, 'Pneu': 162, 'Poly': 163, 'Pota': 164, 'Prav': 165, 'Pred': 166, 'Proc': 167, 'Prom': 168, 'Prop': 169, 'Prot': 170, 'Pyri': 171, 'Quet': 172, 'Quin': 173, 'Rani': 174, 'Rifa': 175, 'Ritu': 176, 'Sarn': 177, 'Senn': 178, 'Sert': 179, 'Seve': 180, 'Sime': 181, 'Siro': 182, 'Sodi': 183, 'Spir': 184, 'Stav': 185, 'Sucr': 186, 'Sulf': 187, 'Tacr': 188, 'Tams': 189, 'Tema': 190, 'Thia': 191, 'Tiot': 192, 'Tiza': 193, 'Tors': 194, 'TraM': 195, 'Tube': 196, 'Tyle': 197, 'Unas': 198, 'Vals': 199, 'Vanc': 200, 'Vecu': 201, 'Vera': 202, 'Vita': 203, 'Warf': 204, 'Zinc': 205, 'Zolp': 206, 'feno': 207, 'traM': 208, 'traZ': 209}\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "\n",
            "Processing samples: 100%|██████████| 38/38 [00:00<00:00, 1234.77it/s]"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Generated 38 samples for task DrugRecommendationMIMIC3\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Sample Dataset Statistics:\n",
            "\t- Dataset: mimic3\n",
            "\t- Task: <pyhealth.tasks.drug_recommendation.DrugRecommendationMIMIC3 object at 0x130730bf0>\n",
            "\t- Number of samples: 38\n",
            "\n",
            "First sample structure:\n",
            "Patient ID: 1033\n",
            "Number of visits: 1\n",
            "Sample conditions (first visit): tensor([1, 2, 3, 4, 5])...\n",
            "Sample procedures (first visit): tensor([1, 2, 3, 4, 0])...\n",
            "Sample drugs (target): tensor([0., 0., 1., 0., 1., 1., 0., 0., 0., 0.])...\n"
          ]
        }
      ],
      "source": [
        "from pyhealth.tasks import DrugRecommendationMIMIC3\n",
        "\n",
        "# Build drug-recommendation samples: inputs are conditions/procedures per\n",
        "# visit, target is a multi-hot vector over the drug vocabulary.\n",
        "task = DrugRecommendationMIMIC3()\n",
        "samples = dataset.set_task(task, num_workers=4)\n",
        "\n",
        "print(\"Sample Dataset Statistics:\")\n",
        "print(f\"\\t- Dataset: {samples.dataset_name}\")\n",
        "print(f\"\\t- Task: {samples.task_name}\")\n",
        "print(f\"\\t- Number of samples: {len(samples)}\")\n",
        "\n",
        "# Peek at the first sample to sanity-check its structure before training.\n",
        "print(\"\\nFirst sample structure:\")\n",
        "print(f\"Patient ID: {samples.samples[0]['patient_id']}\")\n",
        "print(f\"Number of visits: {len(samples.samples[0]['conditions'])}\")\n",
        "print(f\"Sample conditions (first visit): {samples.samples[0]['conditions'][0][:5]}...\")\n",
        "print(f\"Sample procedures (first visit): {samples.samples[0]['procedures'][0][:5]}...\")\n",
        "print(f\"Sample drugs (target): {samples.samples[0]['drugs'][:10]}...\")\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Split Dataset and Create Data Loaders\n",
        "\n",
        "Split the dataset by patient to ensure no data leakage between train/validation/test sets.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 3,
      "metadata": {},
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Train samples: 26\n",
            "Validation samples: 4\n",
            "Test samples: 8\n"
          ]
        }
      ],
      "source": [
        "from pyhealth.datasets import split_by_patient, get_dataloader\n",
        "\n",
        "# Split at the patient level so no patient appears in more than one of\n",
        "# train/val/test -- prevents leakage across visits of the same patient.\n",
        "# NOTE(review): no random seed is passed, so the split may differ between\n",
        "# runs; confirm whether split_by_patient accepts a seed and pass one.\n",
        "train_dataset, val_dataset, test_dataset = split_by_patient(\n",
        "    samples, ratios=[0.7, 0.1, 0.2]\n",
        ")\n",
        "\n",
        "print(f\"Train samples: {len(train_dataset)}\")\n",
        "print(f\"Validation samples: {len(val_dataset)}\")\n",
        "print(f\"Test samples: {len(test_dataset)}\")\n",
        "\n",
        "# Batch size 32; shuffle only the training split.\n",
        "train_dataloader = get_dataloader(train_dataset, batch_size=32, shuffle=True)\n",
        "val_dataloader = get_dataloader(val_dataset, batch_size=32, shuffle=False)\n",
        "test_dataloader = get_dataloader(test_dataset, batch_size=32, shuffle=False)\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Initialize MoleRec Model\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 4,
      "metadata": {},
      "outputs": [
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "/Users/arjunchatterjee/PyHealth/pyhealth/sampler/sage_sampler.py:3: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n",
            "  import pkg_resources\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "MoleRec(\n",
            "  (dropout_fn): Dropout(p=0.5, inplace=False)\n",
            "  (embedding_model): EmbeddingModel(embedding_layers=ModuleDict(\n",
            "    (conditions): Embedding(198, 64)\n",
            "    (procedures): Embedding(99, 64)\n",
            "    (drugs_hist): Embedding(171, 64)\n",
            "  ))\n",
            "  (substructure_graphs): StaticParaDict()\n",
            "  (molecule_graphs): StaticParaDict()\n",
            "  (rnns): ModuleDict(\n",
            "    (conditions): GRU(64, 64, batch_first=True)\n",
            "    (procedures): GRU(64, 64, batch_first=True)\n",
            "  )\n",
            "  (substructure_relation): Sequential(\n",
            "    (0): ReLU()\n",
            "    (1): Linear(in_features=128, out_features=64, bias=True)\n",
            "    (2): ReLU()\n",
            "    (3): Linear(in_features=64, out_features=1, bias=True)\n",
            "  )\n",
            "  (layer): MoleRecLayer(\n",
            "    (substructure_encoder): GINGraph(\n",
            "      (atom_encoder): AtomEncoder(\n",
            "        (atom_embedding_list): ModuleList(\n",
            "          (0): Embedding(119, 64)\n",
            "          (1): Embedding(5, 64)\n",
            "          (2-3): 2 x Embedding(12, 64)\n",
            "          (4): Embedding(10, 64)\n",
            "          (5-6): 2 x Embedding(6, 64)\n",
            "          (7-8): 2 x Embedding(2, 64)\n",
            "        )\n",
            "      )\n",
            "      (convs): ModuleList(\n",
            "        (0-3): 4 x GINConv(\n",
            "          (mlp): Sequential(\n",
            "            (0): Linear(in_features=64, out_features=128, bias=True)\n",
            "            (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "            (2): ReLU()\n",
            "            (3): Linear(in_features=128, out_features=64, bias=True)\n",
            "          )\n",
            "          (bond_encoder): BondEncoder(\n",
            "            (bond_embedding_list): ModuleList(\n",
            "              (0): Embedding(5, 64)\n",
            "              (1): Embedding(6, 64)\n",
            "              (2): Embedding(2, 64)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "      )\n",
            "      (batch_norms): ModuleList(\n",
            "        (0-3): 4 x BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "      )\n",
            "      (dropout_fun): Dropout(p=0.5, inplace=False)\n",
            "    )\n",
            "    (molecule_encoder): GINGraph(\n",
            "      (atom_encoder): AtomEncoder(\n",
            "        (atom_embedding_list): ModuleList(\n",
            "          (0): Embedding(119, 64)\n",
            "          (1): Embedding(5, 64)\n",
            "          (2-3): 2 x Embedding(12, 64)\n",
            "          (4): Embedding(10, 64)\n",
            "          (5-6): 2 x Embedding(6, 64)\n",
            "          (7-8): 2 x Embedding(2, 64)\n",
            "        )\n",
            "      )\n",
            "      (convs): ModuleList(\n",
            "        (0-3): 4 x GINConv(\n",
            "          (mlp): Sequential(\n",
            "            (0): Linear(in_features=64, out_features=128, bias=True)\n",
            "            (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "            (2): ReLU()\n",
            "            (3): Linear(in_features=128, out_features=64, bias=True)\n",
            "          )\n",
            "          (bond_encoder): BondEncoder(\n",
            "            (bond_embedding_list): ModuleList(\n",
            "              (0): Embedding(5, 64)\n",
            "              (1): Embedding(6, 64)\n",
            "              (2): Embedding(2, 64)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "      )\n",
            "      (batch_norms): ModuleList(\n",
            "        (0-3): 4 x BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "      )\n",
            "      (dropout_fun): Dropout(p=0.5, inplace=False)\n",
            "    )\n",
            "    (substructure_interaction_module): SAB(\n",
            "      (net): MAB(\n",
            "        (Qdense): Linear(in_features=64, out_features=64, bias=True)\n",
            "        (Kdense): Linear(in_features=64, out_features=64, bias=True)\n",
            "        (Vdense): Linear(in_features=64, out_features=64, bias=True)\n",
            "        (Odense): Linear(in_features=64, out_features=64, bias=True)\n",
            "        (ln1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
            "        (ln2): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
            "      )\n",
            "    )\n",
            "    (combination_feature_aggregator): AttnAgg(\n",
            "      (Qdense): Linear(in_features=64, out_features=64, bias=True)\n",
            "      (Kdense): Linear(in_features=64, out_features=64, bias=True)\n",
            "    )\n",
            "    (score_extractor): Sequential(\n",
            "      (0): Linear(in_features=64, out_features=32, bias=True)\n",
            "      (1): ReLU()\n",
            "      (2): Linear(in_features=32, out_features=1, bias=True)\n",
            "    )\n",
            "  )\n",
            ")\n"
          ]
        }
      ],
      "source": [
        "from pyhealth.models import MoleRec\n",
        "\n",
        "# Model hyperparameters, kept small to suit the dev-mode dataset.\n",
        "molerec_kwargs = {\n",
        "    \"embedding_dim\": 64,\n",
        "    \"hidden_dim\": 64,\n",
        "    \"num_rnn_layers\": 1,\n",
        "    \"num_gnn_layers\": 4,\n",
        "    \"dropout\": 0.5,\n",
        "}\n",
        "\n",
        "model = MoleRec(dataset=samples, **molerec_kwargs)\n",
        "print(model)\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Initialize Trainer\n",
        "\n",
        "We evaluate with Jaccard similarity, F1 score, PR-AUC, and the DDI (drug–drug interaction) rate.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 5,
      "metadata": {},
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "MoleRec(\n",
            "  (dropout_fn): Dropout(p=0.5, inplace=False)\n",
            "  (embedding_model): EmbeddingModel(embedding_layers=ModuleDict(\n",
            "    (conditions): Embedding(198, 64)\n",
            "    (procedures): Embedding(99, 64)\n",
            "    (drugs_hist): Embedding(171, 64)\n",
            "  ))\n",
            "  (substructure_graphs): StaticParaDict()\n",
            "  (molecule_graphs): StaticParaDict()\n",
            "  (rnns): ModuleDict(\n",
            "    (conditions): GRU(64, 64, batch_first=True)\n",
            "    (procedures): GRU(64, 64, batch_first=True)\n",
            "  )\n",
            "  (substructure_relation): Sequential(\n",
            "    (0): ReLU()\n",
            "    (1): Linear(in_features=128, out_features=64, bias=True)\n",
            "    (2): ReLU()\n",
            "    (3): Linear(in_features=64, out_features=1, bias=True)\n",
            "  )\n",
            "  (layer): MoleRecLayer(\n",
            "    (substructure_encoder): GINGraph(\n",
            "      (atom_encoder): AtomEncoder(\n",
            "        (atom_embedding_list): ModuleList(\n",
            "          (0): Embedding(119, 64)\n",
            "          (1): Embedding(5, 64)\n",
            "          (2-3): 2 x Embedding(12, 64)\n",
            "          (4): Embedding(10, 64)\n",
            "          (5-6): 2 x Embedding(6, 64)\n",
            "          (7-8): 2 x Embedding(2, 64)\n",
            "        )\n",
            "      )\n",
            "      (convs): ModuleList(\n",
            "        (0-3): 4 x GINConv(\n",
            "          (mlp): Sequential(\n",
            "            (0): Linear(in_features=64, out_features=128, bias=True)\n",
            "            (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "            (2): ReLU()\n",
            "            (3): Linear(in_features=128, out_features=64, bias=True)\n",
            "          )\n",
            "          (bond_encoder): BondEncoder(\n",
            "            (bond_embedding_list): ModuleList(\n",
            "              (0): Embedding(5, 64)\n",
            "              (1): Embedding(6, 64)\n",
            "              (2): Embedding(2, 64)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "      )\n",
            "      (batch_norms): ModuleList(\n",
            "        (0-3): 4 x BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "      )\n",
            "      (dropout_fun): Dropout(p=0.5, inplace=False)\n",
            "    )\n",
            "    (molecule_encoder): GINGraph(\n",
            "      (atom_encoder): AtomEncoder(\n",
            "        (atom_embedding_list): ModuleList(\n",
            "          (0): Embedding(119, 64)\n",
            "          (1): Embedding(5, 64)\n",
            "          (2-3): 2 x Embedding(12, 64)\n",
            "          (4): Embedding(10, 64)\n",
            "          (5-6): 2 x Embedding(6, 64)\n",
            "          (7-8): 2 x Embedding(2, 64)\n",
            "        )\n",
            "      )\n",
            "      (convs): ModuleList(\n",
            "        (0-3): 4 x GINConv(\n",
            "          (mlp): Sequential(\n",
            "            (0): Linear(in_features=64, out_features=128, bias=True)\n",
            "            (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "            (2): ReLU()\n",
            "            (3): Linear(in_features=128, out_features=64, bias=True)\n",
            "          )\n",
            "          (bond_encoder): BondEncoder(\n",
            "            (bond_embedding_list): ModuleList(\n",
            "              (0): Embedding(5, 64)\n",
            "              (1): Embedding(6, 64)\n",
            "              (2): Embedding(2, 64)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "      )\n",
            "      (batch_norms): ModuleList(\n",
            "        (0-3): 4 x BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "      )\n",
            "      (dropout_fun): Dropout(p=0.5, inplace=False)\n",
            "    )\n",
            "    (substructure_interaction_module): SAB(\n",
            "      (net): MAB(\n",
            "        (Qdense): Linear(in_features=64, out_features=64, bias=True)\n",
            "        (Kdense): Linear(in_features=64, out_features=64, bias=True)\n",
            "        (Vdense): Linear(in_features=64, out_features=64, bias=True)\n",
            "        (Odense): Linear(in_features=64, out_features=64, bias=True)\n",
            "        (ln1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
            "        (ln2): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
            "      )\n",
            "    )\n",
            "    (combination_feature_aggregator): AttnAgg(\n",
            "      (Qdense): Linear(in_features=64, out_features=64, bias=True)\n",
            "      (Kdense): Linear(in_features=64, out_features=64, bias=True)\n",
            "    )\n",
            "    (score_extractor): Sequential(\n",
            "      (0): Linear(in_features=64, out_features=32, bias=True)\n",
            "      (1): ReLU()\n",
            "      (2): Linear(in_features=32, out_features=1, bias=True)\n",
            "    )\n",
            "  )\n",
            ")\n",
            "Metrics: ['jaccard_samples', 'f1_samples', 'pr_auc_samples', 'ddi']\n",
            "Device: cpu\n",
            "\n",
            "Baseline performance before training:\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "Evaluation:   0%|          | 0/1 [00:00<?, ?it/s]"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "Evaluation: 100%|██████████| 1/1 [00:00<00:00,  1.76it/s]\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "{'jaccard_samples': 0.1686518337947108, 'f1_samples': 0.2766813916994273, 'pr_auc_samples': 0.18212385799674458, 'ddi_score': 0.0, 'loss': 0.697697639465332}\n"
          ]
        }
      ],
      "source": [
        "from pyhealth.trainer import Trainer\n",
        "\n",
        "# Track the sample-averaged set metrics plus the DDI rate.\n",
        "eval_metrics = [\"jaccard_samples\", \"f1_samples\", \"pr_auc_samples\", \"ddi\"]\n",
        "\n",
        "trainer = Trainer(model=model, metrics=eval_metrics)\n",
        "\n",
        "# Evaluate the untrained model so we have a baseline to compare against.\n",
        "print(\"Baseline performance before training:\")\n",
        "baseline_results = trainer.evaluate(test_dataloader)\n",
        "print(baseline_results)\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Train the Model\n",
        "\n",
        "Train the model for 5 epochs as a quick demonstration; a production run would require substantially more epochs (and the full, non-dev dataset).\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 6,
      "metadata": {},
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Training:\n",
            "Batch size: 32\n",
            "Optimizer: <class 'torch.optim.adam.Adam'>\n",
            "Optimizer params: {'lr': 0.0001}\n",
            "Weight decay: 0.0\n",
            "Max grad norm: None\n",
            "Val dataloader: <torch.utils.data.dataloader.DataLoader object at 0x1318addc0>\n",
            "Monitor: pr_auc_samples\n",
            "Monitor criterion: max\n",
            "Epochs: 5\n",
            "\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "Epoch 0 / 5: 100%|██████████| 1/1 [00:00<00:00, 19.24it/s]"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--- Train epoch-0, step-1 ---\n",
            "loss: 0.7093\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "\n",
            "Evaluation: 100%|██████████| 1/1 [00:00<00:00, 266.58it/s]\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--- Eval epoch-0, step-1 ---\n",
            "jaccard_samples: 0.1445\n",
            "f1_samples: 0.2449\n",
            "pr_auc_samples: 0.1699\n",
            "ddi_score: 0.0000\n",
            "loss: 0.6902\n",
            "New best pr_auc_samples score (0.1699) at epoch-0, step-1\n",
            "\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "Epoch 1 / 5: 100%|██████████| 1/1 [00:00<00:00, 94.00it/s]"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--- Train epoch-1, step-2 ---\n",
            "loss: 0.7081\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "\n",
            "Evaluation: 100%|██████████| 1/1 [00:00<00:00, 254.57it/s]\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--- Eval epoch-1, step-2 ---\n",
            "jaccard_samples: 0.1445\n",
            "f1_samples: 0.2449\n",
            "pr_auc_samples: 0.1698\n",
            "ddi_score: 0.0000\n",
            "loss: 0.6879\n",
            "\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "Epoch 2 / 5: 100%|██████████| 1/1 [00:00<00:00, 76.76it/s]"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--- Train epoch-2, step-3 ---\n",
            "loss: 0.6987\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "\n",
            "Evaluation: 100%|██████████| 1/1 [00:00<00:00, 210.76it/s]\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--- Eval epoch-2, step-3 ---\n",
            "jaccard_samples: 0.1445\n",
            "f1_samples: 0.2449\n",
            "pr_auc_samples: 0.1706\n",
            "ddi_score: 0.0000\n",
            "loss: 0.6856\n",
            "New best pr_auc_samples score (0.1706) at epoch-2, step-3\n",
            "\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "Epoch 3 / 5: 100%|██████████| 1/1 [00:00<00:00, 87.71it/s]"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--- Train epoch-3, step-4 ---\n",
            "loss: 0.7032\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "\n",
            "Evaluation: 100%|██████████| 1/1 [00:00<00:00, 279.96it/s]\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--- Eval epoch-3, step-4 ---\n",
            "jaccard_samples: 0.1447\n",
            "f1_samples: 0.2452\n",
            "pr_auc_samples: 0.1698\n",
            "ddi_score: 0.0000\n",
            "loss: 0.6834\n",
            "\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "Epoch 4 / 5: 100%|██████████| 1/1 [00:00<00:00, 88.91it/s]"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--- Train epoch-4, step-5 ---\n",
            "loss: 0.6947\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "\n",
            "Evaluation: 100%|██████████| 1/1 [00:00<00:00, 246.07it/s]\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "--- Eval epoch-4, step-5 ---\n",
            "jaccard_samples: 0.1450\n",
            "f1_samples: 0.2456\n",
            "pr_auc_samples: 0.1705\n",
            "ddi_score: 0.0000\n",
            "loss: 0.6811\n",
            "Loaded best model\n"
          ]
        }
      ],
      "source": [
        "# Train with Adam (lr=1e-4), selecting checkpoints by validation PR-AUC;\n",
        "# per the trainer output, the best checkpoint is reloaded after training.\n",
        "trainer.train(\n",
        "    train_dataloader=train_dataloader,\n",
        "    val_dataloader=val_dataloader,\n",
        "    epochs=5,\n",
        "    monitor=\"pr_auc_samples\",\n",
        "    optimizer_params={\"lr\": 1e-4},\n",
        ")\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Evaluate on Test Set\n",
        "\n",
        "Evaluate the trained model on the test set to see final performance metrics.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 7,
      "metadata": {},
      "outputs": [
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "Evaluation: 100%|██████████| 1/1 [00:00<00:00, 195.81it/s]\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Final test set performance:\n",
            "{'jaccard_samples': 0.16870862480379736, 'f1_samples': 0.2767764829870635, 'pr_auc_samples': 0.18286476395431256, 'ddi_score': 0.0, 'loss': 0.6917362809181213}\n",
            "\n",
            "Key Metrics:\n",
            "  PR-AUC: 0.1829\n",
            "  F1 Score: 0.2768\n",
            "  Jaccard: 0.1687\n",
            "  DDI Rate: 0.0000 (lower is better)\n"
          ]
        }
      ],
      "source": [
        "test_results = trainer.evaluate(test_dataloader)\n",
        "print(\"Final test set performance:\")\n",
        "print(test_results)\n",
        "\n",
        "\n",
        "def _fmt_metric(metric: str) -> str:\n",
        "    \"\"\"Format a metric to 4 decimals, or 'N/A' when missing.\n",
        "\n",
        "    Bug fix: the previous code applied :.4f directly to\n",
        "    test_results.get(metric, 'N/A'), which raises ValueError whenever the\n",
        "    key is absent (a string cannot take a float format spec).\n",
        "    \"\"\"\n",
        "    value = test_results.get(metric)\n",
        "    return f\"{value:.4f}\" if value is not None else \"N/A\"\n",
        "\n",
        "\n",
        "print(\"\\nKey Metrics:\")\n",
        "print(f\"  PR-AUC: {_fmt_metric('pr_auc_samples')}\")\n",
        "print(f\"  F1 Score: {_fmt_metric('f1_samples')}\")\n",
        "print(f\"  Jaccard: {_fmt_metric('jaccard_samples')}\")\n",
        "print(f\"  DDI Rate: {_fmt_metric('ddi_score')} (lower is better)\")\n"
      ]
    }
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "base",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.12.11"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 2
}
