{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "b33f85e7",
   "metadata": {},
   "source": [
    "# Atom and Bond Prediction"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5267af8d",
   "metadata": {},
   "source": [
    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/chemprop/chemprop/blob/main/examples/mol_atom_bond.ipynb)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3f163e4c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Install chemprop from GitHub if running in Google Colab\n",
    "import os\n",
    "\n",
    "if os.getenv(\"COLAB_RELEASE_TAG\"):\n",
    "    try:\n",
    "        import chemprop\n",
    "    except ImportError:\n",
    "        !git clone https://github.com/chemprop/chemprop.git\n",
    "        %cd chemprop\n",
    "        !pip install .\n",
    "        %cd examples"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "ab97f25b",
   "metadata": {},
   "outputs": [],
   "source": [
    "import ast\n",
    "from pathlib import Path\n",
    "\n",
    "from lightning import pytorch as pl\n",
    "from lightning.pytorch.callbacks import ModelCheckpoint\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch\n",
    "\n",
    "from chemprop import data, featurizers, models, nn\n",
    "\n",
    "chemprop_dir = Path.cwd().parent\n",
    "data_dir = chemprop_dir / \"tests\" / \"data\" / \"mol_atom_bond\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "51a72a0c",
   "metadata": {},
   "source": [
    "This notebook shows how to use Chemprop to fit models on atom and bond property data. One model can predict molecule-, atom-, and bond-level properties at the same time. "
   ]
  },
  {
   "cell_type": "markdown",
   "id": "614e2585",
   "metadata": {},
   "source": [
    "## Make datapoints"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bfe34443",
   "metadata": {},
   "source": [
    "The atom and bond targets are saved as strings that look like lists. This example uses regression targets, but classification (including multiclass) is also supported."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "4b41eb5e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>smiles</th>\n",
       "      <th>mol_y1</th>\n",
       "      <th>mol_y2</th>\n",
       "      <th>atom_y1</th>\n",
       "      <th>atom_y2</th>\n",
       "      <th>bond_y1</th>\n",
       "      <th>bond_y2</th>\n",
       "      <th>weight</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>[H][H]</td>\n",
       "      <td>2.016</td>\n",
       "      <td>2.0</td>\n",
       "      <td>[1, 1]</td>\n",
       "      <td>[1.008, 1.008]</td>\n",
       "      <td>[2]</td>\n",
       "      <td>[-2]</td>\n",
       "      <td>0.090909</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>C</td>\n",
       "      <td>16.043</td>\n",
       "      <td>1.0</td>\n",
       "      <td>[6]</td>\n",
       "      <td>[12.011]</td>\n",
       "      <td>[]</td>\n",
       "      <td>[]</td>\n",
       "      <td>0.181818</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>CN</td>\n",
       "      <td>31.058</td>\n",
       "      <td>2.0</td>\n",
       "      <td>[6, 7]</td>\n",
       "      <td>[12.011, 14.007]</td>\n",
       "      <td>[13]</td>\n",
       "      <td>[-13]</td>\n",
       "      <td>0.272727</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>CN</td>\n",
       "      <td>31.058</td>\n",
       "      <td>NaN</td>\n",
       "      <td>[6, 7]</td>\n",
       "      <td>[None, 14.007]</td>\n",
       "      <td>[13]</td>\n",
       "      <td>[None]</td>\n",
       "      <td>0.363636</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>CC</td>\n",
       "      <td>30.070</td>\n",
       "      <td>2.0</td>\n",
       "      <td>[6, 6]</td>\n",
       "      <td>[12.011, 12.011]</td>\n",
       "      <td>[12]</td>\n",
       "      <td>[-12]</td>\n",
       "      <td>0.454545</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>[CH2:3]=[N+:1]([H:4])[H:2]</td>\n",
       "      <td>30.050</td>\n",
       "      <td>4.0</td>\n",
       "      <td>[7, 1, 6, 1]</td>\n",
       "      <td>[14.007, 1.008, 12.011, 1.008]</td>\n",
       "      <td>[13, 8, 8]</td>\n",
       "      <td>[-13, -8, -8]</td>\n",
       "      <td>0.545455</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>CCCC</td>\n",
       "      <td>58.124</td>\n",
       "      <td>4.0</td>\n",
       "      <td>[6, 6, 6, 6]</td>\n",
       "      <td>[12.011, 12.011, 12.011, 12.011]</td>\n",
       "      <td>[12, 12, 12]</td>\n",
       "      <td>[-12, -12, -12]</td>\n",
       "      <td>0.636364</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>CO</td>\n",
       "      <td>32.042</td>\n",
       "      <td>2.0</td>\n",
       "      <td>[6, 8]</td>\n",
       "      <td>[12.011, 15.999]</td>\n",
       "      <td>[14]</td>\n",
       "      <td>[-14]</td>\n",
       "      <td>0.727273</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>CC#N</td>\n",
       "      <td>41.053</td>\n",
       "      <td>3.0</td>\n",
       "      <td>[6, 6, 7]</td>\n",
       "      <td>[12.011, 12.011, 14.007]</td>\n",
       "      <td>[12, 13]</td>\n",
       "      <td>[-12, -13]</td>\n",
       "      <td>0.818182</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>C1NN1</td>\n",
       "      <td>44.057</td>\n",
       "      <td>3.0</td>\n",
       "      <td>[6, 7, 7]</td>\n",
       "      <td>[12.011, 14.007, 14.007]</td>\n",
       "      <td>[13, 14, 13]</td>\n",
       "      <td>[-13, -14, -13]</td>\n",
       "      <td>0.909091</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10</th>\n",
       "      <td>c1cc[n-]c1</td>\n",
       "      <td>66.083</td>\n",
       "      <td>5.0</td>\n",
       "      <td>[6, 6, 6, 7, 6]</td>\n",
       "      <td>[12.011, 12.011, 12.011, 14.007, 12.011]</td>\n",
       "      <td>[12, 12, 13, 13, 12]</td>\n",
       "      <td>[-12, -12, -13, -13, -12]</td>\n",
       "      <td>1.000000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                        smiles  mol_y1  mol_y2          atom_y1  \\\n",
       "0                       [H][H]   2.016     2.0           [1, 1]   \n",
       "1                            C  16.043     1.0              [6]   \n",
       "2                           CN  31.058     2.0           [6, 7]   \n",
       "3                           CN  31.058     NaN           [6, 7]   \n",
       "4                           CC  30.070     2.0           [6, 6]   \n",
       "5   [CH2:3]=[N+:1]([H:4])[H:2]  30.050     4.0     [7, 1, 6, 1]   \n",
       "6                         CCCC  58.124     4.0     [6, 6, 6, 6]   \n",
       "7                           CO  32.042     2.0           [6, 8]   \n",
       "8                         CC#N  41.053     3.0        [6, 6, 7]   \n",
       "9                        C1NN1  44.057     3.0        [6, 7, 7]   \n",
       "10                  c1cc[n-]c1  66.083     5.0  [6, 6, 6, 7, 6]   \n",
       "\n",
       "                                     atom_y2               bond_y1  \\\n",
       "0                             [1.008, 1.008]                   [2]   \n",
       "1                                   [12.011]                    []   \n",
       "2                           [12.011, 14.007]                  [13]   \n",
       "3                             [None, 14.007]                  [13]   \n",
       "4                           [12.011, 12.011]                  [12]   \n",
       "5             [14.007, 1.008, 12.011, 1.008]            [13, 8, 8]   \n",
       "6           [12.011, 12.011, 12.011, 12.011]          [12, 12, 12]   \n",
       "7                           [12.011, 15.999]                  [14]   \n",
       "8                   [12.011, 12.011, 14.007]              [12, 13]   \n",
       "9                   [12.011, 14.007, 14.007]          [13, 14, 13]   \n",
       "10  [12.011, 12.011, 12.011, 14.007, 12.011]  [12, 12, 13, 13, 12]   \n",
       "\n",
       "                      bond_y2    weight  \n",
       "0                        [-2]  0.090909  \n",
       "1                          []  0.181818  \n",
       "2                       [-13]  0.272727  \n",
       "3                      [None]  0.363636  \n",
       "4                       [-12]  0.454545  \n",
       "5               [-13, -8, -8]  0.545455  \n",
       "6             [-12, -12, -12]  0.636364  \n",
       "7                       [-14]  0.727273  \n",
       "8                  [-12, -13]  0.818182  \n",
       "9             [-13, -14, -13]  0.909091  \n",
       "10  [-12, -12, -13, -13, -12]  1.000000  "
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_input = pd.read_csv(data_dir / \"regression.csv\")\n",
    "df_input"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f92e5862",
   "metadata": {},
   "source": [
    "### Load optional extra features and descriptors\n",
    "Extra bond descriptors can be used when making bond property predictions, analogous to extra atom descriptors."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "85a385b7",
   "metadata": {},
   "outputs": [],
   "source": [
    "x_ds = np.load(data_dir / \"descriptors.npz\")[\"arr_0\"]\n",
    "V_fs = np.load(data_dir / \"atom_features_descriptors.npz\")\n",
    "V_fs = [V_fs[f\"arr_{i}\"] for i in range(len(V_fs))]\n",
    "V_ds = V_fs\n",
    "E_fs = np.load(data_dir / \"bond_features_descriptors.npz\")\n",
    "E_fs = [E_fs[f\"arr_{i}\"] for i in range(len(E_fs))]\n",
    "E_ds = [np.repeat(E_f, repeats=2, axis=0) for E_f in E_fs]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "64e34f4a",
   "metadata": {},
   "outputs": [],
   "source": [
    "columns = [\"smiles\", \"mol_y1\", \"mol_y2\", \"atom_y1\", \"atom_y2\", \"bond_y1\", \"bond_y2\", \"weight\"]\n",
    "smis = df_input.loc[:, columns[0]].values\n",
    "mol_ys = df_input.loc[:, columns[1:3]].values\n",
    "atoms_ys = df_input.loc[:, columns[3:5]].values\n",
    "bonds_ys = df_input.loc[:, columns[5:7]].values\n",
    "weights = df_input.loc[:, columns[7]].values\n",
    "\n",
    "# String lists are converted to lists using ast.literal_eval\n",
    "atoms_ys = [\n",
    "    np.array([ast.literal_eval(atom_y) for atom_y in atom_ys], dtype=float).T\n",
    "    for atom_ys in atoms_ys\n",
    "]\n",
    "bonds_ys = [\n",
    "    np.array([ast.literal_eval(bond_y) for bond_y in bond_ys], dtype=float).T\n",
    "    for bond_ys in bonds_ys\n",
    "]\n",
    "\n",
    "datapoints = [\n",
    "    data.MolAtomBondDatapoint.from_smi(\n",
    "        smi,\n",
    "        keep_h=True,\n",
    "        add_h=False,\n",
    "        # If the atom targets follow the order of an atom mapping in the SMILES string instead of\n",
    "        # the order of the atoms in the SMILES string (i.e. [F:2][Cl:1]), set reorder_atoms=True.\n",
    "        reorder_atoms=True,\n",
    "        y=mol_ys[i],\n",
    "        atom_y=atoms_ys[i],\n",
    "        bond_y=bonds_ys[i],\n",
    "        weight=weights[i],\n",
    "        x_d=x_ds[i],\n",
    "        V_f=V_fs[i],\n",
    "        V_d=V_ds[i],\n",
    "        E_f=E_fs[i],\n",
    "        E_d=E_ds[i],\n",
    "    )\n",
    "    for i, smi in enumerate(smis)\n",
    "]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "32bbca8b",
   "metadata": {},
   "source": [
    "If the regression targets are bounded (i.e. look like \"<3\" or \">0.1\"), parsing the atom and bond targets is a bit more complicated. Note that `BoundedMSE` should be used as the loss function (`RegressionFFN(criterion=BoundedMSE)`) and the less-than and greater-than masks should be given to the datapoints. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "573c543f",
   "metadata": {},
   "outputs": [],
   "source": [
    "bounded = False\n",
    "if bounded:    \n",
    "    mol_ys = mol_ys.astype(str)\n",
    "    lt_mask = mol_ys.map(lambda x: \"<\" in x).to_numpy()\n",
    "    gt_mask = mol_ys.map(lambda x: \">\" in x).to_numpy()\n",
    "    mol_ys = mol_ys.map(lambda x: x.strip(\"<\").strip(\">\")).to_numpy(np.single)\n",
    "\n",
    "    atoms_ys = atoms_ys.map(ast.literal_eval)\n",
    "    atom_lt_masks = atoms_ys.map(lambda L: [\"<\" in v if v else False for v in L])\n",
    "    atom_gt_masks = atoms_ys.map(lambda L: [\">\" in v if v else False for v in L])\n",
    "\n",
    "    atom_lt_masks = atom_lt_masks.apply(lambda row: np.vstack(row.values).T, axis=1).tolist()\n",
    "    atom_gt_masks = atom_gt_masks.apply(lambda row: np.vstack(row.values).T, axis=1).tolist()\n",
    "    atoms_ys = atoms_ys.map(\n",
    "        lambda L: np.array([v.strip(\"<\").strip(\">\") if v else \"nan\" for v in L], dtype=np.single)\n",
    "    )\n",
    "    atoms_ys = atoms_ys.apply(lambda row: np.vstack(row.values).T, axis=1).tolist()\n",
    "\n",
    "    bonds_ys = bonds_ys.map(ast.literal_eval)\n",
    "    bond_lt_masks = bonds_ys.map(lambda L: [\"<\" in v if v else False for v in L])\n",
    "    bond_gt_masks = bonds_ys.map(lambda L: [\">\" in v if v else False for v in L])\n",
    "\n",
    "    bond_lt_masks = bond_lt_masks.apply(lambda row: np.vstack(row.values).T, axis=1).tolist()\n",
    "    bond_gt_masks = bond_gt_masks.apply(lambda row: np.vstack(row.values).T, axis=1).tolist()\n",
    "\n",
    "    bond_lt_masks = [bond_lt_mask.astype(bool) for bond_lt_mask in bond_lt_masks]\n",
    "    bond_gt_masks = [bond_gt_mask.astype(bool) for bond_gt_mask in bond_gt_masks]\n",
    "\n",
    "    bonds_ys = bonds_ys.map(\n",
    "        lambda L: np.array([v.strip(\"<\").strip(\">\") if v else \"nan\" for v in L], dtype=np.single)\n",
    "    )\n",
    "    bonds_ys = bonds_ys.apply(lambda row: np.vstack(row.values).T, axis=1).tolist()\n",
    "\n",
    "    datapoints = [\n",
    "        data.MolAtomBondDatapoint.from_smi(\n",
    "            smi,\n",
    "            keep_h=True,\n",
    "            add_h=False,\n",
    "            reorder_atoms=True,\n",
    "            y=mol_ys[i],\n",
    "            atom_y=atoms_ys[i],\n",
    "            bond_y=bonds_ys[i],\n",
    "            weight=weights[i],\n",
    "            lt_mask=lt_mask[i],\n",
    "            gt_mask=gt_mask[i],\n",
    "            atom_lt_mask=atom_lt_masks[i],\n",
    "            atom_gt_mask=atom_gt_masks[i],\n",
    "            bond_lt_mask=bond_lt_masks[i],\n",
    "            bond_gt_mask=bond_gt_masks[i],\n",
    "        )\n",
    "        for i, smi in enumerate(smis)\n",
    "    ]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9cd0d2b0",
   "metadata": {},
   "source": [
    "## Make datasets "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "991a3c63",
   "metadata": {},
   "outputs": [],
   "source": [
    "featurizer = featurizers.SimpleMoleculeMolGraphFeaturizer(\n",
    "    extra_atom_fdim=V_fs[0].shape[1], extra_bond_fdim=E_fs[0].shape[1]\n",
    ")\n",
    "\n",
    "train_dataset = data.MolAtomBondDataset(datapoints, featurizer=featurizer)\n",
    "val_dataset = data.MolAtomBondDataset(datapoints, featurizer=featurizer)\n",
    "test_dataset = data.MolAtomBondDataset(datapoints, featurizer=featurizer)\n",
    "predict_dataset = data.MolAtomBondDataset(datapoints, featurizer=featurizer)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "84014bd1",
   "metadata": {},
   "source": [
    "## Scale the extra features and descriptors\n",
    "If extra features and descriptors are used, they can be scaled to make training easier. The scalers are turned into \"transforms\" which are given to the model to use at inference time. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "00f071ae",
   "metadata": {},
   "outputs": [],
   "source": [
    "V_f_scaler = train_dataset.normalize_inputs(\"V_f\")\n",
    "E_f_scaler = train_dataset.normalize_inputs(\"E_f\")\n",
    "V_d_scaler = train_dataset.normalize_inputs(\"V_d\")\n",
    "E_d_scaler = train_dataset.normalize_inputs(\"E_d\")\n",
    "val_dataset.normalize_inputs(\"V_f\", V_f_scaler)\n",
    "val_dataset.normalize_inputs(\"E_f\", E_f_scaler)\n",
    "val_dataset.normalize_inputs(\"V_d\", V_d_scaler)\n",
    "val_dataset.normalize_inputs(\"E_d\", E_d_scaler)\n",
    "\n",
    "V_f_transform = nn.ScaleTransform.from_standard_scaler(\n",
    "    V_f_scaler, pad=(featurizer.atom_fdim - featurizer.extra_atom_fdim)\n",
    ")\n",
    "E_f_transform = nn.ScaleTransform.from_standard_scaler(\n",
    "    E_f_scaler, pad=(featurizer.bond_fdim - featurizer.extra_bond_fdim)\n",
    ")\n",
    "graph_transform = nn.GraphTransform(V_f_transform, E_f_transform)\n",
    "\n",
    "V_d_transform = nn.ScaleTransform.from_standard_scaler(V_d_scaler)\n",
    "E_d_transform = nn.ScaleTransform.from_standard_scaler(E_d_scaler)\n",
    "\n",
    "X_d_scaler = train_dataset.normalize_inputs(\"X_d\")\n",
    "val_dataset.normalize_inputs(\"X_d\", X_d_scaler)\n",
    "X_d_transform = nn.ScaleTransform.from_standard_scaler(X_d_scaler)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b237aac1",
   "metadata": {},
   "source": [
    "## Scale the regression targets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "d48ba56c",
   "metadata": {},
   "outputs": [],
   "source": [
    "mol_target_scaler = train_dataset.normalize_targets(\"mol\")\n",
    "atom_target_scaler = train_dataset.normalize_targets(\"atom\")\n",
    "bond_target_scaler = train_dataset.normalize_targets(\"bond\")\n",
    "val_dataset.normalize_targets(\"mol\", mol_target_scaler)\n",
    "val_dataset.normalize_targets(\"atom\", atom_target_scaler)\n",
    "val_dataset.normalize_targets(\"bond\", bond_target_scaler)\n",
    "mol_target_transform = nn.UnscaleTransform.from_standard_scaler(mol_target_scaler)\n",
    "atom_target_transform = nn.UnscaleTransform.from_standard_scaler(atom_target_scaler)\n",
    "bond_target_transform = nn.UnscaleTransform.from_standard_scaler(bond_target_scaler)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a6e3a798",
   "metadata": {},
   "source": [
    "## Make dataloaders"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "68fbf27b",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_dataloader = data.build_dataloader(train_dataset, shuffle=True, batch_size=4)\n",
    "val_dataloader = data.build_dataloader(val_dataset, shuffle=False, batch_size=4)\n",
    "test_dataloader = data.build_dataloader(test_dataset, shuffle=False, batch_size=4)\n",
    "predict_dataloader = data.build_dataloader(predict_dataset, shuffle=False, batch_size=4)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ae83b225",
   "metadata": {},
   "source": [
    "## The MAB (mol atom bond) message passing returns both learned node embeddings and learned edge embeddings\n",
    "`MABBondMessagePassing` takes the same customization arguments as the usual `BondMessagePassing` class"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "cede1449",
   "metadata": {},
   "outputs": [],
   "source": [
    "mp = nn.MABBondMessagePassing(\n",
    "    d_v=featurizer.atom_fdim,\n",
    "    d_e=featurizer.bond_fdim,\n",
    "    d_h=100,\n",
    "    d_vd=V_ds[0].shape[1],\n",
    "    d_ed=E_ds[0].shape[1],\n",
    "    dropout=0.1,\n",
    "    activation=\"tanh\",\n",
    "    depth=4,\n",
    "    graph_transform=graph_transform,\n",
    "    V_d_transform=V_d_transform,\n",
    "    E_d_transform=E_d_transform,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6d8b67c8",
   "metadata": {},
   "source": [
    "## A separate predictor is used for each of the molecule, atom, and bond predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "5da5beb6",
   "metadata": {},
   "outputs": [],
   "source": [
    "agg = nn.MeanAggregation()\n",
    "\n",
    "# Note that each predictor may have a different input dimension\n",
    "mol_predictor = nn.RegressionFFN(\n",
    "    input_dim=mp.output_dims[0] + x_ds.shape[1],\n",
    "    n_tasks=mol_ys.shape[1],\n",
    "    output_transform=mol_target_transform,\n",
    ")\n",
    "atom_predictor = nn.RegressionFFN(\n",
    "    input_dim=mp.output_dims[0],\n",
    "    n_tasks=atoms_ys[0].shape[1],\n",
    "    output_transform=atom_target_transform,\n",
    ")\n",
    "bond_predictor = nn.RegressionFFN(\n",
    "    input_dim=(mp.output_dims[1] * 2),\n",
    "    n_tasks=bonds_ys[0].shape[1],\n",
    "    output_transform=bond_target_transform,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "123c3204",
   "metadata": {},
   "source": [
    "Different predictors can be used for different types of tasks including but not limited to `MveFFN`, `BinaryClassificationFFN`, `MulticlassClassificationFFN`."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b5288538",
   "metadata": {},
   "source": [
    "## Combine the layers into a single model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "ab1d3bfc",
   "metadata": {},
   "outputs": [],
   "source": [
    "metrics = [nn.MAE(), nn.RMSE()]\n",
    "model = models.MolAtomBondMPNN(\n",
    "    message_passing=mp,\n",
    "    agg=agg,\n",
    "    mol_predictor=mol_predictor,\n",
    "    atom_predictor=atom_predictor,\n",
    "    bond_predictor=bond_predictor,\n",
    "    batch_norm=True,\n",
    "    metrics=metrics,\n",
    "    X_d_transform=X_d_transform,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "85b9f9f0",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "MolAtomBondMPNN(\n",
       "  (message_passing): MABBondMessagePassing(\n",
       "    (W_i): Linear(in_features=90, out_features=100, bias=False)\n",
       "    (W_h): Linear(in_features=100, out_features=100, bias=False)\n",
       "    (W_vo): Linear(in_features=174, out_features=100, bias=True)\n",
       "    (W_vd): Linear(in_features=102, out_features=102, bias=True)\n",
       "    (W_eo): Linear(in_features=116, out_features=100, bias=True)\n",
       "    (W_ed): Linear(in_features=102, out_features=102, bias=True)\n",
       "    (dropout): Dropout(p=0.1, inplace=False)\n",
       "    (tau): Tanh()\n",
       "    (V_d_transform): ScaleTransform()\n",
       "    (E_d_transform): ScaleTransform()\n",
       "    (graph_transform): GraphTransform(\n",
       "      (V_transform): ScaleTransform()\n",
       "      (E_transform): ScaleTransform()\n",
       "    )\n",
       "  )\n",
       "  (agg): MeanAggregation()\n",
       "  (mol_predictor): RegressionFFN(\n",
       "    (ffn): MLP(\n",
       "      (0): Sequential(\n",
       "        (0): Linear(in_features=104, out_features=300, bias=True)\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): ReLU()\n",
       "        (1): Dropout(p=0.0, inplace=False)\n",
       "        (2): Linear(in_features=300, out_features=2, bias=True)\n",
       "      )\n",
       "    )\n",
       "    (criterion): MSE(task_weights=[[1.0, 1.0]])\n",
       "    (output_transform): UnscaleTransform()\n",
       "  )\n",
       "  (atom_predictor): RegressionFFN(\n",
       "    (ffn): MLP(\n",
       "      (0): Sequential(\n",
       "        (0): Linear(in_features=102, out_features=300, bias=True)\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): ReLU()\n",
       "        (1): Dropout(p=0.0, inplace=False)\n",
       "        (2): Linear(in_features=300, out_features=2, bias=True)\n",
       "      )\n",
       "    )\n",
       "    (criterion): MSE(task_weights=[[1.0, 1.0]])\n",
       "    (output_transform): UnscaleTransform()\n",
       "  )\n",
       "  (bond_predictor): RegressionFFN(\n",
       "    (ffn): MLP(\n",
       "      (0): Sequential(\n",
       "        (0): Linear(in_features=204, out_features=300, bias=True)\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): ReLU()\n",
       "        (1): Dropout(p=0.0, inplace=False)\n",
       "        (2): Linear(in_features=300, out_features=2, bias=True)\n",
       "      )\n",
       "    )\n",
       "    (criterion): MSE(task_weights=[[1.0, 1.0]])\n",
       "    (output_transform): UnscaleTransform()\n",
       "  )\n",
       "  (bns): ModuleList(\n",
       "    (0-2): 3 x BatchNorm1d(102, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "  )\n",
       "  (X_d_transform): ScaleTransform()\n",
       "  (metricss): ModuleList(\n",
       "    (0-2): 3 x ModuleList(\n",
       "      (0): MAE(task_weights=[[1.0]])\n",
       "      (1): RMSE(task_weights=[[1.0]])\n",
       "      (2): MSE(task_weights=[[1.0, 1.0]])\n",
       "    )\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9ffd1b02",
   "metadata": {},
   "source": [
    "If any of molecule, atom, or bond targets are not used, the corresponding predictor isn't added to the model. If bond targets are not used, the message passing layer should be told to not return the bond embeddings to avoid initializing weight matrices that won't be used. If molecule targets are not used, the aggregation layer isn't added to the model. If both molecule and atom targets are not used, the message passing layer should be told not to return the node embeddings. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "fc5291eb",
   "metadata": {},
   "outputs": [],
   "source": [
    "no_bond = False\n",
    "no_mol = False\n",
    "no_mol_atom = False\n",
    "\n",
    "if no_bond:\n",
    "    mp = nn.MABBondMessagePassing(return_edge_embeddings=False)\n",
    "    agg = nn.NormAggregation()\n",
    "    mol_predictor = nn.RegressionFFN()\n",
    "    atom_predictor = nn.RegressionFFN()\n",
    "    model = models.MolAtomBondMPNN(\n",
    "        message_passing=mp, agg=agg, mol_predictor=mol_predictor, atom_predictor=atom_predictor\n",
    "    )\n",
    "\n",
    "if no_mol:\n",
    "    mp = nn.MABBondMessagePassing()\n",
    "    atom_predictor = nn.RegressionFFN()\n",
    "    bond_predictor = nn.RegressionFFN(input_dim=(mp.output_dims[1] * 2))\n",
    "    model = models.MolAtomBondMPNN(\n",
    "        message_passing=mp, atom_predictor=atom_predictor, bond_predictor=bond_predictor\n",
    "    )\n",
    "\n",
    "if no_mol_atom:\n",
    "    mp = nn.MABBondMessagePassing(return_vertex_embeddings=False)\n",
    "    bond_predictor = nn.RegressionFFN(input_dim=(mp.output_dims[1] * 2))\n",
    "    model = models.MolAtomBondMPNN(message_passing=mp, bond_predictor=bond_predictor)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "013a0826",
   "metadata": {},
   "source": [
    "## Set up trainer with checkpointing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b3051d28",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "GPU available: False, used: False\n",
      "TPU available: False, using: 0 TPU cores\n",
      "HPU available: False, using: 0 HPUs\n"
     ]
    }
   ],
   "source": [
    "checkpointing = ModelCheckpoint(\n",
    "    dirpath=\"MABcheckpoints\",\n",
    "    filename=\"best-{epoch}-{val_loss:.2f}\",\n",
    "    monitor=\"val_loss\",\n",
    "    mode=\"min\",\n",
    "    save_last=True,\n",
    ")\n",
    "\n",
    "trainer = pl.Trainer(\n",
    "    logger=False,\n",
    "    enable_checkpointing=True,\n",
    "    enable_progress_bar=True,\n",
    "    accelerator=\"auto\",\n",
    "    devices=1,\n",
    "    max_epochs=20,\n",
    "    callbacks=[checkpointing],\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "d5f4f787",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading `train_dataloader` to estimate number of stepping batches.\n",
      "\n",
      "  | Name            | Type                  | Params | Mode \n",
      "------------------------------------------------------------------\n",
      "0 | message_passing | MABBondMessagePassing | 69.2 K | train\n",
      "1 | agg             | MeanAggregation       | 0      | train\n",
      "2 | mol_predictor   | RegressionFFN         | 32.1 K | train\n",
      "3 | atom_predictor  | RegressionFFN         | 31.5 K | train\n",
      "4 | bond_predictor  | RegressionFFN         | 62.1 K | train\n",
      "5 | bns             | ModuleList            | 612    | train\n",
      "6 | X_d_transform   | ScaleTransform        | 0      | train\n",
      "7 | metricss        | ModuleList            | 0      | train\n",
      "------------------------------------------------------------------\n",
      "195 K     Trainable params\n",
      "0         Non-trainable params\n",
      "195 K     Total params\n",
      "0.782     Total estimated model params size (MB)\n",
      "63        Modules in train mode\n",
      "0         Modules in eval mode\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Sanity Checking DataLoader 0:  50%|█████     | 1/2 [00:00<00:00,  7.59it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 19: 100%|██████████| 3/3 [00:01<00:00,  2.24it/s, mol_train_loss_step=0.0765, atom_train_loss_step=0.113, bond_train_loss_step=0.148, train_loss_step=0.337, mol_val_loss=0.0379, atom_val_loss=0.0219, bond_val_loss=0.033, val_loss=0.136, mol_train_loss_epoch=0.084, atom_train_loss_epoch=0.0578, bond_train_loss_epoch=0.0589, train_loss_epoch=0.209]    "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=20` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 19: 100%|██████████| 3/3 [00:01<00:00,  2.14it/s, mol_train_loss_step=0.0765, atom_train_loss_step=0.113, bond_train_loss_step=0.148, train_loss_step=0.337, mol_val_loss=0.0379, atom_val_loss=0.0219, bond_val_loss=0.033, val_loss=0.136, mol_train_loss_epoch=0.084, atom_train_loss_epoch=0.0578, bond_train_loss_epoch=0.0589, train_loss_epoch=0.209]\n"
     ]
    }
   ],
   "source": [
    "trainer.fit(model, train_dataloader, val_dataloader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "d177eb79",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Testing DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 20.82it/s]\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n",
       "┃<span style=\"font-weight: bold\">        Test metric        </span>┃<span style=\"font-weight: bold\">       DataLoader 0        </span>┃\n",
       "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n",
       "│<span style=\"color: #008080; text-decoration-color: #008080\">       atom_test/mae       </span>│<span style=\"color: #800080; text-decoration-color: #800080\">    0.5686116218566895     </span>│\n",
       "│<span style=\"color: #008080; text-decoration-color: #008080\">      atom_test/rmse       </span>│<span style=\"color: #800080; text-decoration-color: #800080\">    0.9241167902946472     </span>│\n",
       "│<span style=\"color: #008080; text-decoration-color: #008080\">       bond_test/mae       </span>│<span style=\"color: #800080; text-decoration-color: #800080\">    0.5553550720214844     </span>│\n",
       "│<span style=\"color: #008080; text-decoration-color: #008080\">      bond_test/rmse       </span>│<span style=\"color: #800080; text-decoration-color: #800080\">    1.3456532955169678     </span>│\n",
       "│<span style=\"color: #008080; text-decoration-color: #008080\">       mol_test/mae        </span>│<span style=\"color: #800080; text-decoration-color: #800080\">    2.6322197914123535     </span>│\n",
       "│<span style=\"color: #008080; text-decoration-color: #008080\">       mol_test/rmse       </span>│<span style=\"color: #800080; text-decoration-color: #800080\">     4.299654006958008     </span>│\n",
       "└───────────────────────────┴───────────────────────────┘\n",
       "</pre>\n"
      ],
      "text/plain": [
       "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n",
       "┃\u001b[1m \u001b[0m\u001b[1m       Test metric       \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m      DataLoader 0       \u001b[0m\u001b[1m \u001b[0m┃\n",
       "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n",
       "│\u001b[36m \u001b[0m\u001b[36m      atom_test/mae      \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m   0.5686116218566895    \u001b[0m\u001b[35m \u001b[0m│\n",
       "│\u001b[36m \u001b[0m\u001b[36m     atom_test/rmse      \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m   0.9241167902946472    \u001b[0m\u001b[35m \u001b[0m│\n",
       "│\u001b[36m \u001b[0m\u001b[36m      bond_test/mae      \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m   0.5553550720214844    \u001b[0m\u001b[35m \u001b[0m│\n",
       "│\u001b[36m \u001b[0m\u001b[36m     bond_test/rmse      \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m   1.3456532955169678    \u001b[0m\u001b[35m \u001b[0m│\n",
       "│\u001b[36m \u001b[0m\u001b[36m      mol_test/mae       \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m   2.6322197914123535    \u001b[0m\u001b[35m \u001b[0m│\n",
       "│\u001b[36m \u001b[0m\u001b[36m      mol_test/rmse      \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m    4.299654006958008    \u001b[0m\u001b[35m \u001b[0m│\n",
       "└───────────────────────────┴───────────────────────────┘\n"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# No model is passed, so Lightning restores the best checkpoint saved during fitting\n",
    "# before evaluating; each of the mol/atom/bond heads reports its own MAE and RMSE\n",
    "results = trainer.test(dataloaders=test_dataloader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "13ce67b8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 49.64it/s]\n"
     ]
    }
   ],
   "source": [
    "# predict returns one (mol, atom, bond) tuple of prediction tensors per batch;\n",
    "# zip(*predss) regroups them per stream, and each stream is concatenated into a single tensor\n",
    "predss = trainer.predict(model, predict_dataloader)\n",
    "mol_preds, atom_preds, bond_preds = (torch.concat(tensors) for tensors in zip(*predss))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "69a6d53d",
   "metadata": {},
   "source": [
    "## Split the atom and bond predictions into a list of tensors, one for each molecule"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "f0f8889b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The per-molecule atom/bond counts give the chunk sizes for splitting the flat\n",
    "# prediction tensors back into one tensor per molecule\n",
    "atoms_per_mol = [mol.GetNumAtoms() for mol in predict_dataset.mols]\n",
    "bonds_per_mol = [mol.GetNumBonds() for mol in predict_dataset.mols]\n",
    "\n",
    "# NOTE: atom_preds/bond_preds are rebound from Tensor to tuple-of-Tensors here,\n",
    "# so re-running this cell without re-running the predict cell above will fail\n",
    "atom_preds = torch.split(atom_preds, atoms_per_mol)\n",
    "bond_preds = torch.split(bond_preds, bonds_per_mol)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "13557888",
   "metadata": {},
   "source": [
    "## Save and load the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "854e213d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "MolAtomBondMPNN(\n",
       "  (message_passing): MABBondMessagePassing(\n",
       "    (W_i): Linear(in_features=90, out_features=100, bias=False)\n",
       "    (W_h): Linear(in_features=100, out_features=100, bias=False)\n",
       "    (W_vo): Linear(in_features=174, out_features=100, bias=True)\n",
       "    (W_vd): Linear(in_features=102, out_features=102, bias=True)\n",
       "    (W_eo): Linear(in_features=116, out_features=100, bias=True)\n",
       "    (W_ed): Linear(in_features=102, out_features=102, bias=True)\n",
       "    (dropout): Dropout(p=0.1, inplace=False)\n",
       "    (tau): Tanh()\n",
       "    (V_d_transform): ScaleTransform()\n",
       "    (E_d_transform): ScaleTransform()\n",
       "    (graph_transform): GraphTransform(\n",
       "      (V_transform): ScaleTransform()\n",
       "      (E_transform): ScaleTransform()\n",
       "    )\n",
       "  )\n",
       "  (agg): MeanAggregation()\n",
       "  (mol_predictor): RegressionFFN(\n",
       "    (ffn): MLP(\n",
       "      (0): Sequential(\n",
       "        (0): Linear(in_features=104, out_features=300, bias=True)\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): ReLU()\n",
       "        (1): Dropout(p=0.0, inplace=False)\n",
       "        (2): Linear(in_features=300, out_features=2, bias=True)\n",
       "      )\n",
       "    )\n",
       "    (criterion): MSE(task_weights=[[1.0, 1.0]])\n",
       "    (output_transform): UnscaleTransform()\n",
       "  )\n",
       "  (atom_predictor): RegressionFFN(\n",
       "    (ffn): MLP(\n",
       "      (0): Sequential(\n",
       "        (0): Linear(in_features=102, out_features=300, bias=True)\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): ReLU()\n",
       "        (1): Dropout(p=0.0, inplace=False)\n",
       "        (2): Linear(in_features=300, out_features=2, bias=True)\n",
       "      )\n",
       "    )\n",
       "    (criterion): MSE(task_weights=[[1.0, 1.0]])\n",
       "    (output_transform): UnscaleTransform()\n",
       "  )\n",
       "  (bond_predictor): RegressionFFN(\n",
       "    (ffn): MLP(\n",
       "      (0): Sequential(\n",
       "        (0): Linear(in_features=204, out_features=300, bias=True)\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): ReLU()\n",
       "        (1): Dropout(p=0.0, inplace=False)\n",
       "        (2): Linear(in_features=300, out_features=2, bias=True)\n",
       "      )\n",
       "    )\n",
       "    (criterion): MSE(task_weights=[[1.0, 1.0]])\n",
       "    (output_transform): UnscaleTransform()\n",
       "  )\n",
       "  (bns): ModuleList(\n",
       "    (0-2): 3 x BatchNorm1d(102, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "  )\n",
       "  (X_d_transform): ScaleTransform()\n",
       "  (metricss): ModuleList(\n",
       "    (0-2): 3 x ModuleList(\n",
       "      (0): MAE(task_weights=[[1.0]])\n",
       "      (1): RMSE(task_weights=[[1.0]])\n",
       "      (2): MSE(task_weights=[[1.0, 1.0]])\n",
       "    )\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Round-trip the trained model through Chemprop's own save format;\n",
    "# the loaded model's repr (shown below) matches the trained architecture\n",
    "models.utils.save_model(\"temp.pt\", model)\n",
    "models.MolAtomBondMPNN.load_from_file(\"temp.pt\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "e5d8fe04",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "MolAtomBondMPNN(\n",
       "  (message_passing): MABBondMessagePassing(\n",
       "    (W_i): Linear(in_features=90, out_features=100, bias=False)\n",
       "    (W_h): Linear(in_features=100, out_features=100, bias=False)\n",
       "    (W_vo): Linear(in_features=174, out_features=100, bias=True)\n",
       "    (W_vd): Linear(in_features=102, out_features=102, bias=True)\n",
       "    (W_eo): Linear(in_features=116, out_features=100, bias=True)\n",
       "    (W_ed): Linear(in_features=102, out_features=102, bias=True)\n",
       "    (dropout): Dropout(p=0.1, inplace=False)\n",
       "    (tau): Tanh()\n",
       "    (V_d_transform): ScaleTransform()\n",
       "    (E_d_transform): ScaleTransform()\n",
       "    (graph_transform): GraphTransform(\n",
       "      (V_transform): ScaleTransform()\n",
       "      (E_transform): ScaleTransform()\n",
       "    )\n",
       "  )\n",
       "  (agg): MeanAggregation()\n",
       "  (mol_predictor): RegressionFFN(\n",
       "    (ffn): MLP(\n",
       "      (0): Sequential(\n",
       "        (0): Linear(in_features=104, out_features=300, bias=True)\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): ReLU()\n",
       "        (1): Dropout(p=0.0, inplace=False)\n",
       "        (2): Linear(in_features=300, out_features=2, bias=True)\n",
       "      )\n",
       "    )\n",
       "    (criterion): MSE(task_weights=[[1.0, 1.0]])\n",
       "    (output_transform): UnscaleTransform()\n",
       "  )\n",
       "  (atom_predictor): RegressionFFN(\n",
       "    (ffn): MLP(\n",
       "      (0): Sequential(\n",
       "        (0): Linear(in_features=102, out_features=300, bias=True)\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): ReLU()\n",
       "        (1): Dropout(p=0.0, inplace=False)\n",
       "        (2): Linear(in_features=300, out_features=2, bias=True)\n",
       "      )\n",
       "    )\n",
       "    (criterion): MSE(task_weights=[[1.0, 1.0]])\n",
       "    (output_transform): UnscaleTransform()\n",
       "  )\n",
       "  (bond_predictor): RegressionFFN(\n",
       "    (ffn): MLP(\n",
       "      (0): Sequential(\n",
       "        (0): Linear(in_features=204, out_features=300, bias=True)\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): ReLU()\n",
       "        (1): Dropout(p=0.0, inplace=False)\n",
       "        (2): Linear(in_features=300, out_features=2, bias=True)\n",
       "      )\n",
       "    )\n",
       "    (criterion): MSE(task_weights=[[1.0, 1.0]])\n",
       "    (output_transform): UnscaleTransform()\n",
       "  )\n",
       "  (bns): ModuleList(\n",
       "    (0-2): 3 x BatchNorm1d(102, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "  )\n",
       "  (X_d_transform): ScaleTransform()\n",
       "  (metricss): ModuleList(\n",
       "    (0-2): 3 x ModuleList(\n",
       "      (0): MAE(task_weights=[[1.0]])\n",
       "      (1): RMSE(task_weights=[[1.0]])\n",
       "      (2): MSE(task_weights=[[1.0, 1.0]])\n",
       "    )\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# The model can also be reloaded from a Lightning checkpoint written during training\n",
    "models.MolAtomBondMPNN.load_from_checkpoint(\"MABcheckpoints/last.ckpt\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "chemprop",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
