{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Example 3.1: Sharding is All You Need"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Example 3.1: Using sharding for training\n",
    "\n",
    "import os\n",
    "import sys\n",
    "\n",
    "sys.path.append(os.path.dirname(sys.path[0]))\n",
    "\n",
    "import jax\n",
    "import time\n",
    "import optax\n",
    "import pickle\n",
    "import numpy as np\n",
    "import jax.numpy as jnp\n",
    "import haiku as hk\n",
    "import jaxopt as opt\n",
    "\n",
    "from absl import app\n",
    "from absl import flags\n",
    "from jax import Array, jit\n",
    "from jaxopt import OptaxSolver\n",
    "from typing import Optional, Union, List, Callable, Any\n",
    "from ml_collections import config_flags\n",
    "\n",
    "from flax import jax_utils\n",
    "from cybertron.embedding import ConformationEmbedding\n",
    "from cybertron.model import MolCT\n",
    "from cybertron.readout import AtomwiseReadout\n",
    "from cybertron import Cybertron\n",
    "from cybertron.utils.train import print_net_params"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from jax.experimental import mesh_utils\n",
    "from jax.sharding import PositionalSharding"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def net_fn(atom_type, \n",
    "           atom_mask, \n",
    "           bond_type, \n",
    "           bond_mask, \n",
    "           coordinate, \n",
    "           dist_mask,):\n",
    "    \"\"\"Single-molecule forward pass: embedding -> MolCT -> atomwise readout.\n",
    "\n",
    "    The modules are rebuilt on every call; hk.transform (applied in the next\n",
    "    cell) extracts their parameters. All feature dims are fixed at 128 here.\n",
    "    \"\"\"\n",
    "\n",
    "    # NOTE(review): 'rbf_runc' looks like a typo for 'rbf_func' -- confirm\n",
    "    # against ConformationEmbedding's signature (the cell did execute as-is).\n",
    "    emb = ConformationEmbedding(dim_node=128,\n",
    "                                dim_edge=128,\n",
    "                                rbf_runc='log_gaussian') # type: ignore\n",
    "    model = MolCT(dim_feature=128, \n",
    "                  dim_edge_emb=128, \n",
    "                  dim_node_emb=128,) # type: ignore\n",
    "    # Readout heads are passed to Cybertron as a list.\n",
    "    readout = [AtomwiseReadout(dim_node_rep=128,\n",
    "                               activation='silu',)] # type: ignore\n",
    "    cbt = Cybertron(config=None, \n",
    "                    embedding=emb,\n",
    "                    model=model,\n",
    "                    readout=readout,) # type: ignore\n",
    "    \n",
    "    return cbt(atom_type, atom_mask, bond_type, bond_mask, coordinate, dist_mask)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# vmap over the leading (molecule) batch axis, then lift into a Haiku\n",
    "# (init, apply) pair. NOTE(review): vmap is applied to the raw function\n",
    "# *before* hk.transform; the more common pattern is to transform first and\n",
    "# vmap net.apply -- confirm this ordering is intentional (it did run here).\n",
    "net = jax.vmap(net_fn)\n",
    "net = hk.transform(net, apply_rng=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Weighted MAE loss. The per-molecule error is divided by its atom count\n",
    "# and then re-weighted by num_atoms / sum(num_atoms), which algebraically\n",
    "# reduces to sum(|pred - label|) / sum(num_atoms): the mean absolute error\n",
    "# per atom over the whole batch.\n",
    "@jit\n",
    "def loss_func(params, label, num_atoms, rng_key, **batch):\n",
    "\n",
    "    # use net.apply to calculate the batched model outputs\n",
    "    outputs = net.apply(params, rng_key, **batch)\n",
    "    # outputs[0] is the first readout; squeeze drops singleton dims so the\n",
    "    # prediction matches label's shape\n",
    "    diff = (outputs[0].squeeze() - label) / num_atoms\n",
    "    loss = jnp.abs(diff)\n",
    "    # weights sum to 1; multiplying cancels the num_atoms division above\n",
    "    weights = num_atoms / jnp.sum(num_atoms)\n",
    "    return jnp.sum(loss * weights)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the normalized QM9 training npz located next to this notebook and\n",
    "# materialize the lazy NpzFile as a plain dict of arrays.\n",
    "train_ds = np.load(sys.path[0] + \"/dataset_qm9_normed_trainset_1024.npz\")\n",
    "train_ds = dict(train_ds)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Select property column 7 as the regression target and gather the model\n",
    "# inputs under the keyword names net_fn expects.\n",
    "label = train_ds['label'][:, 7]\n",
    "ds = {'atom_type': train_ds.get('atom_type'), # (N, A)\n",
    "    'atom_mask': train_ds.get('atom_mask'),\n",
    "    'bond_type': train_ds.get('bond_type'),\n",
    "    'bond_mask': train_ds.get('bond_mask'),\n",
    "    'coordinate': train_ds.get('coordinate'), # (N, A, 3)\n",
    "    'dist_mask': train_ds.get('dist_mask'),}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rebuild the atom mask directly from atom types (entries > 0 are treated\n",
    "# as real atoms) rather than using ds['atom_mask']; num_atoms is the\n",
    "# per-molecule atom count used to normalize the loss.\n",
    "atom_mask = ds['atom_type'] > 0 # type: ignore\n",
    "num_atoms = jnp.sum(atom_mask, axis=-1) # (N,)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this rebinds the name 'Array' (imported from jax in the\n",
    "# first cell) to a Union alias, and the alias is never used -- consider\n",
    "# removing it to avoid shadowing.\n",
    "Array = Union[jax.Array, np.ndarray]\n",
    "\n",
    "def _sharding(input, shards):\n",
    "    \"\"\"Distribute an array across devices along its leading (batch) axis.\n",
    "\n",
    "    Args:\n",
    "        input: array to shard, or None (forwarded to device_put unchanged).\n",
    "        shards: a PositionalSharding whose leading dimension is the device count.\n",
    "\n",
    "    Returns:\n",
    "        The input committed to devices, split along axis 0.\n",
    "\n",
    "    Raises:\n",
    "        TypeError: if input is neither an ndarray/jax.Array nor None.\n",
    "    \"\"\"\n",
    "\n",
    "    n_device = shards.shape[0]\n",
    "    if isinstance(input, (np.ndarray, jax.Array)):\n",
    "        # Split axis 0 across all devices; every other axis stays whole.\n",
    "        _shape = [n_device, ] + [1 for _ in range(input.ndim - 1)]\n",
    "        return jax.device_put(input, shards.reshape(_shape))\n",
    "    elif input is None:\n",
    "        # NOTE: device_put treats None as an (empty) pytree leaf-less value.\n",
    "        return jax.device_put(input, shards)\n",
    "    else:\n",
    "        raise TypeError(f\"Invalid input: {input}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Adam with a fixed 1e-3 learning rate. Parameters are initialized from a\n",
    "# 2-sample dummy batch; the mask/bond inputs are passed as None during\n",
    "# shape tracing.\n",
    "optimizer = optax.adam(learning_rate=1e-3)\n",
    "rng_seq = hk.PRNGSequence(42)\n",
    "params = net.init(next(rng_seq), ds['atom_type'][:2], None, None, None, ds['coordinate'][:2], None)\n",
    "opt_state = optimizer.init(params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One (8, 1) device mesh over all visible devices: axis 0 splits the batch\n",
    "# 8 ways. NOTE(review): the device count 8 is hard-coded -- consider\n",
    "# len(jax.devices()) so the notebook runs on other device counts.\n",
    "global_sharding = PositionalSharding(jax.devices()).reshape(8, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data-parallel setup: parameters and optimizer state are fully replicated\n",
    "# on every device; only the data batch is sharded.\n",
    "params = jax.device_put(params, global_sharding.replicate())\n",
    "opt_state = jax.device_put(opt_state, global_sharding.replicate())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Net parameters:\n",
      "atomwise_readout/~/decoder/~/mlp/~/linear_0\n",
      "b: shape of (64,)\n",
      "w: shape of (128, 64)\n",
      "atomwise_readout/~/decoder/~/mlp/~/linear_1\n",
      "b: shape of (1,)\n",
      "w: shape of (64, 1)\n",
      "conformation_embedding/~/atom_embedding\n",
      "embeddings: shape of (64, 128)\n",
      "conformation_embedding/~/dis_filter/~/linear\n",
      "b: shape of (128,)\n",
      "w: shape of (64, 128)\n",
      "conformation_embedding/~/dis_filter/~/mlp/~/linear_0\n",
      "b: shape of (128,)\n",
      "w: shape of (128, 128)\n",
      "conformation_embedding/~/dis_filter/~/mlp/~/linear_1\n",
      "b: shape of (128,)\n",
      "w: shape of (128, 128)\n",
      "molct/~build_interaction/interaction_unit_0/~/multihead_attention/linear_output\n",
      "w: shape of (128, 128)\n",
      "molct/~build_interaction/interaction_unit_0/~/node_norm\n",
      "offset: shape of (128,)\n",
      "scale: shape of (128,)\n",
      "molct/~build_interaction/interaction_unit_0/~/positional_embedding/k_gen\n",
      "w: shape of (128, 128)\n",
      "molct/~build_interaction/interaction_unit_0/~/positional_embedding/norm\n",
      "offset: shape of (128,)\n",
      "scale: shape of (128,)\n",
      "molct/~build_interaction/interaction_unit_0/~/positional_embedding/q_gen\n",
      "w: shape of (128, 128)\n",
      "molct/~build_interaction/interaction_unit_0/~/positional_embedding/v_gen\n",
      "w: shape of (128, 128)\n",
      "molct/~build_interaction/interaction_unit_1/~/multihead_attention/linear_output\n",
      "w: shape of (128, 128)\n",
      "molct/~build_interaction/interaction_unit_1/~/node_norm\n",
      "offset: shape of (128,)\n",
      "scale: shape of (128,)\n",
      "molct/~build_interaction/interaction_unit_1/~/positional_embedding/k_gen\n",
      "w: shape of (128, 128)\n",
      "molct/~build_interaction/interaction_unit_1/~/positional_embedding/norm\n",
      "offset: shape of (128,)\n",
      "scale: shape of (128,)\n",
      "molct/~build_interaction/interaction_unit_1/~/positional_embedding/q_gen\n",
      "w: shape of (128, 128)\n",
      "molct/~build_interaction/interaction_unit_1/~/positional_embedding/v_gen\n",
      "w: shape of (128, 128)\n",
      "molct/~build_interaction/interaction_unit_2/~/multihead_attention/linear_output\n",
      "w: shape of (128, 128)\n",
      "molct/~build_interaction/interaction_unit_2/~/node_norm\n",
      "offset: shape of (128,)\n",
      "scale: shape of (128,)\n",
      "molct/~build_interaction/interaction_unit_2/~/positional_embedding/k_gen\n",
      "w: shape of (128, 128)\n",
      "molct/~build_interaction/interaction_unit_2/~/positional_embedding/norm\n",
      "offset: shape of (128,)\n",
      "scale: shape of (128,)\n",
      "molct/~build_interaction/interaction_unit_2/~/positional_embedding/q_gen\n",
      "w: shape of (128, 128)\n",
      "molct/~build_interaction/interaction_unit_2/~/positional_embedding/v_gen\n",
      "w: shape of (128, 128)\n",
      "Total number of parameters: 256001\n"
     ]
    }
   ],
   "source": [
    "# Summarize every parameter's shape and the total parameter count.\n",
    "print_net_params(params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [],
   "source": [
    "from functools import partial\n",
    "\n",
    "# Shard every array in the dataset dict along its batch axis.\n",
    "# jax.tree_map is deprecated (and removed in newer JAX releases); the\n",
    "# stable spelling jax.tree_util.tree_map is identical in behavior.\n",
    "ds_sharding = partial(_sharding, shards=global_sharding)\n",
    "ds = jax.tree_util.tree_map(ds_sharding, ds)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [],
   "source": [
    "# The targets are 1-D, so flatten the sharding to a single axis and split\n",
    "# them the same way as the batched inputs.\n",
    "label = jax.device_put(label, global_sharding.reshape(-1))\n",
    "num_atoms = jax.device_put(num_atoms, global_sharding.reshape(-1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1, Loss = 1.565031\n",
      "Epoch 2, Loss = 0.733235\n",
      "Epoch 3, Loss = 0.746100\n",
      "Epoch 4, Loss = 0.559235\n",
      "Epoch 5, Loss = 0.671943\n",
      "Epoch 6, Loss = 0.630883\n",
      "Epoch 7, Loss = 0.545814\n",
      "Epoch 8, Loss = 0.571740\n",
      "Epoch 9, Loss = 0.579863\n",
      "Epoch 10, Loss = 0.515569\n",
      "Epoch 11, Loss = 0.484479\n",
      "Epoch 12, Loss = 0.505986\n",
      "Epoch 13, Loss = 0.468209\n",
      "Epoch 14, Loss = 0.409433\n",
      "Epoch 15, Loss = 0.423076\n",
      "Epoch 16, Loss = 0.393871\n"
     ]
    }
   ],
   "source": [
    "# Full-batch training: one gradient step per epoch over the entire dataset.\n",
    "n_epoch = 16\n",
    "for epoch in range(n_epoch):\n",
    "\n",
    "    # loss_func is jitted; value_and_grad shares one forward pass for both\n",
    "    # the loss value and the gradients.\n",
    "    loss_value, grads = jax.value_and_grad(loss_func)(params, label, num_atoms, next(rng_seq), **ds)\n",
    "    updates, opt_state = optimizer.update(grads, opt_state, params)\n",
    "    params = optax.apply_updates(params, updates)\n",
    "\n",
    "    print(f\"Epoch {epoch + 1}, Loss = {loss_value:.6f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #393b79\">  GPU 0  </span><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #d6616b\">  GPU 1  </span><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #8ca252\">  GPU 2  </span><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #de9ed6\">  GPU 3  </span><span style=\"color: #000000; text-decoration-color: #000000; background-color: #e7cb94\">  GPU 4  </span><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #6b6ecf\">  GPU 5  </span><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #a55194\">  GPU 6  </span><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #8c6d31\">  GPU 7  </span>\n",
       "<span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #393b79\">         </span><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #d6616b\">         </span><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #8ca252\">         </span><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #de9ed6\">         </span><span style=\"color: #000000; text-decoration-color: #000000; background-color: #e7cb94\">         </span><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #6b6ecf\">         </span><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #a55194\">         </span><span style=\"color: #ffffff; text-decoration-color: #ffffff; background-color: #8c6d31\">         </span>\n",
       "</pre>\n"
      ],
      "text/plain": [
       "\u001b[38;2;255;255;255;48;2;57;59;121m  \u001b[0m\u001b[38;2;255;255;255;48;2;57;59;121mGPU 0\u001b[0m\u001b[38;2;255;255;255;48;2;57;59;121m  \u001b[0m\u001b[38;2;255;255;255;48;2;214;97;107m  \u001b[0m\u001b[38;2;255;255;255;48;2;214;97;107mGPU 1\u001b[0m\u001b[38;2;255;255;255;48;2;214;97;107m  \u001b[0m\u001b[38;2;255;255;255;48;2;140;162;82m  \u001b[0m\u001b[38;2;255;255;255;48;2;140;162;82mGPU 2\u001b[0m\u001b[38;2;255;255;255;48;2;140;162;82m  \u001b[0m\u001b[38;2;255;255;255;48;2;222;158;214m  \u001b[0m\u001b[38;2;255;255;255;48;2;222;158;214mGPU 3\u001b[0m\u001b[38;2;255;255;255;48;2;222;158;214m  \u001b[0m\u001b[38;2;0;0;0;48;2;231;203;148m  \u001b[0m\u001b[38;2;0;0;0;48;2;231;203;148mGPU 4\u001b[0m\u001b[38;2;0;0;0;48;2;231;203;148m  \u001b[0m\u001b[38;2;255;255;255;48;2;107;110;207m  \u001b[0m\u001b[38;2;255;255;255;48;2;107;110;207mGPU 5\u001b[0m\u001b[38;2;255;255;255;48;2;107;110;207m  \u001b[0m\u001b[38;2;255;255;255;48;2;165;81;148m  \u001b[0m\u001b[38;2;255;255;255;48;2;165;81;148mGPU 6\u001b[0m\u001b[38;2;255;255;255;48;2;165;81;148m  \u001b[0m\u001b[38;2;255;255;255;48;2;140;109;49m  \u001b[0m\u001b[38;2;255;255;255;48;2;140;109;49mGPU 7\u001b[0m\u001b[38;2;255;255;255;48;2;140;109;49m  \u001b[0m\n",
       "\u001b[38;2;255;255;255;48;2;57;59;121m         \u001b[0m\u001b[38;2;255;255;255;48;2;214;97;107m         \u001b[0m\u001b[38;2;255;255;255;48;2;140;162;82m         \u001b[0m\u001b[38;2;255;255;255;48;2;222;158;214m         \u001b[0m\u001b[38;2;0;0;0;48;2;231;203;148m         \u001b[0m\u001b[38;2;255;255;255;48;2;107;110;207m         \u001b[0m\u001b[38;2;255;255;255;48;2;165;81;148m         \u001b[0m\u001b[38;2;255;255;255;48;2;140;109;49m         \u001b[0m\n"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Show how the labels are laid out across devices: each of the 8 GPUs\n",
    "# holds one contiguous shard of the batch axis.\n",
    "# (Removed a dead '_ = grads[...]' lookup whose result was discarded.)\n",
    "jax.debug.visualize_array_sharding(label)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "jax",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
