{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# PyTorch Lightning Tutorial\n",
    "\n",
    "## Basic skills\n",
    "\n",
    "Learn the basics of model development with Lightning. Researchers and machine learning engineers should start here."
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Add imports\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-01-21 13:20:29.184227: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 AVX512F AVX512_VNNI FMA\n",
      "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "2023-01-21 13:20:29.392401: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
      "2023-01-21 13:20:29.429765: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
      "Using TensorFlow backend.\n",
      "Global seed set to 3047\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on : NVIDIA GeForce RTX 3090 Ti\n"
     ]
    }
   ],
   "source": [
    "#!/usr/bin/env python\n",
    "# -*- coding: utf-8 -*-\n",
    "'''\n",
    "@File    :   playground.ipynb\n",
    "@Time    :   2023/01/20 10:37:40\n",
    "@Author  :   Haowei Xu\n",
    "@Site    :   https://howiehsu0126.github.io/\n",
    "@License :   (C)Copyright 2018-2023, iOPEN, NWPU\n",
    "@Desc    :   None\n",
    "'''\n",
    "# Standard libraries\n",
    "import os\n",
    "import warnings\n",
    "from datetime import datetime\n",
    "# PyTorch Lightning\n",
    "import pytorch_lightning as pl\n",
    "# PyTorch\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from torch import nn\n",
    "from torch.nn import (ReLU, Linear)\n",
    "from torch.optim import SGD\n",
    "# PyTorch geometric\n",
    "from torch_geometric import transforms as T\n",
    "from torch_geometric.data import Data\n",
    "from torch_geometric.datasets import (TUDataset, Planetoid)\n",
    "from torch_geometric.loader import DataLoader\n",
    "from torch_geometric.nn import (Sequential, GCNConv)\n",
    "# PL callbacks and profilers\n",
    "from pytorch_lightning.callbacks import (ModelCheckpoint, ModelSummary, DeviceStatsMonitor)\n",
    "from pytorch_lightning. callbacks. early_stopping import EarlyStopping\n",
    "from pytorch_lightning.profilers import AdvancedProfiler\n",
    "from torch import Tensor\n",
    "# Transfer learning\n",
    "from pytorch_pretrained_bert import BertModel\n",
    "# Macro variables\n",
    "AVAIL_GPUS = min(1, torch.cuda.device_count())\n",
    "BATCH_SIZE = 256 if AVAIL_GPUS else 64\n",
    "# Path to the folder where the datasets are/should be downloaded\n",
    "DATASET_PATH = os.path.join(os.getcwd(), 'Data')\n",
    "# Create datasets path if it doesn't exist yet\n",
    "os.makedirs(DATASET_PATH, exist_ok=True)\n",
    "# Path to the folder where anything is logged\n",
    "LOG_PATH = os.path.join(os.path.join(os.getcwd(), \"Logs\"))\n",
    "# Create datasets path if it doesn't exist yet\n",
    "os.makedirs(LOG_PATH, exist_ok=True)\n",
    "# Path to the folder where the pretrained models are saved\n",
    "CHECKPOINT_PATH = os.path.join(\n",
    "    os.getcwd(), LOG_PATH, f\"saved_model/ckpt-{datetime.now().strftime('%Y-%m-%d_%H-%M')}\")\n",
    "# Create checkpoint path if it doesn't exist yet\n",
    "os.makedirs(CHECKPOINT_PATH, exist_ok=True)\n",
    "# Path to the profiler log\n",
    "PROFILER_PATH = os.path.join(os.getcwd(), LOG_PATH, \"Profiler\")\n",
    "# Create checkpoint path if it doesn't exist yet\n",
    "os.makedirs(PROFILER_PATH, exist_ok=True)\n",
    "profiler = AdvancedProfiler(dirpath=PROFILER_PATH, filename=f\"perf_logs-{datetime.now().strftime('%Y-%m-%d_%H-%M')}.log\")\n",
    "# Suppress warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "# Setting the seed\n",
    "pl.seed_everything(3047)\n",
    "# Ensure that all operations are deterministic on GPU (if used) for reproducibility\n",
    "torch.backends.cudnn.determinstic = True\n",
    "torch.backends.cudnn.benchmark = True\n",
    "# Setting devices\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(f\"Training on : {torch.cuda.get_device_name()}\")\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Define the training dataset\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch_geometric.datasets import Planetoid\n",
    "\n",
    "dataset = Planetoid(root=os.path.join(os.getcwd(), 'Data'), name='Cora')\n",
    "train_loader = DataLoader(dataset)\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Define the PyTorch nn.Modules\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Define a LightningModule\n",
    "\n",
    "The LightningModule is the full recipe that defines how your nn.Modules interact.\n",
    "\n",
     "- The training_step defines how the nn.Modules interact.\n",
     "- In configure_optimizers, define the optimizer(s) for your models.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "7"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dataset.num_classes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Data(x=[3, 1433], edge_index=[2, 2], y=[3], train_mask=[3], val_mask=[3], test_mask=[3])"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sampling a subgraph\n",
    "example_input_graph = dataset.data.subgraph(torch.tensor([0, 1, 2], dtype=torch.long))\n",
    "example_input_graph\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class NodeClassifier(pl.LightningModule):\n",
    "    def __init__(self, **kwargs):\n",
    "        super(NodeClassifier, self).__init__()\n",
    "        self.example_input_array = example_input_graph\n",
    "        self.num_features = kwargs[\"num_features\"]\n",
    "        self.num_classes = kwargs[\"num_classes\"]\n",
    "        # hidden layer node features\n",
    "        self.hidden = 256\n",
    "        # Saving hyperparameters\n",
    "        self.save_hyperparameters()\n",
    "\n",
    "        self.model = Sequential('x, edge_index', [\n",
    "            (GCNConv(self.num_features, self.hidden), 'x, edge_index -> x'),\n",
    "            (ReLU(inplace=True)),\n",
    "            (GCNConv(self.hidden, self.hidden), 'x, edge_index -> x'),\n",
    "            (ReLU(inplace=True)),\n",
    "            (Linear(self.hidden, self.num_classes)),\n",
    "        ])\n",
    "        self.loss_module = nn.CrossEntropyLoss()\n",
    "\n",
    "    def forward(self, data, mode=\"train\"):\n",
    "        x, edge_index = data.x, data.edge_index\n",
    "        x = self.model(x, edge_index)\n",
    "        # Only calculate the loss on the nodes corresponding to the mask\n",
    "        if mode == \"test\":\n",
    "            mask = data.test_mask\n",
    "        elif mode == \"train\":\n",
    "            mask = data.train_mask\n",
    "        elif mode == \"val\":\n",
    "            mask = data.val_mask\n",
    "        else:\n",
    "            assert False, f\"Unknown forward mode: {mode}\"\n",
    "\n",
    "        loss = self.loss_module(x[mask], data.y[mask])\n",
    "        acc = (x[mask].argmax(dim=-1) == data.y[mask]\n",
    "               ).sum().float() / mask.sum()\n",
    "        return loss, acc\n",
    "\n",
    "    def configure_optimizers(self):\n",
    "        # We use SGD here, but Adam works as well\n",
    "        return SGD(self.parameters(), lr=0.1, momentum=0.9, weight_decay=2e-3)\n",
    "\n",
    "    def training_step(self, batch, batch_idx):\n",
    "        train_loss, train_acc = self.forward(batch, mode=\"train\")\n",
    "        self.log_dict(\n",
    "            {\n",
    "                \"train_loss\": train_loss,\n",
    "                \"train_acc\": train_acc\n",
    "            },\n",
    "            on_step=False,\n",
    "            on_epoch=True,\n",
    "            logger=True,\n",
    "        )\n",
    "        return train_loss\n",
    "\n",
    "    def training_epoch_end(self, training_step_outputs):\n",
    "        for out in training_step_outputs:\n",
    "            print(f\"Training loss: {out['loss']}\")\n",
    "\n",
    "    def validation_step(self, batch, batch_idx):\n",
    "        val_loss, val_acc = self.forward(batch, mode=\"val\")\n",
    "        self.log_dict(\n",
    "            {\n",
    "                \"val_loss\": val_loss,\n",
    "                \"val_acc\": val_acc\n",
    "            },\n",
    "            on_step=False,\n",
    "            on_epoch=True,\n",
    "            logger=True,\n",
    "        )\n",
    "\n",
    "    def validation_epoch_end(self, validation_step_outputs):\n",
    "        for out in validation_step_outputs:\n",
    "            print(f\"Validation loss: {out['loss']}\")\n",
    "\n",
    "    def test_step(self, batch, batch_idx):\n",
    "        _, acc = self.forward(batch, mode=\"test\")\n",
    "        self.log(\"test_acc\", acc)\n",
    "\n",
    "    def predict_step(self, batch, batch_idx, dataloader_idx=0):\n",
    "        return self(batch)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Model Debugging\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def fast_dev(dataset):\n",
    "    node_data_loader = DataLoader(dataset, batch_size=1)\n",
    "\n",
    "    # Create a PyTorch Lightning trainer\n",
    "    trainer = pl.Trainer(\n",
    "        num_sanity_val_steps=2, # runs 2 steps of validation in the beginning of training\n",
    "        fast_dev_run=5, # runs 5 batch of training, validation, test and prediction data through your trainer\n",
    "        # limit_train_batches=10, # use 10 batches of train and 5 batches of val\n",
    "        # limit_val_batches=5,\n",
    "        callbacks=[\n",
    "            ModelSummary(max_depth=2),\n",
    "            DeviceStatsMonitor()\n",
    "        ],\n",
    "        gpus=AVAIL_GPUS,\n",
    "        accelerator='gpu',\n",
    "        precision=16, # Mixed-precision training\n",
    "        enable_progress_bar=False,\n",
    "        enable_checkpointing=False,\n",
    "    )\n",
    "    # Optional logging argument that we don't need\n",
    "    trainer.logger._default_hp_metric = None\n",
    "\n",
    "    # trainer.logger.log_metrics()\n",
    "    model = NodeClassifier(num_features=dataset.num_node_features,\n",
    "                           num_classes=dataset.num_classes,)\n",
    "    trainer.fit(model, node_data_loader, node_data_loader)\n",
    "\n",
    "    return trainer, node_data_loader\n",
    "\n",
    "\n",
    "# trainer, node_data_loader = fast_dev(dataset=dataset)\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Train the model\n",
    "\n",
    "To train the model use the Lightning Trainer which handles all the engineering and abstracts away all the complexity needed for scale.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Small function for printing the test scores\n",
    "def print_results(result_dict):\n",
    "    if \"train\" in result_dict:\n",
    "        print(\"Train accuracy: %4.2f%%\" % (100.0 * result_dict[\"train\"]))\n",
    "    if \"val\" in result_dict:\n",
    "        print(\"Val accuracy:   %4.2f%%\" % (100.0 * result_dict[\"val\"]))\n",
    "    print(\"Test accuracy:  %4.2f%%\" % (100.0 * result_dict[\"test\"]))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Auto select gpus: [0]\n",
      "Using 16bit native Automatic Mixed Precision (AMP)\n",
      "Trainer already configured with model summary callbacks: [<class 'pytorch_lightning.callbacks.model_summary.ModelSummary'>]. Skipping setting a default `ModelSummary` callback.\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "Missing logger folder: /home/hwxu/Projects/PytorchLightningGym/Logs/saved_model/ckpt-2023-01-21_13-20/lightning_logs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name           | Type              | Params | In sizes            | Out sizes\n",
      "---------------------------------------------------------------------------------------\n",
      "0 | model          | Sequential_54b95d | 434 K  | [[3, 1433], [2, 2]] | [3, 7]   \n",
      "1 | model.module_0 | GCNConv           | 367 K  | [[3, 1433], [2, 2]] | [3, 256] \n",
      "2 | model.module_1 | ReLU              | 0      | [3, 256]            | [3, 256] \n",
      "3 | model.module_2 | GCNConv           | 65.8 K | [[3, 256], [2, 2]]  | [3, 256] \n",
      "4 | model.module_3 | ReLU              | 0      | [3, 256]            | [3, 256] \n",
      "5 | model.module_4 | Linear            | 1.8 K  | [3, 256]            | [3, 7]   \n",
      "6 | loss_module    | CrossEntropyLoss  | 0      | [[3, 7], [3]]       | ?        \n",
      "---------------------------------------------------------------------------------------\n",
      "434 K     Trainable params\n",
      "0         Non-trainable params\n",
      "434 K     Total params\n",
      "0.869     Total estimated model params size (MB)\n",
      "`Trainer.fit` stopped: `max_epochs=25` reached.\n"
     ]
    }
   ],
   "source": [
    "def train(dataset):\n",
    "    node_data_loader = DataLoader(dataset, batch_size=128)\n",
    "\n",
    "    # Create a PyTorch Lightning trainer\n",
    "    trainer = pl.Trainer(\n",
    "        default_root_dir=CHECKPOINT_PATH,\n",
    "        callbacks=[\n",
    "            ModelCheckpoint(\n",
    "                filename='best_model',\n",
    "                save_weights_only=True,\n",
    "                mode=\"max\",\n",
    "                monitor=\"val_acc\"\n",
    "            ),\n",
    "            EarlyStopping(monitor=\"val_loss\", mode=\"min\", patience=5),\n",
    "            ModelSummary(max_depth=2),\n",
    "            DeviceStatsMonitor(cpu_stats=False)\n",
    "        ],\n",
    "        gpus=AVAIL_GPUS,\n",
    "        accelerator=\"gpu\",\n",
    "        precision=16, # Mixed-precision training\n",
    "        max_epochs=25,\n",
    "        auto_lr_find=True,\n",
    "        auto_scale_batch_size=True,\n",
    "        auto_select_gpus=True,\n",
    "        enable_progress_bar=False,\n",
    "        profiler=profiler\n",
    "    )\n",
    "    # # Optional logging argument that we don't need\n",
    "    # trainer.logger._default_hp_metric = None\n",
    "\n",
    "    # trainer.logger.log_metrics()\n",
    "    model = NodeClassifier(num_features=dataset.num_node_features,\n",
    "                           num_classes=dataset.num_classes,)\n",
    "    trainer.fit(model, node_data_loader, node_data_loader)\n",
    "\n",
    "    return trainer, node_data_loader\n",
    "\n",
    "\n",
    "trainer, node_data_loader = train(dataset=dataset)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
      "       Test metric             DataLoader 0\n",
      "────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
      "        test_acc            0.6549999713897705\n",
      "────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
      "Train accuracy: 87.86%\n",
      "Val accuracy:   63.20%\n",
      "Test accuracy:  65.50%\n"
     ]
    }
   ],
   "source": [
    "def eval(trainer, data_loader):\n",
    "    model = NodeClassifier.load_from_checkpoint(\n",
    "        trainer.checkpoint_callback.best_model_path)\n",
    "    # Test best model on the test set\n",
    "    test_result = trainer.test(model, dataloaders=data_loader, verbose=True)\n",
    "    batch = next(iter(node_data_loader))\n",
    "    batch = batch.to(model.device)\n",
    "    _, train_acc = model.forward(batch, mode=\"train\")\n",
    "    _, val_acc = model.forward(batch, mode=\"val\")\n",
    "    result = {\"train\": train_acc, \"val\": val_acc,\n",
    "              \"test\": test_result[0][\"test_acc\"]}\n",
    "    return model, result\n",
    "\n",
    "\n",
    "model, result = eval(trainer, node_data_loader)\n",
    "print_results(result)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "hw",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13 (default, Mar 28 2022, 11:38:47) \n[GCC 7.5.0]"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "747b0286689d47d2437887b3fa14bd59117dc06b2f96aae2ee6b23ff3c51c6b1"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
