{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2afd7255",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| default_exp train_multi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "12e79ccb",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "from nbdev.showdoc import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7dfd417d",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| exporti\n",
    "import io\n",
    "import time\n",
    "import random\n",
    "from pathlib import Path\n",
    "\n",
    "from fastprogress import progress_bar, master_bar\n",
    "import fastprogress\n",
    "import wandb\n",
    "\n",
    "import numpy as np\n",
    "import pylab as plt\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.utils.data.dataloader import DataLoader\n",
    "from torch.profiler import record_function"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "232153f3",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| exporti\n",
    "import lightning.pytorch as pl\n",
    "import math\n",
    "\n",
    "class TrainingTask(pl.LightningModule):\n",
    "    \"\"\"Lightning wrapper around a task model whose `forward(*batch)` returns (logits, loss).\"\"\"\n",
    "    def __init__(self, model, model_hparams=None):\n",
    "        super().__init__()\n",
    "        self.model = model\n",
    "        self.model_hparams = model_hparams\n",
    "        \n",
    "    def on_fit_start(self):\n",
    "        # let the model initialize device-dependent state if it supports it\n",
    "        # (was `getattr(self.model, 'setup')` which raises AttributeError when missing)\n",
    "        if getattr(self.model, 'setup', None):\n",
    "            self.model.setup(self.device)\n",
    "    \n",
    "    def configure_optimizers(self):\n",
    "        \"\"\" Initialize AdamW optimizer\"\"\"\n",
    "        lr = self.model_hparams['lr0']\n",
    "        weight_decay = self.model_hparams['weight_decay']\n",
    "        \n",
    "        # group parameters of submodules that opt out of weight decay or scale their lr;\n",
    "        # note: use self.model here -- this used to read the module-level `model` global\n",
    "        all_params = set(self.model.parameters())\n",
    "        customized_params = set()\n",
    "        groups = []\n",
    "        group_map = {}\n",
    "        for name,m in self.model.named_modules():\n",
    "            if hasattr(m, 'no_weight_decay') or hasattr(m, 'lr_scale'):\n",
    "                customized_params |= set(m.parameters())\n",
    "                m_wd = 0 if hasattr(m, 'no_weight_decay') else weight_decay\n",
    "                m_lr = lr * getattr(m, 'lr_scale', 1)\n",
    "                # merge all modules with the same (weight_decay, lr) into one group\n",
    "                group = group_map.get((m_wd, m_lr), None)\n",
    "                if not group:\n",
    "                    group = {\"params\": [], \"names\": [], \"weight_decay\": m_wd, \"lr\": m_lr}\n",
    "                    groups.append(group)\n",
    "                    group_map[(m_wd, m_lr)] = group\n",
    "                group['params'] += m.parameters()\n",
    "                group['names'].append(name)\n",
    "                \n",
    "        # everything that did not ask for custom treatment gets the default settings\n",
    "        other_params = all_params - customized_params\n",
    "        \n",
    "        param_groups = groups + [\n",
    "            {\"names\": [\"other\"], \"params\": list(other_params), \"weight_decay\": weight_decay },\n",
    "        ]\n",
    "\n",
    "        optimizer = torch.optim.AdamW(lr=lr, betas=(0.9, 0.95), params=param_groups)\n",
    "        \n",
    "        # modified from https://github.com/Lightning-AI/lightning/issues/5449#issuecomment-1501597319\n",
    "        def num_steps_per_epoch() -> int:\n",
    "            \"\"\"Get number of steps\"\"\"\n",
    "            # Accessing _data_source is flaky and might break\n",
    "            dataset = self.trainer.fit_loop._data_source.dataloader()\n",
    "            dataset_size = len(dataset)\n",
    "            # math.ceil so always overestimate (underestimating throws exceptions)\n",
    "            num_steps = math.ceil(dataset_size / self.trainer.accumulate_grad_batches)\n",
    "            return num_steps\n",
    "        \n",
    "        total_steps = self.model_hparams['epochs'] * num_steps_per_epoch()\n",
    "        # warmup lasts `warmup_steps` updates but never more than 30% of training\n",
    "        self.model_hparams['pct_start'] = min(0.3, self.model_hparams['warmup_steps'] / total_steps)\n",
    "\n",
    "        print(f\"{self.model_hparams['epochs']=} epochs x {num_steps_per_epoch()=} steps\")\n",
    "        \n",
    "        lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(\n",
    "            optimizer,\n",
    "            pct_start=self.model_hparams['pct_start'],\n",
    "            max_lr=[pg.get('lr', lr) for pg in param_groups],\n",
    "            steps_per_epoch=num_steps_per_epoch(),\n",
    "            epochs=int(self.model_hparams['epochs']),\n",
    "            final_div_factor=25\n",
    "        )\n",
    "\n",
    "        # step the scheduler every optimizer update, not once per epoch\n",
    "        return [optimizer], [{'scheduler': lr_scheduler, 'interval': 'step'}]\n",
    "    \n",
    "    def training_step(self, train_batch, batch_idx):\n",
    "        train_logits, train_loss = self.model.forward(*train_batch)\n",
    "\n",
    "        self.log(\"train_loss\", train_loss, sync_dist=True)\n",
    "        return train_loss\n",
    "    \n",
    "    def validation_step(self, val_batch, batch_idx):\n",
    "        val_logits, val_loss = self.model.forward(*val_batch)\n",
    "\n",
    "        self.log(\"val_loss\", val_loss, sync_dist=True)\n",
    "        return val_loss\n",
    "\n",
    "    def on_validation_epoch_end(self):\n",
    "        # tasks may expose extra metrics via an optional get_metrics() hook\n",
    "        if hasattr(self.model, 'get_metrics'):\n",
    "            self.log_dict({'metrics/'+k:v for k,v in self.model.get_metrics().items()}, sync_dist=True)\n",
    "    \n",
    "    def test_step(self, val_batch, batch_idx):\n",
    "        test_logits, test_loss = self.model.forward(*val_batch)\n",
    "\n",
    "        self.log(\"test_loss\", test_loss, sync_dist=True)\n",
    "        return test_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ae232d52",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| exporti\n",
    "from fastcore.script import anno_parser\n",
    "import shlex\n",
    "\n",
    "# watch out: we can only pass Python values as keyword arguments (not positional)\n",
    "# everything else has to be a string\n",
    "def parse_and_call(name, fun, args, kwargs=None, log_to_wandb=True):\n",
    "    \"\"\"Parse CLI-style `args` with `fun`'s annotation-derived parser, merge `kwargs`\n",
    "    on top, optionally record the arguments in the W&B run config under `name`,\n",
    "    and call `fun` with the result.\"\"\"\n",
    "    p = anno_parser(fun)\n",
    "    args = p.parse_args(args).__dict__\n",
    "    # drop the extra options fastcore's anno_parser always injects\n",
    "    args.pop('xtra'); args.pop('pdb')\n",
    "    # was a mutable default argument (kwargs={}); None sentinel avoids shared state\n",
    "    args.update(kwargs or {})\n",
    "    if log_to_wandb and type(wandb_logger.experiment.config) == wandb.sdk.wandb_config.Config:\n",
    "        wandb_logger.experiment.config[name] = {k:v for k,v in args.items() if k not in ['dataset', 'tunables']}\n",
    "    return fun(**args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4b23790e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ArgumentParser(prog='ipykernel_launcher.py', usage=None, description=None, formatter_class=<class 'fastcore.script._HelpFormatter'>, conflict_handler='error', add_help=True)"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# sanity check: `kwargs` (a=[] here) is merged on top of the parsed CLI args (--to 4)\n",
    "def test_fun(a:str=None, to:int = 2, toggle:bool=True):\n",
    "    assert(a is not None)\n",
    "    print(a, to, toggle)\n",
    "parse_and_call(\"test\", test_fun, [\"--to\", \"4\"], dict(a=[]), log_to_wandb=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1d8ac45a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "a 2 True\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "Namespace(a=None, to=2, toggle=False, pdb=False, xtra=None)"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# inspect the raw Namespace anno_parser produces when no CLI args are given\n",
    "# NOTE(review): `toggle:bool=True` parses to toggle=False (see output below) --\n",
    "# fastcore appears to treat bool arguments as flags; confirm before relying on it\n",
    "from fastcore.script import anno_parser\n",
    "def test_fun(a:str=None, to:int = 2, toggle:bool=True):\n",
    "    assert(a is not None)\n",
    "    print(a, to, toggle)\n",
    "test_fun(\"a\")\n",
    "anno_parser(test_fun).parse_args([])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bd039c0a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "qwe 2\n"
     ]
    }
   ],
   "source": [
    "# positional arguments (like `a` here) are passed as plain strings in the args list\n",
    "def test_fun2(a:str, to:int = 2):\n",
    "    assert(a is not None)\n",
    "    print(a, to)\n",
    "\n",
    "parse_and_call(\"test\", test_fun2, [\"qwe\"], log_to_wandb=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7822542c",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| exporti\n",
    "import argparse\n",
    "\n",
    "parser = argparse.ArgumentParser()\n",
    "# --task is mandatory: without it `shlex.split(args.pop(\"task\"))` below crashes\n",
    "# with a confusing TypeError instead of a clear usage message\n",
    "parser.add_argument('--task', type=str, required=True, help='Task to train')\n",
    "parser.add_argument('--seed', type=int, default=0, help='Global training seed')\n",
    "parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')\n",
    "parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')\n",
    "parser.add_argument('--input-dir', type=str, default='', help='input data path') # fixed in the model for now\n",
    "parser.add_argument(\"--checkpoint-dir\", type=str, default=\"./checkpoints/\", help=\"directory to save the checkpoints\")\n",
    "parser.add_argument('--epochs', type=int, default=10, help='total training epochs')\n",
    "parser.add_argument('--validate-every-n-steps', type=int, default=500, help='how many training steps to run between validations')\n",
    "parser.add_argument('--weight-decay', type=float, default=1e-2, help='optimizer weight decay')\n",
    "parser.add_argument('--lr0', type=float, default=1e-4, help='optimizer initial learning rate')\n",
    "parser.add_argument('--clip-gradient-norm', type=float, default=None, help='enable gradient norm clipping')\n",
    "parser.add_argument('--accumulate-grad-batches', type=int, default=1, help='perform the optimizer step only after going through several batches of samples')\n",
    "parser.add_argument('--precision', type=str, default=\"16-mixed\", help=\"floating point precision\")\n",
    "parser.add_argument('--warmup-steps', type=int, default=10000, help='total number steps during which the learning rate rises (defaults to 10k updates)')\n",
    "parser.add_argument('--tunables', type=str, default=\"\", help='tunable hyperparameters')\n",
    "parser.add_argument('--resume-from', type=Path, default=None, help='resume training from the given checkpoint')\n",
    "parser.add_argument('--strategy', type=str, default='ddp', help='distributed training strategy')\n",
    "parser.add_argument('--wandb-suffix', type=str, default=None, help='W&B project name suffix')\n",
    "parser.add_argument('--wandb-task-name', type=str, default=None, help='Task name for the W&B project name')\n",
    "\n",
    "args = parser.parse_args().__dict__\n",
    "\n",
    "# --task holds the task module name followed by its own CLI arguments\n",
    "task_args: list = shlex.split(args.pop(\"task\"))\n",
    "task_name, task_args = task_args[0], task_args[1:]\n",
    "input_args: list = shlex.split(args.pop(\"input_dir\"))\n",
    "checkpoint_dir: str = args.pop(\"checkpoint_dir\")\n",
    "num_workers: int = args.pop(\"workers\")\n",
    "batch_size: int = args.pop(\"batch_size\")\n",
    "epochs: int = args.pop(\"epochs\")\n",
    "tunables_args: list = shlex.split(args.pop(\"tunables\"))\n",
    "\n",
    "# hyperparameters forwarded to TrainingTask and the Trainer\n",
    "hyp_params = {}\n",
    "hyp_params['batch_size'] = batch_size\n",
    "hyp_params['warmup_steps'] = args['warmup_steps']\n",
    "hyp_params['weight_decay'] = args['weight_decay']\n",
    "hyp_params['clip_gradient_norm'] = args['clip_gradient_norm']\n",
    "hyp_params['accumulate_grad_batches'] = args['accumulate_grad_batches']\n",
    "hyp_params['precision'] = args['precision']\n",
    "hyp_params['lr0'] = args['lr0']\n",
    "hyp_params['epochs'] = epochs\n",
    "hyp_params['strategy'] = args['strategy']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a224add7",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| exporti\n",
    "from lightning.pytorch.loggers import WandbLogger\n",
    "from lightning.pytorch.callbacks import LearningRateMonitor\n",
    "import datetime\n",
    "import webdataset as wds\n",
    "import importlib\n",
    "\n",
    "torch.set_float32_matmul_precision('medium')\n",
    "\n",
    "project = f\"WhisperSpeech-{args['wandb_task_name'] or task_name}\"\n",
    "if args['wandb_suffix']:\n",
    "    project += \"-\"+args['wandb_suffix']\n",
    "\n",
    "wandb_logger = WandbLogger(project=project)\n",
    "\n",
    "# keep the 4 best checkpoints by val_loss, saving at most every 5 minutes\n",
    "ckpt_callback = pl.callbacks.ModelCheckpoint(\n",
    "     dirpath=f'{task_name}-{epochs}e',\n",
    "     filename=task_name+\"-{epoch}-{step}-{val_loss:.2f}\",\n",
    "     monitor=\"val_loss\",\n",
    "     save_top_k=4,\n",
    "     train_time_interval=datetime.timedelta(minutes=5),\n",
    " )\n",
    "\n",
    "lr_monitor_callback = LearningRateMonitor(logging_interval='step')\n",
    "\n",
    "from torch.utils.data import DataLoader\n",
    "\n",
    "# the task module provides load_datasets, make_model and (optionally) Tunables\n",
    "task = importlib.import_module(\"whisperspeech.\"+task_name)\n",
    "\n",
    "train_ds, val_ds = parse_and_call('dataset', task.load_datasets, input_args)\n",
    "\n",
    "tunables = None\n",
    "if hasattr(task, \"Tunables\"):\n",
    "    import dataclasses\n",
    "    tunables = parse_and_call('tunables', task.Tunables, tunables_args, log_to_wandb=False)\n",
    "    if type(wandb_logger.experiment.config) == wandb.sdk.wandb_config.Config:\n",
    "        wandb_logger.experiment.config['tunables'] = dataclasses.asdict(tunables)\n",
    "\n",
    "    # tunables may override the optimizer hyperparameters from the command line\n",
    "    for name in [\"lr0\", \"clip_gradient_norm\", \"weight_decay\", \"warmup_steps\"]:\n",
    "        val = getattr(tunables, name, None)\n",
    "        if val is not None: hyp_params[name] = val\n",
    "\n",
    "# IterableDatasets (e.g. webdataset pipelines) handle batching/shuffling themselves\n",
    "if isinstance(train_ds, torch.utils.data.IterableDataset):\n",
    "    dl_batch_size, dl_shuffle = None, False\n",
    "    pin_memory = False\n",
    "else:\n",
    "    dl_batch_size, dl_shuffle = batch_size, True\n",
    "    pin_memory = True\n",
    "\n",
    "val_loader = wds.WebLoader(val_ds,\n",
    "    batch_size=dl_batch_size,\n",
    "    num_workers=num_workers,\n",
    "    drop_last=False,\n",
    "    pin_memory=pin_memory).unbatched().shuffle(1024).batched(batch_size).with_length(val_ds.total_samples // batch_size)\n",
    "\n",
    "train_loader = wds.WebLoader(train_ds,\n",
    "    batch_size=dl_batch_size,\n",
    "    num_workers=num_workers,\n",
    "    drop_last=False,\n",
    "    shuffle=dl_shuffle,\n",
    "    pin_memory=pin_memory).unbatched().shuffle(1024).batched(batch_size).with_length(train_ds.total_samples // batch_size)\n",
    "\n",
    "model_kwargs = dict(dataset=train_ds)\n",
    "if tunables is not None: model_kwargs['tunables'] = tunables\n",
    "model = parse_and_call('model', task.make_model, task_args, model_kwargs)\n",
    "\n",
    "# was `task = TrainingTask(...)` which shadowed the task module imported above\n",
    "train_task = TrainingTask(model, model_hparams=hyp_params)\n",
    "\n",
    "trainer = pl.Trainer(strategy=hyp_params['strategy'],\n",
    "                  max_epochs=hyp_params['epochs'],\n",
    "                  accelerator=\"gpu\",\n",
    "                  profiler=\"simple\",\n",
    "                  precision=hyp_params['precision'],\n",
    "                  gradient_clip_val=hyp_params['clip_gradient_norm'],\n",
    "                  accumulate_grad_batches=hyp_params['accumulate_grad_batches'],\n",
    "                  val_check_interval=args.pop(\"validate_every_n_steps\"),\n",
    "                  enable_checkpointing=True,\n",
    "                  logger=wandb_logger,\n",
    "                  callbacks=[ckpt_callback, lr_monitor_callback])\n",
    "\n",
    "if type(wandb_logger.experiment.config) == wandb.sdk.wandb_config.Config:\n",
    "    wandb_logger.experiment.config.update(hyp_params)\n",
    "    \n",
    "kwargs = {}\n",
    "# was `if 'resume_from' in args:` which is always True (argparse always sets the key)\n",
    "if args['resume_from'] is not None:\n",
    "    kwargs['ckpt_path'] = args['resume_from']\n",
    "trainer.fit(model=train_task, train_dataloaders=train_loader, val_dataloaders=val_loader, **kwargs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00406652",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "# regenerate the library module (train_multi, per the default_exp directive above)\n",
    "import nbdev; nbdev.nbdev_export()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ffffe92",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python3",
   "language": "python",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
