{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2afd7255",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| default_exp modules"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "12e79ccb",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "from nbdev.showdoc import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7dfd417d",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| exporti\n",
    "import torch\n",
    "import numpy as np\n",
    "import math\n",
    "\n",
    "from torch import Tensor, nn\n",
    "import torch.nn.functional as F\n",
    "from typing import Dict, Iterable, Optional\n",
    "\n",
    "# import xformers.ops as xops"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e888d6dc",
   "metadata": {},
   "outputs": [],
   "source": [
    "#|export\n",
    "\n",
    "# Code in this file is mostly borrowed from\n",
    "# https://github.com/openai/whisper/blob/main/whisper/model.py\n",
    "# and is under the MIT License\n",
    "\n",
    "class LayerNorm(nn.LayerNorm):\n",
    "    def forward(self, x):\n",
    "        return super().forward(x.float()).type(x.dtype)\n",
    "\n",
    "# Used in μP to initialize the weights and configure the optimizer\n",
    "# These two layers map the transformer width into a fixed dimension\n",
    "class LinearHead(nn.Linear):\n",
    "    pass\n",
    "\n",
    "class QueryHead(nn.Linear):\n",
    "    pass\n",
    "\n",
    "# based on https://github.com/karpathy/minGPT/blob/master/mingpt/model.py#L163\n",
    "def init_transformer(m):\n",
    "    if isinstance(m, (nn.Linear, nn.Embedding)):\n",
    "        torch.nn.init.trunc_normal_(m.weight, std=.02)\n",
    "        if isinstance(m, nn.Linear) and m.bias is not None:\n",
    "            torch.nn.init.constant_(m.bias, 0)\n",
    "    elif isinstance(m, nn.LayerNorm):\n",
    "        torch.nn.init.constant_(m.bias, 0)\n",
    "        torch.nn.init.constant_(m.weight, 1.0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cd5c1511",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "def sinusoids(length, channels, max_timescale=10000):\n",
    "    \"\"\"Returns sinusoids for positional embedding\"\"\"\n",
    "    assert channels % 2 == 0\n",
    "    log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)\n",
    "    inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2))\n",
    "    scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]\n",
    "    return torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "09be4a04",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "class MultiHeadAttention(nn.Module):\n",
    "    \"\"\"Multi-head attention with optional rotary embeddings (RoPE), muP-style\n",
    "    qk scaling, a static kv-cache for incremental decoding, and projection\n",
    "    merging (`convert_for_eval`) for faster inference.\n",
    "    \"\"\"\n",
    "    def __init__(self, n_state: int, n_head: int, qk_scale: float = 1, rope: bool = False, cross=False):\n",
    "        \"\"\"\n",
    "        n_state: total width across all heads\n",
    "        n_head: number of attention heads\n",
    "        qk_scale: extra q.k scaling; its square root is folded into both q and k\n",
    "        rope: apply rotary position embeddings to q and k\n",
    "        cross: configure as cross-attention (changes how convert_for_eval merges)\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.n_state = n_state\n",
    "        self.n_head = n_head\n",
    "        self.sqrt_qk_scale = math.sqrt(qk_scale)\n",
    "        self.query = QueryHead(n_state, n_state)\n",
    "        # no bias on key, matching the Whisper reference implementation\n",
    "        self.key = nn.Linear(n_state, n_state, bias=False)\n",
    "        self.value = nn.Linear(n_state, n_state)\n",
    "        self.out = nn.Linear(n_state, n_state)\n",
    "        self.cross = cross\n",
    "        # position multipliers applied with RoPE when queries/keys come from\n",
    "        # sequences sampled at different rates\n",
    "        self.query_subsampling = 1\n",
    "        self.key_subsampling = 1\n",
    "\n",
    "        # identity of the last kv input tensor; forward() skips recomputing k/v\n",
    "        # when the same tensor object is passed again.\n",
    "        # NOTE(review): only ever set to None in this class — presumably assigned\n",
    "        # by inference code elsewhere; confirm against callers.\n",
    "        self.cached_kvx = None\n",
    "        # allocated later by setup_kv_cache(); None disables caching\n",
    "        self.register_buffer('k_cache', None)\n",
    "        self.register_buffer('v_cache', None)\n",
    "        \n",
    "        self.rotary = None\n",
    "        if rope:\n",
    "            self.rotary = Rotary(n_state // n_head)\n",
    "        # merged projections, filled in by convert_for_eval()\n",
    "        self.qkv = None\n",
    "        self.kv = None\n",
    "\n",
    "    def setup_kv_cache(self, max_batch_size, max_seq_len, dtype=torch.float32):\n",
    "        \"\"\"Preallocate k/v caches of shape (batch, heads, seq, head_dim).\"\"\"\n",
    "        cache_shape = (max_batch_size, self.n_head, max_seq_len, self.n_state//self.n_head)\n",
    "        self.k_cache = torch.zeros(cache_shape, dtype=dtype, device=self.key.weight.device)\n",
    "        self.v_cache = torch.zeros(cache_shape, dtype=dtype, device=self.value.weight.device)\n",
    "\n",
    "    def merge_linears(self, layers, mults):\n",
    "        \"\"\"Concatenate several equally-shaped nn.Linear layers into one, scaling\n",
    "        each layer's weight/bias by the matching scalar in `mults` (layers\n",
    "        without a bias contribute zeros to the merged bias).\n",
    "\n",
    "        NOTE(review): `din, dout` unpack weight.shape, which is\n",
    "        (out_features, in_features); this only works because every merged\n",
    "        layer here is square (n_state x n_state).\n",
    "        \"\"\"\n",
    "        bias = [x.bias for x in layers if x.bias is not None][0]\n",
    "        din, dout = layers[0].weight.shape\n",
    "        new = nn.Linear(din, len(layers) * dout).to(layers[0].weight.device)\n",
    "        with torch.no_grad():\n",
    "            new.weight[:] = torch.cat([x.weight * m for x,m in zip(layers, mults)])\n",
    "            new.bias[:] = torch.cat([torch.zeros_like(bias) if x.bias is None else x.bias * m for x, m in zip(layers, mults)])\n",
    "        return new\n",
    "\n",
    "    def convert_for_eval(self):\n",
    "        \"\"\"Fuse q/k/v projections (with sqrt_qk_scale baked in) for inference:\n",
    "        self-attention gets one merged qkv projection; cross-attention keeps q\n",
    "        separate (it consumes a different input) and merges only k/v.\n",
    "        \"\"\"\n",
    "        # relies on nn.Module instances being truthy and None falsy to detect\n",
    "        # a previous conversion\n",
    "        if self.qkv or self.kv: raise AttributeError(\"already converted\")\n",
    "        \n",
    "        # width of each chunk when splitting the merged projection output\n",
    "        self.odim = self.key.weight.shape[1]\n",
    "        if self.cross:\n",
    "            self.q = self.merge_linears([self.query], [self.sqrt_qk_scale])\n",
    "            self.kv = self.merge_linears([self.key, self.value],\n",
    "                                         [self.sqrt_qk_scale, 1])\n",
    "        else:\n",
    "            self.qkv = self.merge_linears([self.query, self.key, self.value],\n",
    "                                          [self.sqrt_qk_scale, self.sqrt_qk_scale, 1])\n",
    "        \n",
    "    def split_heads(self, x, x_positions, rope=False, subsampling=1):\n",
    "        \"\"\"Reshape (batch, seq, n_state) -> (batch, heads, seq, head_dim),\n",
    "        optionally applying RoPE at positions `x_positions * subsampling`.\"\"\"\n",
    "        x = x.view(*x.shape[:2], self.n_head, -1)\n",
    "        if rope:\n",
    "            x = rope_rotate(x, x_positions * subsampling, *self.rotary(x))\n",
    "        return x.permute(0, 2, 1, 3)\n",
    "\n",
    "    def forward(\n",
    "        self,\n",
    "        qx,\n",
    "        q_positions,\n",
    "        kvx,\n",
    "        kv_positions,\n",
    "        causal = False,\n",
    "        mask=None,\n",
    "    ):\n",
    "        \"\"\"Attend queries from `qx` (at q_positions) over keys/values from\n",
    "        `kvx` (at kv_positions); pass the same tensor for self-attention.\n",
    "\n",
    "        With a kv-cache set up, freshly computed k/v are written at\n",
    "        kv_positions and attention then runs over the whole cache.\n",
    "        \"\"\"\n",
    "        # merged projections (from convert_for_eval) already include the scale\n",
    "        if self.qkv:\n",
    "            q,k,v = self.qkv(qx).split(self.odim, dim=-1)\n",
    "        elif self.kv:\n",
    "            q = self.q(qx)\n",
    "            k,v = self.kv(kvx).split(self.odim, dim=-1)\n",
    "        else:\n",
    "            q,k,v = None,None,None\n",
    "        \n",
    "        if q is None: q = self.query(qx) * self.sqrt_qk_scale\n",
    "        # self.rotary (a module or None) doubles as the `rope` flag here\n",
    "        q = self.split_heads(q, q_positions, rope = self.rotary, subsampling = self.query_subsampling)\n",
    "\n",
    "        # only recompute k/v when the kv source tensor changed since last call\n",
    "        if kvx is not self.cached_kvx:\n",
    "            if k is None: k = self.key(kvx) * self.sqrt_qk_scale\n",
    "            k = self.split_heads(k, kv_positions, rope = self.rotary, subsampling = self.key_subsampling)\n",
    "            if v is None: v = self.value(kvx)\n",
    "            v = self.split_heads(v, kv_positions)\n",
    "            if self.k_cache is not None:\n",
    "                self.k_cache[:,:,kv_positions] = k\n",
    "                self.v_cache[:,:,kv_positions] = v\n",
    "\n",
    "        if self.k_cache is not None:\n",
    "            k, v = self.k_cache, self.v_cache\n",
    "\n",
    "        if mask is not None:\n",
    "            # select the mask rows for the current query positions\n",
    "            mask = mask[q_positions]\n",
    "            \n",
    "        wv = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0, is_causal=causal)\n",
    "        \n",
    "        return self.out(wv.permute(0, 2, 1, 3).flatten(start_dim=2))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9e6f6750",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| exporti\n",
    "# modified from https://blog.eleuther.ai/rotary-embeddings/\n",
    "\n",
    "import torch\n",
    "\n",
    "class Rotary(torch.nn.Module):\n",
    "    def __init__(self, dim, base=10000):\n",
    "        super().__init__()\n",
    "        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))\n",
    "        self.register_buffer(\"inv_freq\", inv_freq)\n",
    "        self.seq_len_cached = None\n",
    "        self.cos_cached = None\n",
    "        self.sin_cached = None\n",
    "\n",
    "    def forward(self, x, seq_dim=1):\n",
    "        seq_len = x.shape[seq_dim]\n",
    "        if not self.seq_len_cached or seq_len > self.seq_len_cached:\n",
    "            self.seq_len_cached = 2500\n",
    "            # self.seq_len_cached = seq_len\n",
    "            \n",
    "            t = torch.arange(self.seq_len_cached, device=x.device).type_as(self.inv_freq)\n",
    "            freqs = torch.einsum(\"i,j->ij\", t, self.inv_freq)\n",
    "            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)\n",
    "            self.cos_cached = emb.cos()[None, :, None, :]\n",
    "            self.sin_cached = emb.sin()[None, :, None, :]\n",
    "        return self.cos_cached, self.sin_cached\n",
    "\n",
    "\n",
    "# rotary pos emb helpers:\n",
    "def rotate_half(x):\n",
    "    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]\n",
    "    return torch.cat(\n",
    "        (-x2, x1), dim=len(x.shape)-1\n",
    "    )\n",
    "\n",
    "def rope_rotate(x, positions, cos, sin):\n",
    "    \"\"\"Apply rotary embedding to x using cos/sin tables gathered at positions.\"\"\"\n",
    "    c, s = cos[:, positions], sin[:, positions]\n",
    "    return x * c + rotate_half(x) * s"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f805216b",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "class ResidualAttentionBlock(nn.Module):\n",
    "    \"\"\"Pre-norm transformer block: self-attention, optional cross-attention,\n",
    "    and a GELU MLP, each wrapped in a residual connection.\"\"\"\n",
    "    def __init__(self, n_state: int, n_head: int, cross_attention: bool = False, rope: bool = False,\n",
    "                 qk_scale: float = 1, ffn_mult: int = 4):\n",
    "        super().__init__()\n",
    "        self.attn = MultiHeadAttention(n_state, n_head, qk_scale=qk_scale, rope=rope)\n",
    "        self.attn_ln = LayerNorm(n_state)\n",
    "\n",
    "        if cross_attention:\n",
    "            self.cross_attn = MultiHeadAttention(n_state, n_head, qk_scale=qk_scale, rope=rope, cross=True)\n",
    "            self.cross_attn_ln = LayerNorm(n_state)\n",
    "        else:\n",
    "            self.cross_attn = None\n",
    "            self.cross_attn_ln = None\n",
    "\n",
    "        hidden = ffn_mult * n_state\n",
    "        self.mlp = nn.Sequential(\n",
    "            nn.Linear(n_state, hidden), nn.GELU(), nn.Linear(hidden, n_state)\n",
    "        )\n",
    "        self.mlp_ln = LayerNorm(n_state)\n",
    "\n",
    "    def setup_kv_cache(self, max_batch_size, max_seq_len, max_cross_seq_len=None):\n",
    "        \"\"\"Allocate kv caches for self-attention (and cross-attention if present).\"\"\"\n",
    "        self.attn.setup_kv_cache(max_batch_size, max_seq_len)\n",
    "        if self.cross_attn is not None:\n",
    "            self.cross_attn.setup_kv_cache(max_batch_size, max_cross_seq_len)\n",
    "\n",
    "    def forward(\n",
    "        self,\n",
    "        x: Tensor,\n",
    "        x_positions: Tensor = None,\n",
    "        xa: Optional[Tensor] = None,\n",
    "        xa_positions: Optional[Tensor] = None,\n",
    "        causal = False,\n",
    "        mask=None,\n",
    "    ):\n",
    "        normed = self.attn_ln(x)\n",
    "        x = x + self.attn(normed, x_positions, normed, x_positions, causal=causal, mask=mask)\n",
    "        if self.cross_attn is not None:\n",
    "            normed = self.cross_attn_ln(x)\n",
    "            x = x + self.cross_attn(normed, x_positions, xa, xa_positions)\n",
    "        return x + self.mlp(self.mlp_ln(x))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bcd06a98-a79b-4e6b-a5b9-61b858a9ff12",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "class BaseDecoder(nn.Module):\n",
    "    \"\"\"A stack of cross-attending ResidualAttentionBlocks followed by a final\n",
    "    LayerNorm, with a precomputed upper-triangular -inf attention mask.\"\"\"\n",
    "    def __init__(self, depth=6, n_head=6, width=384, qk_scale=1, ffn_mult=4, length=2250, rope=False):\n",
    "        super().__init__()\n",
    "        self.length = length\n",
    "        self.width = width\n",
    "        blocks = [\n",
    "            ResidualAttentionBlock(\n",
    "                self.width, n_head, qk_scale=qk_scale, ffn_mult=ffn_mult, cross_attention=True, rope=rope\n",
    "            ) for _ in range(math.floor(depth))\n",
    "        ]\n",
    "        self.layers = nn.ModuleList(blocks)\n",
    "\n",
    "        self.ln_post = LayerNorm(width)\n",
    "\n",
    "        # position i may only attend to positions <= i\n",
    "        mask = torch.empty(length, length).fill_(-torch.inf).triu_(1)\n",
    "        self.register_buffer(\"mask\", mask, persistent=False)\n",
    "\n",
    "    def forward(self, x, x_positions, xenc, xenc_positions):\n",
    "        for block in self.layers:\n",
    "            x = block(x, x_positions, xenc, xenc_positions, causal=False, mask=self.mask)\n",
    "        return self.ln_post(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "efbc6c73-f3a0-4f0a-84be-ecee909bb524",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "class EmbeddingProjector(nn.Linear):\n",
    "    pass\n",
    "\n",
    "class FlexEmbeddings(nn.Module):\n",
    "    \"\"\"Embedding + unembedding with an optional frozen main table and extra\n",
    "    trainable `special` tokens appended after the first `codes` ids.\n",
    "\n",
    "    Ids < codes use `main` (projected between `frozen_width` and the model\n",
    "    `width` when those differ); ids >= codes use `special`.\n",
    "    `convert_for_eval()` precomputes merged tables for fast inference.\n",
    "    \"\"\"\n",
    "    def __init__(self, codes, width, special_codes=None, frozen_width=None, special_embedding=None, unembed=True):\n",
    "        super().__init__()\n",
    "        self.codes = codes\n",
    "        self.special_codes = special_codes\n",
    "        if frozen_width is None: frozen_width = width\n",
    "        \n",
    "        # main table lives at frozen_width; projectors translate to/from width\n",
    "        self.main = nn.Embedding(codes, frozen_width or width)\n",
    "        self.emb_to_hidden = EmbeddingProjector(frozen_width, width) if frozen_width != width else None\n",
    "        self.hidden_to_emb = EmbeddingProjector(width, frozen_width) if unembed and frozen_width != width else None\n",
    "        if special_codes:\n",
    "            self.special = special_embedding or nn.Embedding(special_codes, width)\n",
    "            \n",
    "        # inference-only merged tables, filled in by convert_for_eval()\n",
    "        self.register_buffer('merged_in', None)\n",
    "        self.register_buffer('merged_out', None)\n",
    "        self.register_buffer('bias_out', None)\n",
    "    \n",
    "    def set_frozen_embeddings(self, values):\n",
    "        \"\"\"Copy fixed values into the main table and flag it as frozen\n",
    "        (lr_scale=0 — presumably honored by the optimizer setup; confirm).\"\"\"\n",
    "        with torch.no_grad():\n",
    "            self.main.weight[:] = values\n",
    "            self.main.lr_scale = 0\n",
    "    \n",
    "    @torch.no_grad()\n",
    "    def convert_for_eval(self):\n",
    "        \"\"\"Precompute merged input/output tables (main ++ special) so that\n",
    "        eval-time forward/unembed become one lookup / one matmul.\n",
    "\n",
    "        NOTE(review): `merged_in` was registered as a buffer but is reassigned\n",
    "        an nn.Embedding here; this relies on nn.Module.__setattr__ migrating\n",
    "        the name from buffers to submodules — confirm on supported torch versions.\n",
    "        \"\"\"\n",
    "        if not self.special_codes: return\n",
    "        # in\n",
    "        main_w = self.main.weight\n",
    "        if self.emb_to_hidden is not None: main_w = self.emb_to_hidden(main_w)\n",
    "        weight = torch.cat([main_w, self.special.weight], dim=0)\n",
    "        self.merged_in = nn.Embedding(*weight.shape, _weight=weight)\n",
    "        \n",
    "        # out\n",
    "        weight = self.main.weight\n",
    "        if self.hidden_to_emb: weight = weight @ self.hidden_to_emb.weight\n",
    "        self.merged_out = torch.cat([weight.T, self.special.weight.T], dim=1).T.contiguous() # T is for F.linear\n",
    "        if self.hidden_to_emb:\n",
    "            # fold the projector bias into per-token logit offsets (zeros for specials)\n",
    "            self.bias_out = torch.cat([\n",
    "                self.hidden_to_emb.bias @ self.main.weight.T,\n",
    "                torch.zeros(self.special.weight.shape[0], device=weight.device, dtype=weight.dtype)\n",
    "            ], dim=0)\n",
    "        else:\n",
    "            self.bias_out = None\n",
    "\n",
    "    def forward(self, toks):\n",
    "        \"\"\"Embed token ids; ids >= self.codes index the special table.\"\"\"\n",
    "        if not self.training and self.merged_in is not None:\n",
    "            return self.merged_in(toks)\n",
    "        \n",
    "        if self.special_codes:\n",
    "            # look special tokens up as id 0 for now; overwritten below\n",
    "            special_mask = toks >= self.codes\n",
    "            embs = self.main(torch.where(special_mask, 0, toks))\n",
    "        else:\n",
    "            embs = self.main(toks)\n",
    "        \n",
    "        if self.emb_to_hidden: embs = self.emb_to_hidden(embs)\n",
    "        \n",
    "        if self.special_codes:\n",
    "            embs[special_mask] = self.special(toks[special_mask] - self.codes).to(embs.dtype)\n",
    "        \n",
    "        return embs\n",
    "    \n",
    "    def unembed(self, embs):\n",
    "        \"\"\"Map hidden states to float32 logits over the main (+ special) vocab.\"\"\"\n",
    "        if not self.training and self.merged_out is not None:\n",
    "            return F.linear(embs, self.merged_out, self.bias_out) # embs @ self.merged_out + self.bias_out\n",
    "\n",
    "        orig_embs = embs\n",
    "        if self.hidden_to_emb: embs = self.hidden_to_emb(embs)\n",
    "        \n",
    "        main_logits = (embs @ self.main.weight.to(embs.dtype).T).float()\n",
    "        \n",
    "        if not self.special_codes:\n",
    "            return main_logits\n",
    "        \n",
    "        special_logits = (orig_embs @ self.special.weight.to(orig_embs.dtype).T).float()\n",
    "        return torch.cat([main_logits, special_logits], dim=-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00406652",
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "# write the `#| export` cells of this notebook out to the library module\n",
    "import nbdev; nbdev.nbdev_export()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8fa6ef35",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python3",
   "language": "python",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
