{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pull in the re-implemented decoder pieces (defines load_decoder)\n",
    "# and build the HuggingFace reference model to compare against.\n",
    "%run decoder.ipynb\n",
    "\n",
    "import torch\n",
    "from transformers import WhisperForConditionalGeneration\n",
    "\n",
    "# Reference: the decoder sub-module of a locally saved whisper-small.\n",
    "pretrained = WhisperForConditionalGeneration.from_pretrained(\n",
    "    'models/whisper-small').model.decoder\n",
    "# load_decoder comes from decoder.ipynb -- presumably copies the\n",
    "# pretrained weights into the re-implementation; TODO confirm there.\n",
    "decoder = load_decoder(pretrained)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor(True), tensor(True), tensor(True))"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def forward_atten(self, x, cache_kv):\n",
    "    # Single-step decoder self-attention (batch 1, query length 1).\n",
    "    # Head layout is hard-coded for whisper-small: 12 heads x 64 dims\n",
    "    # = 768 hidden. cache_kv is (k, v) from earlier steps, each of\n",
    "    # shape (1, 12, past_len, 64), or None on the first step.\n",
    "    # Returns (output (1, 1, 768), updated (k, v) cache).\n",
    "\n",
    "    # 0.125 == 1 / sqrt(64): fold the attention scale into q up front.\n",
    "    q = self.q(x).reshape(1, 1, 12, 64).transpose(1, 2) * 0.125\n",
    "    k = self.k(x).reshape(1, -1, 12, 64).transpose(1, 2)\n",
    "    v = self.v(x).reshape(1, -1, 12, 64).transpose(1, 2)\n",
    "\n",
    "    # Prepend cached keys/values along the sequence axis.\n",
    "    if cache_kv:\n",
    "        k = torch.cat([cache_kv[0], k], dim=2)\n",
    "        v = torch.cat([cache_kv[1], v], dim=2)\n",
    "\n",
    "    cache_kv = k, v\n",
    "\n",
    "    # Merge batch and head axes so one bmm covers all 12 heads.\n",
    "    q = q.reshape(12, -1, 64)\n",
    "    k = k.reshape(12, -1, 64)\n",
    "    v = v.reshape(12, -1, 64)\n",
    "\n",
    "    # No causal mask needed: the single new query may attend to every\n",
    "    # cached position plus itself.\n",
    "    atten = q.bmm(k.transpose(1, 2)).softmax(dim=-1).bmm(v)\n",
    "\n",
    "    # Back to (1, 1, 768): split batch/head, swap, flatten the heads.\n",
    "    atten = atten.reshape(1, 12, 1, 64).transpose(1, 2).reshape(1, 1, 768)\n",
    "\n",
    "    atten = self.out(atten)\n",
    "\n",
    "    return atten, cache_kv\n",
    "\n",
    "\n",
    "# Check against the HuggingFace self_attn layer: output and updated k/v\n",
    "# cache must match exactly (==) for a random input and a 2-step cache.\n",
    "x = torch.randn(1, 1, 768)\n",
    "cache_kv = torch.randn(1, 12, 2, 64), torch.randn(1, 12, 2, 64)\n",
    "\n",
    "out1, (k1, v1) = forward_atten(decoder.layer[0].atten, x, cache_kv)\n",
    "out2, _, (k2, v2) = pretrained.layers[0].self_attn(x, past_key_value=cache_kv)\n",
    "\n",
    "(out1 == out2).all(), (k1 == k2).all(), (v1 == v2).all()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor(True), tensor(True), tensor(True))"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def forward_cross_atten(self, x, kv, cache_kv):\n",
    "    # Cross-attention from one decoder step to the encoder output kv.\n",
    "    # The encoder output is fixed for a whole generation, so k/v are\n",
    "    # projected once and then reused from cache_kv on every later step.\n",
    "    # Returns (output (1, 1, 768), the (k, v) cache).\n",
    "\n",
    "    # 0.125 == 1 / sqrt(64): fold the attention scale into q up front.\n",
    "    q = self.q(x).reshape(1, 1, 12, 64).transpose(1, 2) * 0.125\n",
    "    if cache_kv:\n",
    "        k, v = cache_kv\n",
    "    else:\n",
    "        k = self.k(kv).reshape(1, -1, 12, 64).transpose(1, 2)\n",
    "        v = self.v(kv).reshape(1, -1, 12, 64).transpose(1, 2)\n",
    "        cache_kv = k, v\n",
    "\n",
    "    # Merge batch and head axes so one bmm covers all 12 heads.\n",
    "    q = q.reshape(12, -1, 64)\n",
    "    k = k.reshape(12, -1, 64)\n",
    "    v = v.reshape(12, -1, 64)\n",
    "\n",
    "    atten = q.bmm(k.transpose(1, 2)).softmax(dim=-1).bmm(v).reshape(\n",
    "        1, 12, 1, 64).transpose(1, 2).reshape(1, 1, 768)\n",
    "\n",
    "    atten = self.out(atten)\n",
    "\n",
    "    return atten, cache_kv\n",
    "\n",
    "\n",
    "# Check against the HuggingFace encoder_attn layer with a pre-filled\n",
    "# cache (1500 = encoder sequence length used throughout this notebook).\n",
    "x = torch.randn(1, 1, 768)\n",
    "kv = torch.randn(1, 1500, 768)\n",
    "cache_kv = torch.randn(1, 12, 1500, 64), torch.randn(1, 12, 1500, 64)\n",
    "\n",
    "out1, (k1, v1) = forward_cross_atten(decoder.layer[0].cross_atten, x, kv,\n",
    "                                     cache_kv)\n",
    "out2, _, (k2, v2) = pretrained.layers[0].encoder_attn(x, kv, cache_kv)\n",
    "\n",
    "(out1 == out2).all(), (k1 == k2).all(), (v1 == v2).all()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor(True), tensor(True), tensor(True), tensor(True), tensor(True))"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def forward_layer(self, x, kv, cache_kv):\n",
    "    # One decoder layer: pre-norm self-attention and pre-norm cross-\n",
    "    # attention, each wrapped in a residual connection, followed by\n",
    "    # self.s plus a residual (self.s is defined in decoder.ipynb --\n",
    "    # presumably the feed-forward sub-layer; TODO confirm there).\n",
    "    # cache_kv is a flat 4-tuple (self_k, self_v, cross_k, cross_v) or\n",
    "    # None; the returned cache uses the same flat layout.\n",
    "    next_cache_kv = ()\n",
    "\n",
    "    res = x\n",
    "    x, _cache = forward_atten(self.atten,\n",
    "                              self.norm1(x),\n",
    "                              cache_kv=cache_kv[:2] if cache_kv else None)\n",
    "    x = x + res\n",
    "    # _cache is a (k, v) pair; += flattens it into the 4-tuple.\n",
    "    next_cache_kv += _cache\n",
    "\n",
    "    res = x\n",
    "    x, _cache = forward_cross_atten(\n",
    "        self.cross_atten,\n",
    "        self.norm2(x),\n",
    "        kv,\n",
    "        cache_kv=cache_kv[2:] if cache_kv else None)\n",
    "    x = x + res\n",
    "    next_cache_kv += _cache\n",
    "\n",
    "    return self.s(x) + x, next_cache_kv\n",
    "\n",
    "\n",
    "# Check one full layer against the HuggingFace layer: output plus all\n",
    "# four cached tensors must match exactly.\n",
    "x = torch.randn(1, 1, 768)\n",
    "kv = torch.randn(1, 1500, 768)\n",
    "cache_kv = torch.randn(1, 12, 2, 64), torch.randn(1, 12, 2, 64), torch.randn(\n",
    "    1, 12, 1500, 64), torch.randn(1, 12, 1500, 64)\n",
    "\n",
    "out1, (k1, v1, ck1, cv1) = forward_layer(decoder.layer[0], x, kv, cache_kv)\n",
    "out2, (k2, v2, ck2, cv2) = pretrained.layers[0](x,\n",
    "                                                encoder_hidden_states=kv,\n",
    "                                                past_key_value=cache_kv)\n",
    "\n",
    "(out1 == out2).all(), (k1 == k2).all(), (v1 == v2).all(), (ck1 == ck2).all(), (\n",
    "    cv1 == cv2).all()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 tensor(True)\n",
      "1 tensor(True)\n",
      "2 tensor(True)\n",
      "3 tensor(True)\n",
      "0 tensor(True)\n",
      "1 tensor(True)\n",
      "2 tensor(True)\n",
      "3 tensor(True)\n",
      "0 tensor(True)\n",
      "1 tensor(True)\n",
      "2 tensor(True)\n",
      "3 tensor(True)\n",
      "0 tensor(True)\n",
      "1 tensor(True)\n",
      "2 tensor(True)\n",
      "3 tensor(True)\n",
      "0 tensor(True)\n",
      "1 tensor(True)\n",
      "2 tensor(True)\n",
      "3 tensor(True)\n",
      "0 tensor(True)\n",
      "1 tensor(True)\n",
      "2 tensor(True)\n",
      "3 tensor(True)\n",
      "0 tensor(True)\n",
      "1 tensor(True)\n",
      "2 tensor(True)\n",
      "3 tensor(True)\n",
      "0 tensor(True)\n",
      "1 tensor(True)\n",
      "2 tensor(True)\n",
      "3 tensor(True)\n",
      "0 tensor(True)\n",
      "1 tensor(True)\n",
      "2 tensor(True)\n",
      "3 tensor(True)\n",
      "0 tensor(True)\n",
      "1 tensor(True)\n",
      "2 tensor(True)\n",
      "3 tensor(True)\n",
      "0 tensor(True)\n",
      "1 tensor(True)\n",
      "2 tensor(True)\n",
      "3 tensor(True)\n",
      "0 tensor(True)\n",
      "1 tensor(True)\n",
      "2 tensor(True)\n",
      "3 tensor(True)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor(True)"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def forward_decoder(self, x, kv, cache_kv):\n",
    "    # Full decoder forward for token ids x of shape (1, seq).\n",
    "    # kv is the encoder output; cache_kv is a per-layer tuple of flat\n",
    "    # (self_k, self_v, cross_k, cross_v) 4-tuples, or None.\n",
    "    # Positional embeddings continue where the cache left off: offset\n",
    "    # by the cached self-attention length (dim 2 of the first cached k).\n",
    "    pos_offset = cache_kv[0][0].shape[2] if cache_kv else 0\n",
    "\n",
    "    x = self.embed(x) + self.embed_pos.weight[pos_offset:pos_offset +\n",
    "                                              x.shape[1]]\n",
    "\n",
    "    next_cache_kv = []\n",
    "    for i, layer in enumerate(self.layer):\n",
    "        x, _cache = forward_layer(layer, x, kv,\n",
    "                                  cache_kv[i] if cache_kv else None)\n",
    "        next_cache_kv.append(_cache)\n",
    "\n",
    "    return self.norm(x), tuple(next_cache_kv)\n",
    "\n",
    "\n",
    "# End-to-end check against the HuggingFace decoder for one step.\n",
    "# Note: [(...)]*12 makes all 12 layers share the same cache tensors,\n",
    "# which is fine here because the cache is only read, never mutated.\n",
    "x = torch.LongTensor([[95]])\n",
    "kv = torch.randn(1, 1500, 768)\n",
    "\n",
    "cache_kv = [(torch.randn(1, 12, 2, 64), torch.randn(1, 12, 2, 64),\n",
    "             torch.randn(1, 12, 1500, 64), torch.randn(1, 12, 1500, 64))] * 12\n",
    "cache_kv = tuple(cache_kv)\n",
    "\n",
    "out1, cache1 = forward_decoder(decoder, x, kv, cache_kv)\n",
    "out2 = pretrained(x,\n",
    "                  encoder_hidden_states=kv,\n",
    "                  past_key_values=cache_kv,\n",
    "                  use_cache=True,\n",
    "                  output_attentions=False,\n",
    "                  output_hidden_states=False,\n",
    "                  return_dict=True)\n",
    "\n",
    "# Per-layer, per-tensor cache comparison, then the hidden-state check.\n",
    "for i, j in zip(cache1, out2.past_key_values):\n",
    "    for t in range(4):\n",
    "        print(t, (i[t] == j[t]).all())\n",
    "\n",
    "(out1 == out2.last_hidden_state).all()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "simple",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
