{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ubuntu/miniconda3/envs/serve/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "from transformers import GPT2LMHeadModel, AutoTokenizer\n",
    "\n",
    "model = GPT2LMHeadModel.from_pretrained(\"gpt2\")\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n",
    "tokenizer.pad_token_id = tokenizer.eos_token_id\n",
    "tokenizer.pad_token = tokenizer.eos_token"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Inference time: 1.781\n",
      "tensor([ 464, 3139,  286, 4881,  220, 1849,  271,  262, 3139,  286,  262, 4141,\n",
      "        2066,   13,  383, 4141, 2066,  318,  257, 1181,  286,  262, 1242, 2422,\n",
      "         290, 3034, 1080,   13,  383, 4141, 2066,  318,  257, 1181,  286,  262,\n",
      "        1242, 2422,  290, 3034, 1080,   13,  383, 4141, 2066,  318,  257, 1181,\n",
      "         286,  262, 1242, 2422,  290, 3034, 1080])\n"
     ]
    }
   ],
   "source": [
    "encoded = tokenizer(\"The capital of France \", return_tensors=\"pt\")\n",
    "import time\n",
    "st = time.perf_counter()\n",
    "generate_output = model.generate(**encoded, use_cache=True, return_dict_in_generate=True, max_new_tokens=50)\n",
    "print(f\"Inference time: {time.perf_counter()-st:.3f}\")\n",
    "print(generate_output.sequences[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[ 464, 3139,  286, 4881,  220]]), 'attention_mask': tensor([[1, 1, 1, 1, 1]])}\n",
      "tensor([ 464, 3139,  286, 4881,  220, 1849])\n"
     ]
    }
   ],
   "source": [
    "model_config={\n",
    "    \"use_cache\":True,\n",
    "    \"return_dict_in_generate\":True,\n",
    "    \"max_new_tokens\":1,\n",
    "}\n",
    "print(encoded)\n",
    "output = model.generate(**encoded, **model_config)\n",
    "print(output.sequences[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "old_update = model._update_model_kwargs_for_generation\n",
    "extracted = {}\n",
    "import types\n",
    "def new_func(self,*args, **kwargs):\n",
    "    extracted[\"past_key_values\"] = args[0][\"past_key_values\"]\n",
    "    return old_update(*args, **kwargs)\n",
    "\n",
    "model._update_model_kwargs_for_generation = types.MethodType(new_func, model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 5, 64]\n",
      "tensor([ 464, 3139,  286, 4881,  220, 1849])\n"
     ]
    }
   ],
   "source": [
    "\n",
    "output = model.generate(**encoded, **model_config)\n",
    "print([len(extracted[\"past_key_values\"]), len(extracted[\"past_key_values\"][0])] + list(extracted[\"past_key_values\"][0][0].size()))\n",
    "print(output.sequences[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([ 464, 3139,  286, 4881,  220, 1849,  271])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "encoded = {\n",
    "    \"input_ids\": output.sequences,\n",
    "    \"attention_mask\": torch.concat((encoded[\"attention_mask\"], torch.ones((1,1), dtype=torch.int64)), dim=1),\n",
    "    \"past_key_values\": extracted[\"past_key_values\"],\n",
    "}\n",
    "# print(encoded)\n",
    "output = model.generate(**encoded, **model_config)\n",
    "print(output.sequences[0])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 5, 64]\n",
      "[12, 2, 1, 12, 6, 64]\n",
      "[12, 2, 1, 12, 7, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 8, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 9, 64]\n",
      "[12, 2, 1, 12, 10, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 11, 64]\n",
      "[12, 2, 1, 12, 12, 64]\n",
      "[12, 2, 1, 12, 13, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 14, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 15, 64]\n",
      "[12, 2, 1, 12, 16, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 17, 64]\n",
      "[12, 2, 1, 12, 18, 64]\n",
      "[12, 2, 1, 12, 19, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 20, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 21, 64]\n",
      "[12, 2, 1, 12, 22, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 23, 64]\n",
      "[12, 2, 1, 12, 24, 64]\n",
      "[12, 2, 1, 12, 25, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 26, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 27, 64]\n",
      "[12, 2, 1, 12, 28, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 29, 64]\n",
      "[12, 2, 1, 12, 30, 64]\n",
      "[12, 2, 1, 12, 31, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 32, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 33, 64]\n",
      "[12, 2, 1, 12, 34, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 35, 64]\n",
      "[12, 2, 1, 12, 36, 64]\n",
      "[12, 2, 1, 12, 37, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 38, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 39, 64]\n",
      "[12, 2, 1, 12, 40, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 41, 64]\n",
      "[12, 2, 1, 12, 42, 64]\n",
      "[12, 2, 1, 12, 43, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 44, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 45, 64]\n",
      "[12, 2, 1, 12, 46, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 47, 64]\n",
      "[12, 2, 1, 12, 48, 64]\n",
      "[12, 2, 1, 12, 49, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 50, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 51, 64]\n",
      "[12, 2, 1, 12, 52, 64]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 53, 64]\n",
      "[12, 2, 1, 12, 54, 64]\n",
      "Inference time: 1.860\n",
      "tensor([ 464, 3139,  286, 4881,  220, 1849,  271,  262, 3139,  286,  262, 4141,\n",
      "        2066,   13,  383, 4141, 2066,  318,  257, 1181,  286,  262, 1242, 2422,\n",
      "         290, 3034, 1080,   13,  383, 4141, 2066,  318,  257, 1181,  286,  262,\n",
      "        1242, 2422,  290, 3034, 1080,   13,  383, 4141, 2066,  318,  257, 1181,\n",
      "         286,  262, 1242, 2422,  290, 3034, 1080])\n"
     ]
    }
   ],
   "source": [
    "encoded = tokenizer(\"The capital of France \", return_tensors=\"pt\")\n",
    "st = time.perf_counter()\n",
    "for _ in range(50):\n",
    "    output = model.generate(**encoded, **model_config)\n",
    "    encoded = {\n",
    "        \"input_ids\": output.sequences,\n",
    "        \"attention_mask\": torch.concat((encoded[\"attention_mask\"], torch.ones((1,1), dtype=torch.int64)), dim=1),\n",
    "        \"past_key_values\": extracted[\"past_key_values\"],\n",
    "    }\n",
    "    print([len(extracted[\"past_key_values\"]), len(extracted[\"past_key_values\"][0])] + list(extracted[\"past_key_values\"][0][0].size()))\n",
    "print(f\"Inference time: {time.perf_counter()-st:.3f}\")\n",
    "print(output.sequences[0])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "assert all(generate_output.sequences[0] == output.sequences[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.functional as F"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[  464,  3139,   286,  4881,   318,   220],\n",
      "        [50256, 32423, 49696,   457, 38863, 18042]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1],\n",
      "        [0, 1, 1, 1, 1, 1]])}\n"
     ]
    }
   ],
   "source": [
    "tokenizer.padding_side=\"left\"\n",
    "encoded = tokenizer([\"The capital of France is \", \"Die Hauptstadt von\"], return_tensors=\"pt\", padding=\"longest\")\n",
    "print(encoded)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 2, 12, 6, 64]\n",
      "tensor([ 464, 3139,  286, 4881,  318,  220, 1849])\n",
      "tensor([50256, 32423, 49696,   457, 38863, 18042,   509])\n"
     ]
    }
   ],
   "source": [
    "output = model.generate(**encoded, **model_config)\n",
    "print([len(extracted[\"past_key_values\"]), len(extracted[\"past_key_values\"][0])] + list(extracted[\"past_key_values\"][0][0].size()))\n",
    "print(output.sequences[0])\n",
    "print(output.sequences[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "import copy\n",
    "padded_kv_cache = copy.deepcopy(extracted[\"past_key_values\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([32423, 49696,   457, 38863, 18042,   509])\n"
     ]
    }
   ],
   "source": [
    "encoded = tokenizer([\"Die Hauptstadt von\"], return_tensors=\"pt\")\n",
    "output = model.generate(**encoded, **model_config)\n",
    "print(output.sequences[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 1, 12, 5, 64]\n"
     ]
    }
   ],
   "source": [
    "def print_kv_dims(kv):\n",
    "    print([len(kv), len(kv[0])] + list(kv[0][0].size()))\n",
    "print_kv_dims(extracted[\"past_key_values\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 2, 2, 12, 6, 64]\n"
     ]
    }
   ],
   "source": [
    "\n",
    "print_kv_dims(padded_kv_cache)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[ 0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00],\n",
      "        [ 0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00],\n",
      "        [ 0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00],\n",
      "        [ 0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,\n",
      "          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00],\n",
      "        [-4.7684e-07,  1.1921e-06,  2.3842e-07,  1.1921e-07,  5.3644e-07,\n",
      "         -2.3842e-07,  1.1921e-06,  0.0000e+00, -5.9605e-07,  8.9407e-08,\n",
      "         -8.9407e-08,  1.1921e-07, -6.7055e-08,  3.5763e-07,  4.7684e-07,\n",
      "          2.3842e-07, -7.1526e-07,  2.9802e-07,  4.7684e-07, -4.7684e-07,\n",
      "          4.7684e-07, -4.4703e-08, -5.9605e-07,  1.1921e-07,  0.0000e+00,\n",
      "          1.7881e-07, -1.7881e-07, -7.7486e-07,  2.3842e-07,  1.1921e-07,\n",
      "          7.1526e-07, -1.1921e-07, -7.1526e-07,  5.9605e-08,  5.3644e-07,\n",
      "          2.3842e-07,  4.7684e-07, -5.9605e-07, -3.5763e-07,  5.9605e-08,\n",
      "          4.1723e-07,  3.5763e-07,  1.1921e-06, -2.3842e-07,  8.9407e-07,\n",
      "          9.5367e-07, -4.1723e-07,  2.3842e-07,  7.4506e-08, -4.7684e-07,\n",
      "          2.0862e-07, -5.9605e-08,  1.1921e-07, -3.5763e-07, -1.7881e-07,\n",
      "          9.5367e-07, -8.3074e-07, -3.3528e-08, -2.9802e-07, -3.5763e-07,\n",
      "          1.1921e-07,  1.7881e-07, -2.6822e-07, -9.5367e-07]])\n"
     ]
    }
   ],
   "source": [
    "print(extracted[\"past_key_values\"][0][0][0,0,...] - padded_kv_cache[0][0][1,0,1:,:])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([-4.7684e-07,  1.1921e-06,  2.3842e-07,  1.1921e-07,  5.3644e-07,\n",
      "        -2.3842e-07,  1.1921e-06,  0.0000e+00, -5.9605e-07,  8.9407e-08,\n",
      "        -8.9407e-08,  1.1921e-07, -6.7055e-08,  3.5763e-07,  4.7684e-07,\n",
      "         2.3842e-07, -7.1526e-07,  2.9802e-07,  4.7684e-07, -4.7684e-07,\n",
      "         4.7684e-07, -4.4703e-08, -5.9605e-07,  1.1921e-07,  0.0000e+00,\n",
      "         1.7881e-07, -1.7881e-07, -7.7486e-07,  2.3842e-07,  1.1921e-07,\n",
      "         7.1526e-07, -1.1921e-07, -7.1526e-07,  5.9605e-08,  5.3644e-07,\n",
      "         2.3842e-07,  4.7684e-07, -5.9605e-07, -3.5763e-07,  5.9605e-08,\n",
      "         4.1723e-07,  3.5763e-07,  1.1921e-06, -2.3842e-07,  8.9407e-07,\n",
      "         9.5367e-07, -4.1723e-07,  2.3842e-07,  7.4506e-08, -4.7684e-07,\n",
      "         2.0862e-07, -5.9605e-08,  1.1921e-07, -3.5763e-07, -1.7881e-07,\n",
      "         9.5367e-07, -8.3074e-07, -3.3528e-08, -2.9802e-07, -3.5763e-07,\n",
      "         1.1921e-07,  1.7881e-07, -2.6822e-07, -9.5367e-07])\n",
      "tensor([-1.9769,  2.8057,  1.7984,  1.7875,  0.5844,  2.1871,  1.4393, -0.5568,\n",
      "        -0.9254, -0.3672,  0.2673,  1.1119, -0.0763,  1.2123, -1.3547,  0.5947,\n",
      "         0.7469, -0.6633,  1.7078, -0.8085, -1.6846,  0.0351, -1.0112, -0.9357,\n",
      "         0.3067, -0.8318, -0.5093, -0.7956, -0.5246, -1.0272,  0.7018, -0.6455,\n",
      "        -2.2052,  0.5388,  0.8386, -0.5252,  1.3803,  1.6268,  1.2225, -0.5823,\n",
      "         0.5009,  1.0283, -1.7727,  1.0943,  0.5510, -2.3148,  0.8457, -1.1288,\n",
      "         0.0967,  1.5846,  0.4326, -0.2651,  1.6881,  0.5560, -0.3775,  2.8351,\n",
      "        -0.0109,  0.0034,  0.8708,  0.7571, -1.3306, -0.8162,  0.2832,  2.1278])\n",
      "tensor([-1.9769,  2.8057,  1.7984,  1.7875,  0.5844,  2.1871,  1.4393, -0.5568,\n",
      "        -0.9254, -0.3672,  0.2673,  1.1119, -0.0763,  1.2123, -1.3547,  0.5947,\n",
      "         0.7469, -0.6633,  1.7078, -0.8085, -1.6846,  0.0351, -1.0112, -0.9357,\n",
      "         0.3067, -0.8318, -0.5093, -0.7956, -0.5246, -1.0272,  0.7018, -0.6455,\n",
      "        -2.2052,  0.5388,  0.8386, -0.5252,  1.3803,  1.6268,  1.2225, -0.5823,\n",
      "         0.5009,  1.0283, -1.7727,  1.0943,  0.5510, -2.3148,  0.8457, -1.1288,\n",
      "         0.0967,  1.5846,  0.4326, -0.2651,  1.6881,  0.5560, -0.3775,  2.8351,\n",
      "        -0.0109,  0.0034,  0.8708,  0.7571, -1.3306, -0.8162,  0.2832,  2.1278])\n"
     ]
    }
   ],
   "source": [
    "print(extracted[\"past_key_values\"][0][0][0,0,-1] - padded_kv_cache[0][0][1,0,-1,:])\n",
    "print(extracted[\"past_key_values\"][0][0][0,0,-1])\n",
    "print(padded_kv_cache[0][0][1,0,-1,:])\n",
    "# Could the difference be the leakiness of the attention mask in the attention block? mask is not binary but 1 and float32.min\n",
    "# see https://github.com/huggingface/transformers/blob/c3ecf2d95d6a9f614d968af2f8b4e317f381e5ec/src/transformers/models/gpt2/modeling_gpt2.py#L823C82-L823C82"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[ 464, 3139,  286, 4881],\n",
      "        [ 464, 3139,  286, 4881]]), 'attention_mask': tensor([[1, 1, 1, 1],\n",
      "        [1, 1, 1, 1]])}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([ 464, 3139,  286, 4881,   11, 6342,   11,  318, 1363])\n",
      "tensor([ 464, 3139,  286, 4881,   11, 6342,   11,  318, 1363])\n",
      "{'input_ids': tensor([[50256, 50256,   464,  3139,   286,  4881]]), 'attention_mask': tensor([[0, 0, 1, 1, 1, 1]])}\n",
      "tensor([50256, 50256,   464,  3139,   286,  4881,    11,  6342,    11,   318,\n",
      "         1363])\n",
      "The capital of France, Paris, is home\n"
     ]
    }
   ],
   "source": [
    "model_config[\"max_new_tokens\"]=5\n",
    "encoded = tokenizer([\"The capital of France\", \"The capital of France\"], return_tensors=\"pt\")\n",
    "print(encoded)\n",
    "output = model.generate(**encoded, **model_config)\n",
    "print(output.sequences[0])\n",
    "print(output.sequences[1])\n",
    "\n",
    "encoded = tokenizer([\"The capital of France\"], return_tensors=\"pt\", max_length=6, padding='max_length', truncation=True)\n",
    "print(encoded)\n",
    "output = model.generate(**encoded, **model_config)\n",
    "print(output.sequences[0])\n",
    "print(tokenizer.decode(output.sequences[0],skip_special_tokens=True))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Inference time: 0.414\n",
      "tensor([ 464, 3139,  286, 4881,   11, 6342,   11,  318, 1363])\n",
      "tensor([ 464, 3139,  286, 4881,   11, 6342,   11,  318, 1363])\n"
     ]
    }
   ],
   "source": [
    "# One token per generate() call; the KV cache captured by the patched\n",
    "# _update_model_kwargs_for_generation hook is fed back in manually each step.\n",
    "model_config[\"max_new_tokens\"] = 1\n",
    "encoded = tokenizer([\"The capital of France\", \"The capital of France\"], return_tensors=\"pt\")\n",
    "st = time.perf_counter()\n",
    "for _ in range(5):\n",
    "    output = model.generate(**encoded, **model_config)\n",
    "    # Derive the batch size from the output instead of hard-coding 2, so the\n",
    "    # cell keeps working if the prompt batch above changes.\n",
    "    batch_size = output.sequences.size(0)\n",
    "    encoded = {\n",
    "        \"input_ids\": output.sequences,\n",
    "        \"attention_mask\": torch.concat((encoded[\"attention_mask\"], torch.ones((batch_size, 1), dtype=torch.int64)), dim=1),\n",
    "        \"past_key_values\": extracted[\"past_key_values\"],\n",
    "    }\n",
    "print(f\"Inference time: {time.perf_counter()-st:.3f}\")\n",
    "print(output.sequences[0])\n",
    "print(output.sequences[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[50256, 50256, 50256, 50256, 50256,   464,  3139,   286,  4881],\n",
      "        [  464,  3139,   286,  4881,    11,  6342,    11,   318,  1363]]), 'attention_mask': tensor([[0, 0, 0, 0, 0, 1, 1, 1, 1],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1]])}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([50256, 50256, 50256, 50256, 50256,   464,  3139,   286,  4881,    11,\n",
      "         6342,    11,   318,  1363])\n",
      "tensor([ 464, 3139,  286, 4881,   11, 6342,   11,  318, 1363,  284,  262,  995,\n",
      "         338, 4387])\n",
      "{'input_ids': tensor([[50256, 50256,   464,  3139,   286,  4881]]), 'attention_mask': tensor([[0, 0, 1, 1, 1, 1]])}\n",
      "tensor([50256, 50256,   464,  3139,   286,  4881,    11,  6342,    11,   318,\n",
      "         1363])\n",
      "The capital of France, Paris, is home\n"
     ]
    }
   ],
   "source": [
    "# Left-padded batch: the shorter prompt gets pad tokens with a zeroed\n",
    "# attention mask, as shown by print() below.\n",
    "model_config[\"max_new_tokens\"] = 5\n",
    "padded_batch = tokenizer([\"The capital of France\", \"The capital of France, Paris, is home\"], return_tensors=\"pt\", padding=True)\n",
    "print(padded_batch)\n",
    "gen_out = model.generate(**padded_batch, **model_config)\n",
    "print(gen_out.sequences[0])\n",
    "print(gen_out.sequences[1])\n",
    "\n",
    "# Same experiment with a single prompt forced to a fixed length.\n",
    "padded_single = tokenizer([\"The capital of France\"], return_tensors=\"pt\", max_length=6, padding='max_length', truncation=True)\n",
    "print(padded_single)\n",
    "gen_out = model.generate(**padded_single, **model_config)\n",
    "print(gen_out.sequences[0])\n",
    "print(tokenizer.decode(gen_out.sequences[0], skip_special_tokens=True))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import copy\n",
    "\n",
    "# `copy` was used without being imported anywhere in the notebook, so this\n",
    "# cell raised NameError on a fresh kernel. Snapshot the captured KV cache so\n",
    "# later experiments cannot mutate it in place.\n",
    "padded_kv_cache = copy.deepcopy(extracted[\"past_key_values\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ubuntu/miniconda3/envs/serve/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "/home/ubuntu/miniconda3/envs/serve/lib/python3.10/site-packages/accelerate/utils/imports.py:245: UserWarning: Intel Extension for PyTorch 2.0 needs to work with PyTorch 2.0.*, but PyTorch 2.2.0.dev20230922+cu118 is found. Please switch to the matching version and run again.\n",
      "  warnings.warn(\n",
      "Loading checkpoint shards: 100%|██████████| 2/2 [00:01<00:00,  1.09it/s]\n",
      "/home/ubuntu/miniconda3/envs/serve/lib/python3.10/site-packages/transformers/generation/utils.py:1353: UserWarning: Using `max_length`'s default (20) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation.\n",
      "  warnings.warn(\n",
      "/home/ubuntu/miniconda3/envs/serve/lib/python3.10/site-packages/transformers/generation/utils.py:1452: UserWarning: You are calling .generate() with the `input_ids` being on a device type different than your model's device. `input_ids` is on cpu, whereas the model is on cuda. You may experience unexpected behaviors or slower generation. Please make sure that you have put `input_ids` to the correct device by calling for example input_ids = input_ids.to('cuda') before running `.generate()`.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[   1,  450, 7483,  310, 3444]]), 'attention_mask': tensor([[1, 1, 1, 1, 1]])}\n",
      "tensor([    1,   450,  7483,   310,  3444, 29892,  3681, 29892,   338,   263,\n",
      "         4272,   310,  6017,   749, 29892,  1616, 29892,   322,  9257, 29889])\n",
      "The capital of France, Paris, is a city of romance, art, and culture.\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained('meta-llama/Llama-2-7b-hf')\n",
    "# Set both pad_token_id and pad_token, mirroring the GPT-2 setup cell at the\n",
    "# top of the notebook (only the id was set here before, leaving the two\n",
    "# attributes inconsistent).\n",
    "tokenizer.pad_token_id = tokenizer.eos_token_id\n",
    "tokenizer.pad_token = tokenizer.eos_token\n",
    "model = AutoModelForCausalLM.from_pretrained(\n",
    "    'meta-llama/Llama-2-7b-hf',\n",
    "    device_map=\"balanced\",\n",
    "    low_cpu_mem_usage=True,\n",
    "    torch_dtype=torch.float16,\n",
    "    load_in_8bit=True,\n",
    "    )\n",
    "\n",
    "encoded = tokenizer([\"The capital of France\"], return_tensors=\"pt\", return_token_type_ids=False)\n",
    "print(encoded)\n",
    "output = model.generate(**encoded, use_cache=True, return_dict_in_generate=True)\n",
    "print(output.sequences[0])\n",
    "print(tokenizer.decode(output.sequences[0], skip_special_tokens=True))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Monkey-patch generate()'s kwargs-update hook so that each decoding step's\n",
    "# past_key_values is stashed in `extracted` for reuse outside generate().\n",
    "import types\n",
    "\n",
    "extracted = {}\n",
    "old_update = model._update_model_kwargs_for_generation\n",
    "\n",
    "def capture_kv_update(self, *args, **kwargs):\n",
    "    # The first positional argument carries the step's past_key_values;\n",
    "    # record it, then defer to the original (already-bound) implementation.\n",
    "    extracted[\"past_key_values\"] = args[0][\"past_key_values\"]\n",
    "    return old_update(*args, **kwargs)\n",
    "\n",
    "model._update_model_kwargs_for_generation = types.MethodType(capture_kv_update, model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[32, 2, 1, 32, 5, 128]\n",
      "tensor([    1,   450,  7483,   310,  3444, 29892])\n"
     ]
    }
   ],
   "source": [
    "# Shared generation settings: one new token per call, keep the KV cache,\n",
    "# and return a dict output so .sequences is available.\n",
    "model_config = {\n",
    "    \"use_cache\": True,\n",
    "    \"return_dict_in_generate\": True,\n",
    "    \"max_new_tokens\": 1,\n",
    "}\n",
    "output = model.generate(**encoded, **model_config)\n",
    "# Report the cache layout: [num layers, tensors per layer] + key tensor shape.\n",
    "kv = extracted[\"past_key_values\"]\n",
    "print([len(kv), len(kv[0])] + list(kv[0][0].size()))\n",
    "print(output.sequences[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([    1,   450,  7483,   310,  3444, 29892,  3681])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "# Resume generation from the previous call: reuse the grown sequence as\n",
    "# input_ids, extend the attention mask by one column, and hand the captured\n",
    "# KV cache back to generate().\n",
    "encoded = {\n",
    "    \"input_ids\": output.sequences,\n",
    "    \"attention_mask\": torch.cat((encoded[\"attention_mask\"], torch.ones((1, 1), dtype=torch.int64)), dim=1),\n",
    "    \"past_key_values\": extracted[\"past_key_values\"],\n",
    "}\n",
    "output = model.generate(**encoded, **model_config)\n",
    "print(output.sequences[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Inference time: 7.655\n",
      "tensor([    1,   450,  7483,   310,  3444, 29892,  3681, 29892,   338,   263,\n",
      "         4272,   310,  6017,   749, 29892,  1616, 29892,   322,  9257, 29889,\n",
      "          739,   338,   884,   263,  4272,   310, 13460, 29892,  9687, 29892,\n",
      "          322,  2090, 29889,  3681,   338,   263,  4272,   393,   756,  1554,\n",
      "          363, 14332, 29889, 26460,   366,   526,  3063,   363,   263,  6017,\n",
      "         7716,   679, 21694, 29892,   263])\n",
      "The capital of France, Paris, is a city of romance, art, and culture. It is also a city of fashion, food, and fun. Paris is a city that has something for everyone. Whether you are looking for a romantic getaway, a\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "\n",
    "# Token-by-token decoding driven from outside generate(): every iteration\n",
    "# emits one token (max_new_tokens=1), stops on EOS, and threads the captured\n",
    "# KV cache forward so earlier tokens are not recomputed.\n",
    "encoded = tokenizer(\"The capital of France\", return_tensors=\"pt\", return_token_type_ids=False)\n",
    "st = time.perf_counter()\n",
    "for _ in range(50):\n",
    "    output = model.generate(**encoded, **model_config)\n",
    "    if output.sequences[0][-1] == tokenizer.eos_token_id:\n",
    "        break\n",
    "    encoded = {\n",
    "        \"input_ids\": output.sequences,\n",
    "        \"attention_mask\": torch.cat((encoded[\"attention_mask\"], torch.ones((1, 1), dtype=torch.int64)), dim=1),\n",
    "        \"past_key_values\": extracted[\"past_key_values\"],\n",
    "    }\n",
    "print(f\"Inference time: {time.perf_counter()-st:.3f}\")\n",
    "print(output.sequences[0])\n",
    "print(tokenizer.decode(output.sequences[0], skip_special_tokens=True))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "serve",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
