{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5662283d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# step1 导入相关包\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import time\n",
    "import torch\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8b5896d2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# step2: load the tokenizer and the model (downloads weights on first run)\n",
    "model_name = 'gpt2'\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
    "model = AutoModelForCausalLM.from_pretrained(model_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6a41fd6b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Print the model to inspect its architecture (layers, dimensions)\n",
    "print(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "324ba7e5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# step3: the model is instantiated; tokenize a prompt so it can generate text\n",
    "prompt = \"The quick brown fox jumped over the\"\n",
    "inputs = tokenizer(prompt, return_tensors=\"pt\")\n",
    "inputs\n",
    "\n",
    "# Expected output for this 7-token prompt:\n",
    "# {'input_ids': tensor([[  464,  2068,  7586, 21831, 11687,   625,   262]]),\n",
    "#  'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1]])}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e83b0696",
   "metadata": {},
   "outputs": [],
   "source": [
    "# step4: pass the tokenized inputs through the model\n",
    "# torch.no_grad() tells PyTorch not to build an autograd graph -- this is\n",
    "# pure inference, so we skip the extra memory gradients would require\n",
    "with torch.no_grad():\n",
    "    outputs = model(**inputs)\n",
    "    \n",
    "logits = outputs.logits\n",
    "print(logits.shape)  # (batch, sequence_length, vocab_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "74139c64",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Take the logits at the last position only, then argmax over the\n",
    "# vocabulary dimension to pick the single most likely next token (greedy)\n",
    "last_logits = logits[0, -1, :]\n",
    "next_token_id = last_logits.argmax()\n",
    "next_token_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "55fa52e5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Decode the predicted token id back into a plain string\n",
    "tokenizer.decode(next_token_id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bb35f412",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Besides the argmax, we can also inspect the top-k candidate tokens\n",
    "top_k = torch.topk(last_logits, k=10)\n",
    "tokens = [tokenizer.decode(tk) for tk in top_k.indices]\n",
    "\n",
    "tokens"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c6aae47e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Append the newly generated token to the input ids and extend the\n",
    "# attention mask by one position -- these become the next step's inputs\n",
    "next_inputs = {\n",
    "    \"input_ids\": torch.cat(\n",
    "        [inputs[\"input_ids\"], next_token_id.reshape((1, 1))], dim=1),\n",
    "    \"attention_mask\": torch.cat(\n",
    "        [inputs[\"attention_mask\"], torch.tensor([[1]])], dim=1),\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ecd2af69",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Print the extended input ids and their new shape (one token longer)\n",
    "print(next_inputs[\"input_ids\"], \n",
    "      next_inputs[\"input_ids\"].shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "55e42d56",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predict the next token for the given inputs\n",
    "def generate_token(inputs):\n",
    "    \"\"\"Run one forward pass and return the greedy (argmax) next-token id.\n",
    "\n",
    "    inputs: dict with 'input_ids' and 'attention_mask' tensors, as\n",
    "    produced by the tokenizer (or by the next_inputs dicts below).\n",
    "    \"\"\"\n",
    "    with torch.no_grad():  # inference only -- no autograd graph needed\n",
    "        outputs = model(**inputs)\n",
    "    \n",
    "    # BUGFIX: was `logtis = outputs.logits` (typo); the next line then\n",
    "    # silently read the stale *global* `logits` from an earlier cell,\n",
    "    # so the function ignored its argument entirely\n",
    "    logits = outputs.logits\n",
    "    last_logits = logits[0, -1, :]  # logits at the final sequence position\n",
    "    next_token_id = last_logits.argmax()  # greedy selection\n",
    "    return next_token_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "40b4dbe3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Using the generate_token helper, generate some tokens and time each step\n",
    "generated_tokens = []\n",
    "next_inputs = inputs  # BUGFIX: was `input` (the builtin function), not our dict\n",
    "duration_s = []\n",
    "for _ in range(10):\n",
    "    t0 = time.time()\n",
    "    # BUGFIX: feed the growing `next_inputs`, not the original `inputs`;\n",
    "    # otherwise every iteration re-predicts the same first token\n",
    "    next_token_id = generate_token(next_inputs)\n",
    "    duration_s += [time.time() - t0]\n",
    "    \n",
    "    # BUGFIX: extend `next_inputs`, not `inputs`, so the sequence actually grows\n",
    "    next_inputs = {\n",
    "        \"input_ids\": torch.cat(\n",
    "            [next_inputs[\"input_ids\"], next_token_id.reshape((1, 1))],\n",
    "            dim=1\n",
    "        ),\n",
    "        \"attention_mask\": torch.cat(\n",
    "            [next_inputs[\"attention_mask\"], torch.tensor([[1]])],\n",
    "            dim=1\n",
    "        )\n",
    "    }\n",
    "    \n",
    "    next_token = tokenizer.decode(next_token_id)  # decode back to text\n",
    "    generated_tokens.append(next_token)  # and collect it in our list\n",
    "\n",
    "\n",
    "print(f\"{sum(duration_s)} s\")\n",
    "print(generated_tokens)\n",
    "\n",
    "# We just generated 10 tokens and measured how long each one took"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c08698c6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Next: how does per-token latency change as we generate more tokens?\n",
    "\n",
    "# Quick plot of what the duration curve looks like\n",
    "plt.plot(duration_s)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9a8e9917",
   "metadata": {},
   "outputs": [],
   "source": [
    "# As seen above, the input grows after every step: we keep appending more\n",
    "# tokens and recompute the full forward pass each time.\n",
    "\n",
    "# The timing curve also shows the first token is slightly slower -- the\n",
    "# underlying code warms up its caches on the first call.\n",
    "\n",
    "# Where does LLM inference cost come from? For any transformer model the\n",
    "# dominant compute bottleneck is the attention computation (in all its variants).\n",
    "\n",
    "# The q/k/v matrices ultimately scale in size with the input sequence length"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d3d1b81",
   "metadata": {},
   "outputs": [],
   "source": [
    "# New helper: generate_token_with_past also returns outputs.past_key_values\n",
    "# (the cached attention keys/values), so the next step can reuse them\n",
    "\n",
    "def generate_token_with_past(inputs):\n",
    "    \"\"\"Greedy next-token prediction; also return the KV cache.\"\"\"\n",
    "    with torch.no_grad():\n",
    "        outputs = model(**inputs)\n",
    "    \n",
    "    logits = outputs.logits\n",
    "    last_logits = logits[0, -1, :]\n",
    "    next_token_id = last_logits.argmax()\n",
    "    return next_token_id, outputs.past_key_values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d6b6e6b8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same loop, but this time actually reusing the past key/values (KV cache)\n",
    "generated_tokens = []\n",
    "next_inputs = inputs  # BUGFIX: was `input` (the builtin function), not our dict\n",
    "duration_cached_s = []\n",
    "for _ in range(10):\n",
    "    t0 = time.time()\n",
    "    # BUGFIX: feed the evolving `next_inputs`, not the original `inputs`,\n",
    "    # otherwise the cache is never used and the same token repeats\n",
    "    next_token_id, past_key_values = generate_token_with_past(next_inputs)\n",
    "    duration_cached_s += [time.time() - t0]\n",
    "    \n",
    "    next_inputs = {\n",
    "        # only the new token is passed -- earlier ids are covered by the cache\n",
    "        \"input_ids\": next_token_id.reshape((1, 1)),\n",
    "        # BUGFIX: extend next_inputs' mask, not the original inputs' mask\n",
    "        \"attention_mask\": torch.cat(\n",
    "            [next_inputs[\"attention_mask\"], torch.tensor([[1]])],\n",
    "            dim=1\n",
    "        ),\n",
    "        \"past_key_values\": past_key_values  # KV cache from the previous step\n",
    "    }\n",
    "    \n",
    "    next_token = tokenizer.decode(next_token_id)  # decode back to text\n",
    "    generated_tokens.append(next_token)  # and collect it in our list\n",
    "\n",
    "\n",
    "print(f\"{sum(duration_cached_s)} s\")\n",
    "print(generated_tokens)\n",
    "\n",
    "# With the cache this run took ~0.87 s total vs ~1.7 s for the uncached loop"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "10750037",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the cached timing curve against the uncached one\n",
    "plt.plot(duration_s)\n",
    "plt.plot(duration_cached_s)\n",
    "plt.show()\n",
    "\n",
    "# Two lines: the blue one is the per-token time WITHOUT the KV cache"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5 (tags/v3.13.5:6cb20a2, Jun 11 2025, 16:15:46) [MSC v.1943 64 bit (AMD64)]"
  },
  "vscode": {
   "interpreter": {
    "hash": "340a546b2c4c9cf5d23eb4a2a4e78e923e0b7afe4d00162258c4442b3ee3b061"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
