{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "176a3451",
   "metadata": {},
   "outputs": [],
   "source": [
    "# step1 导入相关包\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import time\n",
    "import torch\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "de13b4ac",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Step 2: load the model and tokenizer.\n",
    "# GPT-2 is a small decoder-only LM, cheap enough for CPU demos.\n",
    "model_name ='gpt2'\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
    "model = AutoModelForCausalLM.from_pretrained(model_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "09bf1fd5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tokenize a single prompt into PyTorch tensors (input_ids, attention_mask).\n",
    "prompt = \"The quick brown fox jumped over the\"\n",
    "inputs = tokenizer(prompt, return_tensors=\"pt\")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "0fabc560",
   "metadata": {},
   "source": [
    "我们将扩展这个函数及其后续部分，使其不仅支持单个输入，还支持多个输入。\n",
    "为此需要引入填充标记（padding token）。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "007faf79",
   "metadata": {},
   "outputs": [],
   "source": [
    "# BUG FIX: generate_token_with_past was called below but never defined\n",
    "# anywhere in this notebook, so Restart & Run All raised a NameError.\n",
    "# Define it here (single-sequence analogue of generate_batch_tokens_with_past).\n",
    "def generate_token_with_past(inputs):\n",
    "    \"\"\"Run one forward pass and return (next_token_id, past_key_values).\n",
    "\n",
    "    Greedy decoding for a single sequence: picks the argmax token at the\n",
    "    last position and returns the KV cache so the next step does not\n",
    "    have to re-encode the whole sequence.\n",
    "    \"\"\"\n",
    "    with torch.no_grad():\n",
    "        outputs = model(**inputs)\n",
    "    last_logits = outputs.logits[0, -1, :]\n",
    "    next_token_id = last_logits.argmax()\n",
    "    return next_token_id, outputs.past_key_values\n",
    "\n",
    "def generate(inputs, max_tokens):\n",
    "    \"\"\"Greedily generate up to `max_tokens` tokens for a single prompt.\"\"\"\n",
    "    generated_tokens = []\n",
    "    next_inputs = inputs\n",
    "    for _ in range(max_tokens):\n",
    "        next_token_id, past_key_values = generate_token_with_past(next_inputs)\n",
    "        next_inputs = {\n",
    "            # Only the new token is fed in; earlier ids live in the KV cache.\n",
    "            \"input_ids\": next_token_id.reshape((1, 1)),\n",
    "            # Extend the attention mask by one position for the new token.\n",
    "            \"attention_mask\": torch.cat(\n",
    "                [next_inputs[\"attention_mask\"], torch.tensor([[1]])],\n",
    "                dim=1\n",
    "            ),\n",
    "            \"past_key_values\": past_key_values  # KV cache from the previous step\n",
    "        }\n",
    "\n",
    "        next_token = tokenizer.decode(next_token_id)  # decode and collect\n",
    "        generated_tokens.append(next_token)\n",
    "\n",
    "    return \"\".join(generated_tokens)\n",
    "\n",
    "tokens = generate(inputs, max_tokens=10)\n",
    "print(tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4c561789",
   "metadata": {},
   "outputs": [],
   "source": [
    "# To support multiple inputs, the model and tokenizer need a small tweak\n",
    "# to introduce a padding token (GPT-2 ships without one).\n",
    "\n",
    "# Define PAD token as EOS token\n",
    "tokenizer.pad_token = tokenizer.eos_token\n",
    "model.config.pad_token_id = model.config.eos_token_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d32c012",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Choose which side padding / truncation is applied to.\n",
    "# Left padding keeps the LAST position of every row a real token,\n",
    "# which is what the logits[:, -1, :] selection below relies on.\n",
    "tokenizer.padding_side = \"left\"  # or \"right\"\n",
    "tokenizer.truncation_side = \"left\"  # or \"right\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6a54a303",
   "metadata": {},
   "outputs": [],
   "source": [
    "# A list of three prompts instead of a single prompt.\n",
    "prompts = [\n",
    "    \"The quick brown fox jumped over the\",\n",
    "    \"The rain in Spain stays mainly in the\",\n",
    "    \"What comes up must\"\n",
    "]\n",
    "\n",
    "# padding=True pads every row to the longest prompt in the batch.\n",
    "inputs = tokenizer(prompts, padding=True, return_tensors=\"pt\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "953ffa52",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the padded batch: token ids plus the attention-mask shape.\n",
    "print(f\"input_ids: {inputs['input_ids']}\")\n",
    "print(\"shape\", inputs[\"attention_mask\"].shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "16e22039",
   "metadata": {},
   "outputs": [],
   "source": [
    "# position_ids give the model each token's ordinal position in its row.\n",
    "# cumsum over the mask numbers the real tokens 0,1,2,...; padding slots\n",
    "# (mask == 0) would end up as -1, so they are overwritten with a dummy 1\n",
    "# (they are excluded from attention anyway).\n",
    "attention_mask = inputs[\"attention_mask\"]\n",
    "position_ids = attention_mask.long().cumsum(-1) - 1\n",
    "position_ids = position_ids.masked_fill_(attention_mask == 0, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e98bbc0a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One forward pass for the whole batch; no_grad skips autograd bookkeeping.\n",
    "with torch.no_grad():\n",
    "    outputs = model(position_ids=position_ids, **inputs)\n",
    "    \n",
    "logits = outputs.logits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9036afeb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instead of computing logits for a single sequence, select the last\n",
    "# position across the whole batch so that every batch element gets its\n",
    "# own next-token id.\n",
    "last_logits = logits[:, -1, :]\n",
    "next_token_ids = last_logits.argmax(dim=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "59039e09",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One greedy next-token id per prompt in the batch.\n",
    "print(next_token_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ef4977f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Decode the batch of next-token ids into strings.\n",
    "next_tokens = tokenizer.batch_decode(next_token_ids)\n",
    "next_tokens"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f676f94",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_batch_tokens_with_past(inputs):\n",
    "    \"\"\"One forward pass over a batch; return greedy next-token ids and the KV cache.\"\"\"\n",
    "    with torch.no_grad():\n",
    "        outputs = model(**inputs)\n",
    "\n",
    "    # Greedy decoding: argmax over the vocabulary at each row's final position.\n",
    "    next_token_ids = outputs.logits[:, -1, :].argmax(dim=1)\n",
    "    return next_token_ids, outputs.past_key_values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "07b66e0b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_batch(inputs, max_tokens):\n",
    "    \"\"\"Greedily generate `max_tokens` tokens for every prompt in the batch.\n",
    "\n",
    "    Uses the KV cache (past_key_values) so each iteration only feeds the\n",
    "    one newly generated token per row. Returns a list of generated\n",
    "    strings, one per input prompt.\n",
    "    \"\"\"\n",
    "    batch_size = inputs[\"input_ids\"].shape[0]\n",
    "    # One list of generated token strings per batch element.\n",
    "    generated_tokens = [\n",
    "        [] for _ in range(batch_size)\n",
    "    ]\n",
    "\n",
    "    attention_mask = inputs[\"attention_mask\"]\n",
    "    # Ordinal positions for real tokens; padding slots get a dummy 1\n",
    "    # (they are excluded by the attention mask anyway).\n",
    "    position_ids = attention_mask.long().cumsum(-1) - 1\n",
    "    position_ids = position_ids.masked_fill_(attention_mask == 0, 1)\n",
    "\n",
    "    next_inputs = {\n",
    "        \"position_ids\": position_ids,\n",
    "        **inputs\n",
    "    }\n",
    "\n",
    "    # Iterate once per token we want to generate.\n",
    "    for _ in range(max_tokens):\n",
    "        next_token_ids, past_key_values = generate_batch_tokens_with_past(next_inputs)\n",
    "\n",
    "        # Update next_inputs for the next iteration.\n",
    "        next_inputs = {\n",
    "            \"input_ids\": next_token_ids.reshape((-1, 1)),\n",
    "            # Every row advances by exactly one position.\n",
    "            \"position_ids\": next_inputs[\"position_ids\"][:, -1].unsqueeze(-1) + 1,\n",
    "            # FIX: extend the mask with ones of the SAME dtype -- torch.ones\n",
    "            # defaults to float32, and torch.cat would type-promote the\n",
    "            # integer attention mask to float on the first iteration.\n",
    "            \"attention_mask\": torch.cat([\n",
    "                next_inputs[\"attention_mask\"],\n",
    "                torch.ones(\n",
    "                    (next_token_ids.shape[0], 1),\n",
    "                    dtype=next_inputs[\"attention_mask\"].dtype\n",
    "                )\n",
    "            ], dim=1),\n",
    "            \"past_key_values\": past_key_values\n",
    "        }\n",
    "\n",
    "        next_tokens = tokenizer.batch_decode(next_token_ids)\n",
    "\n",
    "        for i, token in enumerate(next_tokens):\n",
    "            generated_tokens[i].append(token)\n",
    "\n",
    "    # Join each row's tokens into a single string.\n",
    "    return [\"\".join(tokens) for tokens in generated_tokens]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "39017d41",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate 10 tokens for each of the three prompts.\n",
    "generated_tokens = generate_batch(inputs, max_tokens=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "340d211d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Show each prompt alongside its generated continuation.\n",
    "for prompt, gen_tokens in zip(prompts, generated_tokens):\n",
    "    print(f\"Prompt: {prompt}\")\n",
    "    print(f\"Generated tokens: {gen_tokens}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b11ea60d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# constants\n",
    "max_tokens = 10\n",
    "\n",
    "# Observations per batch size (consumed by render_plot below).\n",
    "durations = []\n",
    "throughputs = []\n",
    "latencies = []\n",
    "\n",
    "batch_sizes = [2**p for p in range(8)]\n",
    "for batch_size in batch_sizes:\n",
    "    print(f\"bs={batch_size}\")\n",
    "    # generate tokens for batch and record duration\n",
    "    t0 = time.time()\n",
    "    batch_prompts = [\n",
    "        prompts[i % len(prompts)] for i in range(batch_size)\n",
    "    ]\n",
    "    inputs = tokenizer(\n",
    "        batch_prompts, padding=True, return_tensors=\"pt\"\n",
    "    )\n",
    "    generated_tokens = generate_batch(inputs, max_tokens=max_tokens)\n",
    "    duration_s = time.time() - t0\n",
    "\n",
    "    ntokens = batch_size * max_tokens\n",
    "    throughput = ntokens / duration_s\n",
    "    avg_latency = duration_s / max_tokens\n",
    "\n",
    "    # BUG FIX: record the observations -- previously these lists were\n",
    "    # never appended to, so the render_plot call at the end plotted\n",
    "    # empty series.\n",
    "    durations.append(duration_s)\n",
    "    throughputs.append(throughput)\n",
    "    latencies.append(avg_latency)\n",
    "\n",
    "    print(\"duration\", duration_s)\n",
    "    print(\"throughput\", throughput)\n",
    "    print(\"avg latency\", avg_latency)\n",
    "    print()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "be05af11",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Helper to visualize two metrics against batch size on twin y-axes.\n",
    "\n",
    "def render_plot(x, y1, y2, x_label, y1_label, y2_label):\n",
    "    \"\"\"Plot two series on a shared log2 x-axis with twin y-axes.\n",
    "\n",
    "    y1 is drawn in red on the left axis, y2 in blue on the right.\n",
    "    \"\"\"\n",
    "    fig, ax1 = plt.subplots()\n",
    "\n",
    "    # Left axis: first series (e.g. throughput), in red.\n",
    "    left_color = 'tab:red'\n",
    "    ax1.set_xlabel(x_label)\n",
    "    ax1.set_ylabel(y1_label, color=left_color)\n",
    "    ax1.plot(x, y1, color=left_color)\n",
    "    ax1.tick_params(axis='y', labelcolor=left_color)\n",
    "\n",
    "    # Batch sizes are powers of two, so log2-scale the x-axis.\n",
    "    ax1.set_xscale('log', base=2)\n",
    "\n",
    "    # Right axis: second series (e.g. latency), sharing the same x-axis.\n",
    "    ax2 = ax1.twinx()\n",
    "    right_color = 'tab:blue'\n",
    "    ax2.set_ylabel(y2_label, color=right_color)\n",
    "    ax2.tick_params(axis='y', labelcolor=right_color)\n",
    "    ax2.plot(x, y2, color=right_color)\n",
    "\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fda96d8a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Visualize the throughput vs. latency trade-off as batch size grows.\n",
    "render_plot(\n",
    "    batch_sizes,\n",
    "    throughputs, \n",
    "    latencies, \"Batch Size\", \"Throughput\", \"Latency\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.13.5 (tags/v3.13.5:6cb20a2, Jun 11 2025, 16:15:46) [MSC v.1943 64 bit (AMD64)]"
  },
  "vscode": {
   "interpreter": {
    "hash": "340a546b2c4c9cf5d23eb4a2a4e78e923e0b7afe4d00162258c4442b3ee3b061"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
