{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "837aae9a",
   "metadata": {},
   "source": [
    "在上一课中，我们看到了 批处理是如何通过 `增加吞吐量` 来降低延迟的\n",
    "\n",
    "同步批处理情况： 多个不同时间到达的请求，然后我们希望将它们一起批处理 到一个单独的批次，以提高吞吐量。\n",
    "\n",
    "这就引出了，连续批处理的概念，在看到一个新请求时,当我们完成一个特定标记后,我们决定是否要继续将`该请求合并到我们现有的批处理中`,这样它们就可以继续一起生成标记,从而获得`吞吐量`的优势。\n",
    "\n",
    "可以有效地从批处理中删除其中一个序列，并将其替换为另一个正在等待轮到的请求。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "523dcb30",
   "metadata": {},
   "outputs": [],
   "source": [
    "# step1 导入相关包\n",
    "import copy\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import time\n",
    "import random\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from tqdm import tqdm\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a4534ab0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# step2: load the GPT-2 model and its matching tokenizer\n",
    "model_name = 'gpt2'\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
    "model = AutoModelForCausalLM.from_pretrained(model_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "97ce00e5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Continuous batching still processes requests in batches, so we need\n",
    "# a padding token for sequences of unequal length.\n",
    "# Define PAD token as EOS token (GPT-2 ships without a pad token).\n",
    "tokenizer.pad_token = tokenizer.eos_token\n",
    "model.config.pad_token_id = model.config.eos_token_id\n",
    "\n",
    "# Which side padding / truncation is applied to: left or right.\n",
    "# Left padding keeps the newest tokens adjacent for generation.\n",
    "tokenizer.padding_side = \"left\"  # or \"right\"\n",
    "tokenizer.truncation_side = \"left\"  # or \"right\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2fc188a0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# A list of three prompts instead of a single prompt; tokenizing with\n",
    "# padding=True pads them all to the longest prompt's length.\n",
    "prompts = [\n",
    "    \"The quick brown fox jumped over the\",\n",
    "    \"The rain in Spain stays mainly in the\",\n",
    "    \"What comes up must\"\n",
    "]\n",
    "\n",
    "inputs = tokenizer(prompts, padding=True, return_tensors=\"pt\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8ed9db7d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One of two helper functions reused from the previous lesson\n",
    "# (generate_batch_tokens_with_past & generate_batch); compare them\n",
    "# with the new continuous-batching implementation further below.\n",
    "def generate_batch_tokens_with_past(inputs):\n",
    "    \"\"\"Run one forward pass and greedily pick the next token per row.\n",
    "\n",
    "    inputs: dict of model kwargs (input_ids, attention_mask, and\n",
    "    optionally position_ids / past_key_values for cached decoding).\n",
    "    Returns (next_token_ids, past_key_values): next_token_ids is the\n",
    "    argmax over each sequence's last-position logits, and\n",
    "    past_key_values is the KV cache to reuse on the next step.\n",
    "    \"\"\"\n",
    "    with torch.no_grad():\n",
    "        outputs = model(**inputs)\n",
    "    \n",
    "    logits = outputs.logits\n",
    "    last_logits = logits[:, -1, :]\n",
    "    next_token_ids = last_logits.argmax(dim=1)\n",
    "    return next_token_ids, outputs.past_key_values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b5317e31",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_batch(inputs, max_tokens):\n",
    "    \"\"\"Greedily generate `max_tokens` tokens for every sequence in the batch.\n",
    "\n",
    "    inputs: tokenizer output dict (input_ids, attention_mask).\n",
    "    Returns a list of generated strings, one per input sequence.\n",
    "    \"\"\"\n",
    "    batch_size = inputs[\"input_ids\"].shape[0]\n",
    "    # create a list of tokens for every input in the batch\n",
    "    generated_tokens = [\n",
    "        [] for _ in range(batch_size)\n",
    "    ]\n",
    "    \n",
    "    # Build position_ids from the attention mask so left padding does\n",
    "    # not shift real token positions; padded slots get a dummy\n",
    "    # position of 1 (they are masked out of attention anyway).\n",
    "    attention_mask = inputs[\"attention_mask\"]\n",
    "    position_ids = attention_mask.long().cumsum(-1) - 1\n",
    "    position_ids = position_ids.masked_fill_(attention_mask == 0, 1)\n",
    "    \n",
    "    next_inputs = {\n",
    "        \"position_ids\": position_ids,\n",
    "        **inputs\n",
    "    }\n",
    "\n",
    "    # Iterate once per token we want to generate.\n",
    "    for _ in range(max_tokens):\n",
    "        next_token_ids, past_key_values = generate_batch_tokens_with_past(next_inputs)\n",
    "        \n",
    "        # update next_inputs for the next iteration: feed only the new\n",
    "        # token, advance each position by 1, extend the attention\n",
    "        # mask by one column, and reuse the KV cache\n",
    "        next_inputs = {\n",
    "            \"input_ids\": next_token_ids.reshape((-1, 1)),\n",
    "            \"position_ids\": next_inputs[\"position_ids\"][:, -1].unsqueeze(-1) + 1,\n",
    "            \"attention_mask\": torch.cat([\n",
    "                next_inputs[\"attention_mask\"],\n",
    "                torch.ones((next_token_ids.shape[0], 1))\n",
    "            ], dim=1),\n",
    "            \"past_key_values\": past_key_values\n",
    "        }\n",
    "\n",
    "        next_tokens = tokenizer.batch_decode(next_token_ids)\n",
    "        \n",
    "        for i, token in enumerate(next_tokens):\n",
    "            generated_tokens[i].append(token)\n",
    "    \n",
    "    # Finally, join each sequence's tokens into a single string\n",
    "    # (rather than returning lists of token strings).\n",
    "    return [\"\".join(tokens) for tokens in generated_tokens]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "683d3823",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Seed the RNG first so the run is deterministic and free of any\n",
    "# randomness effects.\n",
    "torch.manual_seed(42)\n",
    "\n",
    "# Next, define the request queue: a list of two-element tuples.\n",
    "\n",
    "# constants\n",
    "queue_size = 32\n",
    "batch_size = 8 # batch size\n",
    "\n",
    "# requests waiting to be processed\n",
    "# requests are tuples (prompt, max_tokens)\n",
    "# Every request reuses the first prompt; every 8th request asks for\n",
    "# 100 tokens, the rest only 10 (a deliberately skewed workload).\n",
    "request_queue = [\n",
    "    (prompts[0], 100 if i % batch_size == 0 else 10)\n",
    "    for i in range(queue_size)\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6e6bf60d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Peek at the first batch_size (8) elements of the request queue.\n",
    "request_queue[:8]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "13b60556",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Slice the request queue into sub-lists of batch_size requests each\n",
    "# (the last sub-list may be shorter if the queue doesn't divide evenly).\n",
    "batches = [\n",
    "    request_queue[i:i + batch_size]\n",
    "    for i in range(0, len(request_queue), batch_size)\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f7c1635f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# queue size 32 divided by batch size 8 gives 4 batches.\n",
    "len(batches) # 4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "778e46cb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One batch has 8 elements; the first sequence in the batch asks for\n",
    "# 100 tokens, the remaining ones for 10 each.\n",
    "batches[0]\n",
    "\n",
    "# [('the quick...', 100), ('the quick...', 10), ......]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ced599cd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate tokens for all batches synchronously and record the\n",
    "# duration; tqdm renders a progress bar (one tick per batch).\n",
    "t0 = time.time()  # fixed: was `time_time()`, a NameError\n",
    "with tqdm(total=len(batches), desc=f\"bs={batch_size}\") as pbar:\n",
    "    for i, batch in enumerate(batches):\n",
    "        # to accommodate all the requests with our\n",
    "        # current implementation, we take the max of\n",
    "        # all the tokens to generate among the requests\n",
    "        batch_max_tokens = [b[1] for b in batch]\n",
    "        max_tokens = max(batch_max_tokens)\n",
    "        pbar.set_postfix({'max_tokens': max_tokens})\n",
    "\n",
    "        batch_prompts = [b[0] for b in batch]\n",
    "        inputs = tokenizer(batch_prompts, padding=True, return_tensors=\"pt\")\n",
    "        # Generate up to the batch's largest request: one slow\n",
    "        # 100-token request stalls the seven 10-token requests.\n",
    "        generate_batch(inputs, max_tokens=max_tokens)\n",
    "        \n",
    "        pbar.update(1)\n",
    "\n",
    "# Total wall-clock time for synchronous batching. On CPU with\n",
    "# 100-token generations this takes a while (~17s for the first\n",
    "# batch in the lesson), and each later batch costs about the same.\n",
    "duration_s = time.time() - t0\n",
    "print(\"duration\", duration_s)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dde210a0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Continuous batching: swap finished sequences out of the running\n",
    "# batch and admit waiting requests immediately, instead of waiting\n",
    "# for the whole batch to finish. Helpers init_batch,\n",
    "# generate_next_token, merge_batches and filter_batch come from\n",
    "# utils.py (see that file).\n",
    "random.seed(42)\n",
    "\n",
    "# constants\n",
    "queue_size = 32\n",
    "batch_size = 8 # batch size\n",
    "\n",
    "# requests waiting to be processed\n",
    "# requests are tuples (prompt, max_tokens)\n",
    "# same skewed workload as before, rebuilt because this cell drains it\n",
    "request_queue = [\n",
    "    (prompts[0], 100 if i % batch_size == 0 else 10)\n",
    "    for i in range(queue_size)\n",
    "]\n",
    "\n",
    "t0 = time.time()\n",
    "# The bar ticks once per *completed request* (pbar.update below), so\n",
    "# its total is the queue length — fixed: was len(batches), the\n",
    "# number of synchronous batches (4), which overflowed the bar.\n",
    "with tqdm(total=len(request_queue), desc=f\"bs={batch_size}\") as pbar:\n",
    "    # first, let's seed the initial cached_batch\n",
    "    # with the first `batch_size` inputs\n",
    "    # and run the initial prefill step\n",
    "    batch = init_batch(request_queue[:batch_size])\n",
    "    cached_batch = generate_next_token(batch)\n",
    "    request_queue = request_queue[batch_size:]\n",
    "    \n",
    "    # continue until both the request queue is\n",
    "    # fully drained and every input\n",
    "    # within the cached_batch has completed generation\n",
    "    while (\n",
    "        len(request_queue) > 0 or\n",
    "        cached_batch[\"input_ids\"].size(0) > 0\n",
    "    ):\n",
    "        # free slots in the running batch after the last filter step\n",
    "        batch_capacity = (\n",
    "            batch_size - cached_batch[\"input_ids\"].size(0)\n",
    "        )\n",
    "        if batch_capacity > 0 and len(request_queue) > 0:\n",
    "            # prefill the newly admitted requests\n",
    "            new_batch = init_batch(request_queue[:batch_capacity])\n",
    "            new_batch = generate_next_token(new_batch)\n",
    "            request_queue = request_queue[batch_capacity:]\n",
    "\n",
    "            # merge_batches pads the shapes into alignment, then\n",
    "            # concatenates the two batches\n",
    "            cached_batch = merge_batches(cached_batch, new_batch)\n",
    "        \n",
    "        # decode: an ordinary generate_next_token step whose result\n",
    "        # is the refreshed cached batch for the next iteration\n",
    "        cached_batch = generate_next_token(cached_batch)\n",
    "        \n",
    "        # remove any inputs that have finished generation\n",
    "        cached_batch, removed_indices = filter_batch(cached_batch)\n",
    "        pbar.update(len(removed_indices))\n",
    "\n",
    "# Total wall-clock time for continuous batching.\n",
    "duration_s = time.time() - t0\n",
    "print(\"duration\", duration_s)"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
