{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "ad924135",
   "metadata": {},
   "source": [
    "我们将结合LoRA和 连续批处理(continuous batching) 创建一个端到端的服务系统，\n",
    "\n",
    "如何在单个部署中，高效地同时提供数十个这些精调模型，而不会牺牲延迟和吞吐量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "53999c1e",
   "metadata": {},
   "outputs": [],
   "source": [
    "import time\n",
    "\n",
    "import torch\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# 这里的 hidden_size = 10，而不是1024\n",
    "# lora_indices： 查找张量\n",
    "class AbstractMultiLoraModel(torch.nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "\n",
    "        # hidden_size = 10\n",
    "        # set this so low to ensure we are not\n",
    "        # compute-bound by the linear layer\n",
    "        # this is only an issue when running on CPU,\n",
    "        # for GPUs we can set this much\n",
    "        # higher and still avoid being compute bound\n",
    "        self.embedding = torch.nn.Embedding(10, 10)\n",
    "        self.linear = torch.nn.Linear(10, 10)\n",
    "        self.lm_head = torch.nn.Linear(10, 10)\n",
    "        \n",
    "    def linear_lora(\n",
    "        self,\n",
    "        x: torch.Tensor,    # (batch_size, seq_len, in_features)\n",
    "        loras_a: torch.Tensor,    # (num_loras, in_features, rank)\n",
    "        loras_b: torch.Tensor,    # (num_loras, rank, out_features)\n",
    "        lora_indices: torch.LongTensor,  # (batch_size,)\n",
    "    ) -> torch.Tensor:\n",
    "        # y[i] = x[i] @ W + x[i] @ loras_a[lora_indices[i]] @ loras_b[lora_indices[i]]\n",
    "        raise NotImplementedError()\n",
    "    \n",
    "    def forward(self, input_ids, loras_a, loras_b, lora_indices):\n",
    "        x = self.embedding(input_ids)\n",
    "        x = self.linear_lora(x, loras_a, loras_b, lora_indices)\n",
    "        x = self.lm_head(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ffed3842",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 我们使用一个非常简单的循环\n",
    "# 它继承了我们的抽象类： AbstractMultiLoraModel\n",
    "# 我们首先计算线性层的输出，这是Lora 计算的标准部分\n",
    "\n",
    "class LoopMultiLoraModel(AbstractMultiLoraModel):\n",
    "    def linear_lora(\n",
    "        self,\n",
    "        x: torch.Tensor,    # (batch_size, seq_len, in_features)\n",
    "        loras_a: torch.Tensor,    # (num_loras, in_features, lora_rank)\n",
    "        loras_b: torch.Tensor,    # (num_loras, lora_rank, out_features)\n",
    "        lora_indices: torch.LongTensor,  # (batch_size,)\n",
    "\n",
    "    ) -> torch.Tensor:\n",
    "        y = self.linear(x)\n",
    "        # 我们将遍历这些 LoRA 索引，并提取对应于该批量大小维度的批量索引。\n",
    "        for batch_idx, lora_idx in enumerate(lora_indices.numpy()):\n",
    "            lora_a = loras_a[lora_idx]\n",
    "            lora_b = loras_b[lora_idx]\n",
    "            y[batch_idx] += x[batch_idx] @ lora_a @ lora_b\n",
    "        return y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d94dff7d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# detokenizer example\n",
    "detokenizer = [\n",
    "    \"red\",\n",
    "    \"orange\",\n",
    "    \"yellow\",\n",
    "    \"green\",\n",
    "    \"blue\",\n",
    "    \"indigo\",\n",
    "    \"violet\",\n",
    "    \"magenta\",\n",
    "    \"marigold\",\n",
    "    \"chartreuse\",\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "623a7fab",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设置一个虚拟的 input_ids\n",
    "input_ids = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e8430e9e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设置随机种子, 这将确保每次我们执行此过程时都是确定性的。\n",
    "torch.manual_seed(42)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3ed8c6ee",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 使用跟上一节，一样的生成函数\n",
    "def generate_token(model, **kwargs):\n",
    "    with torch.no_grad():\n",
    "        logits = model(**kwargs)\n",
    "    last_logits = logits[:, -1, :]\n",
    "    next_token_ids = last_logits.argmax(dim=1)\n",
    "    return [detokenizer[token_id] for token_id in next_token_ids]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a2939769",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 现在我们实现我们的多 Lora 推理过程的外部循环\n",
    "\n",
    "# 设置一些常量\n",
    "\n",
    "model = LoopMultiLoraModel()\n",
    "bs = 1\n",
    "num_loras = 64 # 存储在内存中的loras 数量\n",
    "h = 10\n",
    "r = 2\n",
    "\n",
    "# create continuous blocks for 64 random LoRA weights\n",
    "loras_a = torch.randn(num_loras, h, r)\n",
    "loras_b = torch.randn(num_loras, r, h)\n",
    "\n",
    "for i in range(10):\n",
    "    # randomize the LoRAs each iteration\n",
    "    lora_indices = torch.randint(num_loras, (bs,), dtype=torch.long)\n",
    "    next_token = generate_token(\n",
    "        model,\n",
    "        input_ids=input_ids,\n",
    "        loras_a=loras_a,\n",
    "        loras_b=loras_b,\n",
    "        lora_indices=lora_indices,\n",
    "    )\n",
    "    print(next_token)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "58f58ed8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 我们将运行500个样本，来确保我们得到一个稳健的基准值\n",
    "# constants\n",
    "seq_len = 8\n",
    "vocab_size = 10\n",
    "nsamples = 500\n",
    "max_batch_size = 64\n",
    "\n",
    "def benchmark(model):\n",
    "    avg_latencies = []\n",
    "    for bs in range(1, max_batch_size + 1):\n",
    "        latencies = []\n",
    "        for _ in range(nsamples):\n",
    "            # randomize the inputs and LoRA indices\n",
    "            input_ids = torch.randint(\n",
    "            vocab_size, (bs, seq_len), dtype=torch.long)\n",
    "            lora_indices = torch.randint(\n",
    "            num_loras, (bs,), dtype=torch.long)\n",
    "\n",
    "            # measure the end-to-end latency for\n",
    "            # generating a single token\n",
    "            t0 = time.time()\n",
    "            next_token = generate_token(\n",
    "                model,\n",
    "                input_ids=input_ids,\n",
    "                loras_a=loras_a,\n",
    "                loras_b=loras_b,\n",
    "                lora_indices=lora_indices,\n",
    "            )\n",
    "            latencies.append(time.time() - t0)\n",
    "        # average the latency across all the samples\n",
    "        latency_s = sum(latencies) / len(latencies)\n",
    "        avg_latencies.append(latency_s)\n",
    "        print(bs, latency_s)\n",
    "    # 返回平均延迟\n",
    "    return avg_latencies\n",
    "\n",
    "avg_latencies_loop = benchmark(model)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5cc78060",
   "metadata": {},
   "source": [
    "执行，看输出\n",
    "\n",
    "我们可以看到，随着批大小的增加，平均延迟值也在增加"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "21ec2347",
   "metadata": {},
   "outputs": [],
   "source": [
    "x = list(range(1,max_batch_size + 1))\n",
    "plt.plot(x, avg_latencies_loop, label=\"loop\")\n",
    "\n",
    "plt.xlabel('Batch Size')\n",
    "plt.ylabel('Avg Latency (s)')\n",
    "plt.title('Multi-LoRA latency w.r.t. batch size')\n",
    "plt.legend()\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "20bd2de9",
   "metadata": {},
   "outputs": [],
   "source": [
    "class GatheredMultiLoraModel(AbstractMultiLoraModel):\n",
    "    def linear_lora(\n",
    "        self,\n",
    "        x: torch.Tensor,    # (batch_size, seq_len, in_features)\n",
    "        loras_a: torch.Tensor,    # (num_loras, in_features, lora_rank)\n",
    "        loras_b: torch.Tensor,    # (num_loras, lora_rank, out_features)\n",
    "        lora_indices: torch.LongTensor,    # (batch_size,)\n",
    "    ) -> torch.Tensor:\n",
    "        y = self.linear(x)\n",
    "\n",
    "        # gather the LoRA weights into a new tensor and apply\n",
    "        lora_a = torch.index_select(loras_a, 0, lora_indices) # (batch_size, in_features, lora_rank)\n",
    "        lora_b = torch.index_select(loras_b, 0, lora_indices) # (batch_size, lora_rank, out_features)\n",
    "        y += x @ lora_a @ lora_b\n",
    "\n",
    "        return y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d578c968",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 我们用这个新类，重新初始化我们的模型\n",
    "model = GatheredMultiLoraModel()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e6d64023",
   "metadata": {},
   "outputs": [],
   "source": [
    "avg_latencies_gathered = benchmark(model)\n",
    "\n",
    "# 打印看输出结果， 延迟也在增加，但并没有以前那么严重"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "41ee36aa",
   "metadata": {},
   "outputs": [],
   "source": [
    "x = list(range(1, max_batch_size + 1))\n",
    "plt.plot(x, avg_latencies_loop, label=\"loop\")\n",
    "plt.plot(x, avg_latencies_gathered, label=\"gathered\")\n",
    "\n",
    "plt.xlabel('Batch Size')\n",
    "plt.ylabel('Avg Latency (s)')\n",
    "plt.title('Multi-LoRA latency w.r.t. batch size')\n",
    "plt.legend()\n",
    "\n",
    "plt.show()"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
