{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "d5433cc3-b5cb-486a-855f-0f4e71564a2c",
   "metadata": {
    "tags": []
   },
   "source": [
    "# 推理需求计算"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "644ec8fd-1115-47ef-9bf0-300fa07d1e4a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "from prettytable import PrettyTable"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e41e1b20-47ce-4d6f-8746-1c8dfea32381",
   "metadata": {},
   "source": [
    "### 1 定义模型和GPU卡的参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "2e475659-7bd1-4c93-b174-fe65bcfe8102",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Model parameters\n",
    "class Model1:\n",
    "    name = \"DeepSeek-Distill-Qwen-14B\"\n",
    "    total_params = 14          # total parameters, in billions (14B model)\n",
    "    sequence_length = 2048     # context length used for KV-cache sizing\n",
    "    layers = 48                # number of transformer layers\n",
    "    d_model = 5120             # hidden dimension\n",
    "\n",
    "# GPU parameters\n",
    "class GPU1:\n",
    "    name = \"L20\"\n",
    "    tflops = 239               # peak compute, TFLOPS (unused by the memory-bound estimate)\n",
    "    gpu_mem = 48               # device memory, GB\n",
    "    gpu_mem_bw = 864           # memory bandwidth, GB/s\n",
    "    gpu_interconnect_bw = 0    # inter-GPU link bandwidth; 0 here — only valid for single-card deployments\n",
    "    compute_precision = 2      # bytes per parameter (FP16/BF16)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "91dcf9ff-cec7-4cf0-936f-118d183ac24c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Model parameters\n",
    "class Model2:\n",
    "    name = \"DeepSeek-R1\"\n",
    "    total_params = 671         # total parameters, in billions (671B model)\n",
    "    sequence_length = 8192     # context length used for KV-cache sizing\n",
    "    layers = 61                # number of transformer layers\n",
    "    d_model = 7168             # hidden dimension\n",
    "\n",
    "# GPU parameters\n",
    "class GPU2:\n",
    "    name = \"H20\"\n",
    "    tflops = 296               # peak compute, TFLOPS (unused by the memory-bound estimate)\n",
    "    gpu_mem = 96               # device memory, GB\n",
    "    gpu_mem_bw = 4000          # memory bandwidth, GB/s\n",
    "    gpu_interconnect_bw = 900  # inter-GPU link bandwidth — TODO confirm units against tpot_2 formula\n",
    "    compute_precision = 2      # bytes per parameter (FP16/BF16)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ac5cffe5-dff9-4415-8ad6-93296af38cf9",
   "metadata": {},
   "source": [
    "### 2 定义计算公式"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a2aa9be2-2095-46ac-8acf-956b35fd71f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "class InferenceComputeEstimator:\n",
    "    \"\"\"Estimate GPU count and per-token latency (TPOT) for LLM inference.\n",
    "\n",
    "    Sizing is memory-driven: weights need compute_precision * total_params GB\n",
    "    (params given in billions), and each request needs a KV cache of\n",
    "    sequence_length * 2 * layers * d_model * compute_precision bytes.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, model, gpu, batch_size=1, group_num=16):\n",
    "        # Defaults: 1 concurrent request; multi-card deployments scale in\n",
    "        # groups of group_num (16) GPUs.\n",
    "        self.model_name = model.name\n",
    "        self.batch_size = batch_size\n",
    "        self.group_num = group_num\n",
    "        self.total_params = model.total_params\n",
    "        self.sequence_length = model.sequence_length\n",
    "        self.layers = model.layers\n",
    "        self.d_model = model.d_model\n",
    "        self.gpu_name = gpu.name\n",
    "        self.gpu_tflops = gpu.tflops\n",
    "        self.gpu_mem = gpu.gpu_mem\n",
    "        self.gpu_mem_bandwidth = gpu.gpu_mem_bw\n",
    "        self.gpu_interconnect_bandwidth = gpu.gpu_interconnect_bw\n",
    "        self.compute_precision = gpu.compute_precision\n",
    "\n",
    "    def estimate_memory(self):\n",
    "        \"\"\"Compute weight and per-request KV memory; set self.single_card.\"\"\"\n",
    "        # Model weight memory (GB): bytes-per-param * params-in-billions.\n",
    "        self.weight_gpu_mem = self.compute_precision * self.total_params\n",
    "        # KV cache for one request (GB): seq * 2 (K and V) * layers * d_model.\n",
    "        self.single_batch_kv_mem = round(\n",
    "            self.sequence_length * 2 * self.layers * self.d_model * self.compute_precision /10**9, 2\n",
    "        )\n",
    "        # One card suffices if weights plus at least one KV cache fit.\n",
    "        self.single_card = self.gpu_mem > self.weight_gpu_mem + self.single_batch_kv_mem\n",
    "\n",
    "    def estimate_gpu_count(self):\n",
    "        \"\"\"Derive self.gpu_count for the requested batch size.\"\"\"\n",
    "        if self.single_card:\n",
    "            # Concurrent requests one card can serve beyond the weights;\n",
    "            # >= 1 because estimate_memory verified one KV cache fits.\n",
    "            self.single_batch_num = math.floor(\n",
    "                (self.gpu_mem - self.weight_gpu_mem)/self.single_batch_kv_mem\n",
    "            )\n",
    "            self.gpu_count = math.ceil(self.batch_size/self.single_batch_num)\n",
    "\n",
    "        else:\n",
    "            # Multi-card: capacity of one group of group_num GPUs; scale up\n",
    "            # by whole groups to reach the requested batch size.\n",
    "            self.single_group_batch_num = math.floor(\n",
    "                (self.group_num * self.gpu_mem - self.weight_gpu_mem)/self.single_batch_kv_mem\n",
    "            )\n",
    "            if self.single_group_batch_num < 1:\n",
    "                # Previously a silent ZeroDivisionError below.\n",
    "                raise ValueError(\n",
    "                    f\"{self.group_num} x {self.gpu_name} cannot hold {self.model_name}; increase group_num\"\n",
    "                )\n",
    "            self.gpu_count = math.ceil(self.batch_size/self.single_group_batch_num) * self.group_num\n",
    "\n",
    "    def estimate_per_token_latency(self):\n",
    "        \"\"\"Estimate TPOT in ms (decode assumed memory-bandwidth bound).\n",
    "\n",
    "        Requires estimate_gpu_count() to have run first (uses the per-card\n",
    "        or per-group batch capacity computed there).\n",
    "        \"\"\"\n",
    "        if self.single_card:\n",
    "            # Time to stream weights + active KV caches at ~60% of peak BW.\n",
    "            self.tpot = round(\n",
    "                (self.weight_gpu_mem + min(self.batch_size, self.single_batch_num)* self.single_batch_kv_mem)/(self.gpu_mem_bandwidth * 0.6) * pow(10,3),2\n",
    "            )\n",
    "        else: \n",
    "            if self.gpu_interconnect_bandwidth <= 0:\n",
    "                # Previously a ZeroDivisionError (e.g. L20 declares bw = 0);\n",
    "                # multi-card serving needs an inter-GPU link.\n",
    "                raise ValueError(\n",
    "                    f\"{self.gpu_name} has no inter-GPU bandwidth; cannot estimate multi-card latency\"\n",
    "                )\n",
    "            # Per-token memory-read time across the group (~40% efficiency).\n",
    "            self.tpot_1 = round(\n",
    "                (self.weight_gpu_mem + min(self.batch_size, self.single_group_batch_num)*self.single_batch_kv_mem)/(self.group_num * self.gpu_mem_bandwidth * 0.4)* pow(10,3),2\n",
    "            )\n",
    "            # Per-token inter-card communication time.\n",
    "            # NOTE(review): scales with group capacity, not the actual batch\n",
    "            # (min(batch_size, capacity)) — confirm intent.\n",
    "            self.tpot_2 = round(\n",
    "                2 * self.d_model * self.single_group_batch_num * self.layers * 2 / (self.gpu_interconnect_bandwidth * pow(10,6)),2\n",
    "            )\n",
    "            self.tpot = round((self.tpot_1 + self.tpot_2), 2)\n",
    "\n",
    "    def estimate_compute(self):\n",
    "        \"\"\"Run the full pipeline: memory -> GPU count -> latency.\"\"\"\n",
    "        self.estimate_memory()\n",
    "        self.estimate_gpu_count()\n",
    "        self.estimate_per_token_latency()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bfb3d336-4bdd-4e32-9a2b-4717d93f1cb7",
   "metadata": {},
   "source": [
    "### 3 Example"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "3d2b53ad-7932-42c1-85ff-5d53d1808e4c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scenario 1: 14B model on L20, 14 concurrent requests (fits single-card: 48 GB > 28 + 2.01 GB).\n",
    "compute_estimator_1 = InferenceComputeEstimator(model=Model1, gpu=GPU1, batch_size=14)\n",
    "# Scenario 2: 671B model on H20, 10 concurrent requests (multi-card: 96 GB < 1342 GB of weights).\n",
    "compute_estimator_2 = InferenceComputeEstimator(model=Model2, gpu=GPU2, batch_size=10)\n",
    "compute_estimator_1.estimate_compute()\n",
    "compute_estimator_2.estimate_compute()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "64bc47ac-d444-4eee-b3ea-98ecab08e355",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<table>\n",
       "    <thead>\n",
       "        <tr>\n",
       "            <th>模型名</th>\n",
       "            <th>GPU Model</th>\n",
       "            <th>数量</th>\n",
       "            <th>TPOT(ms)</th>\n",
       "            <th>并行数量</th>\n",
       "        </tr>\n",
       "    </thead>\n",
       "    <tbody>\n",
       "        <tr>\n",
       "            <td>DeepSeek-Distill-Qwen-14B</td>\n",
       "            <td>L20</td>\n",
       "            <td>2</td>\n",
       "            <td>88.91</td>\n",
       "            <td>14</td>\n",
       "        </tr>\n",
       "        <tr>\n",
       "            <td>DeepSeek-R1</td>\n",
       "            <td>H20</td>\n",
       "            <td>16</td>\n",
       "            <td>58.05</td>\n",
       "            <td>10</td>\n",
       "        </tr>\n",
       "    </tbody>\n",
       "</table>"
      ],
      "text/plain": [
       "+---------------------------+-----------+------+----------+----------+\n",
       "|           模型名          | GPU Model | 数量 | TPOT(ms) | 并行数量 |\n",
       "+---------------------------+-----------+------+----------+----------+\n",
       "| DeepSeek-Distill-Qwen-14B |    L20    |  2   |  88.91   |    14    |\n",
       "|        DeepSeek-R1        |    H20    |  16  |  58.05   |    10    |\n",
       "+---------------------------+-----------+------+----------+----------+"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Summarize both scenarios; the bare `table` expression renders via PrettyTable's rich (HTML) repr.\n",
    "table = PrettyTable(['模型名','GPU Model','数量', 'TPOT(ms)', '并行数量'])\n",
    "table.add_row([compute_estimator_1.model_name, compute_estimator_1.gpu_name, compute_estimator_1.gpu_count, compute_estimator_1.tpot, compute_estimator_1.batch_size])\n",
    "table.add_row([compute_estimator_2.model_name, compute_estimator_2.gpu_name, compute_estimator_2.gpu_count, compute_estimator_2.tpot, compute_estimator_2.batch_size])\n",
    "table"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
