{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "c0804ec3",
   "metadata": {},
   "source": [
    "# 测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "e13886f2",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn.functional as F\n",
    "import numpy as np\n",
    "import tvm\n",
    "from tvm.script import ir as I\n",
    "from tvm.relax import BasePyModule\n",
    "\n",
    "# --------- Calibration and quantization-parameter computation ---------\n",
    "\n",
    "def calc_symmetric_qparams(min_v: np.ndarray, max_v: np.ndarray, qmax=127):\n",
    "    # Symmetric int8 scale/zero-point from observed min/max.\n",
    "    # min_v/max_v: scalars or vectors (per-channel).\n",
    "    absmax = np.maximum(np.abs(min_v), np.abs(max_v))\n",
    "    # Guard against division by zero for all-zero ranges.\n",
    "    absmax = np.where(absmax == 0, 1e-8, absmax)\n",
    "    scale = absmax / qmax\n",
    "    # Symmetric scheme: the zero-point is always 0.\n",
    "    zp = np.zeros_like(scale, dtype=np.int32)\n",
    "    return scale.astype(np.float32), zp\n",
    "\n",
    "def calibrate_activation_per_tensor(calib_loader):\n",
    "    # Observe min/max over calibration batches -> one per-tensor activation scale.\n",
    "    mins, maxs = [], []\n",
    "    with torch.no_grad():\n",
    "        for x in calib_loader:\n",
    "            mins.append(x.min().item())\n",
    "            maxs.append(x.max().item())\n",
    "    a_min, a_max = float(np.min(mins)), float(np.max(maxs))\n",
    "    s, z = calc_symmetric_qparams(np.array([a_min]), np.array([a_max]))\n",
    "    return torch.tensor(s[0], dtype=torch.float32), torch.tensor(z[0], dtype=torch.int32)\n",
    "\n",
    "def calibrate_weight_per_channel(w: torch.Tensor, axis: int = 0):\n",
    "    # w: [oc, ic, kh, kw]; compute per-channel scales along out_channels.\n",
    "    w_np = w.detach().cpu().numpy()\n",
    "    oc = w_np.shape[axis]\n",
    "    w_flat = w_np.reshape(oc, -1)\n",
    "    w_min = w_flat.min(axis=1)\n",
    "    w_max = w_flat.max(axis=1)\n",
    "    s, z = calc_symmetric_qparams(w_min, w_max)\n",
    "    return torch.tensor(s, dtype=torch.float32), torch.tensor(z, dtype=torch.int32)\n",
    "\n",
    "# --------- BasePyModule: quantize-dequantize and inference ---------\n",
    "\n",
    "@I.ir_module\n",
    "class PerChannelPTQ(BasePyModule):\n",
    "    \"\"\"Python-side simulation of per-tensor activation + per-channel weight quantization.\"\"\"\n",
    "\n",
    "    @I.pyfunc\n",
    "    def quant_dequant_activation(self, x: torch.Tensor, act_scale: torch.Tensor) -> torch.Tensor:\n",
    "        # Symmetric fake-quantization, zero-point = 0.\n",
    "        q = torch.clamp(torch.round(x / act_scale), -128, 127).to(torch.int8)\n",
    "        return q.to(torch.float32) * act_scale\n",
    "\n",
    "    @I.pyfunc\n",
    "    def quant_dequant_weight_pc(self, w: torch.Tensor, w_scales: torch.Tensor) -> torch.Tensor:\n",
    "        # w_scales: [oc], reshaped to [oc,1,1,1] so it broadcasts over w.\n",
    "        s = w_scales.view(-1, 1, 1, 1)\n",
    "        q = torch.clamp(torch.round(w / s), -128, 127).to(torch.int8)\n",
    "        return q.to(torch.float32) * s\n",
    "\n",
    "    @I.pyfunc\n",
    "    def conv2d_infer(\n",
    "        self,\n",
    "        x: torch.Tensor,\n",
    "        w: torch.Tensor,\n",
    "        bias: torch.Tensor,\n",
    "        stride_h: int,\n",
    "        stride_w: int,\n",
    "        pad_h: int,\n",
    "        pad_w: int,\n",
    "        dil_h: int,\n",
    "        dil_w: int,\n",
    "        groups: int,\n",
    "    ) -> torch.Tensor:\n",
    "        # Thin wrapper around torch.nn.functional.conv2d.\n",
    "        return F.conv2d(x, w, bias, stride=(stride_h, stride_w), padding=(pad_h, pad_w),\n",
    "                        dilation=(dil_h, dil_w), groups=groups)\n",
    "\n",
    "    @I.pyfunc\n",
    "    def main(\n",
    "        self,\n",
    "        x: torch.Tensor,\n",
    "        w: torch.Tensor,\n",
    "        b: torch.Tensor,\n",
    "        act_scale: torch.Tensor,    # scalar\n",
    "        w_scales: torch.Tensor,     # [out_channels]\n",
    "    ) -> torch.Tensor:\n",
    "        # 1) Per-tensor fake-quantize the activation.\n",
    "        x_qdq = self.quant_dequant_activation(x, act_scale)\n",
    "\n",
    "        # 2) Per-channel fake-quantize the weight (along the oc axis).\n",
    "        w_qdq = self.quant_dequant_weight_pc(w, w_scales)\n",
    "\n",
    "        # 3) Run inference with the quantize-dequantized tensors.\n",
    "        # NOTE: stride/padding/dilation/groups are hard-coded here (stride=1, pad=0, dil=1, groups=1).\n",
    "        y = self.conv2d_infer(\n",
    "            x_qdq, w_qdq, b,\n",
    "            stride_h=1, stride_w=1,\n",
    "            pad_h=0, pad_w=0,\n",
    "            dil_h=1, dil_w=1,\n",
    "            groups=1,\n",
    "        )\n",
    "        return y\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "a61c454b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n",
      "torch.Size([1, 16, 222, 222])\n"
     ]
    }
   ],
   "source": [
    "# Take the weights/bias of a single conv layer.\n",
    "# NOTE: this nn.Conv2d is configured with padding=1, but PerChannelPTQ.main\n",
    "# hard-codes pad_h=pad_w=0, so the simulated forward produces 222x222, not 224x224.\n",
    "conv = torch.nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=True).eval()\n",
    "w = conv.weight\n",
    "b = conv.bias\n",
    "\n",
    "# Prepare calibration data and compute quantization parameters.\n",
    "def calib_loader(n=8, bs=4):\n",
    "    for _ in range(n // bs):\n",
    "        yield torch.randn(bs, 3, 224, 224)\n",
    "\n",
    "act_scale, _ = calibrate_activation_per_tensor(calib_loader())\n",
    "w_scales, _ = calibrate_weight_per_channel(w, axis=0)\n",
    "\n",
    "# Build a module instance (CPU or CUDA both work).\n",
    "module = PerChannelPTQ\n",
    "instance = module(tvm.cpu(0))\n",
    "\n",
    "# Run one forward pass.\n",
    "x = torch.randn(1, 3, 224, 224)\n",
    "y = instance.main(x, w, b, act_scale, w_scales)\n",
    "print(y.shape)  # -> torch.Size([1, 16, 222, 222]); main() convolves with padding=0\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "f4bde9e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tvm\n",
    "from tvm.script import ir as I\n",
    "from tvm.relax import BasePyModule\n",
    "\n",
    "# ===== Pure-NumPy quantization helpers =====\n",
    "def calc_symmetric_qparams(min_v: np.ndarray, max_v: np.ndarray, qmax=127):\n",
    "    # Symmetric int8 scale from observed min/max; zero-point is always 0.\n",
    "    absmax = np.maximum(np.abs(min_v), np.abs(max_v))\n",
    "    absmax = np.where(absmax == 0, 1e-8, absmax)\n",
    "    scale = absmax / qmax\n",
    "    zp = np.zeros_like(scale, dtype=np.int32)\n",
    "    return scale.astype(np.float32), zp\n",
    "\n",
    "def quant_dequant_activation_np(x: np.ndarray, act_scale: float):\n",
    "    # Per-tensor symmetric fake-quantization of activations.\n",
    "    q = np.clip(np.round(x / act_scale), -128, 127).astype(np.int8)\n",
    "    return q.astype(np.float32) * act_scale\n",
    "\n",
    "def quant_dequant_weight_pc_np(w: np.ndarray, w_scales: np.ndarray):\n",
    "    # Per-output-channel fake-quantization; scales broadcast as [oc,1,1,1].\n",
    "    s = w_scales.reshape(-1, 1, 1, 1)\n",
    "    q = np.clip(np.round(w / s), -128, 127).astype(np.int8)\n",
    "    return q.astype(np.float32) * s\n",
    "\n",
    "def conv2d_nchw_np(x, w, b, stride=1, padding=0):\n",
    "    # Naive NCHW convolution reference (explicit loops; slow but easy to verify).\n",
    "    N, C_in, H, W = x.shape\n",
    "    C_out, _, KH, KW = w.shape\n",
    "    H_out = (H + 2*padding - KH) // stride + 1\n",
    "    W_out = (W + 2*padding - KW) // stride + 1\n",
    "    if padding > 0:\n",
    "        x_padded = np.zeros((N, C_in, H + 2*padding, W + 2*padding), dtype=x.dtype)\n",
    "        x_padded[:, :, padding:padding+H, padding:padding+W] = x\n",
    "    else:\n",
    "        x_padded = x\n",
    "    out = np.zeros((N, C_out, H_out, W_out), dtype=np.float32)\n",
    "    for n in range(N):\n",
    "        for oc in range(C_out):\n",
    "            for i in range(H_out):\n",
    "                for j in range(W_out):\n",
    "                    region = x_padded[n, :, i*stride:i*stride+KH, j*stride:j*stride+KW]\n",
    "                    out[n, oc, i, j] = np.sum(region * w[oc]) + b[oc]\n",
    "    return out\n",
    "\n",
    "# ===== Wrap into a BasePyModule =====\n",
    "@I.ir_module\n",
    "class NumpyPTQModule(BasePyModule):\n",
    "    \"\"\"NumPy PTQ simulation module: per-tensor activations + per-channel weights.\"\"\"\n",
    "\n",
    "    @I.pyfunc\n",
    "    def quant_dequant_activation(self, x: np.ndarray, act_scale: float) -> np.ndarray:\n",
    "        return quant_dequant_activation_np(x, act_scale)\n",
    "\n",
    "    @I.pyfunc\n",
    "    def quant_dequant_weight_pc(self, w: np.ndarray, w_scales: np.ndarray) -> np.ndarray:\n",
    "        return quant_dequant_weight_pc_np(w, w_scales)\n",
    "\n",
    "    @I.pyfunc\n",
    "    def conv2d_infer(\n",
    "        self,\n",
    "        x: np.ndarray,\n",
    "        w: np.ndarray,\n",
    "        b: np.ndarray,\n",
    "        stride_h: int,\n",
    "        stride_w: int,\n",
    "        pad_h: int,\n",
    "        pad_w: int,\n",
    "    ) -> np.ndarray:\n",
    "        # Simplification: assumes stride_h == stride_w and pad_h == pad_w.\n",
    "        return conv2d_nchw_np(x, w, b, stride=stride_h, padding=pad_h)\n",
    "\n",
    "    @I.pyfunc\n",
    "    def main(\n",
    "        self,\n",
    "        x: np.ndarray,\n",
    "        w: np.ndarray,\n",
    "        b: np.ndarray,\n",
    "        act_scale: float,\n",
    "        w_scales: np.ndarray,\n",
    "    ) -> np.ndarray:\n",
    "        # 1) Activation Q/DQ\n",
    "        x_qdq = self.quant_dequant_activation(x, act_scale)\n",
    "        # 2) Weight Q/DQ\n",
    "        w_qdq = self.quant_dequant_weight_pc(w, w_scales)\n",
    "        # 3) Inference (stride=1, padding=1 hard-coded)\n",
    "        y = self.conv2d_infer(x_qdq, w_qdq, b, 1, 1, 1, 1)\n",
    "        return y\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "fee17cff",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tvm\n",
    "from tvm.script import ir as I\n",
    "from tvm.relax import BasePyModule\n",
    "\n",
    "# ======== Helper: fold BN into conv (if you start from unfused params, call this first) ========\n",
    "def fold_bn_into_conv(w_conv, b_conv, gamma, beta, mean, var, eps=1e-5):\n",
    "    # w_conv: (C_out, C_in, KH, KW), b_conv: (C_out,)\n",
    "    # BN: y = gamma * (x - mean) / sqrt(var+eps) + beta\n",
    "    C_out = w_conv.shape[0]\n",
    "    if b_conv is None:\n",
    "        b_conv = np.zeros(C_out, dtype=np.float32)\n",
    "    denom = gamma / np.sqrt(var + eps)  # (C_out,)\n",
    "    w_fused = w_conv * denom.reshape(-1, 1, 1, 1)\n",
    "    b_fused = (b_conv - mean) * denom + beta\n",
    "    return w_fused.astype(np.float32), b_fused.astype(np.float32)\n",
    "\n",
    "# ======== Quantize/dequantize (NumPy) ========\n",
    "def qdq_activation(x: np.ndarray, act_scale: float):\n",
    "    # Per-tensor symmetric fake-quantization.\n",
    "    q = np.clip(np.round(x / act_scale), -128, 127).astype(np.int8)\n",
    "    return q.astype(np.float32) * act_scale\n",
    "\n",
    "def qdq_weight_pc(w: np.ndarray, w_scales: np.ndarray):\n",
    "    # Conv weights: per-output-channel scales broadcast as (oc,1,1,1).\n",
    "    s = w_scales.reshape(-1, 1, 1, 1)\n",
    "    q = np.clip(np.round(w / s), -128, 127).astype(np.int8)\n",
    "    return q.astype(np.float32) * s\n",
    "\n",
    "def qdq_weight_pc_linear(w: np.ndarray, w_scales: np.ndarray):\n",
    "    # w: (out_features, in_features); per-output-channel scales\n",
    "    s = w_scales.reshape(-1, 1)\n",
    "    q = np.clip(np.round(w / s), -128, 127).astype(np.int8)\n",
    "    return q.astype(np.float32) * s\n",
    "\n",
    "# ======== Basic ops (kept simple and explicit for easy alignment checks) ========\n",
    "def conv2d_nchw(x, w, b, stride=1, padding=0):\n",
    "    N, C_in, H, W = x.shape\n",
    "    C_out, _, KH, KW = w.shape\n",
    "    H_out = (H + 2*padding - KH) // stride + 1\n",
    "    W_out = (W + 2*padding - KW) // stride + 1\n",
    "    if padding > 0:\n",
    "        x_pad = np.zeros((N, C_in, H + 2*padding, W + 2*padding), dtype=x.dtype)\n",
    "        x_pad[:, :, padding:padding+H, padding:padding+W] = x\n",
    "    else:\n",
    "        x_pad = x\n",
    "    out = np.zeros((N, C_out, H_out, W_out), dtype=np.float32)\n",
    "    for n in range(N):\n",
    "        for oc in range(C_out):\n",
    "            for i in range(H_out):\n",
    "                for j in range(W_out):\n",
    "                    region = x_pad[n, :, i*stride:i*stride+KH, j*stride:j*stride+KW]\n",
    "                    out[n, oc, i, j] = np.sum(region * w[oc]) + (0.0 if b is None else b[oc])\n",
    "    return out\n",
    "\n",
    "def relu(x): return np.maximum(x, 0.0)\n",
    "\n",
    "def maxpool2d_nchw(x, kernel=3, stride=2, padding=1):\n",
    "    N, C, H, W = x.shape\n",
    "    H_out = (H + 2*padding - kernel) // stride + 1\n",
    "    W_out = (W + 2*padding - kernel) // stride + 1\n",
    "    if padding > 0:\n",
    "        # Pad with -inf so padded cells never win the max.\n",
    "        x_pad = np.full((N, C, H + 2*padding, W + 2*padding), -np.inf, dtype=x.dtype)\n",
    "        x_pad[:, :, padding:padding+H, padding:padding+W] = x\n",
    "    else:\n",
    "        x_pad = x\n",
    "    out = np.zeros((N, C, H_out, W_out), dtype=np.float32)\n",
    "    for n in range(N):\n",
    "        for c in range(C):\n",
    "            for i in range(H_out):\n",
    "                for j in range(W_out):\n",
    "                    region = x_pad[n, c, i*stride:i*stride+kernel, j*stride:j*stride+kernel]\n",
    "                    out[n, c, i, j] = np.max(region)\n",
    "    return out\n",
    "\n",
    "def global_avg_pool_nchw(x):\n",
    "    # N,C,H,W -> N,C\n",
    "    return x.mean(axis=(2, 3))\n",
    "\n",
    "def linear(x, w, b):\n",
    "    # x: (N, in_features), w: (out_features, in_features), b: (out_features,)\n",
    "    return x @ w.T + (0.0 if b is None else b)\n",
    "\n",
    "# ======== Topology and naming (matches PyTorch's ResNet18 module names) ========\n",
    "RESNET18_TOPO = [\n",
    "    # (name, stride, padding)\n",
    "    (\"conv1\", 2, 3),\n",
    "    # layer1 (2 blocks, no downsample)\n",
    "    (\"layer1.0.conv1\", 1, 1),\n",
    "    (\"layer1.0.conv2\", 1, 1),\n",
    "    (\"layer1.1.conv1\", 1, 1),\n",
    "    (\"layer1.1.conv2\", 1, 1),\n",
    "    # layer2 (first block has downsample and stride=2 on conv1)\n",
    "    (\"layer2.0.conv1\", 2, 1),\n",
    "    (\"layer2.0.conv2\", 1, 1),\n",
    "    (\"layer2.0.downsample.0\", 2, 0),\n",
    "    (\"layer2.1.conv1\", 1, 1),\n",
    "    (\"layer2.1.conv2\", 1, 1),\n",
    "    # layer3\n",
    "    (\"layer3.0.conv1\", 2, 1),\n",
    "    (\"layer3.0.conv2\", 1, 1),\n",
    "    (\"layer3.0.downsample.0\", 2, 0),\n",
    "    (\"layer3.1.conv1\", 1, 1),\n",
    "    (\"layer3.1.conv2\", 1, 1),\n",
    "    # layer4\n",
    "    (\"layer4.0.conv1\", 2, 1),\n",
    "    (\"layer4.0.conv2\", 1, 1),\n",
    "    (\"layer4.0.downsample.0\", 2, 0),\n",
    "    (\"layer4.1.conv1\", 1, 1),\n",
    "    (\"layer4.1.conv2\", 1, 1),\n",
    "]\n",
    "DOWNSAMPLE_KEYS = {\"layer2.0.downsample.0\", \"layer3.0.downsample.0\", \"layer4.0.downsample.0\"}\n",
    "\n",
    "# Block boundaries: which convs form each residual block, plus its optional downsample.\n",
    "BLOCK_ENDS = {\n",
    "    # (conv names in block, optional downsample name)\n",
    "    \"layer1.0\": ([\"layer1.0.conv1\", \"layer1.0.conv2\"], None),\n",
    "    \"layer1.1\": ([\"layer1.1.conv1\", \"layer1.1.conv2\"], None),\n",
    "    \"layer2.0\": ([\"layer2.0.conv1\", \"layer2.0.conv2\"], \"layer2.0.downsample.0\"),\n",
    "    \"layer2.1\": ([\"layer2.1.conv1\", \"layer2.1.conv2\"], None),\n",
    "    \"layer3.0\": ([\"layer3.0.conv1\", \"layer3.0.conv2\"], \"layer3.0.downsample.0\"),\n",
    "    \"layer3.1\": ([\"layer3.1.conv1\", \"layer3.1.conv2\"], None),\n",
    "    \"layer4.0\": ([\"layer4.0.conv1\", \"layer4.0.conv2\"], \"layer4.0.downsample.0\"),\n",
    "    \"layer4.1\": ([\"layer4.1.conv1\", \"layer4.1.conv2\"], None),\n",
    "}\n",
    "\n",
    "# ======== BasePyModule: walk ResNet18 automatically, per-layer Q/DQ + inference ========\n",
    "@I.ir_module\n",
    "class NumpyPTQResNet18(BasePyModule):\n",
    "    \"\"\"\n",
    "    NumPy ResNet18 PTQ simulation module\n",
    "    - set_params: load all BN-folded conv and FC parameters\n",
    "    - set_quant:  load quantization params per conv/FC (per-tensor acts, per-channel weights)\n",
    "    - main:       end-to-end traversal of the topology, per-layer Q/DQ + inference\n",
    "    \"\"\"\n",
    "\n",
    "    @I.pyfunc\n",
    "    def set_params(self, params: object) -> None:\n",
    "        \"\"\"\n",
    "        params: dict[str, np.ndarray]\n",
    "          - conv: \"<name>.weight\" -> (C_out,C_in,KH,KW), \"<name>.bias\" -> (C_out,)\n",
    "          - FC:   \"fc.weight\" -> (num_classes, 512), \"fc.bias\" -> (num_classes,)\n",
    "          Names follow the keys in RESNET18_TOPO (plus \"fc\").\n",
    "        \"\"\"\n",
    "        self.params = params\n",
    "\n",
    "    @I.pyfunc\n",
    "    def set_quant(self, w_scales: object, act_scales: object, fc_w_scales: np.ndarray, fc_act_scale: float) -> None:\n",
    "        \"\"\"\n",
    "        w_scales: dict[str, np.ndarray]     # per-channel, shape (C_out,)\n",
    "        act_scales: dict[str, float]        # per-tensor, each conv input\n",
    "        fc_w_scales: (num_classes,)\n",
    "        fc_act_scale: float\n",
    "        \"\"\"\n",
    "        self.w_scales = w_scales\n",
    "        self.act_scales = act_scales\n",
    "        self.fc_w_scales = fc_w_scales.astype(np.float32)\n",
    "        self.fc_act_scale = float(fc_act_scale)\n",
    "\n",
    "    @I.pyfunc\n",
    "    def _run_conv_qdq(self, name: str, x: np.ndarray, stride: int, padding: int) -> np.ndarray:\n",
    "        # Q/DQ the activation feeding this conv.\n",
    "        x_q = qdq_activation(x, float(self.act_scales[name]))\n",
    "        # Q/DQ this conv's weight (per-channel).\n",
    "        w = self.params[f\"{name}.weight\"]; b = self.params.get(f\"{name}.bias\", None)\n",
    "        w_q = qdq_weight_pc(w, self.w_scales[name])\n",
    "        # Convolution with the fake-quantized tensors.\n",
    "        y = conv2d_nchw(x_q, w_q, b, stride=stride, padding=padding)\n",
    "        return y\n",
    "\n",
    "    @I.pyfunc\n",
    "    def _run_block(self, x_in: np.ndarray, block_key: str) -> np.ndarray:\n",
    "        # block_key: \"layer{1..4}.{0|1}\"\n",
    "        convs, down = BLOCK_ENDS[block_key]\n",
    "        # Keep the residual input.\n",
    "        identity = x_in\n",
    "\n",
    "        # conv1\n",
    "        y = self._run_conv_qdq(convs[0], x_in, *self._sp(convs[0]))\n",
    "        y = relu(y)\n",
    "        # conv2\n",
    "        y = self._run_conv_qdq(convs[1], y, *self._sp(convs[1]))\n",
    "\n",
    "        # Downsample branch (if present).\n",
    "        if down is not None:\n",
    "            identity = self._run_conv_qdq(down, identity, *self._sp(down))\n",
    "\n",
    "        # Residual add + ReLU.\n",
    "        out = relu(y + identity)\n",
    "        return out\n",
    "\n",
    "    @I.pyfunc\n",
    "    def _sp(self, name: str) -> tuple:\n",
    "        # Return (stride, padding) for a conv name from the topology table.\n",
    "        for k, s, p in RESNET18_TOPO:\n",
    "            if k == name:\n",
    "                return (s, p)\n",
    "        # Should not happen for names drawn from RESNET18_TOPO.\n",
    "        return (1, 0)\n",
    "\n",
    "    @I.pyfunc\n",
    "    def main(self, x: np.ndarray) -> np.ndarray:\n",
    "        # stem\n",
    "        x = self._run_conv_qdq(\"conv1\", x, *self._sp(\"conv1\"))\n",
    "        x = relu(x)\n",
    "        x = maxpool2d_nchw(x, kernel=3, stride=2, padding=1)\n",
    "\n",
    "        # layer1\n",
    "        x = self._run_block(x, \"layer1.0\")\n",
    "        x = self._run_block(x, \"layer1.1\")\n",
    "        # layer2\n",
    "        x = self._run_block(x, \"layer2.0\")\n",
    "        x = self._run_block(x, \"layer2.1\")\n",
    "        # layer3\n",
    "        x = self._run_block(x, \"layer3.0\")\n",
    "        x = self._run_block(x, \"layer3.1\")\n",
    "        # layer4\n",
    "        x = self._run_block(x, \"layer4.0\")\n",
    "        x = self._run_block(x, \"layer4.1\")\n",
    "\n",
    "        # GAP + FC (the FC is also fake-quantized)\n",
    "        x = global_avg_pool_nchw(x)              # (N, 512)\n",
    "        x_q = qdq_activation(x, float(self.fc_act_scale))\n",
    "        w = self.params[\"fc.weight\"]; b = self.params.get(\"fc.bias\", None)\n",
    "        w_q = qdq_weight_pc_linear(w, self.fc_w_scales)  # per-out\n",
    "        y = linear(x_q, w_q, b)                  # (N, num_classes)\n",
    "        return y\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bc953265",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tvm\n",
    "from tvm.script import ir as I\n",
    "from tvm.relax import BasePyModule\n",
    "\n",
    "# NOTE(review): this cell re-defines the helpers and NumpyPTQResNet18 from the\n",
    "# previous cell nearly verbatim, silently shadowing them — keep only one copy.\n",
    "\n",
    "# ========= Optional: fold BN into conv =========\n",
    "def fold_bn_into_conv(w_conv, b_conv, gamma, beta, mean, var, eps=1e-5):\n",
    "    # w_conv: (C_out, C_in, KH, KW), b_conv: (C_out,)\n",
    "    C_out = w_conv.shape[0]\n",
    "    if b_conv is None:\n",
    "        b_conv = np.zeros(C_out, dtype=np.float32)\n",
    "    denom = gamma / np.sqrt(var + eps)  # (C_out,)\n",
    "    w_fused = w_conv * denom.reshape(-1, 1, 1, 1)\n",
    "    b_fused = (b_conv - mean) * denom + beta\n",
    "    return w_fused.astype(np.float32), b_fused.astype(np.float32)\n",
    "\n",
    "# ========= Quantize/dequantize (NumPy) =========\n",
    "def qdq_activation(x: np.ndarray, act_scale: float):\n",
    "    # Per-tensor symmetric fake-quantization.\n",
    "    q = np.clip(np.round(x / act_scale), -128, 127).astype(np.int8)\n",
    "    return q.astype(np.float32) * act_scale\n",
    "\n",
    "def qdq_weight_pc(w: np.ndarray, w_scales: np.ndarray):\n",
    "    s = w_scales.reshape(-1, 1, 1, 1)  # per-out-channel\n",
    "    q = np.clip(np.round(w / s), -128, 127).astype(np.int8)\n",
    "    return q.astype(np.float32) * s\n",
    "\n",
    "def qdq_weight_pc_linear(w: np.ndarray, w_scales: np.ndarray):\n",
    "    s = w_scales.reshape(-1, 1)  # per-out-channel\n",
    "    q = np.clip(np.round(w / s), -128, 127).astype(np.int8)\n",
    "    return q.astype(np.float32) * s\n",
    "\n",
    "# ========= Basic ops (NumPy) =========\n",
    "def conv2d_nchw(x, w, b, stride=1, padding=0):\n",
    "    N, C_in, H, W = x.shape\n",
    "    C_out, _, KH, KW = w.shape\n",
    "    H_out = (H + 2*padding - KH) // stride + 1\n",
    "    W_out = (W + 2*padding - KW) // stride + 1\n",
    "    if padding > 0:\n",
    "        x_pad = np.zeros((N, C_in, H + 2*padding, W + 2*padding), dtype=x.dtype)\n",
    "        x_pad[:, :, padding:padding+H, padding:padding+W] = x\n",
    "    else:\n",
    "        x_pad = x\n",
    "    out = np.zeros((N, C_out, H_out, W_out), dtype=np.float32)\n",
    "    for n in range(N):\n",
    "        for oc in range(C_out):\n",
    "            for i in range(H_out):\n",
    "                for j in range(W_out):\n",
    "                    region = x_pad[n, :, i*stride:i*stride+KH, j*stride:j*stride+KW]\n",
    "                    out[n, oc, i, j] = np.sum(region * w[oc]) + (0.0 if b is None else b[oc])\n",
    "    return out\n",
    "\n",
    "def relu(x): return np.maximum(x, 0.0)\n",
    "\n",
    "def maxpool2d_nchw(x, kernel=3, stride=2, padding=1):\n",
    "    N, C, H, W = x.shape\n",
    "    H_out = (H + 2*padding - kernel) // stride + 1\n",
    "    W_out = (W + 2*padding - kernel) // stride + 1\n",
    "    if padding > 0:\n",
    "        # Pad with -inf so padded cells never win the max.\n",
    "        x_pad = np.full((N, C, H + 2*padding, W + 2*padding), -np.inf, dtype=x.dtype)\n",
    "        x_pad[:, :, padding:padding+H, padding:padding+W] = x\n",
    "    else:\n",
    "        x_pad = x\n",
    "    out = np.zeros((N, C, H_out, W_out), dtype=np.float32)\n",
    "    for n in range(N):\n",
    "        for c in range(C):\n",
    "            for i in range(H_out):\n",
    "                for j in range(W_out):\n",
    "                    region = x_pad[n, c, i*stride:i*stride+kernel, j*stride:j*stride+kernel]\n",
    "                    out[n, c, i, j] = np.max(region)\n",
    "    return out\n",
    "\n",
    "def global_avg_pool_nchw(x):\n",
    "    # N,C,H,W -> N,C\n",
    "    return x.mean(axis=(2, 3))\n",
    "\n",
    "def linear(x, w, b):\n",
    "    # x: (N, in_features), w: (out_features, in_features)\n",
    "    return x @ w.T + (0.0 if b is None else b)\n",
    "\n",
    "# ========= ResNet18 topology and block boundaries =========\n",
    "RESNET18_TOPO = [\n",
    "    (\"conv1\", 2, 3),\n",
    "    # layer1\n",
    "    (\"layer1.0.conv1\", 1, 1),\n",
    "    (\"layer1.0.conv2\", 1, 1),\n",
    "    (\"layer1.1.conv1\", 1, 1),\n",
    "    (\"layer1.1.conv2\", 1, 1),\n",
    "    # layer2\n",
    "    (\"layer2.0.conv1\", 2, 1),\n",
    "    (\"layer2.0.conv2\", 1, 1),\n",
    "    (\"layer2.0.downsample.0\", 2, 0),\n",
    "    (\"layer2.1.conv1\", 1, 1),\n",
    "    (\"layer2.1.conv2\", 1, 1),\n",
    "    # layer3\n",
    "    (\"layer3.0.conv1\", 2, 1),\n",
    "    (\"layer3.0.conv2\", 1, 1),\n",
    "    (\"layer3.0.downsample.0\", 2, 0),\n",
    "    (\"layer3.1.conv1\", 1, 1),\n",
    "    (\"layer3.1.conv2\", 1, 1),\n",
    "    # layer4\n",
    "    (\"layer4.0.conv1\", 2, 1),\n",
    "    (\"layer4.0.conv2\", 1, 1),\n",
    "    (\"layer4.0.downsample.0\", 2, 0),\n",
    "    (\"layer4.1.conv1\", 1, 1),\n",
    "    (\"layer4.1.conv2\", 1, 1),\n",
    "]\n",
    "\n",
    "BLOCK_ENDS = {\n",
    "    \"layer1.0\": ([\"layer1.0.conv1\", \"layer1.0.conv2\"], None),\n",
    "    \"layer1.1\": ([\"layer1.1.conv1\", \"layer1.1.conv2\"], None),\n",
    "    \"layer2.0\": ([\"layer2.0.conv1\", \"layer2.0.conv2\"], \"layer2.0.downsample.0\"),\n",
    "    \"layer2.1\": ([\"layer2.1.conv1\", \"layer2.1.conv2\"], None),\n",
    "    \"layer3.0\": ([\"layer3.0.conv1\", \"layer3.0.conv2\"], \"layer3.0.downsample.0\"),\n",
    "    \"layer3.1\": ([\"layer3.1.conv1\", \"layer3.1.conv2\"], None),\n",
    "    \"layer4.0\": ([\"layer4.0.conv1\", \"layer4.0.conv2\"], \"layer4.0.downsample.0\"),\n",
    "    \"layer4.1\": ([\"layer4.1.conv1\", \"layer4.1.conv2\"], None),\n",
    "}\n",
    "\n",
    "# ========= BasePyModule: automatic traversal + per-layer Q/DQ =========\n",
    "@I.ir_module\n",
    "class NumpyPTQResNet18(BasePyModule):\n",
    "    \"\"\"\n",
    "    NumPy ResNet18 PTQ simulation\n",
    "    - set_params: load all BN-folded conv and FC parameters\n",
    "    - set_quant:  load quantization params per conv/FC (per-tensor acts, per-channel weights)\n",
    "    - main:       end-to-end traversal of the topology, per-layer Q/DQ + inference\n",
    "    \"\"\"\n",
    "\n",
    "    @I.pyfunc\n",
    "    def set_params(self, params: object) -> None:\n",
    "        \"\"\"\n",
    "        params: dict[str, np.ndarray]\n",
    "          conv:  \"<name>.weight\" (C_out,C_in,KH,KW), \"<name>.bias\" (C_out,)\n",
    "          FC:    \"fc.weight\" (num_classes, 512), \"fc.bias\" (num_classes,)\n",
    "        \"\"\"\n",
    "        self.params = params\n",
    "\n",
    "    @I.pyfunc\n",
    "    def set_quant(\n",
    "        self,\n",
    "        w_scales: object,\n",
    "        act_scales: object,\n",
    "        fc_w_scales: np.ndarray,\n",
    "        fc_act_scale: float\n",
    "    ) -> None:\n",
    "        \"\"\"\n",
    "        w_scales:   dict[str, np.ndarray]  # per-channel, shape (C_out,)\n",
    "        act_scales: dict[str, float]       # per-tensor, one per conv input\n",
    "        fc_w_scales: np.ndarray            # (num_classes,)\n",
    "        fc_act_scale: float\n",
    "        \"\"\"\n",
    "        self.w_scales = w_scales\n",
    "        self.act_scales = act_scales\n",
    "        self.fc_w_scales = fc_w_scales.astype(np.float32)\n",
    "        self.fc_act_scale = float(fc_act_scale)\n",
    "\n",
    "    @I.pyfunc\n",
    "    def _sp(self, name: str) -> tuple:\n",
    "        # Return (stride, padding) for a conv name from the topology table.\n",
    "        for k, s, p in RESNET18_TOPO:\n",
    "            if k == name:\n",
    "                return (s, p)\n",
    "        return (1, 0)\n",
    "\n",
    "    @I.pyfunc\n",
    "    def _run_conv_qdq(self, name: str, x: np.ndarray, stride: int, padding: int) -> np.ndarray:\n",
    "        # Q/DQ the activation feeding this conv.\n",
    "        x_q = qdq_activation(x, float(self.act_scales[name]))\n",
    "        # Q/DQ this conv's weight (per-channel).\n",
    "        w = self.params[f\"{name}.weight\"]; b = self.params.get(f\"{name}.bias\", None)\n",
    "        w_q = qdq_weight_pc(w, self.w_scales[name])\n",
    "        # Convolution with the fake-quantized tensors.\n",
    "        y = conv2d_nchw(x_q, w_q, b, stride=stride, padding=padding)\n",
    "        return y\n",
    "\n",
    "    @I.pyfunc\n",
    "    def _run_block(self, x_in: np.ndarray, block_key: str) -> np.ndarray:\n",
    "        # block_key: \"layer{1..4}.{0|1}\"\n",
    "        convs, down = BLOCK_ENDS[block_key]\n",
    "        identity = x_in\n",
    "\n",
    "        # conv1\n",
    "        y = self._run_conv_qdq(convs[0], x_in, *self._sp(convs[0]))\n",
    "        y = relu(y)\n",
    "        # conv2\n",
    "        y = self._run_conv_qdq(convs[1], y, *self._sp(convs[1]))\n",
    "\n",
    "        # Downsample branch (optional).\n",
    "        if down is not None:\n",
    "            identity = self._run_conv_qdq(down, identity, *self._sp(down))\n",
    "\n",
    "        # Residual add + ReLU.\n",
    "        out = relu(y + identity)\n",
    "        return out\n",
    "\n",
    "    @I.pyfunc\n",
    "    def main(self, x: np.ndarray) -> np.ndarray:\n",
    "        # stem\n",
    "        x = self._run_conv_qdq(\"conv1\", x, *self._sp(\"conv1\"))\n",
    "        x = relu(x)\n",
    "        x = maxpool2d_nchw(x, kernel=3, stride=2, padding=1)\n",
    "\n",
    "        # layer1\n",
    "        x = self._run_block(x, \"layer1.0\")\n",
    "        x = self._run_block(x, \"layer1.1\")\n",
    "        # layer2\n",
    "        x = self._run_block(x, \"layer2.0\")\n",
    "        x = self._run_block(x, \"layer2.1\")\n",
    "        # layer3\n",
    "        x = self._run_block(x, \"layer3.0\")\n",
    "        x = self._run_block(x, \"layer3.1\")\n",
    "        # layer4\n",
    "        x = self._run_block(x, \"layer4.0\")\n",
    "        x = self._run_block(x, \"layer4.1\")\n",
    "\n",
    "        # GAP + FC (the FC is also fake-quantized)\n",
    "        x = global_avg_pool_nchw(x)              # (N, 512)\n",
    "        x_q = qdq_activation(x, float(self.fc_act_scale))\n",
    "        w = self.params[\"fc.weight\"]; b = self.params.get(\"fc.bias\", None)\n",
    "        w_q = qdq_weight_pc_linear(w, self.fc_w_scales)  # per-out-channel\n",
    "        y = linear(x_q, w_q, b)                  # (N, num_classes)\n",
    "        return y\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "55e43704",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tvm\n",
    "\n",
    "# 1) Device and module instance\n",
    "device = tvm.cpu(0)\n",
    "Module = NumpyPTQResNet18\n",
    "inst = Module(device)\n",
    "\n",
    "# 2) Build weights (random placeholders for this example)\n",
    "def make_shape_map():\n",
    "    # Conv weight shapes of torchvision's ResNet18, keyed by module name.\n",
    "    return {\n",
    "        \"conv1\": (64, 3, 7, 7),\n",
    "        \"layer1.0.conv1\": (64, 64, 3, 3),\n",
    "        \"layer1.0.conv2\": (64, 64, 3, 3),\n",
    "        \"layer1.1.conv1\": (64, 64, 3, 3),\n",
    "        \"layer1.1.conv2\": (64, 64, 3, 3),\n",
    "        \"layer2.0.conv1\": (128, 64, 3, 3),\n",
    "        \"layer2.0.conv2\": (128, 128, 3, 3),\n",
    "        \"layer2.0.downsample.0\": (128, 64, 1, 1),\n",
    "        \"layer2.1.conv1\": (128, 128, 3, 3),\n",
    "        \"layer2.1.conv2\": (128, 128, 3, 3),\n",
    "        \"layer3.0.conv1\": (256, 128, 3, 3),\n",
    "        \"layer3.0.conv2\": (256, 256, 3, 3),\n",
    "        \"layer3.0.downsample.0\": (256, 128, 1, 1),\n",
    "        \"layer3.1.conv1\": (256, 256, 3, 3),\n",
    "        \"layer3.1.conv2\": (256, 256, 3, 3),\n",
    "        \"layer4.0.conv1\": (512, 256, 3, 3),\n",
    "        \"layer4.0.conv2\": (512, 512, 3, 3),\n",
    "        \"layer4.0.downsample.0\": (512, 256, 1, 1),\n",
    "        \"layer4.1.conv1\": (512, 512, 3, 3),\n",
    "        \"layer4.1.conv2\": (512, 512, 3, 3),\n",
    "    }\n",
    "\n",
    "np.random.seed(0)\n",
    "shapes = make_shape_map()\n",
    "params = {}\n",
    "for name, shape in shapes.items():\n",
    "    params[f\"{name}.weight\"] = np.random.randn(*shape).astype(np.float32)\n",
    "    params[f\"{name}.bias\"] = np.random.randn(shape[0]).astype(np.float32)\n",
    "\n",
    "num_classes = 1000\n",
    "params[\"fc.weight\"] = np.random.randn(num_classes, 512).astype(np.float32)\n",
    "params[\"fc.bias\"] = np.random.randn(num_classes).astype(np.float32)\n",
    "\n",
    "inst.set_params(params)\n",
    "\n",
    "# 3) Quantization params (random/constant placeholders; should come from calibration)\n",
    "act_scales = {}\n",
    "w_scales = {}\n",
    "for name, shape in shapes.items():\n",
    "    C_out = shape[0]\n",
    "    act_scales[name] = float(0.05)  # per-tensor\n",
    "    # per-channel scales; keep strictly positive to avoid division by zero\n",
    "    w_scales[name] = (np.random.rand(C_out).astype(np.float32) * 0.1 + 1e-3)\n",
    "\n",
    "fc_act_scale = float(0.05)\n",
    "fc_w_scales = (np.random.rand(num_classes).astype(np.float32) * 0.1 + 1e-3)\n",
    "\n",
    "inst.set_quant(w_scales, act_scales, fc_w_scales, fc_act_scale)\n",
    "\n",
    "# 4) End-to-end inference\n",
    "# NOTE(review): the pure-Python conv loops make a 224x224 forward very slow here.\n",
    "x = np.random.randn(1, 3, 224, 224).astype(np.float32)\n",
    "y = inst.main(x)\n",
    "print(y.shape, y.dtype)  # (1, num_classes) float32\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py313",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
