{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# PyTorch pass"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `torch._C._jit_pass_lower_all_tuples`\n",
    "\n",
    "来源：[[Bug] [Frontend][Pytorch] Relay IR is inconsistent with that of the original model](https://discuss.tvm.apache.org/t/bug-frontend-pytorch-relay-ir-is-inconsistent-with-that-of-the-original-model/12010)\n",
    "\n",
    "`torch._C._jit_pass_lower_all_tuples` 是 PyTorch 的内部函数，用于在 TorchScript 编译过程中对 IR 中的元组进行降低（lower）处理：把 Python 元组在图中展开为独立的值，从而消除元组结构，便于后续在 TorchScript 环境中进行优化与执行。\n",
    "\n",
    "具体来说，`torch._C._jit_pass_lower_all_tuples` 函数会遍历 TorchScript IR（Intermediate Representation，中间表示）中的所有节点，找到元组构造与元组解包（如 `prim::TupleConstruct`、`prim::TupleUnpack`）节点并将其消除，让元组中的各个元素以独立值的形式直接参与数据流。这样，在后续的优化、变换以及前端转换（例如 TVM 的 `relay.frontend.from_pytorch`）过程中，就可以直接处理这些独立的值，而不需要额外的元组桥接代码。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import set_env"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "fn (%input: Tensor[(1, 3, 128, 128), float32] /* span=aten::quantize_per_tensor_0.input:0:0 */, %backbone.conv1.conv_weight: Tensor[(16, 3, 3, 3), float32] /* span=quantized::conv2d_relu_0:0:0 */, %backbone.conv1.conv_bias: Tensor[(16), float32] /* span=quantized::conv2d_relu_0:0:0 */, %backbone.conv2.depth_wise.conv_weight: Tensor[(16, 1, 3, 3), float32] /* span=quantized::conv2d_relu_1:0:0 */, %backbone.conv2.depth_wise.conv_bias: Tensor[(16), float32] /* span=quantized::conv2d_relu_1:0:0 */, %backbone.conv2.point_wise.conv_weight: Tensor[(32, 16, 1, 1), float32] /* span=quantized::conv2d_relu_2:0:0 */, %backbone.conv2.point_wise.conv_bias: Tensor[(32), float32] /* span=quantized::conv2d_relu_2:0:0 */) {\n",
      "  %0 = qnn.quantize(%input, 0.0347108f /* span=aten::quantize_per_tensor_0:0:0 */, 125 /* span=aten::quantize_per_tensor_0:0:0 */, out_dtype=\"uint8\", axis=1) /* span=aten::quantize_per_tensor_0:0:0 */;\n",
      "  %1 = nn.pad(%0, 125f /* span=quantized::conv2d_relu_0:0:0 */, pad_width=[[0, 0], [0, 0], [1, 1], [1, 1]]) /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %2 = qnn.quantize(%backbone.conv1.conv_weight, 0.00150606f /* span=quantized::conv2d_relu_0:0:0 */, 0 /* span=quantized::conv2d_relu_0:0:0 */, out_dtype=\"int8\", axis=0) /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %3 = qnn.conv2d(%1, %2, 125 /* span=quantized::conv2d_relu_0:0:0 */, 0 /* span=quantized::conv2d_relu_0:0:0 */, 0.0347108f /* span=quantized::conv2d_relu_0:0:0 */, 0.00150606f /* span=quantized::conv2d_relu_0:0:0 */, strides=[2, 2], padding=[0, 0, 0, 0], channels=16, kernel_size=[3, 3], out_dtype=\"int32\") /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %4 = qnn.quantize(%backbone.conv1.conv_bias, 5.22766e-05f /* span=quantized::conv2d_relu_0:0:0 */, 0 /* span=quantized::conv2d_relu_0:0:0 */, out_dtype=\"int32\", axis=0) /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %5 = nn.bias_add(%3, %4) /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %6 = qnn.requantize(%5, 5.22766e-05f /* span=quantized::conv2d_relu_0:0:0 */, 0 /* span=quantized::conv2d_relu_0:0:0 */, 0.0132984f /* span=quantized::conv2d_relu_0:0:0 */, 0 /* span=quantized::conv2d_relu_0:0:0 */, axis=1, out_dtype=\"int32\") /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %7 = clip(%6, a_min=0f, a_max=255f) /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %8 = cast(%7, dtype=\"uint8\") /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %9 = nn.pad(%8, 0f /* span=quantized::conv2d_relu_1:0:0 */, pad_width=[[0, 0], [0, 0], [1, 1], [1, 1]]) /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %10 = qnn.quantize(%backbone.conv2.depth_wise.conv_weight, 0.00256311f /* span=quantized::conv2d_relu_1:0:0 */, 0 /* span=quantized::conv2d_relu_1:0:0 */, out_dtype=\"int8\", axis=0) /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %11 = qnn.conv2d(%9, %10, 0 /* span=quantized::conv2d_relu_1:0:0 */, 0 /* span=quantized::conv2d_relu_1:0:0 */, 0.0132984f /* span=quantized::conv2d_relu_1:0:0 */, 0.00256311f /* span=quantized::conv2d_relu_1:0:0 */, padding=[0, 0, 0, 0], groups=16, channels=16, kernel_size=[3, 3], out_dtype=\"int32\") /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %12 = qnn.quantize(%backbone.conv2.depth_wise.conv_bias, 3.40854e-05f /* span=quantized::conv2d_relu_1:0:0 */, 0 /* span=quantized::conv2d_relu_1:0:0 */, out_dtype=\"int32\", axis=0) /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %13 = nn.bias_add(%11, %12) /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %14 = qnn.requantize(%13, 3.40854e-05f /* span=quantized::conv2d_relu_1:0:0 */, 0 /* span=quantized::conv2d_relu_1:0:0 */, 0.00509362f /* span=quantized::conv2d_relu_1:0:0 */, 0 /* span=quantized::conv2d_relu_1:0:0 */, axis=1, out_dtype=\"int32\") /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %15 = clip(%14, a_min=0f, a_max=255f) /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %16 = cast(%15, dtype=\"uint8\") /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %17 = qnn.quantize(%backbone.conv2.point_wise.conv_weight, 0.00195794f /* span=quantized::conv2d_relu_2:0:0 */, 0 /* span=quantized::conv2d_relu_2:0:0 */, out_dtype=\"int8\", axis=0) /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %18 = qnn.conv2d(%16, %17, 0 /* span=quantized::conv2d_relu_2:0:0 */, 0 /* span=quantized::conv2d_relu_2:0:0 */, 0.00509362f /* span=quantized::conv2d_relu_2:0:0 */, 0.00195794f /* span=quantized::conv2d_relu_2:0:0 */, padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1], out_dtype=\"int32\") /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %19 = qnn.quantize(%backbone.conv2.point_wise.conv_bias, 9.97299e-06f /* span=quantized::conv2d_relu_2:0:0 */, 0 /* span=quantized::conv2d_relu_2:0:0 */, out_dtype=\"int32\", axis=0) /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %20 = nn.bias_add(%18, %19) /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %21 = qnn.requantize(%20, 9.97299e-06f /* span=quantized::conv2d_relu_2:0:0 */, 0 /* span=quantized::conv2d_relu_2:0:0 */, 0.00208748f /* span=quantized::conv2d_relu_2:0:0 */, 0 /* span=quantized::conv2d_relu_2:0:0 */, axis=1, out_dtype=\"int32\") /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %22 = clip(%21, a_min=0f, a_max=255f) /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %23 = cast(%22, dtype=\"uint8\") /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %24 = qnn.dequantize(%8, 0.0132984f /* span=aten::dequantize_0:0:0 */, 0 /* span=aten::dequantize_0:0:0 */, out_dtype=\"float32\") /* span=aten::dequantize_0:0:0 */;\n",
      "  %25 = qnn.dequantize(%23, 0.00208748f /* span=aten::dequantize_1:0:0 */, 0 /* span=aten::dequantize_1:0:0 */, out_dtype=\"float32\") /* span=aten::dequantize_1:0:0 */;\n",
      "  (%24, %25)\n",
      "}\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "from torch.quantization import QuantStub, DeQuantStub\n",
    "from torch.quantization import prepare_qat, get_default_qat_qconfig, convert\n",
    "from tvm import relay\n",
    "import numpy as np\n",
    "\n",
    "\n",
    "class ConvBnRelu(nn.Module):\n",
    "    \"\"\"Conv2d -> BatchNorm2d -> ReLU block; acts as a depthwise conv when groups > 1.\"\"\"\n",
    "    def __init__(self, inp, oup, kernel_size=3, stride=1, padding=1, bias=True, groups=1):\n",
    "        super(ConvBnRelu, self).__init__()\n",
    "        if groups > 1:\n",
    "            # Depthwise branch: in/out channels are both `inp`; `oup` is ignored here.\n",
    "            self.conv = nn.Conv2d(inp, inp, kernel_size, stride, padding, bias=bias, groups=groups)\n",
    "            self.bn = nn.BatchNorm2d(inp)\n",
    "        else:\n",
    "            self.conv = nn.Conv2d(inp, oup, kernel_size, stride, padding, bias=bias, groups=groups)\n",
    "            self.bn = nn.BatchNorm2d(oup)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        x = self.conv(inputs)\n",
    "        x = self.bn(x)\n",
    "        x = self.relu(x)\n",
    "        return x\n",
    "\n",
    "\n",
    "def conv_bn(inp, oup, stride=1, width_multiplier=1):\n",
    "    \"\"\"Standard 3x3 conv-bn-relu block.\"\"\"\n",
    "    return ConvBnRelu(inp, oup, kernel_size=3, stride=stride, padding=1, bias=False)\n",
    "\n",
    "\n",
    "def conv_dw(inp, oup, stride, width_multiplier=1, padding=1):\n",
    "    \"\"\"Depthwise-separable block: 3x3 depthwise conv followed by 1x1 pointwise conv.\"\"\"\n",
    "    dw_block = nn.Sequential()\n",
    "    depth_wise = ConvBnRelu(inp, oup, kernel_size=3, stride=stride, padding=padding, bias=False, groups=inp)\n",
    "    point_wise = ConvBnRelu(inp, oup, kernel_size=1, stride=1, padding=0, bias=False)\n",
    "\n",
    "    dw_block.add_module('depth_wise', depth_wise)\n",
    "    dw_block.add_module('point_wise', point_wise)\n",
    "\n",
    "    return dw_block\n",
    "\n",
    "\n",
    "class Backbone(nn.Module):\n",
    "    \"\"\"Two-stage backbone that returns both intermediate feature maps.\"\"\"\n",
    "    def __init__(self, width_multiplier=1):\n",
    "        super(Backbone, self).__init__()\n",
    "        self.width_multiplier = width_multiplier\n",
    "        self.conv1 = conv_bn(3, 16, 2, self.width_multiplier)\n",
    "        self.conv2 = conv_dw(16, 32, 1, self.width_multiplier)\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        x1 = self.conv1(inputs)\n",
    "        x2 = self.conv2(x1)\n",
    "        # Returning a list of two tensors produces a tuple in the traced graph.\n",
    "        return [x1, x2]\n",
    "\n",
    "\n",
    "class QuantizableBackbone(nn.Module):\n",
    "    \"\"\"Wraps Backbone with quant/dequant stubs for PyTorch eager-mode quantization.\"\"\"\n",
    "    def __init__(self, inputsize=(128, 128)):\n",
    "        super(QuantizableBackbone, self).__init__()\n",
    "        self.quant = QuantStub()\n",
    "        self.dequant = DeQuantStub()\n",
    "        self.backbone = Backbone()\n",
    "\n",
    "    def fuse_model(self):\n",
    "        # Fuse conv+bn+relu so quantization sees a single fused module.\n",
    "        for idx, m in enumerate(self.modules()):\n",
    "            if type(m) == ConvBnRelu:\n",
    "                torch.quantization.fuse_modules(m, ['conv', 'bn', 'relu'], inplace=True)\n",
    "\n",
    "    def forward(self, input):\n",
    "        input = self.quant(input)\n",
    "        y0, y1 = self.backbone(input)\n",
    "        # Each output gets its own dequantize (and hence its own scale).\n",
    "        y0 = self.dequant(y0)\n",
    "        y1 = self.dequant(y1)\n",
    "        return y0, y1\n",
    "\n",
    "\n",
    "# QAT workflow: fuse -> prepare_qat -> calibrate -> convert to int8.\n",
    "fp32_input = torch.randn(1, 3, 128, 128)\n",
    "model = QuantizableBackbone()\n",
    "model.eval()\n",
    "model.fuse_model()\n",
    "model.qconfig = get_default_qat_qconfig(\"qnnpack\")\n",
    "model.train()\n",
    "prepare_qat(model, inplace=True)\n",
    "model.eval()\n",
    "model(fp32_input)  # one forward pass to populate observer statistics\n",
    "\n",
    "model_int8 = convert(model, inplace=True)\n",
    "script_module = torch.jit.trace(model_int8, fp32_input).eval()\n",
    "\n",
    "input_infos = [(\"input\", (fp32_input.shape, \"float32\"))]\n",
    "mod, _ = relay.frontend.from_pytorch(script_module, input_infos)\n",
    "print(mod[\"main\"])\n",
    "# The Relay body must be a 2-tuple of distinct dequantize ops with different scales.\n",
    "output = mod[\"main\"].body\n",
    "assert isinstance(output, relay.Tuple) and len(output) == 2\n",
    "dq1, dq2 = output\n",
    "assert str(dq1.op) == str(dq2.op) == 'Op(qnn.dequantize)'\n",
    "scale1 = dq1.args[1].data.numpy().item()\n",
    "scale2 = dq2.args[1].data.numpy().item()\n",
    "assert scale1 != scale2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "fn (%input: Tensor[(1, 3, 128, 128), float32] /* span=aten::quantize_per_tensor_0.input:0:0 */, %backbone.conv1.conv_weight: Tensor[(16, 3, 3, 3), float32] /* span=quantized::conv2d_relu_0:0:0 */, %backbone.conv1.conv_bias: Tensor[(16), float32] /* span=quantized::conv2d_relu_0:0:0 */, %backbone.conv2.depth_wise.conv_weight: Tensor[(16, 1, 3, 3), float32] /* span=quantized::conv2d_relu_1:0:0 */, %backbone.conv2.depth_wise.conv_bias: Tensor[(16), float32] /* span=quantized::conv2d_relu_1:0:0 */, %backbone.conv2.point_wise.conv_weight: Tensor[(32, 16, 1, 1), float32] /* span=quantized::conv2d_relu_2:0:0 */, %backbone.conv2.point_wise.conv_bias: Tensor[(32), float32] /* span=quantized::conv2d_relu_2:0:0 */) {\n",
      "  %0 = qnn.quantize(%input, 0.0347108f /* span=aten::quantize_per_tensor_0:0:0 */, 125 /* span=aten::quantize_per_tensor_0:0:0 */, out_dtype=\"uint8\", axis=1) /* span=aten::quantize_per_tensor_0:0:0 */;\n",
      "  %1 = nn.pad(%0, 125f /* span=quantized::conv2d_relu_0:0:0 */, pad_width=[[0, 0], [0, 0], [1, 1], [1, 1]]) /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %2 = qnn.quantize(%backbone.conv1.conv_weight, 0.00150606f /* span=quantized::conv2d_relu_0:0:0 */, 0 /* span=quantized::conv2d_relu_0:0:0 */, out_dtype=\"int8\", axis=0) /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %3 = qnn.conv2d(%1, %2, 125 /* span=quantized::conv2d_relu_0:0:0 */, 0 /* span=quantized::conv2d_relu_0:0:0 */, 0.0347108f /* span=quantized::conv2d_relu_0:0:0 */, 0.00150606f /* span=quantized::conv2d_relu_0:0:0 */, strides=[2, 2], padding=[0, 0, 0, 0], channels=16, kernel_size=[3, 3], out_dtype=\"int32\") /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %4 = qnn.quantize(%backbone.conv1.conv_bias, 5.22766e-05f /* span=quantized::conv2d_relu_0:0:0 */, 0 /* span=quantized::conv2d_relu_0:0:0 */, out_dtype=\"int32\", axis=0) /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %5 = nn.bias_add(%3, %4) /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %6 = qnn.requantize(%5, 5.22766e-05f /* span=quantized::conv2d_relu_0:0:0 */, 0 /* span=quantized::conv2d_relu_0:0:0 */, 0.0132984f /* span=quantized::conv2d_relu_0:0:0 */, 0 /* span=quantized::conv2d_relu_0:0:0 */, axis=1, out_dtype=\"int32\") /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %7 = clip(%6, a_min=0f, a_max=255f) /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %8 = cast(%7, dtype=\"uint8\") /* span=quantized::conv2d_relu_0:0:0 */;\n",
      "  %9 = nn.pad(%8, 0f /* span=quantized::conv2d_relu_1:0:0 */, pad_width=[[0, 0], [0, 0], [1, 1], [1, 1]]) /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %10 = qnn.quantize(%backbone.conv2.depth_wise.conv_weight, 0.00256311f /* span=quantized::conv2d_relu_1:0:0 */, 0 /* span=quantized::conv2d_relu_1:0:0 */, out_dtype=\"int8\", axis=0) /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %11 = qnn.conv2d(%9, %10, 0 /* span=quantized::conv2d_relu_1:0:0 */, 0 /* span=quantized::conv2d_relu_1:0:0 */, 0.0132984f /* span=quantized::conv2d_relu_1:0:0 */, 0.00256311f /* span=quantized::conv2d_relu_1:0:0 */, padding=[0, 0, 0, 0], groups=16, channels=16, kernel_size=[3, 3], out_dtype=\"int32\") /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %12 = qnn.quantize(%backbone.conv2.depth_wise.conv_bias, 3.40854e-05f /* span=quantized::conv2d_relu_1:0:0 */, 0 /* span=quantized::conv2d_relu_1:0:0 */, out_dtype=\"int32\", axis=0) /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %13 = nn.bias_add(%11, %12) /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %14 = qnn.requantize(%13, 3.40854e-05f /* span=quantized::conv2d_relu_1:0:0 */, 0 /* span=quantized::conv2d_relu_1:0:0 */, 0.00509362f /* span=quantized::conv2d_relu_1:0:0 */, 0 /* span=quantized::conv2d_relu_1:0:0 */, axis=1, out_dtype=\"int32\") /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %15 = clip(%14, a_min=0f, a_max=255f) /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %16 = cast(%15, dtype=\"uint8\") /* span=quantized::conv2d_relu_1:0:0 */;\n",
      "  %17 = qnn.quantize(%backbone.conv2.point_wise.conv_weight, 0.00195794f /* span=quantized::conv2d_relu_2:0:0 */, 0 /* span=quantized::conv2d_relu_2:0:0 */, out_dtype=\"int8\", axis=0) /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %18 = qnn.conv2d(%16, %17, 0 /* span=quantized::conv2d_relu_2:0:0 */, 0 /* span=quantized::conv2d_relu_2:0:0 */, 0.00509362f /* span=quantized::conv2d_relu_2:0:0 */, 0.00195794f /* span=quantized::conv2d_relu_2:0:0 */, padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1], out_dtype=\"int32\") /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %19 = qnn.quantize(%backbone.conv2.point_wise.conv_bias, 9.97299e-06f /* span=quantized::conv2d_relu_2:0:0 */, 0 /* span=quantized::conv2d_relu_2:0:0 */, out_dtype=\"int32\", axis=0) /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %20 = nn.bias_add(%18, %19) /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %21 = qnn.requantize(%20, 9.97299e-06f /* span=quantized::conv2d_relu_2:0:0 */, 0 /* span=quantized::conv2d_relu_2:0:0 */, 0.00208748f /* span=quantized::conv2d_relu_2:0:0 */, 0 /* span=quantized::conv2d_relu_2:0:0 */, axis=1, out_dtype=\"int32\") /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %22 = clip(%21, a_min=0f, a_max=255f) /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %23 = cast(%22, dtype=\"uint8\") /* span=quantized::conv2d_relu_2:0:0 */;\n",
      "  %24 = qnn.dequantize(%8, 0.0132984f /* span=aten::dequantize_0:0:0 */, 0 /* span=aten::dequantize_0:0:0 */, out_dtype=\"float32\") /* span=aten::dequantize_0:0:0 */;\n",
      "  %25 = qnn.dequantize(%23, 0.00208748f /* span=aten::dequantize_1:0:0 */, 0 /* span=aten::dequantize_1:0:0 */, out_dtype=\"float32\") /* span=aten::dequantize_1:0:0 */;\n",
      "  (%24, %25)\n",
      "}\n"
     ]
    }
   ],
   "source": [
    "# Round-trip the quantized model through TorchScript serialization, then\n",
    "# re-import it with the TVM frontend and print the resulting Relay IR.\n",
    "torch.jit.save(torch.jit.trace(model_int8, fp32_input), \"quantized_model.pt\")\n",
    "model = torch.jit.load(\"quantized_model.pt\")\n",
    "# model = torch.jit.trace(model, fp32_input).eval()\n",
    "# NOTE(review): `torch.jit.load` already yields a ScriptModule; the extra\n",
    "# `torch.jit.script` call below presumably passes it through unchanged — confirm.\n",
    "model = torch.jit.script(model)\n",
    "mod, params = relay.frontend.from_pytorch(model, input_infos)\n",
    "print(mod[\"main\"])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "xin",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
