{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# fold-explicit-padding\n",
    "\n",
    "参考：`tvm/tests/python/relay/test_pass_fold_explicit_padding.py`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import testing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tvm\n",
    "from tvm import relay\n",
    "from tvm.relay import transform\n",
    "from tvm.relay.testing import run_opt_pass\n",
    "\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 简化 conv padding"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "convs = [relay.nn.conv1d, relay.nn.conv2d, relay.nn.conv3d]\n",
    "\n",
    "def validate(ndim, pad_width, pad_value, pad_mode, orig_padding, layout, no_fold=False):\n",
    "    \"\"\"Check that FoldExplicitPadding folds an explicit nn.pad into the conv.\n",
    "\n",
    "    Builds pad+conv, runs the pass, compares the result structurally with the\n",
    "    expected graph, then compares both graphs numerically on random inputs.\n",
    "\n",
    "    ndim: number of spatial dims (1/2/3); selects conv1d/conv2d/conv3d.\n",
    "    pad_width: one [before, after] pair per input axis for nn.pad.\n",
    "    pad_value: fill value of the pad op; folding only happens for 0.\n",
    "    pad_mode: nn.pad mode; folding only happens for \"constant\".\n",
    "    orig_padding: the conv op's own `padding` attribute.\n",
    "    layout: data layout string, either NC* or N*C.\n",
    "    no_fold: when True, skip the assertion that nn.pad was removed.\n",
    "    \"\"\"\n",
    "    # Derive data/weight shapes from where the channel axis sits in the layout.\n",
    "    if layout[1] == \"C\":\n",
    "        shape = [1, 3] + [10] * ndim\n",
    "        wshape = [8, 3] + [3] * ndim\n",
    "    elif layout[-1] == \"C\":\n",
    "        shape = [1] + [10] * ndim + [3]\n",
    "        wshape = [8] + [3] * ndim + [3]\n",
    "    else:\n",
    "        raise ValueError(\"This test only supports NC* and N*C\")\n",
    "\n",
    "    x = relay.var(\"x\", shape=shape, dtype=\"float32\")\n",
    "    w = relay.var(\"w\", shape=wshape, dtype=\"float32\")\n",
    "    pad = relay.nn.pad(x, pad_width, pad_value, pad_mode)\n",
    "    if layout[1] == \"C\":\n",
    "        conv = convs[ndim - 1](pad, w, padding=orig_padding)\n",
    "    else:\n",
    "        conv = convs[ndim - 1](\n",
    "            pad, w, padding=orig_padding, data_layout=layout, kernel_layout=\"DHWIO\"[3 - ndim :]\n",
    "        )\n",
    "\n",
    "    # Folding applies only to zero-fill constant padding: build the expected\n",
    "    # conv whose padding attribute absorbs the spatial-axis pad widths\n",
    "    # (all `before` values first, then all `after` values).\n",
    "    if pad_mode == \"constant\" and pad_value == 0:\n",
    "        new_padding = []\n",
    "        for j in range(2):\n",
    "            for i in range(len(pad_width)):\n",
    "                if layout[i] in [\"D\", \"H\", \"W\"]:\n",
    "                    new_padding.append(pad_width[i][j])\n",
    "        for i in range(len(new_padding)):\n",
    "            new_padding[i] += orig_padding[i]\n",
    "        if layout[1] == \"C\":\n",
    "            after = convs[ndim - 1](x, w, padding=new_padding)\n",
    "        else:\n",
    "            after = convs[ndim - 1](\n",
    "                x, w, padding=new_padding, data_layout=layout, kernel_layout=\"DHWIO\"[3 - ndim :]\n",
    "            )\n",
    "    else:\n",
    "        # Non-foldable combination: the graph should stay unchanged.\n",
    "        after = conv\n",
    "\n",
    "    zz = run_opt_pass(conv, transform.FoldExplicitPadding())\n",
    "    expected = run_opt_pass(after, transform.InferType())\n",
    "    assert tvm.ir.structural_equal(zz, expected)\n",
    "\n",
    "    mod1 = tvm.IRModule.from_expr(conv)\n",
    "    mod2 = tvm.IRModule.from_expr(zz)\n",
    "\n",
    "    if not no_fold:\n",
    "        # The folded module must not contain any nn.pad op.\n",
    "        op_freqs = relay.analysis.list_op_freqs(mod2)\n",
    "        assert \"nn.pad\" not in op_freqs\n",
    "\n",
    "    # Numerically compare the original and folded graphs on random inputs.\n",
    "    with tvm.transform.PassContext():\n",
    "        func1 = relay.create_executor(\n",
    "            \"vm\", mod=mod1, device=tvm.cpu(), target=\"llvm\"\n",
    "        ).evaluate()\n",
    "    func2 = relay.create_executor(\"vm\", mod=mod2, device=tvm.cpu(), target=\"llvm\").evaluate()\n",
    "    x_np = np.random.rand(*shape).astype(\"float32\")\n",
    "    w_np = np.random.rand(*wshape).astype(\"float32\")\n",
    "\n",
    "    result1 = func1(x_np, w_np)\n",
    "    result2 = func2(x_np, w_np)\n",
    "\n",
    "    np.testing.assert_allclose(result1.numpy(), result2.numpy(), rtol=1e-5, atol=1e-5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "One or more operators have not been tuned. Please tune your model for better performance. Use DEBUG logging level to see more details.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n",
      "conv2d NHWC layout is not optimized for x86 with autotvm.\n"
     ]
    }
   ],
   "source": [
    "# Test fold cases\n",
    "for orig_pad in [[0, 0], [2, 0], [0, 2]]:\n",
    "    for i_pad in [[0, 0], [1, 1], [1, 0]]:\n",
    "        for ndim in [1, 2, 3]:\n",
    "            for channels_last in [0, 1]:\n",
    "                if channels_last:\n",
    "                    # N*C layout: trim NDHWC down to the requested spatial rank.\n",
    "                    layout = \"NDHWC\"\n",
    "                    layout = layout[0:1] + layout[4 - ndim : 4] + layout[-1:]\n",
    "                    padding = [[0, 0]] + [i_pad] * ndim + [[0, 0]]\n",
    "                else:\n",
    "                    # NC* layout: trim NCDHW down to the requested spatial rank.\n",
    "                    layout = \"NCDHW\"\n",
    "                    layout = layout[0:2] + layout[5 - ndim :]\n",
    "                    padding = [[0, 0]] * 2 + [i_pad] * ndim\n",
    "\n",
    "                validate(ndim, padding, 0, \"constant\", orig_pad * ndim, layout)\n",
    "\n",
    "# Test no fold cases.\n",
    "# Bind the pad widths explicitly instead of relying on i_pad/orig_pad leaking\n",
    "# out of the loops above; these are the final loop values, so the arguments\n",
    "# passed to validate are unchanged.\n",
    "ndim = 2\n",
    "i_pad = [1, 0]\n",
    "orig_pad = [0, 2]\n",
    "# Conv only folds when pad_value=0\n",
    "validate(\n",
    "    ndim, [[0, 0]] * 2 + [i_pad] * ndim, 1, \"constant\", orig_pad * ndim, \"NCHW\", no_fold=True\n",
    ")\n",
    "# Conv only folds when pad's pad_mode=\"constant\"\n",
    "validate(ndim, [[0, 0]] * 2 + [i_pad] * ndim, 0, \"edge\", orig_pad * ndim, \"NCHW\", no_fold=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 简化 pool padding"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_min_value(dtype):\n",
    "    \"\"\"Return the smallest representable value of a numeric ``dtype``.\n",
    "\n",
    "    Floating dtypes are resolved through np.finfo and integer dtypes through\n",
    "    np.iinfo; anything else raises ValueError.\n",
    "    \"\"\"\n",
    "    for kind, info in ((np.floating, np.finfo), (np.integer, np.iinfo)):\n",
    "        if np.issubdtype(dtype, kind):\n",
    "            return info(dtype).min\n",
    "    raise ValueError(\"Cannot get min value for dtypes that are not integer or not floating\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "max_pools = [relay.nn.max_pool1d, relay.nn.max_pool2d, relay.nn.max_pool3d]\n",
    "avg_pools = [relay.nn.avg_pool1d, relay.nn.avg_pool2d, relay.nn.avg_pool3d]\n",
    "\n",
    "def validate(\n",
    "    pools,\n",
    "    ndim,\n",
    "    pad_width,\n",
    "    pad_value,\n",
    "    orig_padding,\n",
    "    layout,\n",
    "    pool_size,\n",
    "    pad_mode=\"constant\",\n",
    "    dtype=\"float32\",\n",
    "    no_fold=False,\n",
    "    **kwargs,\n",
    "):\n",
    "    \"\"\"Check that FoldExplicitPadding folds an explicit nn.pad into a pool op.\n",
    "\n",
    "    MaxPool can only absorb a pad filled with the dtype's minimum value;\n",
    "    AvgPool can only absorb a pad filled with 0. Builds pad+pool, runs the\n",
    "    pass, compares structurally with the expected graph, then compares both\n",
    "    graphs numerically. When ``no_fold`` is True, the assertion that nn.pad\n",
    "    was removed is skipped.\n",
    "    \"\"\"\n",
    "    pad_value_const = relay.const(pad_value, dtype=dtype)\n",
    "\n",
    "    # Input shape depends on where the channel axis sits in the layout.\n",
    "    if layout[1] == \"C\":\n",
    "        shape = [1, 3] + [10] * ndim\n",
    "    elif layout[-1] == \"C\":\n",
    "        shape = [1] + [10] * ndim + [3]\n",
    "    else:\n",
    "        raise ValueError(\"This test only supports NC* and N*C\")\n",
    "\n",
    "    x = relay.var(\"x\", shape=shape, dtype=dtype)\n",
    "    pad = relay.nn.pad(x, pad_width, pad_value_const, pad_mode)\n",
    "    if layout[1] == \"C\":\n",
    "        pool = pools[ndim - 1](pad, padding=orig_padding, pool_size=pool_size, **kwargs)\n",
    "    else:\n",
    "        pool = pools[ndim - 1](\n",
    "            pad, padding=orig_padding, layout=layout, pool_size=pool_size, **kwargs\n",
    "        )\n",
    "\n",
    "    # Only a pad filled with this value can be absorbed into the pool op.\n",
    "    if pools == max_pools:\n",
    "        foldable_pad_value = get_min_value(dtype)\n",
    "    else:\n",
    "        foldable_pad_value = 0\n",
    "\n",
    "    if pad_mode == \"constant\" and pad_value == foldable_pad_value:\n",
    "        # Expected folded pool: the padding attribute absorbs the spatial-axis\n",
    "        # pad widths (all `before` values first, then all `after` values).\n",
    "        new_padding = []\n",
    "        for j in range(2):\n",
    "            for i in range(len(pad_width)):\n",
    "                if layout[i] in [\"D\", \"H\", \"W\"]:\n",
    "                    new_padding.append(pad_width[i][j])\n",
    "        for i in range(len(new_padding)):\n",
    "            new_padding[i] += orig_padding[i]\n",
    "\n",
    "        if pools == avg_pools and all(v == 0 for v in orig_padding):\n",
    "            # If the orig padding for AvgPool is all zero and the pad op to fold\n",
    "            # has non-zero pad width, the resultant folded AvgPool will have\n",
    "            # count_include_pad=True so AvgPool's divisor is agnostic of pad boundaries\n",
    "            kwargs[\"count_include_pad\"] = True\n",
    "        if layout[1] == \"C\":\n",
    "            after = pools[ndim - 1](x, padding=new_padding, pool_size=pool_size, **kwargs)\n",
    "        else:\n",
    "            after = pools[ndim - 1](\n",
    "                x, padding=new_padding, layout=layout, pool_size=pool_size, **kwargs\n",
    "            )\n",
    "    else:\n",
    "        # Non-foldable combination: the graph should stay unchanged.\n",
    "        after = pool\n",
    "\n",
    "    zz = run_opt_pass(pool, transform.FoldExplicitPadding())\n",
    "    expected = run_opt_pass(after, transform.InferType())\n",
    "\n",
    "    assert tvm.ir.structural_equal(zz, expected)\n",
    "\n",
    "    mod1 = tvm.IRModule.from_expr(pool)\n",
    "    mod2 = tvm.IRModule.from_expr(zz)\n",
    "\n",
    "    if not no_fold:\n",
    "        # The folded module must not contain any nn.pad op.\n",
    "        op_freqs = relay.analysis.list_op_freqs(mod2)\n",
    "        assert \"nn.pad\" not in op_freqs\n",
    "\n",
    "    # Numerically compare the unfolded and folded graphs on a random input.\n",
    "    with tvm.transform.PassContext():\n",
    "        func1 = relay.create_executor(\n",
    "            \"vm\", mod=mod1, device=tvm.cpu(), target=\"llvm\"\n",
    "        ).evaluate()\n",
    "\n",
    "    func2 = relay.create_executor(\"vm\", mod=mod2, device=tvm.cpu(), target=\"llvm\").evaluate()\n",
    "    x_np = np.random.rand(*shape).astype(dtype)\n",
    "\n",
    "    result1 = func1(x_np)\n",
    "    result2 = func2(x_np)\n",
    "\n",
    "    # Use np.testing directly (consistent with the conv cell above) so this\n",
    "    # cell does not depend on the tvm.testing submodule being imported.\n",
    "    np.testing.assert_allclose(result1.numpy(), result2.numpy(), rtol=1e-5, atol=1e-5)\n",
    "\n",
    "# Test fold cases\n",
    "float_min_val = get_min_value(\"float32\")\n",
    "for orig_pad in [[0, 0], [2, 0]]:\n",
    "    for i_pad in [[1, 1], [1, 0]]:\n",
    "        for ndim in [1, 2, 3]:\n",
    "            for channels_last in [0, 1]:\n",
    "                if channels_last:\n",
    "                    layout = \"NDHWC\"\n",
    "                    layout = layout[0:1] + layout[4 - ndim : 4] + layout[-1:]\n",
    "                    padding = [[0, 0]] + [i_pad] * ndim + [[0, 0]]\n",
    "                else:\n",
    "                    layout = \"NCDHW\"\n",
    "                    layout = layout[0:2] + layout[5 - ndim :]\n",
    "                    padding = [[0, 0]] * 2 + [i_pad] * ndim\n",
    "\n",
    "                validate(max_pools, ndim, padding, float_min_val, orig_pad * ndim, layout, 2)\n",
    "\n",
    "# Check Pool pad folding when pad width on pad op is all zero.\n",
    "validate(max_pools, 1, [[0, 0], [0, 0], [0, 0]], float_min_val, [2, 0], \"NCW\", 2)\n",
    "# Check MaxPool pad folding with uint dtype\n",
    "int_min_val = get_min_value(\"uint8\")\n",
    "validate(\n",
    "    max_pools,\n",
    "    2,\n",
    "    [[0, 0], [0, 0], [0, 2], [2, 0]],\n",
    "    int_min_val,\n",
    "    [2, 0, 0, 0],\n",
    "    \"NCHW\",\n",
    "    2,\n",
    "    dtype=\"uint8\",\n",
    ")\n",
    "# Fold when original AvgPool has its own padding but count_include_pad=True\n",
    "validate(\n",
    "    avg_pools,\n",
    "    2,\n",
    "    [[0, 0], [0, 0], [0, 2], [2, 0]],\n",
    "    0,\n",
    "    [0, 0, 1, 0],\n",
    "    \"NCHW\",\n",
    "    2,\n",
    "    count_include_pad=True,\n",
    ")\n",
    "# Fold when count_include_pad=False but original AvgPool has no orig padding\n",
    "validate(avg_pools, 2, [[0, 0], [0, 0], [0, 2], [2, 0]], 0, [0, 0, 0, 0], \"NCHW\", 2)\n",
    "\n",
    "# Test no fold cases\n",
    "# AvgPool only folds pad when count_include_pad (False by default) is True\n",
    "# NOTE(review): the arguments below are identical to the fold case above; since\n",
    "# validate only uses no_fold to skip the nn.pad-absence check, this call does\n",
    "# not actually exercise a no-fold path - confirm against the upstream test.\n",
    "validate(\n",
    "    avg_pools, 2, [[0, 0], [0, 0], [0, 2], [2, 0]], 0, [0, 0, 0, 0], \"NCHW\", 2, no_fold=True\n",
    ")\n",
    "# MaxPool only folds pad when pad_value is the min for its dtype\n",
    "validate(max_pools, 1, [[0, 0], [0, 0], [0, 2]], 0, [0, 0], \"NCHW\", 2, no_fold=True)\n",
    "# AvgPool only folds pad when pad_value=0\n",
    "validate(avg_pools, 1, [[0, 0], [0, 0], [0, 2]], 1, [0, 0], \"NCHW\", 2, no_fold=True)\n",
    "# Pools only fold when pad_mode=\"constant\"\n",
    "validate(\n",
    "    avg_pools, 1, [[0, 0], [0, 0], [0, 2]], 0, [0, 0], \"NCHW\", 2, pad_mode=\"edge\", no_fold=True\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 折叠 pad_qconv2d"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "def before():\n",
    "    \"\"\"pad + qnn.conv2d where the pad value equals the input zero point (10),\n",
    "    so FoldExplicitPadding can absorb the pad into the conv's padding.\"\"\"\n",
    "    x = relay.var(\"x\", shape=(1, 56, 56, 64), dtype=\"int8\")\n",
    "    weight = relay.var(\"weight\", shape=(3, 3, 64, 64), dtype=\"int8\")\n",
    "    input_zero_point = 10\n",
    "    # Pad H and W by 1 on both sides, filling with the input zero point.\n",
    "    pad = relay.nn.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], pad_value=input_zero_point)\n",
    "    return relay.qnn.op.conv2d(\n",
    "        pad,\n",
    "        weight,\n",
    "        relay.const(input_zero_point, \"int32\"),\n",
    "        relay.const(1, \"int32\"),\n",
    "        relay.const(1, \"float32\"),\n",
    "        relay.const(1, \"float32\"),\n",
    "        channels=64,\n",
    "        kernel_size=(3, 3),\n",
    "        padding=(0, 0),\n",
    "        data_layout=\"NHWC\",\n",
    "        kernel_layout=\"HWIO\",\n",
    "    )\n",
    "\n",
    "def expected():\n",
    "    \"\"\"The folded form of before(): no nn.pad, conv padding becomes (1, 1).\"\"\"\n",
    "    x = relay.var(\"x\", shape=(1, 56, 56, 64), dtype=\"int8\")\n",
    "    weight = relay.var(\"weight\", shape=(3, 3, 64, 64), dtype=\"int8\")\n",
    "    input_zero_point = 10\n",
    "    return relay.qnn.op.conv2d(\n",
    "        x,\n",
    "        weight,\n",
    "        relay.const(input_zero_point, \"int32\"),\n",
    "        relay.const(1, \"int32\"),\n",
    "        relay.const(1, \"float32\"),\n",
    "        relay.const(1, \"float32\"),\n",
    "        channels=64,\n",
    "        kernel_size=(3, 3),\n",
    "        padding=(1, 1),\n",
    "        data_layout=\"NHWC\",\n",
    "        kernel_layout=\"HWIO\",\n",
    "    )\n",
    "\n",
    "# The pass should rewrite before() into expected().\n",
    "a = run_opt_pass(before(), relay.transform.FoldExplicitPadding())\n",
    "b = run_opt_pass(expected(), transform.InferType())\n",
    "\n",
    "assert tvm.ir.structural_equal(a, b, map_free_vars=True), \"Actual = \\n\" + str(a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #AA22FF\">@main</span>(<span style=\"color: #AA22FF; font-weight: bold\">%</span>x: Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">64</span>), int8] <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">64</span>), int8] <span style=\"color: #AA22FF; font-weight: bold\">*/</span>, <span style=\"color: #AA22FF; font-weight: bold\">%</span>weight: Tensor[(<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">64</span>, <span style=\"color: #008000\">64</span>), int8] <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>Tensor[(<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">64</span>, <span style=\"color: #008000\">64</span>), int8] <span style=\"color: #AA22FF; font-weight: bold\">*/</span>) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">64</span>), int32] {\n",
       "  <span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span> <span style=\"color: #AA22FF; font-weight: bold\">=</span> nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>pad(<span style=\"color: #AA22FF; font-weight: bold\">%</span>x, <span style=\"color: #008000\">10</span> <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>int32 <span style=\"color: #AA22FF; font-weight: bold\">*/</span>, pad_width<span style=\"color: #AA22FF; font-weight: bold\">=</span>[[<span style=\"color: #008000\">0</span>, <span style=\"color: #008000\">0</span>], [<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>], [<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>], [<span style=\"color: #008000\">0</span>, <span style=\"color: #008000\">0</span>]]) <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">58</span>, <span style=\"color: #008000\">58</span>, <span style=\"color: #008000\">64</span>), int8] <span style=\"color: #AA22FF; font-weight: bold\">*/</span>;\n",
       "  qnn<span style=\"color: #AA22FF; font-weight: bold\">.</span>conv2d(<span style=\"color: #AA22FF; font-weight: bold\">%</span><span style=\"color: #008000\">0</span>, <span style=\"color: #AA22FF; font-weight: bold\">%</span>weight, <span style=\"color: #008000\">10</span> <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>int32 <span style=\"color: #AA22FF; font-weight: bold\">*/</span>, <span style=\"color: #008000\">1</span> <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>int32 <span style=\"color: #AA22FF; font-weight: bold\">*/</span>, <span style=\"color: #008000\">1</span>f <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>float32 <span style=\"color: #AA22FF; font-weight: bold\">*/</span>, <span style=\"color: #008000\">1</span>f <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>float32 <span style=\"color: #AA22FF; font-weight: bold\">*/</span>, padding<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">0</span>, <span style=\"color: #008000\">0</span>, <span style=\"color: #008000\">0</span>, <span style=\"color: #008000\">0</span>], channels<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000\">64</span>, kernel_size<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>], data_layout<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;NHWC&quot;</span>, kernel_layout<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;HWIO&quot;</span>, out_dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: 
#BA2121\">&quot;int32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">64</span>), int32] <span style=\"color: #AA22FF; font-weight: bold\">*/</span>\n",
       "}\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Show the typed module before folding: the nn.pad op is still explicit.\n",
    "mod = tvm.IRModule.from_expr(before())\n",
    "mod = relay.transform.InferType()(mod)\n",
    "mod.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #AA22FF\">@main</span>(<span style=\"color: #AA22FF; font-weight: bold\">%</span>x: Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">64</span>), int8] <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">64</span>), int8] <span style=\"color: #AA22FF; font-weight: bold\">*/</span>, <span style=\"color: #AA22FF; font-weight: bold\">%</span>weight: Tensor[(<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">64</span>, <span style=\"color: #008000\">64</span>), int8] <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>Tensor[(<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">64</span>, <span style=\"color: #008000\">64</span>), int8] <span style=\"color: #AA22FF; font-weight: bold\">*/</span>) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">64</span>), int32] {\n",
       "  qnn<span style=\"color: #AA22FF; font-weight: bold\">.</span>conv2d(<span style=\"color: #AA22FF; font-weight: bold\">%</span>x, <span style=\"color: #AA22FF; font-weight: bold\">%</span>weight, <span style=\"color: #008000\">10</span> <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>int32 <span style=\"color: #AA22FF; font-weight: bold\">*/</span>, <span style=\"color: #008000\">1</span> <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>int32 <span style=\"color: #AA22FF; font-weight: bold\">*/</span>, <span style=\"color: #008000\">1</span>f <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>float32 <span style=\"color: #AA22FF; font-weight: bold\">*/</span>, <span style=\"color: #008000\">1</span>f <span style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>float32 <span style=\"color: #AA22FF; font-weight: bold\">*/</span>, padding<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>], channels<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000\">64</span>, kernel_size<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>], data_layout<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;NHWC&quot;</span>, kernel_layout<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;HWIO&quot;</span>, out_dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;int32&quot;</span>) <span 
style=\"color: #AA22FF; font-weight: bold\">/*</span> ty<span style=\"color: #AA22FF; font-weight: bold\">=</span>Tensor[(<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">56</span>, <span style=\"color: #008000\">64</span>), int32] <span style=\"color: #AA22FF; font-weight: bold\">*/</span>\n",
       "}\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# After FoldExplicitPadding: nn.pad is gone and the conv carries the padding.\n",
    "(relay.transform.FoldExplicitPadding()(mod)).show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "test_pad_qconv2d_no_fold："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_expr():\n",
    "    \"\"\"pad + qnn.conv2d where pad_value (1) differs from the input zero point (0),\n",
    "    so FoldExplicitPadding must leave the graph unchanged.\"\"\"\n",
    "    x = relay.var(\"x\", shape=(1, 1, 2, 2), dtype=\"int8\")\n",
    "    weight = relay.var(\"weight\", shape=(1, 1, 2, 2), dtype=\"int8\")\n",
    "    # Pad value and input zp are not equal\n",
    "    pad_value = 1\n",
    "    input_zero_point = 0\n",
    "    pad = relay.nn.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], pad_value=pad_value)\n",
    "    return relay.qnn.op.conv2d(\n",
    "        pad,\n",
    "        weight,\n",
    "        relay.const(input_zero_point, \"int32\"),\n",
    "        relay.const(0, \"int32\"),\n",
    "        relay.const(1, \"float32\"),\n",
    "        relay.const(1, \"float32\"),\n",
    "        channels=1,\n",
    "        kernel_size=(2, 2),\n",
    "        padding=(0, 0),\n",
    "    )\n",
    "\n",
    "# No-fold case: running the pass must be a no-op up to type inference.\n",
    "a = run_opt_pass(get_expr(), relay.transform.FoldExplicitPadding())\n",
    "b = run_opt_pass(get_expr(), transform.InferType())\n",
    "\n",
    "assert tvm.ir.structural_equal(a, b, map_free_vars=True), (\n",
    "    \"\\nActual = \\n\" + str(a) + \"\\nExpected = \\n\" + str(b)\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py312x",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
