{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "0efc93f6",
   "metadata": {},
   "source": [
    "# 优化布局变换"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3a52a861",
   "metadata": {},
   "source": [
    "`OptimizeLayoutTransform` 能够优化计算图中的布局变换操作，消除不必要的张量布局变换，从而提高计算效率。本笔记本通过三个测试场景全面检验了该优化 pass 的能力：\n",
    "\n",
    "1. **单参数布局转换优化**：验证在单一输入和输出情况下，布局优化能够消除中间不必要的布局转换操作。\n",
    "2. **多参数布局转换优化**：验证在多输入情况下，布局优化能够有效管理多个张量的布局转换，避免冗余操作。\n",
    "3. **填充与移除填充操作优化**：验证在包含填充、计算和移除填充的复杂场景中，布局优化能够识别并消除不必要的填充/移除填充循环操作。\n",
    "\n",
    "每个测试场景都定义了优化前的 `Before` 模块，并通过 `_run_pass_compare_output` 函数应用布局优化转换并展示优化后的结果。这些示例展示了 TVM Relax 布局优化转换能够正确识别和消除各种复杂场景下不必要的张量布局转换操作。\n",
    ""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "0c348645",
   "metadata": {},
   "outputs": [],
   "source": [
    "import tvm\n",
    "import numpy as np\n",
    "from tvm import relax\n",
    "from tvm.relax.transform import DeadCodeElimination, FuseTIR, OptimizeLayoutTransform\n",
    "from tvm.script import ir as I, tir as T, relax as R"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "a1cb24d3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义辅助函数，用于对模块应用一系列优化转换并展示优化后的结果\n",
    "def _run_pass_compare_output(Before,):\n",
    "    # 顺序应用布局优化、死代码消除和 TIR 融合转换\n",
    "    After = tvm.ir.transform.Sequential(\n",
    "        [\n",
    "            OptimizeLayoutTransform(),  # 优化布局转换\n",
    "            DeadCodeElimination(),      # 消除死代码\n",
    "            FuseTIR(),                  # 融合 TIR 函数\n",
    "        ]\n",
    "    )(Before)\n",
    "    After.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c89d1e27",
   "metadata": {},
   "source": [
    "## 单参数布局变换优化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "db87cad6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义优化前的 IR 模块\n",
    "@I.ir_module\n",
    "class Before:\n",
    "    # 定义 TIR 原语函数作为 relax.add 的替代实现\n",
    "    @T.prim_func(private=True)\n",
    "    def relax_add_replacement(\n",
    "        arg0: T.Buffer((4, 4), \"float32\"),\n",
    "        arg1: T.Buffer((4, 4), \"float32\"),\n",
    "        output: T.Buffer((4, 4), \"float32\"),\n",
    "    ):\n",
    "        T.func_attr({\"operator_name\": \"relax.add\"})\n",
    "        # with T.block(\"root\"):\n",
    "        for ax0, ax1 in T.grid(4, 4):\n",
    "            with T.block(\"T_add\"):\n",
    "                v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                T.reads(arg0[v_ax0, v_ax1], arg1[v_ax0, v_ax1])\n",
    "                T.writes(output[v_ax0, v_ax1])\n",
    "                output[v_ax0, v_ax1] = arg0[v_ax0, v_ax1] + arg1[v_ax0, v_ax1]\n",
    "\n",
    "    # 定义主函数，包含多次布局转换和计算操作\n",
    "    @R.function\n",
    "    def main(\n",
    "        x: R.Tensor((16,), dtype=\"float32\"), y: R.Tensor((16,), dtype=\"float32\")\n",
    "    ) -> R.Tensor((16,), dtype=\"float32\"):\n",
    "        with R.dataflow():\n",
    "            # 将一维张量转换为二维布局 (4x4)\n",
    "            lv: R.Tensor((4, 4), dtype=\"float32\") = R.layout_transform(\n",
    "                x, index_map=lambda i: (i // 4, i % 4), pad_value=None\n",
    "            )\n",
    "            lv1: R.Tensor((4, 4), dtype=\"float32\") = R.layout_transform(\n",
    "                y, index_map=lambda i: (i // 4, i % 4), pad_value=None\n",
    "            )\n",
    "            # 调用 TIR 原语函数进行加法计算\n",
    "            lv2 = R.call_tir(\n",
    "                Before.relax_add_replacement,\n",
    "                (lv, lv1),\n",
    "                out_sinfo=R.Tensor((4, 4), dtype=\"float32\"),\n",
    "            )\n",
    "            # 将结果转换回一维布局\n",
    "            lv0: R.Tensor((16,), dtype=\"float32\") = R.layout_transform(\n",
    "                lv2, index_map=lambda axis0, axis1: (axis0 * 4 + axis1,), pad_value=None\n",
    "            )\n",
    "            # 重复不必要的布局转换和计算（这些应该被优化掉）\n",
    "            lv3: R.Tensor((4, 4), dtype=\"float32\") = R.layout_transform(\n",
    "                lv0, index_map=lambda i: (i // 4, i % 4), pad_value=None\n",
    "            )\n",
    "            lv4: R.Tensor((4, 4), dtype=\"float32\") = R.layout_transform(\n",
    "                y, index_map=lambda i: (i // 4, i % 4), pad_value=None\n",
    "            )\n",
    "            lv5 = R.call_tir(\n",
    "                Before.relax_add_replacement,\n",
    "                (lv4, lv3),\n",
    "                out_sinfo=R.Tensor((4, 4), dtype=\"float32\"),\n",
    "            )\n",
    "            lv2_1: R.Tensor((16,), dtype=\"float32\") = R.layout_transform(\n",
    "                lv5, index_map=lambda axis0, axis1: (axis0 * 4 + axis1,), pad_value=None\n",
    "            )\n",
    "            gv: R.Tensor((16,), dtype=\"float32\") = lv2_1\n",
    "            R.output(gv)\n",
    "        return gv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "c6630178",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import tir as T</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #A2F\">@I</span><span style=\"color: #A2F; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #00F; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #A2F\">@T</span><span style=\"color: #A2F; font-weight: bold\">.</span>prim_func(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">relax_add_replacement</span>(arg0: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), <span style=\"color: #BA2121\">&quot;float32&quot;</span>), arg1: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), <span style=\"color: #BA2121\">&quot;float32&quot;</span>), output: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), <span style=\"color: #BA2121\">&quot;float32&quot;</span>)):\n",
       "        T<span style=\"color: #A2F; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;operator_name&quot;</span>: <span style=\"color: #BA2121\">&quot;relax.add&quot;</span>})\n",
       "        <span style=\"color: #007979; font-style: italic\"># with T.block(&quot;root&quot;):</span>\n",
       "        <span style=\"color: #008000; font-weight: bold\">for</span> ax0, ax1 <span style=\"color: #008000; font-weight: bold\">in</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>grid(<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>):\n",
       "            <span style=\"color: #008000; font-weight: bold\">with</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>block(<span style=\"color: #BA2121\">&quot;T_add&quot;</span>):\n",
       "                v_ax0, v_ax1 <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>axis<span style=\"color: #A2F; font-weight: bold\">.</span>remap(<span style=\"color: #BA2121\">&quot;SS&quot;</span>, [ax0, ax1])\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>reads(arg0[v_ax0, v_ax1], arg1[v_ax0, v_ax1])\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>writes(output[v_ax0, v_ax1])\n",
       "                output[v_ax0, v_ax1] <span style=\"color: #A2F; font-weight: bold\">=</span> arg0[v_ax0, v_ax1] <span style=\"color: #A2F; font-weight: bold\">+</span> arg1[v_ax0, v_ax1]\n",
       "\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">main</span>(x: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), y: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        cls <span style=\"color: #A2F; font-weight: bold\">=</span> Module\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>dataflow():\n",
       "            lv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>layout_transform(x, index_map<span style=\"color: #A2F; font-weight: bold\">=</span>T<span style=\"color: #A2F; font-weight: bold\">.</span>index_map(<span style=\"color: #008000; font-weight: bold\">lambda</span> i: (i <span style=\"color: #A2F; font-weight: bold\">//</span> <span style=\"color: #008000\">4</span>, i <span style=\"color: #A2F; font-weight: bold\">%</span> <span style=\"color: #008000\">4</span>)), pad_value<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">None</span>, axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[], input_axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[])\n",
       "            lv1: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>layout_transform(y, index_map<span style=\"color: #A2F; font-weight: bold\">=</span>T<span style=\"color: #A2F; font-weight: bold\">.</span>index_map(<span style=\"color: #008000; font-weight: bold\">lambda</span> i: (i <span style=\"color: #A2F; font-weight: bold\">//</span> <span style=\"color: #008000\">4</span>, i <span style=\"color: #A2F; font-weight: bold\">%</span> <span style=\"color: #008000\">4</span>)), pad_value<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">None</span>, axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[], input_axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[])\n",
       "            lv2 <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #A2F; font-weight: bold\">.</span>relax_add_replacement, (lv, lv1), out_sinfo<span style=\"color: #A2F; font-weight: bold\">=</span>R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            lv5 <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #A2F; font-weight: bold\">.</span>relax_add_replacement, (lv1, lv2), out_sinfo<span style=\"color: #A2F; font-weight: bold\">=</span>R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            gv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>layout_transform(lv5, index_map<span style=\"color: #A2F; font-weight: bold\">=</span>T<span style=\"color: #A2F; font-weight: bold\">.</span>index_map(<span style=\"color: #008000; font-weight: bold\">lambda</span> axis0, axis1: (axis0 <span style=\"color: #A2F; font-weight: bold\">*</span> <span style=\"color: #008000\">4</span> <span style=\"color: #A2F; font-weight: bold\">+</span> axis1,)), pad_value<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">None</span>, axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[], input_axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[])\n",
       "            R<span style=\"color: #A2F; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "_run_pass_compare_output(Before,)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1f67ae7a",
   "metadata": {},
   "source": [
    "## 多参数（三个输入）情况下的布局变换优化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "3ef45665",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义优化前的 IR 模块\n",
    "@I.ir_module\n",
    "class Before:\n",
    "    # 定义 TIR 原语函数\n",
    "    @T.prim_func(private=True)\n",
    "    def relax_add_replacement(\n",
    "        arg0: T.Buffer((4, 4), \"float32\"),\n",
    "        arg1: T.Buffer((4, 4), \"float32\"),\n",
    "        output: T.Buffer((4, 4), \"float32\"),\n",
    "    ):\n",
    "        T.func_attr({\"operator_name\": \"relax.add\"})\n",
    "        # with T.block(\"root\"):\n",
    "        for ax0, ax1 in T.grid(4, 4):\n",
    "            with T.block(\"T_add\"):\n",
    "                v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                T.reads(arg0[v_ax0, v_ax1], arg1[v_ax0, v_ax1])\n",
    "                T.writes(output[v_ax0, v_ax1])\n",
    "                output[v_ax0, v_ax1] = arg0[v_ax0, v_ax1] + arg1[v_ax0, v_ax1]\n",
    "\n",
    "    # 定义主函数，包含三个输入和多次布局转换\n",
    "    @R.function\n",
    "    def main(\n",
    "        x: R.Tensor((16,), dtype=\"float32\"),\n",
    "        y: R.Tensor((16,), dtype=\"float32\"),\n",
    "        z: R.Tensor((16,), dtype=\"float32\"),\n",
    "    ) -> R.Tensor((16,), dtype=\"float32\"):\n",
    "        with R.dataflow():\n",
    "            # 将三个输入转换为二维布局\n",
    "            lv: R.Tensor((4, 4), dtype=\"float32\") = R.layout_transform(\n",
    "                x, index_map=lambda i: (i // 4, i % 4), pad_value=None\n",
    "            )\n",
    "            lv1: R.Tensor((4, 4), dtype=\"float32\") = R.layout_transform(\n",
    "                y, index_map=lambda i: (i // 4, i % 4), pad_value=None\n",
    "            )\n",
    "            lv2: R.Tensor((4, 4), dtype=\"float32\") = R.layout_transform(\n",
    "                z, index_map=lambda i: (i // 4, i % 4), pad_value=None\n",
    "            )\n",
    "            # 第一次加法计算\n",
    "            lv3 = R.call_tir(\n",
    "                Before.relax_add_replacement,\n",
    "                (lv, lv1),\n",
    "                out_sinfo=R.Tensor((4, 4), dtype=\"float32\"),\n",
    "            )\n",
    "            # 第二次加法计算\n",
    "            lv4 = R.call_tir(\n",
    "                Before.relax_add_replacement,\n",
    "                (lv, lv2),\n",
    "                out_sinfo=R.Tensor((4, 4), dtype=\"float32\"),\n",
    "            )\n",
    "            # 不必要的布局转换回一维\n",
    "            lv5: R.Tensor((16,), dtype=\"float32\") = R.layout_transform(\n",
    "                lv3, index_map=lambda axis0, axis1: (axis0 * 4 + axis1,), pad_value=None\n",
    "            )\n",
    "            lv6: R.Tensor((16,), dtype=\"float32\") = R.layout_transform(\n",
    "                lv4, index_map=lambda axis0, axis1: (axis0 * 4 + axis1,), pad_value=None\n",
    "            )\n",
    "            # 再次转换为二维，用于第三次加法\n",
    "            lv7: R.Tensor((4, 4), dtype=\"float32\") = R.layout_transform(\n",
    "                lv5, index_map=lambda i: (i // 4, i % 4), pad_value=None\n",
    "            )\n",
    "            lv8: R.Tensor((4, 4), dtype=\"float32\") = R.layout_transform(\n",
    "                lv6, index_map=lambda i: (i // 4, i % 4), pad_value=None\n",
    "            )\n",
    "            lv9 = R.call_tir(\n",
    "                Before.relax_add_replacement,\n",
    "                (lv7, lv8),\n",
    "                out_sinfo=R.Tensor((4, 4), dtype=\"float32\"),\n",
    "            )\n",
    "            # 最后转换回一维\n",
    "            lv10: R.Tensor((16,), dtype=\"float32\") = R.layout_transform(\n",
    "                lv9, index_map=lambda axis0, axis1: (axis0 * 4 + axis1,), pad_value=None\n",
    "            )\n",
    "            gv: R.Tensor((16,), dtype=\"float32\") = lv10\n",
    "            R.output(gv)\n",
    "        return gv\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "c7d73292",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import tir as T</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #A2F\">@I</span><span style=\"color: #A2F; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #00F; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #A2F\">@T</span><span style=\"color: #A2F; font-weight: bold\">.</span>prim_func(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">relax_add_replacement</span>(arg0: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), <span style=\"color: #BA2121\">&quot;float32&quot;</span>), arg1: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), <span style=\"color: #BA2121\">&quot;float32&quot;</span>), output: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), <span style=\"color: #BA2121\">&quot;float32&quot;</span>)):\n",
       "        T<span style=\"color: #A2F; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;operator_name&quot;</span>: <span style=\"color: #BA2121\">&quot;relax.add&quot;</span>})\n",
       "        <span style=\"color: #007979; font-style: italic\"># with T.block(&quot;root&quot;):</span>\n",
       "        <span style=\"color: #008000; font-weight: bold\">for</span> ax0, ax1 <span style=\"color: #008000; font-weight: bold\">in</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>grid(<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>):\n",
       "            <span style=\"color: #008000; font-weight: bold\">with</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>block(<span style=\"color: #BA2121\">&quot;T_add&quot;</span>):\n",
       "                v_ax0, v_ax1 <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>axis<span style=\"color: #A2F; font-weight: bold\">.</span>remap(<span style=\"color: #BA2121\">&quot;SS&quot;</span>, [ax0, ax1])\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>reads(arg0[v_ax0, v_ax1], arg1[v_ax0, v_ax1])\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>writes(output[v_ax0, v_ax1])\n",
       "                output[v_ax0, v_ax1] <span style=\"color: #A2F; font-weight: bold\">=</span> arg0[v_ax0, v_ax1] <span style=\"color: #A2F; font-weight: bold\">+</span> arg1[v_ax0, v_ax1]\n",
       "\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">main</span>(x: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), y: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), z: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        cls <span style=\"color: #A2F; font-weight: bold\">=</span> Module\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>dataflow():\n",
       "            lv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>layout_transform(x, index_map<span style=\"color: #A2F; font-weight: bold\">=</span>T<span style=\"color: #A2F; font-weight: bold\">.</span>index_map(<span style=\"color: #008000; font-weight: bold\">lambda</span> i: (i <span style=\"color: #A2F; font-weight: bold\">//</span> <span style=\"color: #008000\">4</span>, i <span style=\"color: #A2F; font-weight: bold\">%</span> <span style=\"color: #008000\">4</span>)), pad_value<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">None</span>, axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[], input_axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[])\n",
       "            lv1: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>layout_transform(y, index_map<span style=\"color: #A2F; font-weight: bold\">=</span>T<span style=\"color: #A2F; font-weight: bold\">.</span>index_map(<span style=\"color: #008000; font-weight: bold\">lambda</span> i: (i <span style=\"color: #A2F; font-weight: bold\">//</span> <span style=\"color: #008000\">4</span>, i <span style=\"color: #A2F; font-weight: bold\">%</span> <span style=\"color: #008000\">4</span>)), pad_value<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">None</span>, axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[], input_axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[])\n",
       "            lv2: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>layout_transform(z, index_map<span style=\"color: #A2F; font-weight: bold\">=</span>T<span style=\"color: #A2F; font-weight: bold\">.</span>index_map(<span style=\"color: #008000; font-weight: bold\">lambda</span> i: (i <span style=\"color: #A2F; font-weight: bold\">//</span> <span style=\"color: #008000\">4</span>, i <span style=\"color: #A2F; font-weight: bold\">%</span> <span style=\"color: #008000\">4</span>)), pad_value<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">None</span>, axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[], input_axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[])\n",
       "            lv3 <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #A2F; font-weight: bold\">.</span>relax_add_replacement, (lv, lv1), out_sinfo<span style=\"color: #A2F; font-weight: bold\">=</span>R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            lv4 <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #A2F; font-weight: bold\">.</span>relax_add_replacement, (lv, lv2), out_sinfo<span style=\"color: #A2F; font-weight: bold\">=</span>R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            lv9 <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #A2F; font-weight: bold\">.</span>relax_add_replacement, (lv3, lv4), out_sinfo<span style=\"color: #A2F; font-weight: bold\">=</span>R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            gv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>layout_transform(lv9, index_map<span style=\"color: #A2F; font-weight: bold\">=</span>T<span style=\"color: #A2F; font-weight: bold\">.</span>index_map(<span style=\"color: #008000; font-weight: bold\">lambda</span> axis0, axis1: (axis0 <span style=\"color: #A2F; font-weight: bold\">*</span> <span style=\"color: #008000\">4</span> <span style=\"color: #A2F; font-weight: bold\">+</span> axis1,)), pad_value<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">None</span>, axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[], input_axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[])\n",
       "            R<span style=\"color: #A2F; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "_run_pass_compare_output(Before,)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "21b6fdde",
   "metadata": {},
   "source": [
    "## 包含填充和移除填充操作的布局变换优化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "0594ea30",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define the IR module as it looks before optimization\n",
    "@I.ir_module\n",
    "class Before:\n",
    "    # TIR prim_func implementing the ReLU operation\n",
    "    @T.prim_func(private=True)\n",
    "    def relax_relu_replacement(\n",
    "        arg0: T.Buffer((16,), \"float32\"), output: T.Buffer((16,), \"float32\")\n",
    "    ):\n",
    "        T.func_attr({\"operator_name\": \"relax.relu\"})\n",
    "        # with T.block(\"root\"):\n",
    "        for ax0 in range(16):\n",
    "            # NOTE(review): block name \"T_add\" is misleading for a ReLU\n",
    "            # (max with 0) kernel; harmless to the pass, but worth renaming\n",
    "            with T.block(\"T_add\"):\n",
    "                v_ax0 = T.axis.spatial(16, ax0)\n",
    "                T.reads(arg0[v_ax0])\n",
    "                T.writes(output[v_ax0])\n",
    "                output[v_ax0] = T.max(arg0[v_ax0], T.float32(0))\n",
    "\n",
    "    # TIR prim_func that removes padding by copying the first i0 elements\n",
    "    @T.prim_func(private=True)\n",
    "    def remove_pad(var_input: T.handle, var_output: T.handle):\n",
    "        T.func_attr({\"operator_name\": \"remove_pad\", \"tir.noalias\": True})\n",
    "        p0 = T.int64()\n",
    "        input = T.match_buffer(var_input, (p0,))\n",
    "        i0 = T.int64()\n",
    "        output = T.match_buffer(var_output, (i0,))\n",
    "        # with T.block(\"root\"):\n",
    "        for ax0 in range(i0):\n",
    "            with T.block(\"output\"):\n",
    "                v_ax0 = T.axis.spatial(i0, ax0)\n",
    "                T.reads(input[v_ax0])\n",
    "                T.writes(output[v_ax0])\n",
    "                output[v_ax0] = input[v_ax0]\n",
    "\n",
    "    # Main function: a pad -> ReLU -> remove-pad pipeline, repeated twice\n",
    "    @R.function\n",
    "    def main(x: R.Tensor((14,), dtype=\"float32\")) -> R.Tensor((14,), dtype=\"float32\"):\n",
    "        with R.dataflow():\n",
    "            # Pad: lift the 14-element tensor into a 16-element layout\n",
    "            lv: R.Tensor((16,), dtype=\"float32\") = R.layout_transform(\n",
    "                x,\n",
    "                index_map=T.index_map(lambda i: (i % 16,)),\n",
    "                pad_value=None,\n",
    "                axis_separators=[],\n",
    "            )\n",
    "            # Apply ReLU on the padded tensor\n",
    "            lv1 = R.call_tir(\n",
    "                Before.relax_relu_replacement,\n",
    "                (lv,),\n",
    "                out_sinfo=R.Tensor((16,), dtype=\"float32\"),\n",
    "            )\n",
    "            # Unnecessary identity layout transform\n",
    "            lv2: R.Tensor((16,), dtype=\"float32\") = R.layout_transform(\n",
    "                lv1,\n",
    "                index_map=T.index_map(lambda axis0: (axis0,)),\n",
    "                pad_value=None,\n",
    "                axis_separators=[],\n",
    "            )\n",
    "            # Remove padding: back from 16 elements to 14\n",
    "            lv_1 = R.call_tir(\n",
    "                Before.remove_pad, (lv2,), out_sinfo=R.Tensor((14,), dtype=\"float32\")\n",
    "            )\n",
    "            # Repeat pad, ReLU, identity transform, and remove-pad; the\n",
    "            # intermediate remove-pad/pad pair should be optimized away\n",
    "            lv3: R.Tensor((16,), dtype=\"float32\") = R.layout_transform(\n",
    "                lv_1,\n",
    "                index_map=T.index_map(lambda i: (i % 16,)),\n",
    "                pad_value=None,\n",
    "                axis_separators=[],\n",
    "            )\n",
    "            lv4 = R.call_tir(\n",
    "                Before.relax_relu_replacement,\n",
    "                (lv3,),\n",
    "                out_sinfo=R.Tensor((16,), dtype=\"float32\"),\n",
    "            )\n",
    "            lv5: R.Tensor((16,), dtype=\"float32\") = R.layout_transform(\n",
    "                lv4,\n",
    "                index_map=T.index_map(lambda axis0: (axis0,)),\n",
    "                pad_value=None,\n",
    "                axis_separators=[],\n",
    "            )\n",
    "            lv_2 = R.call_tir(\n",
    "                Before.remove_pad, (lv5,), out_sinfo=R.Tensor((14,), dtype=\"float32\")\n",
    "            )\n",
    "            gv: R.Tensor((14,), dtype=\"float32\") = lv_2\n",
    "            R.output(gv)\n",
    "        return gv\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "4e4d2c59",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import tir as T</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #A2F\">@I</span><span style=\"color: #A2F; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #00F; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #A2F\">@T</span><span style=\"color: #A2F; font-weight: bold\">.</span>prim_func(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">relax_relu_replacement</span>(arg0: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">16</span>,), <span style=\"color: #BA2121\">&quot;float32&quot;</span>), output: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">16</span>,), <span style=\"color: #BA2121\">&quot;float32&quot;</span>)):\n",
       "        T<span style=\"color: #A2F; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;operator_name&quot;</span>: <span style=\"color: #BA2121\">&quot;relax.relu&quot;</span>})\n",
       "        <span style=\"color: #007979; font-style: italic\"># with T.block(&quot;root&quot;):</span>\n",
       "        <span style=\"color: #008000; font-weight: bold\">for</span> ax0 <span style=\"color: #008000; font-weight: bold\">in</span> range(<span style=\"color: #008000\">16</span>):\n",
       "            <span style=\"color: #008000; font-weight: bold\">with</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>block(<span style=\"color: #BA2121\">&quot;T_add&quot;</span>):\n",
       "                v_ax0 <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>axis<span style=\"color: #A2F; font-weight: bold\">.</span>spatial(<span style=\"color: #008000\">16</span>, ax0)\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>reads(arg0[v_ax0])\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>writes(output[v_ax0])\n",
       "                output[v_ax0] <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>max(arg0[v_ax0], T<span style=\"color: #A2F; font-weight: bold\">.</span>float32(<span style=\"color: #008000\">0.0</span>))\n",
       "\n",
       "    <span style=\"color: #A2F\">@T</span><span style=\"color: #A2F; font-weight: bold\">.</span>prim_func(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">remove_pad</span>(var_input: T<span style=\"color: #A2F; font-weight: bold\">.</span>handle, var_output: T<span style=\"color: #A2F; font-weight: bold\">.</span>handle):\n",
       "        T<span style=\"color: #A2F; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;operator_name&quot;</span>: <span style=\"color: #BA2121\">&quot;remove_pad&quot;</span>, <span style=\"color: #BA2121\">&quot;tir.noalias&quot;</span>: <span style=\"color: #008000; font-weight: bold\">True</span>})\n",
       "        p0 <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>int64()\n",
       "        input <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>match_buffer(var_input, (p0,))\n",
       "        i0 <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>int64()\n",
       "        output <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>match_buffer(var_output, (i0,))\n",
       "        <span style=\"color: #007979; font-style: italic\"># with T.block(&quot;root&quot;):</span>\n",
       "        <span style=\"color: #008000; font-weight: bold\">for</span> ax0 <span style=\"color: #008000; font-weight: bold\">in</span> range(i0):\n",
       "            <span style=\"color: #008000; font-weight: bold\">with</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>block(<span style=\"color: #BA2121\">&quot;output&quot;</span>):\n",
       "                v_ax0 <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>axis<span style=\"color: #A2F; font-weight: bold\">.</span>spatial(i0, ax0)\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>reads(input[v_ax0])\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>writes(output[v_ax0])\n",
       "                output[v_ax0] <span style=\"color: #A2F; font-weight: bold\">=</span> input[v_ax0]\n",
       "\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">main</span>(x: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">14</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">14</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        cls <span style=\"color: #A2F; font-weight: bold\">=</span> Module\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>dataflow():\n",
       "            lv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>layout_transform(x, index_map<span style=\"color: #A2F; font-weight: bold\">=</span>T<span style=\"color: #A2F; font-weight: bold\">.</span>index_map(<span style=\"color: #008000; font-weight: bold\">lambda</span> i: (i <span style=\"color: #A2F; font-weight: bold\">%</span> <span style=\"color: #008000\">16</span>,)), pad_value<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">None</span>, axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[], input_axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[])\n",
       "            lv1 <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #A2F; font-weight: bold\">.</span>relax_relu_replacement, (lv,), out_sinfo<span style=\"color: #A2F; font-weight: bold\">=</span>R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            lv4 <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #A2F; font-weight: bold\">.</span>relax_relu_replacement, (lv1,), out_sinfo<span style=\"color: #A2F; font-weight: bold\">=</span>R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            lv5: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>layout_transform(lv4, index_map<span style=\"color: #A2F; font-weight: bold\">=</span>T<span style=\"color: #A2F; font-weight: bold\">.</span>index_map(<span style=\"color: #008000; font-weight: bold\">lambda</span> axis0: (axis0,)), pad_value<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">None</span>, axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[], input_axis_separators<span style=\"color: #A2F; font-weight: bold\">=</span>[])\n",
       "            gv <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #A2F; font-weight: bold\">.</span>remove_pad, (lv5,), out_sinfo<span style=\"color: #A2F; font-weight: bold\">=</span>R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">14</span>,), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            R<span style=\"color: #A2F; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "_run_pass_compare_output(Before,)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f85aea55",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py313",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
