{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "d758cdbb",
   "metadata": {},
   "source": [
    "# TVM `FuseOps` 变换"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7c869f10",
   "metadata": {},
   "source": [
    "算子融合是一种优化技术，通过将多个连续的算子合并为单一的算子，减少内存传输和提高计算效率。\n",
    "\n",
    "测试涵盖了各种融合场景，包括简单算子融合、卷积算子融合、元组算子融合等。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3de7874b",
   "metadata": {},
   "outputs": [],
   "source": [
    "import tvm\n",
    "import tvm.testing\n",
    "from tvm import relax, topi\n",
    "from tvm.script import ir as I, relax as R, tir as T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "1d680ead",
   "metadata": {},
   "outputs": [],
   "source": [
    "def _check(mod_actual, mod_expected):\n",
    "    \"\"\"验证算子融合结果是否符合预期\n",
    "\n",
    "    参数:\n",
    "        mod_actual: 实际的IR模块，将经过算子融合转换\n",
    "        mod_expected: 预期的IR模块，作为参考标准\n",
    "    \"\"\"\n",
    "    # 为实际模块添加TIR算子模式注解并执行融合\n",
    "    mod_actual = relax.transform.AnnotateTIROpPattern()(mod_actual)\n",
    "    mod_actual = relax.transform.FuseOps()(mod_actual)\n",
    "    # 为预期模块添加TIR算子模式注解（但不执行融合）\n",
    "    mod_expected = relax.transform.AnnotateTIROpPattern()(mod_expected)\n",
    "    # 断言两个模块在结构上相等\n",
    "    tvm.ir.assert_structural_equal(mod_actual, mod_expected)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "349709c0",
   "metadata": {},
   "source": [
    "## 简单算子融合测试\n",
    "\n",
    "测试基本的算子融合功能：将 `add`、`exp` 和 `squeeze` 三个连续的算子融合为单一算子。\n",
    "    \n",
    "这是基础测试，验证融合机制的正确性。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "8d0bb84c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def before():\n",
    "    bb = relax.BlockBuilder()\n",
    "    x = relax.Var(\"x\", R.Tensor([10, 20], \"float32\"))\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.add, x, relax.const(1, \"float32\"))\n",
    "            lv1 = bb.emit_te(topi.exp, lv0)\n",
    "            gv = bb.emit_output(bb.call_te(topi.squeeze, lv1))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "74875afd",
   "metadata": {
    "tags": [
     "hide-output"
    ]
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import tir as T</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #A2F\">@I</span><span style=\"color: #A2F; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #00F; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #A2F\">@T</span><span style=\"color: #A2F; font-weight: bold\">.</span>prim_func(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">add</span>(x: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">10</span>), T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">20</span>)), <span style=\"color: #BA2121\">&quot;float32&quot;</span>), B: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((), <span style=\"color: #BA2121\">&quot;float32&quot;</span>), T_add: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">10</span>), T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">20</span>)), <span style=\"color: #BA2121\">&quot;float32&quot;</span>)):\n",
       "        T<span style=\"color: #A2F; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;op_pattern&quot;</span>: <span style=\"color: #008000\">0</span>, <span style=\"color: #BA2121\">&quot;tir.noalias&quot;</span>: <span style=\"color: #008000; font-weight: bold\">True</span>})\n",
       "        <span style=\"color: #007979; font-style: italic\"># with T.block(&quot;root&quot;):</span>\n",
       "        <span style=\"color: #008000; font-weight: bold\">for</span> ax0, ax1 <span style=\"color: #008000; font-weight: bold\">in</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>grid(T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">10</span>), T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">20</span>)):\n",
       "            <span style=\"color: #008000; font-weight: bold\">with</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>block(<span style=\"color: #BA2121\">&quot;T_add&quot;</span>):\n",
       "                v_ax0, v_ax1 <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>axis<span style=\"color: #A2F; font-weight: bold\">.</span>remap(<span style=\"color: #BA2121\">&quot;SS&quot;</span>, [ax0, ax1])\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>reads(x[v_ax0, v_ax1], B[()])\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>writes(T_add[v_ax0, v_ax1])\n",
       "                T_add[v_ax0, v_ax1] <span style=\"color: #A2F; font-weight: bold\">=</span> x[v_ax0, v_ax1] <span style=\"color: #A2F; font-weight: bold\">+</span> B[()]\n",
       "\n",
       "    <span style=\"color: #A2F\">@T</span><span style=\"color: #A2F; font-weight: bold\">.</span>prim_func(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">exp</span>(lv: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">10</span>), T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">20</span>)), <span style=\"color: #BA2121\">&quot;float32&quot;</span>), compute: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">10</span>), T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">20</span>)), <span style=\"color: #BA2121\">&quot;float32&quot;</span>)):\n",
       "        T<span style=\"color: #A2F; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;op_pattern&quot;</span>: <span style=\"color: #008000\">0</span>, <span style=\"color: #BA2121\">&quot;tir.noalias&quot;</span>: <span style=\"color: #008000; font-weight: bold\">True</span>})\n",
       "        <span style=\"color: #007979; font-style: italic\"># with T.block(&quot;root&quot;):</span>\n",
       "        <span style=\"color: #008000; font-weight: bold\">for</span> i0, i1 <span style=\"color: #008000; font-weight: bold\">in</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>grid(T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">10</span>), T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">20</span>)):\n",
       "            <span style=\"color: #008000; font-weight: bold\">with</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>block(<span style=\"color: #BA2121\">&quot;compute&quot;</span>):\n",
       "                v_i0, v_i1 <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>axis<span style=\"color: #A2F; font-weight: bold\">.</span>remap(<span style=\"color: #BA2121\">&quot;SS&quot;</span>, [i0, i1])\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>reads(lv[v_i0, v_i1])\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>writes(compute[v_i0, v_i1])\n",
       "                compute[v_i0, v_i1] <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>exp(lv[v_i0, v_i1])\n",
       "\n",
       "    <span style=\"color: #A2F\">@T</span><span style=\"color: #A2F; font-weight: bold\">.</span>prim_func(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">squeeze</span>(lv1: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">10</span>), T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">20</span>)), <span style=\"color: #BA2121\">&quot;float32&quot;</span>), T_squeeze: T<span style=\"color: #A2F; font-weight: bold\">.</span>Buffer((T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">10</span>), T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">20</span>)), <span style=\"color: #BA2121\">&quot;float32&quot;</span>)):\n",
       "        T<span style=\"color: #A2F; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;op_pattern&quot;</span>: <span style=\"color: #008000\">0</span>, <span style=\"color: #BA2121\">&quot;tir.noalias&quot;</span>: <span style=\"color: #008000; font-weight: bold\">True</span>})\n",
       "        <span style=\"color: #007979; font-style: italic\"># with T.block(&quot;root&quot;):</span>\n",
       "        <span style=\"color: #008000; font-weight: bold\">for</span> ax0, ax1 <span style=\"color: #008000; font-weight: bold\">in</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>grid(T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">10</span>), T<span style=\"color: #A2F; font-weight: bold\">.</span>int64(<span style=\"color: #008000\">20</span>)):\n",
       "            <span style=\"color: #008000; font-weight: bold\">with</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>block(<span style=\"color: #BA2121\">&quot;T_squeeze&quot;</span>):\n",
       "                v_ax0, v_ax1 <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>axis<span style=\"color: #A2F; font-weight: bold\">.</span>remap(<span style=\"color: #BA2121\">&quot;SS&quot;</span>, [ax0, ax1])\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>reads(lv1[v_ax0, v_ax1])\n",
       "                T<span style=\"color: #A2F; font-weight: bold\">.</span>writes(T_squeeze[v_ax0, v_ax1])\n",
       "                T_squeeze[v_ax0, v_ax1] <span style=\"color: #A2F; font-weight: bold\">=</span> lv1[v_ax0, v_ax1]\n",
       "\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">fused_add_exp_squeeze</span>(x: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">20</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), param_0: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">20</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        R<span style=\"color: #A2F; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;Primitive&quot;</span>: <span style=\"color: #008000; font-weight: bold\">True</span>})\n",
       "        cls <span style=\"color: #A2F; font-weight: bold\">=</span> Module\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>dataflow():\n",
       "            lv <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #A2F; font-weight: bold\">.</span>add, (x, param_0), out_sinfo<span style=\"color: #A2F; font-weight: bold\">=</span>R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">20</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            lv1 <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #A2F; font-weight: bold\">.</span>exp, (lv,), out_sinfo<span style=\"color: #A2F; font-weight: bold\">=</span>R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">20</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            gv <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #A2F; font-weight: bold\">.</span>squeeze, (lv1,), out_sinfo<span style=\"color: #A2F; font-weight: bold\">=</span>R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">20</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            R<span style=\"color: #A2F; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">main</span>(x: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">20</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">20</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        cls <span style=\"color: #A2F; font-weight: bold\">=</span> Module\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>dataflow():\n",
       "            gv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">20</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> cls<span style=\"color: #A2F; font-weight: bold\">.</span>fused_add_exp_squeeze(x, R<span style=\"color: #A2F; font-weight: bold\">.</span>const(<span style=\"color: #008000\">1.0</span>, <span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            R<span style=\"color: #A2F; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# 为实际模块添加 TIR 算子模式注解并执行融合\n",
    "mod_actual = before()\n",
    "mod_actual = relax.transform.AnnotateTIROpPattern()(mod_actual)\n",
    "mod_actual = relax.transform.FuseOps()(mod_actual)\n",
    "mod_actual.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9500375d",
   "metadata": {},
   "source": [
    "## 测试卷积算子融合的情况\n",
    "\n",
    "该测试验证卷积算子与加法算子的融合逻辑，特别是在存在多条计算路径的情况下，融合是否能够正确进行。测试会验证不同数据类型(float32, float16, int8)下的融合结果。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "f74de72c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def before(dtype):\n",
    "    \"\"\"创建融合前的计算图\n",
    "\n",
    "    参数:\n",
    "        dtype: 数据类型，如'float32', 'float16', 'int8'\n",
    "\n",
    "    返回:\n",
    "        融合前的Relax模块\n",
    "    \"\"\"\n",
    "    bb = relax.BlockBuilder()\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), dtype))  # 输入张量\n",
    "    w1 = relax.Var(\"w1\", R.Tensor((16, 16, 3, 3), dtype))  # 卷积核1\n",
    "    w2 = relax.Var(\"w2\", R.Tensor((16, 16, 1, 1), dtype))  # 卷积核2\n",
    "    w3 = relax.Var(\"w3\", R.Tensor((16, 16, 3, 3), dtype))  # 卷积核3\n",
    "    with bb.function(\"main\", [x, w1, w2, w3]):\n",
    "        with bb.dataflow():\n",
    "            # 输入加偏置\n",
    "            lv0 = bb.emit_te(topi.add, x, relax.const(1, dtype))\n",
    "            # 第一卷积层\n",
    "            lv1 = bb.emit_te(topi.nn.conv2d, lv0, w1, strides=1, padding=1, dilation=1)\n",
    "            # 这是下一个支配节点\n",
    "            lv2 = bb.emit_te(topi.add, relax.const(1, dtype), lv1)\n",
    "            lv3 = bb.emit_te(topi.add, lv1, lv2)\n",
    "            # 第二条路径\n",
    "            lv4 = bb.emit_te(topi.nn.conv2d, lv3, w2, strides=1, padding=0, dilation=1)\n",
    "            lv5 = bb.emit_te(topi.nn.conv2d, lv3, w3, strides=1, padding=1, dilation=1)\n",
    "            # 合并两条路径的结果\n",
    "            gv = bb.emit_output(bb.call_te(topi.add, lv4, lv5))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "12808703",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "def expected(dtype):\n",
    "    \"\"\"创建融合后的预期计算图\n",
    "\n",
    "    参数:\n",
    "        dtype: 数据类型，如'float32', 'float16', 'int8'\n",
    "\n",
    "    返回:\n",
    "        融合后的Relax模块\n",
    "    \"\"\"\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    # 融合函数1: conv2d + add1 + add2\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), dtype))\n",
    "    w = relax.Var(\"w\", R.Tensor((16, 16, 3, 3), dtype))\n",
    "    p0 = relax.Var(\"p0\", R.Tensor((), dtype))\n",
    "    with bb.function(\n",
    "        \"fused_conv2d_add1_add2\", [x, w, p0], attrs={\"Primitive\": True}, private=True\n",
    "    ):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(\n",
    "                topi.nn.conv2d,\n",
    "                x,\n",
    "                w,\n",
    "                strides=1,\n",
    "                padding=1,\n",
    "                dilation=1,\n",
    "                primfunc_name_hint=\"conv2d\",\n",
    "            )\n",
    "            lv1 = bb.emit_te(topi.add, p0, lv0, primfunc_name_hint=\"add1\")\n",
    "            gv = bb.emit_output(bb.call_te(topi.add, lv0, lv1, primfunc_name_hint=\"add2\"))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    # 融合函数2: conv2d1 + add2\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), dtype))\n",
    "    w = relax.Var(\"w\", R.Tensor((16, 16, 1, 1), dtype))\n",
    "    y = relax.Var(\"y\", R.Tensor((1, 16, 64, 64), dtype))\n",
    "    with bb.function(\"fused_conv2d1_add2\", [x, w, y], attrs={\"Primitive\": True}, private=True):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(\n",
    "                topi.nn.conv2d,\n",
    "                x,\n",
    "                w,\n",
    "                strides=1,\n",
    "                padding=0,\n",
    "                dilation=1,\n",
    "                primfunc_name_hint=\"conv2d1\",\n",
    "            )\n",
    "            gv = bb.emit_output(bb.call_te(topi.add, lv0, y, primfunc_name_hint=\"add2\"))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    # 获取融合函数的全局变量\n",
    "    mod = bb.get()\n",
    "    fused_conv2d_add1_add2 = mod.get_global_var(\"fused_conv2d_add1_add2\")\n",
    "    fused_conv2d1_add2 = mod.get_global_var(\"fused_conv2d1_add2\")\n",
    "\n",
    "    # 主函数\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), dtype))\n",
    "    w1 = relax.Var(\"w1\", R.Tensor((16, 16, 3, 3), dtype))\n",
    "    w2 = relax.Var(\"w2\", R.Tensor((16, 16, 1, 1), dtype))\n",
    "    w3 = relax.Var(\"w3\", R.Tensor((16, 16, 3, 3), dtype))\n",
    "    with bb.function(\"main\", [x, w1, w2, w3]):\n",
    "        with bb.dataflow():\n",
    "            # 输入加偏置\n",
    "            lv0 = bb.emit_te(topi.add, x, relax.const(1, dtype))\n",
    "            # 调用融合函数1\n",
    "            lv1 = bb.emit(relax.Call(fused_conv2d_add1_add2, [lv0, w1, relax.const(1, dtype)]))\n",
    "            # 未融合的卷积层\n",
    "            lv2 = bb.emit_te(\n",
    "                topi.nn.conv2d,\n",
    "                lv1,\n",
    "                w3,\n",
    "                strides=1,\n",
    "                padding=1,\n",
    "                dilation=1,\n",
    "            )\n",
    "            # 调用融合函数2\n",
    "            gv = bb.emit_output(relax.Call(fused_conv2d1_add2, [lv1, w2, lv2]))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "328f372f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 验证不同数据类型下的融合结果\n",
    "_check(before(\"float32\"), expected(\"float32\"))\n",
    "_check(before(\"float16\"), expected(\"float16\"))\n",
    "_check(before(\"int8\"), expected(\"int8\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bbb1578a",
   "metadata": {},
   "source": [
    "## 涉及连接算子和元组节点的融合测试\n",
    "\n",
    "测试包含 `concatenate` 算子和 Tuple 节点的融合场景。\n",
    "    \n",
    "具体测试 `max_pool2d`、`upsampling`、`concatenate` 和 `add` 四个算子的融合。此测试验证了包含复杂数据结构（如元组）的算子融合机制。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "31bd8736",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def before():\n",
    "    \"\"\"创建融合前的计算图\n",
    "\n",
    "    返回:\n",
    "        融合前的Relax模块，包含max_pool2d、upsampling、concatenate和add算子\n",
    "    \"\"\"\n",
    "    bb = relax.BlockBuilder()\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))  # 输入张量 (批次大小, 通道数, 高度, 宽度)\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            # 最大池化操作，2x2核，步长2，无填充\n",
    "            lv0 = bb.emit_te(\n",
    "                topi.nn.pool2d,\n",
    "                x,\n",
    "                kernel=(2, 2),\n",
    "                stride=(2, 2),\n",
    "                dilation=(1, 1),\n",
    "                padding=(0, 0, 0, 0),\n",
    "                pool_type=\"max\",\n",
    "            )\n",
    "            # 上采样操作，将特征图大小放大2倍\n",
    "            lv1 = bb.emit_te(topi.nn.upsampling, lv0, scale_h=2.0, scale_w=2.0)\n",
    "            # 在通道维度(axis=1)上拼接上采样结果和原始输入\n",
    "            lv2 = bb.emit_te(topi.concatenate, (lv1, x), axis=1)\n",
    "            # 对拼接结果加1\n",
    "            gv = bb.emit_output(bb.call_te(topi.add, lv2, relax.const(1, \"float32\")))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "78c156f8",
   "metadata": {},
   "outputs": [],
   "source": [
    "def expected():\n",
    "    \"\"\"\n",
    "    创建融合后的计算图，验证算子融合结果\n",
    "    \n",
    "    返回:\n",
    "        relax.IRModule: 融合后的计算图模块，包含融合的上采样、拼接和加法算子\n",
    "    \"\"\"\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    # 融合函数定义 - 将上采样、拼接和加法算子融合为一个函数\n",
    "    # 输入变量定义\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))  # 输入张量 (批次大小, 通道数, 高度, 宽度)\n",
    "    w = relax.Var(\"w\", R.Tensor((1, 16, 32, 32), \"float32\"))  # 权重张量 (输出通道, 输入通道, 高度, 宽度)\n",
    "    p0 = relax.Var(\"p0\", R.Tensor((), \"float32\"))  # 标量参数\n",
    "    \n",
    "    # 定义融合函数 fused_upsampling_concatenate_add\n",
    "    with bb.function(\n",
    "        \"fused_upsampling_concatenate_add\", [w, x, p0], attrs={\"Primitive\": True}, private=True\n",
    "    ):\n",
    "        with bb.dataflow():\n",
    "            # 上采样操作：将权重张量大小放大2倍\n",
    "            lv0 = bb.emit_te(topi.nn.upsampling, w, scale_h=2.0, scale_w=2.0)\n",
    "            # 拼接操作：在通道维度(axis=1)上拼接上采样结果和原始输入\n",
    "            lv1 = bb.emit_te(topi.concatenate, (lv0, x), axis=1)\n",
    "            # 加法操作：对拼接结果加标量参数p0\n",
    "            gv = bb.emit_output(bb.call_te(topi.add, lv1, p0))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    # 获取融合函数的全局变量引用\n",
    "    fused_upsampling_concatenate_add = bb.get().get_global_var(\n",
    "        \"fused_upsampling_concatenate_add\"\n",
    "    )\n",
    "\n",
    "    # 主函数定义\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))  # 主函数输入张量\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            # 最大池化操作：2x2核，步长2，无填充\n",
    "            lv0 = bb.emit_te(\n",
    "                topi.nn.pool2d,\n",
    "                x,\n",
    "                kernel=(2, 2),\n",
    "                stride=(2, 2),\n",
    "                dilation=(1, 1),\n",
    "                padding=(0, 0, 0, 0),\n",
    "                pool_type=\"max\",\n",
    "            )\n",
    "            # 调用融合函数，传入池化结果、原始输入和常数1\n",
    "            gv = bb.emit_output(\n",
    "                relax.Call(\n",
    "                    fused_upsampling_concatenate_add, (lv0, x, relax.const(1, \"float32\"))\n",
    "                )\n",
    "            )\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "_check(before(), expected())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d737bb02",
   "metadata": {},
   "source": [
    "## 测试当 `Tuple` 节点是其组中的根节点时的算子融合情况\n",
    "\n",
    "此测试验证当数据流向以 `Tuple` 节点为根时，融合逻辑是否正确处理这种情况。\n",
    "    \n",
    "预期结果：由于 `Tuple` 节点作为输出根节点，不会发生算子融合。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "6f3f600e",
   "metadata": {},
   "outputs": [],
   "source": [
    "def before():\n",
    "    \"\"\"\n",
    "    创建融合前的计算图\n",
    "    \n",
    "    返回:\n",
    "        relax.IRModule: 包含池化、上采样和Tuple输出的计算图模块\n",
    "    \"\"\"\n",
    "    bb = relax.BlockBuilder()\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))  # 输入张量 (批次大小, 通道数, 高度, 宽度)\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            # 最大池化操作：2x2核，步长2，无填充\n",
    "            lv0 = bb.emit_te(\n",
    "                topi.nn.pool2d,\n",
    "                x,\n",
    "                kernel=(2, 2),\n",
    "                stride=(2, 2),\n",
    "                dilation=(1, 1),\n",
    "                padding=(0, 0, 0, 0),\n",
    "                pool_type=\"max\",\n",
    "            )\n",
    "            # 上采样操作：将池化结果大小放大2倍\n",
    "            lv1 = bb.emit_te(topi.nn.upsampling, lv0, scale_h=2.0, scale_w=2.0)\n",
    "            # 输出Tuple节点：包含上采样结果和原始输入\n",
    "            gv = bb.emit_output((lv1, x))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "# 预期融合不会发生变化，因此将原始图与自身比较\n",
    "_check(before(), before())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "39385e7b",
   "metadata": {},
   "source": [
    "## 元组获取与元素级算子的融合测试\n",
    "\n",
    "测试从元组中获取元素并进行元素级算子的融合场景。\n",
    "    \n",
    "具体测试 `split`、`TupleGetItem`、`sigmoid`、`tanh`、`exp`、`multiply` 和 `add` 等算子的融合。\n",
    "\n",
    "此测试验证了元组算子与元素级计算的融合机制。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "beae46b5",
   "metadata": {},
   "outputs": [],
   "source": [
    "def before(dim: int):\n",
    "    bb = relax.BlockBuilder()\n",
    "    x = relax.Var(\"x\", R.Tensor((1, dim), \"float32\"))\n",
    "    w = relax.Var(\"w\", R.Tensor((3 * dim, dim), \"float32\"))\n",
    "    with bb.function(\"main\", [x, w]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.nn.dense, x, w)\n",
    "            lv1 = bb.emit_te(topi.split, lv0, indices_or_sections=3, axis=1)\n",
    "            lv2 = bb.emit(relax.TupleGetItem(lv1, 0))\n",
    "            lv3 = bb.emit_te(topi.sigmoid, lv2)\n",
    "            lv4 = bb.emit(relax.TupleGetItem(lv1, 1))\n",
    "            lv5 = bb.emit_te(topi.tanh, lv4)\n",
    "            lv6 = bb.emit(relax.TupleGetItem(lv1, 2))\n",
    "            lv7 = bb.emit_te(topi.exp, lv6)\n",
    "            lv8 = bb.emit_te(topi.multiply, lv5, lv7)\n",
    "            gv = bb.emit_output(bb.call_te(topi.add, lv3, lv8))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "081683de",
   "metadata": {},
   "outputs": [],
   "source": [
    "def expected(dim: int):\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    # Grouped function\n",
    "    dense = relax.Var(\"dense\", R.Tensor((1, 3 * dim), \"float32\"))\n",
    "    with bb.function(\n",
    "        \"fused_split_sigmoid_tanh_exp_multiply_add\",\n",
    "        [dense],\n",
    "        attrs={\"Primitive\": True},\n",
    "        private=True,\n",
    "    ):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.split, dense, indices_or_sections=3, axis=1)\n",
    "            lv1 = bb.emit(relax.TupleGetItem(lv0, 0))\n",
    "            lv2 = bb.emit_te(topi.sigmoid, lv1)\n",
    "            lv3 = bb.emit(relax.TupleGetItem(lv0, 1))\n",
    "            lv4 = bb.emit_te(topi.tanh, lv3)\n",
    "            lv5 = bb.emit(relax.TupleGetItem(lv0, 2))\n",
    "            lv6 = bb.emit_te(topi.exp, lv5)\n",
    "            lv7 = bb.emit_te(topi.multiply, lv4, lv6)\n",
    "            gv = bb.emit_output(bb.call_te(topi.add, lv2, lv7))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    # Get the global variables of the grouped functions\n",
    "    fused_split_sigmoid_tanh_exp_multiply_add = bb.get().get_global_var(\n",
    "        \"fused_split_sigmoid_tanh_exp_multiply_add\"\n",
    "    )\n",
    "\n",
    "    # Main function\n",
    "    x = relax.Var(\"x\", R.Tensor((1, dim), \"float32\"))\n",
    "    w = relax.Var(\"w\", R.Tensor((3 * dim, dim), \"float32\"))\n",
    "    with bb.function(\"main\", [x, w]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.nn.dense, x, w)\n",
    "            gv = bb.emit_output(relax.Call(fused_split_sigmoid_tanh_exp_multiply_add, (lv0,)))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "dim = 10\n",
    "_check(before(dim), expected(dim))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "88ff1634",
   "metadata": {},
   "source": [
    "## 元组获取作为根节点的融合测试\n",
    "\n",
    "测试 `TupleGetItem` 算子作为根节点的融合场景。\n",
    "    \n",
    "具体测试 `split` 和 `TupleGetItem` 的融合，以及后续的 `dense` 算子。\n",
    "    \n",
    "此测试验证了元组算子作为根节点时的融合行为。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "55d9619d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def before(dim: int):\n",
    "    bb = relax.BlockBuilder()\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 3 * dim), \"float32\"))\n",
    "    w = relax.Var(\"w\", R.Tensor((dim, dim), \"float32\"))\n",
    "    with bb.function(\"main\", [x, w]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.split, x, indices_or_sections=3, axis=1)\n",
    "            lv1 = bb.emit(relax.TupleGetItem(lv0, 0))\n",
    "            gv = bb.emit_output(bb.call_te(topi.nn.dense, lv1, w))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "e0051eab",
   "metadata": {},
   "outputs": [],
   "source": [
    "def expected(dim: int):\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    # Grouped function\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 3 * dim), \"float32\"))\n",
    "    with bb.function(\"fused_split\", [x], attrs={\"Primitive\": True}, private=True):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.split, x, indices_or_sections=3, axis=1)\n",
    "            gv = bb.emit_output(relax.TupleGetItem(lv0, 0))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    # Get the global variables of the grouped functions\n",
    "    fused_split = bb.get().get_global_var(\"fused_split\")\n",
    "\n",
    "    # Main function\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 3 * dim), \"float32\"))\n",
    "    w = relax.Var(\"w\", R.Tensor((dim, dim), \"float32\"))\n",
    "    with bb.function(\"main\", [x, w]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit(relax.Call(fused_split, (x,)))\n",
    "            gv = bb.emit_output(bb.call_te(topi.nn.dense, lv0, w))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "dim = 10\n",
    "_check(before(dim), expected(dim))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a104fd00",
   "metadata": {},
   "source": [
    "## 元组作为中间节点的融合测试\n",
    "\n",
    "测试元组作为中间节点的融合场景。\n",
    "    \n",
    "具体测试多个 `squeeze`、`add` 和 `concatenate` 算子的融合。\n",
    "    \n",
    "此测试验证了包含多个中间元组节点的复杂计算图的融合机制。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "257c57af",
   "metadata": {},
   "outputs": [],
   "source": [
    "def before():\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.squeeze, x)\n",
    "            lv1 = bb.emit_te(topi.add, lv0, relax.const(1, \"float32\"))\n",
    "            lv2 = bb.emit_te(topi.squeeze, lv0)\n",
    "            lv3 = bb.emit_te(topi.add, lv2, relax.const(1, \"float32\"))\n",
    "            lv4 = bb.emit_te(topi.add, lv3, relax.const(1, \"float32\"))\n",
    "            lv5 = bb.emit_te(topi.add, lv0, relax.const(1, \"float32\"))\n",
    "            lv6 = bb.emit_te(topi.concatenate, (lv1, lv4, lv5), axis=1)\n",
    "            lv7 = bb.emit_te(topi.squeeze, lv6)\n",
    "            gv = bb.emit_output(bb.call_te(topi.add, lv7, relax.const(1, \"float32\")))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "def expected():\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    # Grouped function\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))\n",
    "    p0 = relax.Var(\"p0\", R.Tensor((), \"float32\"))\n",
    "    p1 = relax.Var(\"p1\", R.Tensor((), \"float32\"))\n",
    "    p2 = relax.Var(\"p2\", R.Tensor((), \"float32\"))\n",
    "    p3 = relax.Var(\"p3\", R.Tensor((), \"float32\"))\n",
    "    p4 = relax.Var(\"p4\", R.Tensor((), \"float32\"))\n",
    "    with bb.function(\n",
    "        \"fused_squeeze_add_squeeze1_add_add_add_concatenate_squeeze2_add1\",\n",
    "        [x, p0, p1, p2, p3, p4],\n",
    "        attrs={\"Primitive\": True},\n",
    "        private=True,\n",
    "    ):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.squeeze, x)\n",
    "            lv1 = bb.emit_te(topi.add, lv0, p0)\n",
    "            lv2 = bb.emit_te(topi.squeeze, lv0)\n",
    "            lv3 = bb.emit_te(topi.add, lv2, p1)\n",
    "            lv4 = bb.emit_te(topi.add, lv3, p2)\n",
    "            lv5 = bb.emit_te(topi.add, lv0, p3)\n",
    "            lv6 = bb.emit_te(topi.concatenate, (lv1, lv4, lv5), axis=1)\n",
    "            lv7 = bb.emit_te(topi.squeeze, lv6)\n",
    "            gv = bb.emit_output(bb.call_te(topi.add, lv7, p4))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    # Get the global variables of the grouped functions\n",
    "    fused_func = bb.get().get_global_var(\n",
    "        \"fused_squeeze_add_squeeze1_add_add_add_concatenate_squeeze2_add1\"\n",
    "    )\n",
    "\n",
    "    # Main func\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            gv = bb.emit_output(\n",
    "                relax.Call(\n",
    "                    fused_func,\n",
    "                    (\n",
    "                        x,\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                    ),\n",
    "                )\n",
    "            )\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "_check(before(), expected())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "19a5585a",
   "metadata": {},
   "source": [
    "## 连续元组算子的融合测试\n",
    "\n",
    "测试连续元组算子的融合场景。\n",
    "\n",
    "具体测试多次 `add`、`concatenate`、`pool2d` 等算子的融合。此测试验证了包含重复模式和多层次元组算子的复杂计算图的融合机制。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "203a8ba2",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def before():\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.add, x, relax.const(1, \"float32\"))\n",
    "            lv1 = bb.emit_te(topi.add, x, relax.const(1, \"float32\"))\n",
    "            lv2 = bb.emit_te(topi.add, x, relax.const(1, \"float32\"))\n",
    "            lv3 = bb.emit_te(topi.concatenate, (lv0, lv1, lv2), axis=1)\n",
    "            lv4 = bb.emit_te(topi.add, lv3, relax.const(1, \"float32\"))\n",
    "            lv5 = bb.emit_te(topi.add, x, relax.const(1, \"float32\"))\n",
    "            lv6 = bb.emit_te(topi.add, x, relax.const(1, \"float32\"))\n",
    "            lv7 = bb.emit_te(topi.add, x, relax.const(1, \"float32\"))\n",
    "            lv8 = bb.emit_te(topi.concatenate, (lv5, lv6, lv7), axis=1)\n",
    "            lv9 = bb.emit_te(topi.add, lv8, relax.const(1, \"float32\"))\n",
    "            lv10 = bb.emit_te(topi.add, x, relax.const(1, \"float32\"))\n",
    "            lv11 = bb.emit_te(topi.add, x, relax.const(1, \"float32\"))\n",
    "            lv12 = bb.emit_te(topi.add, x, relax.const(1, \"float32\"))\n",
    "            lv13 = bb.emit_te(topi.concatenate, (lv10, lv11, lv12), axis=1)\n",
    "            lv14 = bb.emit_te(topi.add, lv13, relax.const(1, \"float32\"))\n",
    "            lv15 = bb.emit_te(topi.concatenate, (lv4, lv9, lv14), axis=1)\n",
    "            lv16 = bb.emit_te(\n",
    "                topi.nn.pool2d,\n",
    "                lv15,\n",
    "                kernel=(2, 2),\n",
    "                stride=(2, 2),\n",
    "                dilation=(1, 1),\n",
    "                padding=(0, 0, 0, 0),\n",
    "                pool_type=\"max\",\n",
    "            )\n",
    "            lv17 = bb.emit_te(topi.add, lv16, relax.const(1, \"float32\"))\n",
    "            lv18 = bb.emit_te(topi.add, lv17, relax.const(1, \"float32\"))\n",
    "            gv = bb.emit_output((lv17, lv18))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "4fdbdca4",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def expected():\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    # Grouped function 1\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))\n",
    "    p0 = relax.Var(\"p0\", R.Tensor((), \"float32\"))\n",
    "    p1 = relax.Var(\"p1\", R.Tensor((), \"float32\"))\n",
    "    p2 = relax.Var(\"p2\", R.Tensor((), \"float32\"))\n",
    "    p3 = relax.Var(\"p3\", R.Tensor((), \"float32\"))\n",
    "    p4 = relax.Var(\"p4\", R.Tensor((), \"float32\"))\n",
    "    p5 = relax.Var(\"p5\", R.Tensor((), \"float32\"))\n",
    "    p6 = relax.Var(\"p6\", R.Tensor((), \"float32\"))\n",
    "    p7 = relax.Var(\"p7\", R.Tensor((), \"float32\"))\n",
    "    p8 = relax.Var(\"p8\", R.Tensor((), \"float32\"))\n",
    "    p9 = relax.Var(\"p9\", R.Tensor((), \"float32\"))\n",
    "    p10 = relax.Var(\"p10\", R.Tensor((), \"float32\"))\n",
    "    p11 = relax.Var(\"p11\", R.Tensor((), \"float32\"))\n",
    "    with bb.function(\n",
    "        \"fused_add_add_add_concatenate_add1_add_add_add_concatenate_add1_add_add_add_concatenate_add1_concatenate1\",\n",
    "        [x, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11],\n",
    "        attrs={\"Primitive\": True},\n",
    "        private=True,\n",
    "    ):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.add, x, p0)\n",
    "            lv1 = bb.emit_te(topi.add, x, p1)\n",
    "            lv2 = bb.emit_te(topi.add, x, p2)\n",
    "            lv3 = bb.emit_te(topi.concatenate, (lv0, lv1, lv2), axis=1)\n",
    "            lv4 = bb.emit_te(topi.add, lv3, p3)\n",
    "            lv5 = bb.emit_te(topi.add, x, p4)\n",
    "            lv6 = bb.emit_te(topi.add, x, p5)\n",
    "            lv7 = bb.emit_te(topi.add, x, p6)\n",
    "            lv8 = bb.emit_te(topi.concatenate, (lv5, lv6, lv7), axis=1)\n",
    "            lv9 = bb.emit_te(topi.add, lv8, p7)\n",
    "            lv10 = bb.emit_te(topi.add, x, p8)\n",
    "            lv11 = bb.emit_te(topi.add, x, p9)\n",
    "            lv12 = bb.emit_te(topi.add, x, p10)\n",
    "            lv13 = bb.emit_te(topi.concatenate, (lv10, lv11, lv12), axis=1)\n",
    "            lv14 = bb.emit_te(topi.add, lv13, p11)\n",
    "            gv = bb.emit_output(bb.call_te(topi.concatenate, (lv4, lv9, lv14), axis=1))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    # Grouped function 2\n",
    "    concat = relax.Var(\"concat\", R.Tensor((1, 144, 64, 64), \"float32\"))\n",
    "    p0 = relax.Var(\"p0\", R.Tensor((), \"float32\"))\n",
    "    with bb.function(\n",
    "        \"fused_pool2d_add2\", [concat, p0], attrs={\"Primitive\": True}, private=True\n",
    "    ):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(\n",
    "                topi.nn.pool2d,\n",
    "                concat,\n",
    "                kernel=(2, 2),\n",
    "                stride=(2, 2),\n",
    "                dilation=(1, 1),\n",
    "                padding=(0, 0, 0, 0),\n",
    "                pool_type=\"max\",\n",
    "            )\n",
    "            gv = bb.emit_output(bb.call_te(topi.add, lv0, p0))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    # Get the global variables of the grouped functions\n",
    "    mod = bb.get()\n",
    "    fused_func1 = mod.get_global_var(\n",
    "        \"fused_add_add_add_concatenate_add1_add_add_add_concatenate_add1_add_add_add_concatenate_add1_concatenate1\"\n",
    "    )\n",
    "    fused_func2 = mod.get_global_var(\"fused_pool2d_add2\")\n",
    "\n",
    "    # Main function\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit(\n",
    "                relax.Call(\n",
    "                    fused_func1,\n",
    "                    (\n",
    "                        x,\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                        relax.const(1, \"float32\"),\n",
    "                    ),\n",
    "                )\n",
    "            )\n",
    "            lv1 = bb.emit(relax.Call(fused_func2, (lv0, relax.const(1, \"float32\"))))\n",
    "            lv2 = bb.emit_te(topi.add, lv1, relax.const(1, \"float32\"))\n",
    "            gv = bb.emit_output((lv1, lv2))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "_check(before(), expected())\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "faaaf0c6",
   "metadata": {},
   "source": [
    "## 测试类似 Inception 网络结构的算子融合情况\n",
    "\n",
    "该测试验证具有分支结构的网络（如Inception模块）中，卷积和激活函数等算子的融合逻辑是否正确。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "97b66668",
   "metadata": {},
   "outputs": [],
   "source": [
    "def before():\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))\n",
    "    w0 = relax.Var(\"w0\", R.Tensor((16, 16, 3, 3), \"float32\"))\n",
    "    w1 = relax.Var(\"w1\", R.Tensor((16, 16, 3, 3), \"float32\"))\n",
    "    w2 = relax.Var(\"w2\", R.Tensor((16, 32, 3, 3), \"float32\"))\n",
    "    w3 = relax.Var(\"w3\", R.Tensor((16, 32, 3, 3), \"float32\"))\n",
    "    with bb.function(\"main\", [x, w0, w1, w2, w3]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.nn.conv2d, x, w0, strides=1, padding=1, dilation=1)\n",
    "            lv1 = bb.emit_te(topi.nn.relu, lv0)\n",
    "            lv2 = bb.emit_te(topi.nn.conv2d, x, w1, strides=1, padding=1, dilation=1)\n",
    "            lv3 = bb.emit_te(topi.nn.relu, lv2)\n",
    "            lv4 = bb.emit_te(topi.concatenate, (lv1, lv3), axis=1)\n",
    "            lv5 = bb.emit_te(topi.nn.conv2d, lv4, w2, strides=1, padding=1, dilation=1)\n",
    "            lv6 = bb.emit_te(topi.nn.relu, lv5)\n",
    "            lv7 = bb.emit_te(topi.nn.conv2d, lv4, w3, strides=1, padding=1, dilation=1)\n",
    "            lv8 = bb.emit_te(topi.nn.relu, lv7)\n",
    "            gv = bb.emit_output(bb.call_te(topi.concatenate, (lv6, lv8), axis=1))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "2fb7ab6b",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def expected():\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    # Grouped function 1\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))\n",
    "    w = relax.Var(\"w\", R.Tensor((16, 16, 3, 3), \"float32\"))\n",
    "    with bb.function(\"fused_conv2d_relu\", [x, w], attrs={\"Primitive\": True}, private=True):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(\n",
    "                topi.nn.conv2d,\n",
    "                x,\n",
    "                w,\n",
    "                strides=1,\n",
    "                padding=1,\n",
    "                dilation=1,\n",
    "                primfunc_name_hint=\"conv2d\",\n",
    "            )\n",
    "            gv = bb.emit_output(bb.call_te(topi.nn.relu, lv0))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    # Grouped function 2\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 32, 64, 64), \"float32\"))\n",
    "    w = relax.Var(\"w\", R.Tensor((16, 32, 3, 3), \"float32\"))\n",
    "    with bb.function(\"fused_conv2d1_relu\", [x, w], attrs={\"Primitive\": True}, private=True):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(\n",
    "                topi.nn.conv2d,\n",
    "                x,\n",
    "                w,\n",
    "                strides=1,\n",
    "                padding=1,\n",
    "                dilation=1,\n",
    "                primfunc_name_hint=\"conv2d1\",\n",
    "            )\n",
    "            gv = bb.emit_output(bb.call_te(topi.nn.relu, lv0))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    # Get the global variables of the grouped functions\n",
    "    mod = bb.get()\n",
    "    fused_conv2d_relu1 = mod.get_global_var(\"fused_conv2d_relu\")\n",
    "    fused_conv2d_relu2 = mod.get_global_var(\"fused_conv2d1_relu\")\n",
    "\n",
    "    # Main function\n",
    "    x = relax.Var(\"x\", R.Tensor((1, 16, 64, 64), \"float32\"))\n",
    "    w0 = relax.Var(\"w0\", R.Tensor((16, 16, 3, 3), \"float32\"))\n",
    "    w1 = relax.Var(\"w1\", R.Tensor((16, 16, 3, 3), \"float32\"))\n",
    "    w2 = relax.Var(\"w2\", R.Tensor((16, 32, 3, 3), \"float32\"))\n",
    "    w3 = relax.Var(\"w3\", R.Tensor((16, 32, 3, 3), \"float32\"))\n",
    "    with bb.function(\"main\", [x, w0, w1, w2, w3]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit(relax.Call(fused_conv2d_relu1, (x, w0)))\n",
    "            lv1 = bb.emit(relax.Call(fused_conv2d_relu1, (x, w1)))\n",
    "            lv2 = bb.emit_te(topi.concatenate, (lv0, lv1), axis=1)\n",
    "            lv3 = bb.emit(relax.Call(fused_conv2d_relu2, (lv2, w2)))\n",
    "            lv4 = bb.emit(relax.Call(fused_conv2d_relu2, (lv2, w3)))\n",
    "            gv = bb.emit_output(bb.call_te(topi.concatenate, (lv3, lv4), axis=1))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "_check(before(), expected())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "eac48465",
   "metadata": {},
   "source": [
    "## 测试并行的单射算子融合\n",
    "\n",
    "该测试验证多个并行的单射算子（如add、squeeze、transpose）是否能被正确融合为复合算子。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "d22bea2b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def before():\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    x = relax.Var(\"x\", R.Tensor((10, 20), \"int32\"))\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.add, x, relax.const(1, \"int32\"))\n",
    "            lv1 = bb.emit_te(topi.squeeze, lv0)\n",
    "            lv2 = bb.emit_te(topi.transpose, lv0, axes=[1, 0])\n",
    "            lv3 = bb.emit_te(topi.transpose, lv2, axes=[1, 0])\n",
    "            gv = bb.emit_output(bb.call_te(topi.left_shift, lv1, lv3))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "39690a3d",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "def expected():\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    # Grouped function\n",
    "    x = relax.Var(\"x\", R.Tensor((10, 20), \"int32\"))\n",
    "    p0 = relax.Var(\"p0\", R.Tensor((), \"int32\"))\n",
    "    with bb.function(\n",
    "        \"fused_add_squeeze_transpose_transpose1_left_shift\",\n",
    "        [x, p0],\n",
    "        attrs={\"Primitive\": True},\n",
    "        private=True,\n",
    "    ):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.add, x, p0)\n",
    "            lv1 = bb.emit_te(topi.squeeze, lv0)\n",
    "            lv2 = bb.emit_te(topi.transpose, lv0, axes=[1, 0])\n",
    "            lv3 = bb.emit_te(topi.transpose, lv2, axes=[1, 0], primfunc_name_hint=\"transpose1\")\n",
    "            gv = bb.emit_output(bb.call_te(topi.left_shift, lv1, lv3))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    # Get the global variables of the grouped functions\n",
    "    fused_func = bb.get().get_global_var(\"fused_add_squeeze_transpose_transpose1_left_shift\")\n",
    "\n",
    "    # Main function\n",
    "    x = relax.Var(\"x\", R.Tensor((10, 20), \"int32\"))\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            gv = bb.emit_output(relax.Call(fused_func, (x, relax.const(1, \"int32\"))))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "_check(before(), expected())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "af1565b9",
   "metadata": {},
   "source": [
    "## 测试softmax算子与后续算子的融合情况\n",
    "\n",
    "该测试验证 `softmax` 算子是否能与后续的 `cast` 算子融合，以优化执行效率。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "9446404f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def before():\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    x = relax.Var(\"x\", R.Tensor((16, 16), \"float32\"))\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.nn.softmax, x)\n",
    "            gv = bb.emit_output(bb.call_te(topi.cast, lv0, dtype=\"float16\"))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "def expected():\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    # Grouped function\n",
    "    x = relax.Var(\"x\", R.Tensor((16, 16), \"float32\"))\n",
    "    with bb.function(\"fused_softmax_cast\", [x], attrs={\"Primitive\": True}, private=True):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.nn.softmax, x)\n",
    "            gv = bb.emit_output(bb.call_te(topi.cast, lv0, dtype=\"float16\"))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    # Get the global variables of the grouped functions\n",
    "    fused_func = bb.get().get_global_var(\"fused_softmax_cast\")\n",
    "\n",
    "    # Main function\n",
    "    x = relax.Var(\"x\", R.Tensor((16, 16), \"float32\"))\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            gv = bb.emit_output(relax.Call(fused_func, (x,)))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "_check(before(), expected())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "66e7f692",
   "metadata": {},
   "source": [
    "## 测试多个 Relax 函数的算子融合\n",
    "\n",
    "该测试验证在包含多个独立函数的模块中，每个函数内的算子是否能被正确融合。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "28cc43de",
   "metadata": {},
   "outputs": [],
   "source": [
    "def before():\n",
    "    bb = relax.BlockBuilder()\n",
    "    x = relax.Var(\"x\", R.Tensor([10, 20], \"float32\"))\n",
    "    with bb.function(\"func1\", [x]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.add, x, relax.const(1, \"float32\"))\n",
    "            lv1 = bb.emit_te(topi.exp, lv0)\n",
    "            gv = bb.emit_output(bb.call_te(topi.squeeze, lv1))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    x = relax.Var(\"x\", R.Tensor([20, 10], \"float32\"))\n",
    "    with bb.function(\"func2\", [x]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.add, x, relax.const(1, \"float32\"))\n",
    "            lv1 = bb.emit_te(topi.exp, lv0)\n",
    "            gv = bb.emit_output(bb.call_te(topi.squeeze, lv1))\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "def expected():\n",
    "    bb = relax.BlockBuilder()\n",
    "\n",
    "    x = relax.Var(\"x\", R.Tensor([10, 20], \"float32\"))\n",
    "    p0 = relax.Var(\"p0\", R.Tensor((), \"float32\"))\n",
    "    with bb.function(\"fused_add_exp_squeeze\", [x, p0], attrs={\"Primitive\": True}, private=True):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.add, x, p0)\n",
    "            lv1 = bb.emit_te(topi.exp, lv0)\n",
    "            gv = bb.emit_output(bb.call_te(topi.squeeze, lv1))\n",
    "        bb.emit_func_output(gv)\n",
    "    fused_add_exp_squeeze = bb.get().get_global_var(\"fused_add_exp_squeeze\")\n",
    "\n",
    "    x = relax.Var(\"x\", R.Tensor([20, 10], \"float32\"))\n",
    "    p0 = relax.Var(\"p0\", R.Tensor((), \"float32\"))\n",
    "    with bb.function(\n",
    "        \"fused_add1_exp1_squeeze1\", [x, p0], attrs={\"Primitive\": True}, private=True\n",
    "    ):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit_te(topi.add, x, p0)\n",
    "            lv1 = bb.emit_te(topi.exp, lv0)\n",
    "            gv = bb.emit_output(bb.call_te(topi.squeeze, lv1))\n",
    "        bb.emit_func_output(gv)\n",
    "    fused_add1_exp1_squeeze1 = bb.get().get_global_var(\"fused_add1_exp1_squeeze1\")\n",
    "\n",
    "    x = relax.Var(\"x\", R.Tensor([10, 20], \"float32\"))\n",
    "    with bb.function(\"func1\", [x]):\n",
    "        with bb.dataflow():\n",
    "            gv = bb.emit_output(\n",
    "                relax.Call(fused_add_exp_squeeze, [x, relax.const(1, \"float32\")])\n",
    "            )\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    x = relax.Var(\"x\", R.Tensor([20, 10], \"float32\"))\n",
    "    with bb.function(\"func2\", [x]):\n",
    "        with bb.dataflow():\n",
    "            gv = bb.emit_output(\n",
    "                relax.Call(fused_add1_exp1_squeeze1, [x, relax.const(1, \"float32\")])\n",
    "            )\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    return bb.get()\n",
    "\n",
    "_check(before(), expected())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9d923b36",
   "metadata": {},
   "source": [
    "## 测试包含 `call_dps_packed` 调用的算子融合\n",
    "\n",
    "该测试验证当函数中包含 `call_dps_packed` 调用时，`FuseOps` 变换是否会跳过这些调用，保持原始结构。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "e44cdff2",
   "metadata": {},
   "outputs": [],
   "source": [
    "@I.ir_module\n",
    "class Module:\n",
    "    @R.function\n",
    "    def main(x: R.Tensor((2, 3), \"float32\")):\n",
    "        with R.dataflow():\n",
    "            y = R.call_dps_packed(\"func_packed_dps\", x, R.Tensor((2, 3), \"float32\"))\n",
    "            R.output(y)\n",
    "        return y\n",
    "\n",
    "# FuseOps should make no change to this module.\n",
    "_check(Module, Module)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ad175da8",
   "metadata": {},
   "source": [
    "## 测试 `call_dps_packed` 调用边缘的算子融合\n",
    "\n",
    "该测试验证当 `call_dps_packed` 调用位于算子链边缘时，其他可融合的算子是否能被正确融合。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "75d546fb",
   "metadata": {},
   "outputs": [],
   "source": [
    "@I.ir_module\n",
    "class Module:\n",
    "    @R.function\n",
    "    def main(x: R.Tensor((2, 3), \"float32\")):\n",
    "        cls = Module\n",
    "        with R.dataflow():\n",
    "            # 调用exp原语函数处理输入x\n",
    "            a = R.call_tir(cls.exp, (x,), out_sinfo=R.Tensor((2, 3), \"float32\"))\n",
    "            # 再次调用exp原语函数处理中间结果a\n",
    "            b = R.call_tir(cls.exp, (a,), out_sinfo=R.Tensor((2, 3), \"float32\"))\n",
    "            # 调用打包的dps函数处理中间结果a\n",
    "            c = R.call_dps_packed(\"packed_dps\", (a,), out_sinfo=R.Tensor((2, 3), \"float32\"))\n",
    "            R.output(b, c)\n",
    "        return R.tuple(b, c)\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def exp(A: T.Buffer((2, 3), \"float32\"), B: T.Buffer((2, 3), \"float32\")):\n",
    "        \"\"\"\n",
    "        原语函数：占位符函数，仅用于测试\n",
    "        \n",
    "        参数:\n",
    "            A: 输入缓冲区，形状为(2, 3)，数据类型为float32\n",
    "            B: 输出缓冲区，形状为(2, 3)，数据类型为float32\n",
    "        \"\"\"\n",
    "        T.evaluate(0)  # 空操作，仅作为占位符\n",
    "\n",
    "# 预期算子融合不会对此模块产生变化，因此将模块与自身比较\n",
    "_check(Module, Module)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a709377e",
   "metadata": {},
   "source": [
    "## 测试 LayerNorm 和 ReLU 激活函数的融合\n",
    "\n",
    "该测试验证 LayerNorm 算子和 ReLU 激活函数是否能被正确融合，以减少内存传输和计算开销。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "a52586c6",
   "metadata": {},
   "outputs": [],
   "source": [
    "@I.ir_module\n",
    "class Module:\n",
    "    @R.function\n",
    "    def main(x: R.Tensor((1, 512, 64, 64), \"float32\"), mean: R.Tensor((64, 64), \"float32\"), var: R.Tensor((64, 64), \"float32\")):\n",
    "        cls = Module\n",
    "        with R.dataflow():\n",
    "            gv0 = R.call_tir(cls.layer_norm, (x, mean, var), out_sinfo=R.Tensor((1, 512, 64, 64), 'float32'))\n",
    "            gv1 = R.call_tir(cls.relu, gv0, out_sinfo=R.Tensor((1, 512, 64, 64), \"float32\"))\n",
    "            R.output(gv1)\n",
    "        return gv1\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def layer_norm(A: T.Buffer((T.int64(1), T.int64(512), T.int64(64), T.int64(64)), \"float32\"), gamma: T.Buffer((T.int64(64), T.int64(64)), \"float32\"), beta: T.Buffer((T.int64(64), T.int64(64)), \"float32\"), T_layer_norm: T.Buffer((T.int64(1), T.int64(512), T.int64(64), T.int64(64)), \"float32\")):\n",
    "        rxplaceholder_red_temp_v0 = T.alloc_buffer([T.int64(64), T.int64(64)], dtype=\"float32\")\n",
    "        rxplaceholder_red_temp_v1 = T.alloc_buffer([T.int64(64), T.int64(64)], dtype=\"float32\")\n",
    "        for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(512), T.int64(64), T.int64(64)):\n",
    "            with T.block(\"rxplaceholder_red_temp\"):\n",
    "                ax0, ax1, k2, k3 = T.axis.remap(\"SSRR\", [i0, i1, i2, i3])\n",
    "                T.reads(A[ax0, ax1, k2, k3])\n",
    "                T.writes(rxplaceholder_red_temp_v0[ax0, ax1], rxplaceholder_red_temp_v1[ax0, ax1])\n",
    "                with T.init():\n",
    "                    rxplaceholder_red_temp_v0[ax0, ax1] = T.float32(0)\n",
    "                    rxplaceholder_red_temp_v1[ax0, ax1] = T.float32(0)\n",
    "                v_rxplaceholder_red_temp_v0: T.float32 = rxplaceholder_red_temp_v0[ax0, ax1] + A[ax0, ax1, k2, k3]\n",
    "                v_rxplaceholder_red_temp_v1: T.float32 = rxplaceholder_red_temp_v1[ax0, ax1] + A[ax0, ax1, k2, k3] * A[ax0, ax1, k2, k3]\n",
    "                rxplaceholder_red_temp_v0[ax0, ax1] = v_rxplaceholder_red_temp_v0\n",
    "                rxplaceholder_red_temp_v1[ax0, ax1] = v_rxplaceholder_red_temp_v1\n",
    "        for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(512), T.int64(64), T.int64(64)):\n",
    "            with T.block(\"T_layer_norm\"):\n",
    "                ax0, ax1, ax2, ax3 = T.axis.remap(\"SSSS\", [i0, i1, i2, i3])\n",
    "                T.reads(A[ax0, ax1, ax2, ax3], rxplaceholder_red_temp_v0[ax0, ax1], rxplaceholder_red_temp_v1[ax0, ax1], gamma[ax2, ax3], beta[ax2, ax3])\n",
    "                T.writes(T_layer_norm[ax0, ax1, ax2, ax3])\n",
    "                T_layer_norm[ax0, ax1, ax2, ax3] = (A[ax0, ax1, ax2, ax3] - rxplaceholder_red_temp_v0[ax0, ax1] * T.float32(0.05)) * T.rsqrt(rxplaceholder_red_temp_v1[ax0, ax1] * T.float32(0.05) - rxplaceholder_red_temp_v0[ax0, ax1] * T.float32(0.05) * (rxplaceholder_red_temp_v0[ax0, ax1] * T.float32(0.05)) + T.float32(1e-05), dtype=\"float32\") * gamma[ax2, ax3] + beta[ax2, ax3]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def relu(A: T.Buffer((T.int64(1), T.int64(512), T.int64(64), T.int64(64)), \"float32\"), B: T.Buffer((T.int64(1), T.int64(512), T.int64(64), T.int64(64)), \"float32\")):\n",
    "        for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(512), T.int64(64), T.int64(64)):\n",
    "            with T.block(\"relu\"):\n",
    "                v_i0, v_i1, v_i2, v_i3 = T.axis.remap(\"SSSS\", [i0, i1, i2, i3])\n",
    "                T.reads(A[v_i0, v_i1, v_i2, v_i3])\n",
    "                T.writes(B[v_i0, v_i1, v_i2, v_i3])\n",
    "                B[v_i0, v_i1, v_i2, v_i3] = T.max(A[v_i0, v_i1, v_i2, v_i3], T.float32(0))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "c06eaec4",
   "metadata": {},
   "outputs": [],
   "source": [
    "@I.ir_module\n",
    "class Expected:\n",
    "    @T.prim_func(private=True)\n",
    "    def layer_norm(A: T.Buffer((T.int64(1), T.int64(512), T.int64(64), T.int64(64)), \"float32\"), gamma: T.Buffer((T.int64(64), T.int64(64)), \"float32\"), beta: T.Buffer((T.int64(64), T.int64(64)), \"float32\"), T_layer_norm: T.Buffer((T.int64(1), T.int64(512), T.int64(64), T.int64(64)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 4})\n",
    "        # with T.block(\"root\"):\n",
    "        rxplaceholder_red_temp_v0 = T.alloc_buffer((T.int64(64), T.int64(64)))\n",
    "        rxplaceholder_red_temp_v1 = T.alloc_buffer((T.int64(64), T.int64(64)))\n",
    "        for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(512), T.int64(64), T.int64(64)):\n",
    "            with T.block(\"rxplaceholder_red_temp\"):\n",
    "                ax0, ax1, k2, k3 = T.axis.remap(\"SSRR\", [i0, i1, i2, i3])\n",
    "                T.reads(A[ax0, ax1, k2, k3])\n",
    "                T.writes(rxplaceholder_red_temp_v0[ax0, ax1], rxplaceholder_red_temp_v1[ax0, ax1])\n",
    "                with T.init():\n",
    "                    rxplaceholder_red_temp_v0[ax0, ax1] = T.float32(0)\n",
    "                    rxplaceholder_red_temp_v1[ax0, ax1] = T.float32(0)\n",
    "                v_rxplaceholder_red_temp_v0: T.float32 = rxplaceholder_red_temp_v0[ax0, ax1] + A[ax0, ax1, k2, k3]\n",
    "                v_rxplaceholder_red_temp_v1: T.float32 = rxplaceholder_red_temp_v1[ax0, ax1] + A[ax0, ax1, k2, k3] * A[ax0, ax1, k2, k3]\n",
    "                rxplaceholder_red_temp_v0[ax0, ax1] = v_rxplaceholder_red_temp_v0\n",
    "                rxplaceholder_red_temp_v1[ax0, ax1] = v_rxplaceholder_red_temp_v1\n",
    "        for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(512), T.int64(64), T.int64(64)):\n",
    "            with T.block(\"T_layer_norm\"):\n",
    "                ax0, ax1, ax2, ax3 = T.axis.remap(\"SSSS\", [i0, i1, i2, i3])\n",
    "                T.reads(A[ax0, ax1, ax2, ax3], rxplaceholder_red_temp_v0[ax0, ax1], rxplaceholder_red_temp_v1[ax0, ax1], gamma[ax2, ax3], beta[ax2, ax3])\n",
    "                T.writes(T_layer_norm[ax0, ax1, ax2, ax3])\n",
    "                T_layer_norm[ax0, ax1, ax2, ax3] = (A[ax0, ax1, ax2, ax3] - rxplaceholder_red_temp_v0[ax0, ax1] * T.float32(0.050000000000000003)) * T.rsqrt(rxplaceholder_red_temp_v1[ax0, ax1] * T.float32(0.050000000000000003) - rxplaceholder_red_temp_v0[ax0, ax1] * T.float32(0.050000000000000003) * (rxplaceholder_red_temp_v0[ax0, ax1] * T.float32(0.050000000000000003)) + T.float32(1.0000000000000001e-05)) * gamma[ax2, ax3] + beta[ax2, ax3]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def relu(A: T.Buffer((T.int64(1), T.int64(512), T.int64(64), T.int64(64)), \"float32\"), B: T.Buffer((T.int64(1), T.int64(512), T.int64(64), T.int64(64)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 0})\n",
    "        # with T.block(\"root\"):\n",
    "        for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(512), T.int64(64), T.int64(64)):\n",
    "            with T.block(\"relu\"):\n",
    "                v_i0, v_i1, v_i2, v_i3 = T.axis.remap(\"SSSS\", [i0, i1, i2, i3])\n",
    "                T.reads(A[v_i0, v_i1, v_i2, v_i3])\n",
    "                T.writes(B[v_i0, v_i1, v_i2, v_i3])\n",
    "                B[v_i0, v_i1, v_i2, v_i3] = T.max(A[v_i0, v_i1, v_i2, v_i3], T.float32(0))\n",
    "\n",
    "    @R.function(private=True)\n",
    "    def fused_layer_norm_relu(x: R.Tensor((1, 512, 64, 64), dtype=\"float32\"), mean: R.Tensor((64, 64), dtype=\"float32\"), var: R.Tensor((64, 64), dtype=\"float32\")) -> R.Tensor((1, 512, 64, 64), dtype=\"float32\"):\n",
    "        R.func_attr({\"Primitive\": True})\n",
    "        cls = Expected\n",
    "        with R.dataflow():\n",
    "            gv0 = R.call_tir(cls.layer_norm, (x, mean, var), out_sinfo=R.Tensor((1, 512, 64, 64), \"float32\"))\n",
    "            gv = R.call_tir(cls.relu, (gv0,), out_sinfo=R.Tensor((1, 512, 64, 64), dtype=\"float32\"))\n",
    "            R.output(gv)\n",
    "        return gv\n",
    "\n",
    "    @R.function\n",
    "    def main(x: R.Tensor((1, 512, 64, 64), dtype=\"float32\"), mean: R.Tensor((64, 64), dtype=\"float32\"), var: R.Tensor((64, 64), dtype=\"float32\")) -> R.Tensor((1, 512, 64, 64), dtype=\"float32\"):\n",
    "        cls = Expected\n",
    "        with R.dataflow():\n",
    "            gv: R.Tensor((1, 512, 64, 64), dtype=\"float32\") = cls.fused_layer_norm_relu(x, mean, var)\n",
    "            R.output(gv)\n",
    "        return gv\n",
    "\n",
    "_check(Module, Expected)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "38a9c1bc",
   "metadata": {},
   "source": [
    "## 测试多路径网络结构的算子融合\n",
    "\n",
    "该测试验证具有多条计算路径的网络中，各路径上的算子是否能被正确融合，特别是卷积、矩阵乘法和加法等算子的融合。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "7c7d8410",
   "metadata": {},
   "outputs": [],
   "source": [
    "@I.ir_module\n",
    "class Module:\n",
    "    @R.function\n",
    "    def main(\n",
    "        inp_0: R.Tensor((2, 320, 64, 64), dtype=\"float32\"),\n",
    "        inp_1: R.Tensor((2, 1280), dtype=\"float32\"),\n",
    "        w1: R.Tensor((320, 320, 3, 3), dtype=\"float32\"),\n",
    "        b1: R.Tensor((320,), \"float32\"),\n",
    "        w2: R.Tensor((320, 1280), \"float32\"),\n",
    "        b2: R.Tensor((320,), \"float32\"),\n",
    "    ):\n",
    "        R.func_attr({\"num_input\": 2})\n",
    "        with R.dataflow():\n",
    "            lv27: R.Tensor((2, 320, 64, 64), dtype=\"float32\") = R.nn.conv2d(inp_0, w1, strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=1, data_layout=\"NCHW\", kernel_layout=\"OIHW\", out_layout=\"NCHW\", out_dtype=\"float32\")\n",
    "            lv28: R.Tensor((1, 320, 1, 1), dtype=\"float32\") = R.reshape(b1, R.shape([1, 320, 1, 1]))  ##\n",
    "            lv29: R.Tensor((2, 320, 64, 64), dtype=\"float32\") = R.add(lv27, lv28)\n",
    "            lv31: R.Tensor((1280, 320), dtype=\"float32\") = R.permute_dims(w2, axes=None)  ##\n",
    "            lv32: R.Tensor((2, 320), dtype=\"float32\") = R.matmul(inp_1, lv31, out_dtype=\"float32\")\n",
    "            lv33: R.Tensor((2, 320), dtype=\"float32\") = R.add(lv32, b2)\n",
    "            lv35: R.Tensor((2, 320, 1, 1), dtype=\"float32\") = R.reshape(lv33, R.shape([2, 320, 1, 1]))\n",
    "            lv36: R.Tensor((2, 320, 64, 64), dtype=\"float32\") = R.add(lv29, lv35)\n",
    "            gv = lv36\n",
    "            R.output(gv)\n",
    "        return gv\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "52fab53d",
   "metadata": {},
   "outputs": [],
   "source": [
    "@I.ir_module\n",
    "class Expected:\n",
    "    @T.prim_func(private=True)\n",
    "    def add(rxplaceholder: T.Buffer((T.int64(2), T.int64(320), T.int64(64), T.int64(64)), \"float32\"), rxplaceholder_1: T.Buffer((T.int64(1), T.int64(320), T.int64(1), T.int64(1)), \"float32\"), T_add: T.Buffer((T.int64(2), T.int64(320), T.int64(64), T.int64(64)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 0, \"tir.noalias\": True})\n",
    "        for ax0, ax1, ax2, ax3 in T.grid(T.int64(2), T.int64(320), T.int64(64), T.int64(64)):\n",
    "            with T.block(\"T_add\"):\n",
    "                v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap(\"SSSS\", [ax0, ax1, ax2, ax3])\n",
    "                T.reads(rxplaceholder[v_ax0, v_ax1, v_ax2, v_ax3], rxplaceholder_1[T.int64(0), v_ax1, T.int64(0), T.int64(0)])\n",
    "                T.writes(T_add[v_ax0, v_ax1, v_ax2, v_ax3])\n",
    "                T_add[v_ax0, v_ax1, v_ax2, v_ax3] = rxplaceholder[v_ax0, v_ax1, v_ax2, v_ax3] + rxplaceholder_1[T.int64(0), v_ax1, T.int64(0), T.int64(0)]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def add1(rxplaceholder: T.Buffer((T.int64(2), T.int64(320)), \"float32\"), rxplaceholder_1: T.Buffer((T.int64(320),), \"float32\"), T_add: T.Buffer((T.int64(2), T.int64(320)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 0, \"tir.noalias\": True})\n",
    "        for ax0, ax1 in T.grid(T.int64(2), T.int64(320)):\n",
    "            with T.block(\"T_add\"):\n",
    "                v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                T.reads(rxplaceholder[v_ax0, v_ax1], rxplaceholder_1[v_ax1])\n",
    "                T.writes(T_add[v_ax0, v_ax1])\n",
    "                T_add[v_ax0, v_ax1] = rxplaceholder[v_ax0, v_ax1] + rxplaceholder_1[v_ax1]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def add2(rxplaceholder: T.Buffer((T.int64(2), T.int64(320), T.int64(64), T.int64(64)), \"float32\"), rxplaceholder_1: T.Buffer((T.int64(2), T.int64(320), T.int64(1), T.int64(1)), \"float32\"), T_add: T.Buffer((T.int64(2), T.int64(320), T.int64(64), T.int64(64)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 0, \"tir.noalias\": True})\n",
    "        for ax0, ax1, ax2, ax3 in T.grid(T.int64(2), T.int64(320), T.int64(64), T.int64(64)):\n",
    "            with T.block(\"T_add\"):\n",
    "                v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap(\"SSSS\", [ax0, ax1, ax2, ax3])\n",
    "                T.reads(rxplaceholder[v_ax0, v_ax1, v_ax2, v_ax3], rxplaceholder_1[v_ax0, v_ax1, T.int64(0), T.int64(0)])\n",
    "                T.writes(T_add[v_ax0, v_ax1, v_ax2, v_ax3])\n",
    "                T_add[v_ax0, v_ax1, v_ax2, v_ax3] = rxplaceholder[v_ax0, v_ax1, v_ax2, v_ax3] + rxplaceholder_1[v_ax0, v_ax1, T.int64(0), T.int64(0)]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def conv2d(rxplaceholder: T.Buffer((T.int64(2), T.int64(320), T.int64(64), T.int64(64)), \"float32\"), rxplaceholder_1: T.Buffer((T.int64(320), T.int64(320), T.int64(3), T.int64(3)), \"float32\"), conv2d_nchw: T.Buffer((T.int64(2), T.int64(320), T.int64(64), T.int64(64)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 4, \"tir.noalias\": True})\n",
    "        pad_temp = T.alloc_buffer((T.int64(2), T.int64(320), T.int64(66), T.int64(66)))\n",
    "        for i0, i1, i2, i3 in T.grid(T.int64(2), T.int64(320), T.int64(66), T.int64(66)):\n",
    "            with T.block(\"pad_temp\"):\n",
    "                v_i0, v_i1, v_i2, v_i3 = T.axis.remap(\"SSSS\", [i0, i1, i2, i3])\n",
    "                T.reads(rxplaceholder[v_i0, v_i1, v_i2 - T.int64(1), v_i3 - T.int64(1)])\n",
    "                T.writes(pad_temp[v_i0, v_i1, v_i2, v_i3])\n",
    "                pad_temp[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(T.int64(1) <= v_i2 and v_i2 < T.int64(65) and T.int64(1) <= v_i3 and v_i3 < T.int64(65), rxplaceholder[v_i0, v_i1, v_i2 - T.int64(1), v_i3 - T.int64(1)], T.float32(0))\n",
    "        for nn, ff, yy, xx, rc, ry, rx in T.grid(T.int64(2), T.int64(320), T.int64(64), T.int64(64), T.int64(320), T.int64(3), T.int64(3)):\n",
    "            with T.block(\"conv2d_nchw\"):\n",
    "                v_nn, v_ff, v_yy, v_xx, v_rc, v_ry, v_rx = T.axis.remap(\"SSSSRRR\", [nn, ff, yy, xx, rc, ry, rx])\n",
    "                T.reads(pad_temp[v_nn, v_rc, v_yy + v_ry, v_xx + v_rx], rxplaceholder_1[v_ff, v_rc, v_ry, v_rx])\n",
    "                T.writes(conv2d_nchw[v_nn, v_ff, v_yy, v_xx])\n",
    "                with T.init():\n",
    "                    conv2d_nchw[v_nn, v_ff, v_yy, v_xx] = T.float32(0)\n",
    "                conv2d_nchw[v_nn, v_ff, v_yy, v_xx] = conv2d_nchw[v_nn, v_ff, v_yy, v_xx] + pad_temp[v_nn, v_rc, v_yy + v_ry, v_xx + v_rx] * rxplaceholder_1[v_ff, v_rc, v_ry, v_rx]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def matmul(rxplaceholder: T.Buffer((T.int64(2), T.int64(1280)), \"float32\"), rxplaceholder_1: T.Buffer((T.int64(1280), T.int64(320)), \"float32\"), matmul: T.Buffer((T.int64(2), T.int64(320)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 4, \"tir.noalias\": True})\n",
    "        for i0, i1, k in T.grid(T.int64(2), T.int64(320), T.int64(1280)):\n",
    "            with T.block(\"matmul\"):\n",
    "                v_i0, v_i1, v_k = T.axis.remap(\"SSR\", [i0, i1, k])\n",
    "                T.reads(rxplaceholder[v_i0, v_k], rxplaceholder_1[v_k, v_i1])\n",
    "                T.writes(matmul[v_i0, v_i1])\n",
    "                with T.init():\n",
    "                    matmul[v_i0, v_i1] = T.float32(0)\n",
    "                matmul[v_i0, v_i1] = matmul[v_i0, v_i1] + rxplaceholder[v_i0, v_k] * rxplaceholder_1[v_k, v_i1]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def reshape(rxplaceholder: T.Buffer((T.int64(320),), \"float32\"), T_reshape: T.Buffer((T.int64(1), T.int64(320), T.int64(1), T.int64(1)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 2, \"tir.noalias\": True})\n",
    "        for ax0, ax1, ax2, ax3 in T.grid(T.int64(1), T.int64(320), T.int64(1), T.int64(1)):\n",
    "            with T.block(\"T_reshape\"):\n",
    "                v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap(\"SSSS\", [ax0, ax1, ax2, ax3])\n",
    "                T.reads(rxplaceholder[(v_ax1 + v_ax2 + v_ax3) % T.int64(320)])\n",
    "                T.writes(T_reshape[v_ax0, v_ax1, v_ax2, v_ax3])\n",
    "                T_reshape[v_ax0, v_ax1, v_ax2, v_ax3] = rxplaceholder[(v_ax1 + v_ax2 + v_ax3) % T.int64(320)]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def reshape1(rxplaceholder: T.Buffer((T.int64(2), T.int64(320)), \"float32\"), T_reshape: T.Buffer((T.int64(2), T.int64(320), T.int64(1), T.int64(1)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 2, \"tir.noalias\": True})\n",
    "        for ax0, ax1, ax2, ax3 in T.grid(T.int64(2), T.int64(320), T.int64(1), T.int64(1)):\n",
    "            with T.block(\"T_reshape\"):\n",
    "                v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap(\"SSSS\", [ax0, ax1, ax2, ax3])\n",
    "                T.reads(rxplaceholder[((v_ax1 + v_ax2 + v_ax3) // T.int64(320) + v_ax0) % T.int64(2), (v_ax1 + v_ax2 + v_ax3) % T.int64(320)])\n",
    "                T.writes(T_reshape[v_ax0, v_ax1, v_ax2, v_ax3])\n",
    "                T_reshape[v_ax0, v_ax1, v_ax2, v_ax3] = rxplaceholder[((v_ax1 + v_ax2 + v_ax3) // T.int64(320) + v_ax0) % T.int64(2), (v_ax1 + v_ax2 + v_ax3) % T.int64(320)]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def transpose(rxplaceholder: T.Buffer((T.int64(320), T.int64(1280)), \"float32\"), T_transpose: T.Buffer((T.int64(1280), T.int64(320)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 2, \"tir.noalias\": True})\n",
    "        for ax0, ax1 in T.grid(T.int64(1280), T.int64(320)):\n",
    "            with T.block(\"T_transpose\"):\n",
    "                v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                T.reads(rxplaceholder[v_ax1, v_ax0])\n",
    "                T.writes(T_transpose[v_ax0, v_ax1])\n",
    "                T_transpose[v_ax0, v_ax1] = rxplaceholder[v_ax1, v_ax0]\n",
    "\n",
    "    @R.function(private=True)\n",
    "    def fused_conv2d_add_add2(inp_0: R.Tensor((2, 320, 64, 64), dtype=\"float32\"), w1: R.Tensor((320, 320, 3, 3), dtype=\"float32\"), lv28: R.Tensor((1, 320, 1, 1), dtype=\"float32\"), lv35: R.Tensor((2, 320, 1, 1), dtype=\"float32\")) -> R.Tensor((2, 320, 64, 64), dtype=\"float32\"):\n",
    "        R.func_attr({\"Primitive\": True})\n",
    "        cls = Expected\n",
    "        with R.dataflow():\n",
    "            lv27 = R.call_tir(cls.conv2d, (inp_0, w1), out_sinfo=R.Tensor((2, 320, 64, 64), dtype=\"float32\"))\n",
    "            lv29 = R.call_tir(cls.add, (lv27, lv28), out_sinfo=R.Tensor((2, 320, 64, 64), dtype=\"float32\"))\n",
    "            gv = R.call_tir(cls.add2, (lv29, lv35), out_sinfo=R.Tensor((2, 320, 64, 64), dtype=\"float32\"))\n",
    "            R.output(gv)\n",
    "        return gv\n",
    "\n",
    "    @R.function(private=True)\n",
    "    def fused_matmul_add1(inp_1: R.Tensor((2, 1280), dtype=\"float32\"), lv31: R.Tensor((1280, 320), dtype=\"float32\"), b2: R.Tensor((320,), dtype=\"float32\")) -> R.Tensor((2, 320), dtype=\"float32\"):\n",
    "        cls = Expected\n",
    "        R.func_attr({\"Primitive\": True})\n",
    "        with R.dataflow():\n",
    "            lv32 = R.call_tir(cls.matmul, (inp_1, lv31), out_sinfo=R.Tensor((2, 320), dtype=\"float32\"))\n",
    "            gv = R.call_tir(cls.add1, (lv32, b2), out_sinfo=R.Tensor((2, 320), dtype=\"float32\"))\n",
    "            R.output(gv)\n",
    "        return gv\n",
    "\n",
    "    @R.function\n",
    "    def main(inp_0: R.Tensor((2, 320, 64, 64), dtype=\"float32\"), inp_1: R.Tensor((2, 1280), dtype=\"float32\"), w1: R.Tensor((320, 320, 3, 3), dtype=\"float32\"), b1: R.Tensor((320,), dtype=\"float32\"), w2: R.Tensor((320, 1280), dtype=\"float32\"), b2: R.Tensor((320,), dtype=\"float32\")) -> R.Tensor((2, 320, 64, 64), dtype=\"float32\"):\n",
    "        R.func_attr({\"num_input\": 2})\n",
    "        cls = Expected\n",
    "        with R.dataflow():\n",
    "            lv28 = R.call_tir(cls.reshape, (b1,), out_sinfo=R.Tensor((1, 320, 1, 1), dtype=\"float32\"))\n",
    "            lv31 = R.call_tir(cls.transpose, (w2,), out_sinfo=R.Tensor((1280, 320), dtype=\"float32\"))\n",
    "            lv: R.Tensor((2, 320), dtype=\"float32\") = cls.fused_matmul_add1(inp_1, lv31, b2)\n",
    "            lv35 = R.call_tir(cls.reshape1, (lv,), out_sinfo=R.Tensor((2, 320, 1, 1), dtype=\"float32\"))\n",
    "            lv1: R.Tensor((2, 320, 64, 64), dtype=\"float32\") = cls.fused_conv2d_add_add2(inp_0, w1, lv28, lv35)\n",
    "            gv: R.Tensor((2, 320, 64, 64), dtype=\"float32\") = lv1\n",
    "            R.output(gv)\n",
    "        return gv\n",
    "\n",
    "mod = relax.transform.LegalizeOps()(Module)\n",
    "mod = relax.transform.AnnotateTIROpPattern()(mod)\n",
    "mod = relax.transform.FuseOps()(mod)\n",
    "tvm.ir.assert_structural_equal(mod, Expected)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1c585d29",
   "metadata": {},
   "source": [
    "## 测试包含无效组的算子融合\n",
    "\n",
    "该测试验证当网络中存在未被使用的计算路径（无效组）时，`FuseOps` 变换是否能正确处理这种情况，只融合有效的计算路径。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "dbc201f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "@I.ir_module\n",
    "class Module:\n",
    "    @R.function\n",
    "    def main(inp_0: R.Tensor((1, 784), dtype=\"float32\"), inp_1: R.Tensor((1, 128), dtype=\"float32\"), linear1_bias: R.Tensor((128,), dtype=\"float32\"), linear1_weight: R.Tensor((128, 784), dtype=\"float32\"), linear2_bias: R.Tensor((10,), dtype=\"float32\"), linear2_weight: R.Tensor((10, 128), dtype=\"float32\")) -> R.Tensor((1, 10), dtype=\"float32\"):\n",
    "        R.func_attr({\"num_input\": 1})\n",
    "        with R.dataflow():\n",
    "            lv: R.Tensor((784, 128), dtype=\"float32\") = R.permute_dims(linear1_weight, axes=None)\n",
    "            lv1: R.Tensor((1, 128), dtype=\"float32\") = R.matmul(inp_0, lv, out_dtype=\"float32\")\n",
    "            lv2: R.Tensor((1, 128), dtype=\"float32\") = R.add(lv1, linear1_bias)\n",
    "            lv3: R.Tensor((1, 128), dtype=\"float32\") = R.nn.relu(lv2)\n",
    "            lv4: R.Tensor((128, 10), dtype=\"float32\") = R.permute_dims(linear2_weight, axes=None)\n",
    "            lv5: R.Tensor((1, 10), dtype=\"float32\") = R.matmul(inp_1, lv4, out_dtype=\"float32\")\n",
    "            lv6: R.Tensor((1, 10), dtype=\"float32\") = R.add(lv5, linear2_bias)\n",
    "            gv: R.Tensor((1, 10), dtype=\"float32\") = lv6\n",
    "            R.output(gv)\n",
    "        return gv\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "c5ded6dd",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[11:32:09] /media/pc/data/lxw/ai/tvm/src/relax/transform/fuse_ops.cc:551: Warning: There are dead codes in the current IRModule, please run the DeadCodeElimination Pass before FuseOps\n"
     ]
    }
   ],
   "source": [
    "@I.ir_module\n",
    "class Expected:\n",
    "    @T.prim_func(private=True)\n",
    "    def add(rxplaceholder: T.Buffer((T.int64(1), T.int64(128)), \"float32\"), rxplaceholder_1: T.Buffer((T.int64(128),), \"float32\"), T_add: T.Buffer((T.int64(1), T.int64(128)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 0, \"tir.noalias\": True})\n",
    "        # with T.block(\"root\"):\n",
    "        for ax0, ax1 in T.grid(T.int64(1), T.int64(128)):\n",
    "            with T.block(\"T_add\"):\n",
    "                v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                T.reads(rxplaceholder[v_ax0, v_ax1], rxplaceholder_1[v_ax1])\n",
    "                T.writes(T_add[v_ax0, v_ax1])\n",
    "                T_add[v_ax0, v_ax1] = rxplaceholder[v_ax0, v_ax1] + rxplaceholder_1[v_ax1]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def add1(rxplaceholder: T.Buffer((T.int64(1), T.int64(10)), \"float32\"), rxplaceholder_1: T.Buffer((T.int64(10),), \"float32\"), T_add: T.Buffer((T.int64(1), T.int64(10)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 0, \"tir.noalias\": True})\n",
    "        # with T.block(\"root\"):\n",
    "        for ax0, ax1 in T.grid(T.int64(1), T.int64(10)):\n",
    "            with T.block(\"T_add\"):\n",
    "                v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                T.reads(rxplaceholder[v_ax0, v_ax1], rxplaceholder_1[v_ax1])\n",
    "                T.writes(T_add[v_ax0, v_ax1])\n",
    "                T_add[v_ax0, v_ax1] = rxplaceholder[v_ax0, v_ax1] + rxplaceholder_1[v_ax1]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def matmul(rxplaceholder: T.Buffer((T.int64(1), T.int64(784)), \"float32\"), rxplaceholder_1: T.Buffer((T.int64(784), T.int64(128)), \"float32\"), matmul_1: T.Buffer((T.int64(1), T.int64(128)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 4, \"tir.noalias\": True})\n",
    "        # with T.block(\"root\"):\n",
    "        for i0, i1, k in T.grid(T.int64(1), T.int64(128), T.int64(784)):\n",
    "            with T.block(\"matmul\"):\n",
    "                v_i0, v_i1, v_k = T.axis.remap(\"SSR\", [i0, i1, k])\n",
    "                T.reads(rxplaceholder[v_i0, v_k], rxplaceholder_1[v_k, v_i1])\n",
    "                T.writes(matmul_1[v_i0, v_i1])\n",
    "                with T.init():\n",
    "                    matmul_1[v_i0, v_i1] = T.float32(0)\n",
    "                matmul_1[v_i0, v_i1] = matmul_1[v_i0, v_i1] + rxplaceholder[v_i0, v_k] * rxplaceholder_1[v_k, v_i1]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def matmul1(rxplaceholder: T.Buffer((T.int64(1), T.int64(128)), \"float32\"), rxplaceholder_1: T.Buffer((T.int64(128), T.int64(10)), \"float32\"), matmul: T.Buffer((T.int64(1), T.int64(10)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 4, \"tir.noalias\": True})\n",
    "        # with T.block(\"root\"):\n",
    "        for i0, i1, k in T.grid(T.int64(1), T.int64(10), T.int64(128)):\n",
    "            with T.block(\"matmul\"):\n",
    "                v_i0, v_i1, v_k = T.axis.remap(\"SSR\", [i0, i1, k])\n",
    "                T.reads(rxplaceholder[v_i0, v_k], rxplaceholder_1[v_k, v_i1])\n",
    "                T.writes(matmul[v_i0, v_i1])\n",
    "                with T.init():\n",
    "                    matmul[v_i0, v_i1] = T.float32(0)\n",
    "                matmul[v_i0, v_i1] = matmul[v_i0, v_i1] + rxplaceholder[v_i0, v_k] * rxplaceholder_1[v_k, v_i1]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def relu(rxplaceholder: T.Buffer((T.int64(1), T.int64(128)), \"float32\"), compute: T.Buffer((T.int64(1), T.int64(128)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 0, \"tir.noalias\": True})\n",
    "        # with T.block(\"root\"):\n",
    "        for i0, i1 in T.grid(T.int64(1), T.int64(128)):\n",
    "            with T.block(\"compute\"):\n",
    "                v_i0, v_i1 = T.axis.remap(\"SS\", [i0, i1])\n",
    "                T.reads(rxplaceholder[v_i0, v_i1])\n",
    "                T.writes(compute[v_i0, v_i1])\n",
    "                compute[v_i0, v_i1] = T.max(rxplaceholder[v_i0, v_i1], T.float32(0))\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def transpose(rxplaceholder: T.Buffer((T.int64(128), T.int64(784)), \"float32\"), T_transpose: T.Buffer((T.int64(784), T.int64(128)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 2, \"tir.noalias\": True})\n",
    "        # with T.block(\"root\"):\n",
    "        for ax0, ax1 in T.grid(T.int64(784), T.int64(128)):\n",
    "            with T.block(\"T_transpose\"):\n",
    "                v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                T.reads(rxplaceholder[v_ax1, v_ax0])\n",
    "                T.writes(T_transpose[v_ax0, v_ax1])\n",
    "                T_transpose[v_ax0, v_ax1] = rxplaceholder[v_ax1, v_ax0]\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def transpose1(rxplaceholder: T.Buffer((T.int64(10), T.int64(128)), \"float32\"), T_transpose: T.Buffer((T.int64(128), T.int64(10)), \"float32\")):\n",
    "        T.func_attr({\"op_pattern\": 2, \"tir.noalias\": True})\n",
    "        # with T.block(\"root\"):\n",
    "        for ax0, ax1 in T.grid(T.int64(128), T.int64(10)):\n",
    "            with T.block(\"T_transpose\"):\n",
    "                v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                T.reads(rxplaceholder[v_ax1, v_ax0])\n",
    "                T.writes(T_transpose[v_ax0, v_ax1])\n",
    "                T_transpose[v_ax0, v_ax1] = rxplaceholder[v_ax1, v_ax0]\n",
    "\n",
    "    @R.function(private=True)\n",
    "    def fused_matmul1_add1(inp_1: R.Tensor((1, 128), dtype=\"float32\"), lv4: R.Tensor((128, 10), dtype=\"float32\"), linear2_bias: R.Tensor((10,), dtype=\"float32\")) -> R.Tensor((1, 10), dtype=\"float32\"):\n",
    "        R.func_attr({\"Primitive\": True})\n",
    "        cls = Expected\n",
    "        with R.dataflow():\n",
    "            lv5 = R.call_tir(cls.matmul1, (inp_1, lv4), out_sinfo=R.Tensor((1, 10), dtype=\"float32\"))\n",
    "            gv = R.call_tir(cls.add1, (lv5, linear2_bias), out_sinfo=R.Tensor((1, 10), dtype=\"float32\"))\n",
    "            R.output(gv)\n",
    "        return gv\n",
    "\n",
    "    @R.function\n",
    "    def main(inp_0: R.Tensor((1, 784), dtype=\"float32\"), inp_1: R.Tensor((1, 128), dtype=\"float32\"), linear1_bias: R.Tensor((128,), dtype=\"float32\"), linear1_weight: R.Tensor((128, 784), dtype=\"float32\"), linear2_bias: R.Tensor((10,), dtype=\"float32\"), linear2_weight: R.Tensor((10, 128), dtype=\"float32\")) -> R.Tensor((1, 10), dtype=\"float32\"):\n",
    "        R.func_attr({\"num_input\": 1})\n",
    "        cls = Expected\n",
    "        with R.dataflow():\n",
    "            lv = R.call_tir(cls.transpose, (linear1_weight,), out_sinfo=R.Tensor((784, 128), dtype=\"float32\"))\n",
    "            lv4 = R.call_tir(cls.transpose1, (linear2_weight,), out_sinfo=R.Tensor((128, 10), dtype=\"float32\"))\n",
    "            lv_1: R.Tensor((1, 10), dtype=\"float32\") = cls.fused_matmul1_add1(inp_1, lv4, linear2_bias)\n",
    "            gv: R.Tensor((1, 10), dtype=\"float32\") = lv_1\n",
    "            R.output(gv)\n",
    "        return gv\n",
    "\n",
    "\n",
    "mod = relax.transform.LegalizeOps()(Module)\n",
    "_check(mod, Expected)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fdeacfb7",
   "metadata": {},
   "source": [
    "## 测试符号形状感知的算子融合\n",
    "\n",
    "该测试验证当张量形状包含符号变量时，算子融合是否能正确处理这些符号形状。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "59f10081",
   "metadata": {},
   "outputs": [],
   "source": [
    "@I.ir_module\n",
    "class Before:\n",
    "    @R.function\n",
    "    def main(x: R.Tensor([\"n\", \"m\"], \"float32\")):\n",
    "        with R.dataflow():\n",
    "            lv0 = R.emit_te(topi.add, x, R.const(1, \"float32\"))\n",
    "            lv1 = R.emit_te(topi.exp, lv0)\n",
    "            gv = R.emit_te(topi.squeeze, lv1)\n",
    "            R.output(gv)\n",
    "        return gv\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "67cb9fe2",
   "metadata": {},
   "outputs": [],
   "source": [
    "@I.ir_module\n",
    "class Expected:\n",
    "    @R.function(private=True)\n",
    "    def fused_add_exp_squeeze(\n",
    "        x: R.Tensor([\"n\", \"m\"], \"float32\"), p0: R.Tensor([], \"float32\")\n",
    "    ) -> R.Tensor([\"n\", \"m\"], dtype=\"float32\"):\n",
    "        R.func_attr({\"Primitive\": True})\n",
    "        with R.dataflow():\n",
    "            lv0 = R.emit_te(topi.add, x, p0)\n",
    "            lv1 = R.emit_te(topi.exp, lv0)\n",
    "            gv = R.emit_te(topi.squeeze, lv1)\n",
    "            R.output(gv)\n",
    "        return gv\n",
    "\n",
    "    @R.function\n",
    "    def main(x: R.Tensor([\"n\", \"m\"], \"float32\")) -> R.Tensor([\"n\", \"m\"], dtype=\"float32\"):\n",
    "        cls = Expected\n",
    "        with R.dataflow():\n",
    "            gv = cls.fused_add_exp_squeeze(x, R.const(1, \"float32\"))\n",
    "            R.output(gv)\n",
    "        return gv\n",
    "\n",
    "_check(Before, Expected)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3e89b307",
   "metadata": {},
   "source": [
    "该测试进一步验证包含更复杂符号形状表达式的算子融合情况，包括 `full`、`trilu` 和 `broadcast_to` 等算子的融合。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "636bdee8",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "@I.ir_module\n",
    "class Before:\n",
    "    @R.function\n",
    "    def main(s: R.Shape([\"n\"])):\n",
    "        n = T.int64()\n",
    "        with R.dataflow():\n",
    "            lv0 = R.emit_te(topi.full, [n, n], \"float32\", 0)\n",
    "            lv1 = R.emit_te(topi.trilu, lv0, tvm.tir.const(1, \"int32\"), upper=True)\n",
    "            gv = R.emit_te(topi.broadcast_to, lv1, [1, 1, n, n])\n",
    "            R.output(gv)\n",
    "        return gv\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "9815cb81",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Expected: the whole chain is fused into one primitive function; the\n",
     "# symbolic dimension n is threaded through via an R.Shape argument.\n",
     "@I.ir_module\n",
     "class Expected:\n",
     "    @R.function(private=True)\n",
     "    def fused_full_trilu_broadcast_to(\n",
     "        s: R.Shape([\"n\"]),\n",
     "    ) -> R.Tensor([1, 1, \"n\", \"n\"], \"float32\"):\n",
     "        R.func_attr({\"Primitive\": True})\n",
     "        n = T.int64()\n",
     "        with R.dataflow():\n",
     "            lv0 = R.emit_te(topi.full, [n, n], \"float32\", 0)\n",
     "            lv1 = R.emit_te(topi.trilu, lv0, tvm.tir.const(1, \"int32\"), upper=True)\n",
     "            gv = R.emit_te(topi.broadcast_to, lv1, [1, 1, n, n])\n",
     "            R.output(gv)\n",
     "        return gv\n",
     "\n",
     "    @R.function\n",
     "    def main(s: R.Shape([\"n\"])) -> R.Tensor((1, 1, \"n\", \"n\"), dtype=\"float32\"):\n",
     "        cls = Expected\n",
     "        n = T.int64()\n",
     "        with R.dataflow():\n",
     "            # Pass R.shape([n]) so the fused function can recover n.\n",
     "            gv: R.Tensor([1, 1, n, n], \"float32\") = cls.fused_full_trilu_broadcast_to(\n",
     "                R.shape([n])\n",
     "            )\n",
     "            R.output(gv)\n",
     "        return gv\n",
     "\n",
     "_check(Before, Expected)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e0796c24",
   "metadata": {},
   "source": [
    "## 测试形状表达式参数的算子融合\n",
    "\n",
    "该测试验证当函数参数包含形状表达式时，算子融合是否能正确处理这种情况。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "id": "8c7a0546",
   "metadata": {},
   "outputs": [],
   "source": [
     "\n",
     "# Before: the fusible full/trilu/broadcast_to chain is mixed with an opaque\n",
     "# packed call (kv-cache view) that FuseOps must leave outside the fused group.\n",
     "@I.ir_module\n",
     "class Before:\n",
     "    @R.function\n",
     "    def main(s: R.Shape([\"n\"]), kv_cache: R.Object):\n",
     "        n = T.int64()\n",
     "        with R.dataflow():\n",
     "            lv0 = R.emit_te(topi.full, [n, n], \"float32\", 0)\n",
     "            lv1 = R.emit_te(topi.trilu, lv0, tvm.tir.const(1, \"int32\"), upper=True)\n",
     "            lv2 = R.emit_te(topi.broadcast_to, lv1, [1, 1, n, n])\n",
     "            gv = R.call_pure_packed(\n",
     "                \"vm.builtin.attention_kv_cache_view\",\n",
     "                kv_cache,\n",
     "                R.shape([1 + n, 32, 128]),\n",
     "                sinfo_args=(R.Tensor((1 + n, 32, 128), dtype=\"float32\"),),\n",
     "            )\n",
     "            R.output(gv, lv2)\n",
     "        return gv, lv2\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "id": "d9ae5203",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Expected: only the TE chain is fused; the packed kv-cache call stays as a\n",
     "# separate binding in main.\n",
     "@I.ir_module\n",
     "class Expected:\n",
     "    @R.function(private=True)\n",
     "    def fused_full_trilu_broadcast_to(\n",
     "        s: R.Shape([\"n\"]),\n",
     "    ) -> R.Tensor([1, 1, \"n\", \"n\"], \"float32\"):\n",
     "        R.func_attr({\"Primitive\": True})\n",
     "        n = T.int64()\n",
     "        with R.dataflow():\n",
     "            lv0 = R.emit_te(topi.full, [n, n], \"float32\", 0)\n",
     "            lv1 = R.emit_te(topi.trilu, lv0, tvm.tir.const(1, \"int32\"), upper=True)\n",
     "            gv = R.emit_te(topi.broadcast_to, lv1, [1, 1, n, n])\n",
     "            R.output(gv)\n",
     "        return gv\n",
     "\n",
     "    @R.function\n",
     "    def main(s: R.Shape([\"n\"]), kv_cache: R.Object):\n",
     "        cls = Expected\n",
     "        n = T.int64()\n",
     "        with R.dataflow():\n",
     "            lv: R.Tensor([1, 1, n, n], \"float32\") = cls.fused_full_trilu_broadcast_to(\n",
     "                R.shape([n])\n",
     "            )\n",
     "            # The call_pure_packed binding is untouched by fusion.\n",
     "            gv = R.call_pure_packed(\n",
     "                \"vm.builtin.attention_kv_cache_view\",\n",
     "                kv_cache,\n",
     "                R.shape([1 + n, 32, 128]),\n",
     "                sinfo_args=(R.Tensor((1 + n, 32, 128), dtype=\"float32\"),),\n",
     "            )\n",
     "            R.output(gv, lv)\n",
     "        return gv, lv\n",
     "\n",
     "_check(Before, Expected)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "43b74100",
   "metadata": {},
   "source": [
    "## 测试跳过 `match_cast` 的算子融合\n",
    "\n",
    "该测试验证当函数中包含 `match_cast` 算子时，`FuseOps` 变换是否会跳过这些算子，保持原始结构。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "b07f4fb1",
   "metadata": {},
   "outputs": [],
   "source": [
     "# A module whose dataflow block contains only a match_cast binding; the test\n",
     "# passes the same module as both input and expected, asserting FuseOps\n",
     "# leaves match_cast bindings untouched.\n",
     "@I.ir_module\n",
     "class Module:\n",
     "    @R.function\n",
     "    def main(A: R.Tensor((10, 20), dtype=\"float32\")) -> R.Tensor(dtype=\"float32\", ndim=2):\n",
     "        m = T.int64()\n",
     "        n = T.int64()\n",
     "        with R.dataflow():\n",
     "            # Bind symbolic dims (m, n) to A's runtime shape.\n",
     "            lv: R.Tensor((m, n), dtype=\"float32\") = R.match_cast(\n",
     "                A, R.Tensor((m, n), dtype=\"float32\")\n",
     "            )\n",
     "            gv: R.Tensor((m, n), dtype=\"float32\") = lv\n",
     "            R.output(gv)\n",
     "        return gv\n",
     "\n",
     "_check(Module, Module)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d539bccc",
   "metadata": {},
   "source": [
    "## 测试包含原始值参数的算子融合\n",
    "\n",
    "该测试验证当函数调用包含原始值参数（如整数、字符串、数据类型）时，算子融合是否能正确处理这些情况。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "00cf2f90",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Packed calls taking prim-value, string, and dtype arguments; since there\n",
     "# are no TIR calls to group, the module is expected to be unchanged\n",
     "# (input and expected are the same module).\n",
     "@I.ir_module\n",
     "class Module:\n",
     "    @R.function\n",
     "    def main(inp: R.Tensor((2, 2), dtype=\"float32\")) -> R.Tensor((2, 2), dtype=\"float32\"):\n",
     "        with R.dataflow():\n",
     "            lv = R.call_pure_packed(\n",
     "                \"my_func1\", inp, R.prim_value(0), sinfo_args=[R.Tensor((2, 2), dtype=\"float32\")]\n",
     "            )\n",
     "            lv1 = R.call_pure_packed(\n",
     "                \"my_func2\", lv, R.str(\"str\"), sinfo_args=[R.Tensor((2, 2), dtype=\"float32\")]\n",
     "            )\n",
     "            gv = R.call_pure_packed(\n",
     "                \"my_func3\",\n",
     "                lv1,\n",
     "                R.dtype(\"float32\"),\n",
     "                sinfo_args=[R.Tensor((2, 2), dtype=\"float32\")],\n",
     "            )\n",
     "            R.output(gv)\n",
     "        return gv\n",
     "\n",
     "_check(Module, Module)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "62a3ee52",
   "metadata": {},
   "source": [
     "## 测试部分使用元组参数的算子融合\n",
    "\n",
    "该测试验证当函数参数是元组且只使用了其中一部分元素时，算子融合是否能正确处理这种情况。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "80824135",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Input: main receives a 6-element tuple but only uses x[0] in the\n",
     "# add -> divide chain.\n",
     "@I.ir_module\n",
     "class Module:\n",
     "    @R.function\n",
     "    def main(\n",
     "        x: R.Tuple(\n",
     "            R.Tensor((2,), \"float32\"),\n",
     "            R.Tensor((2,), \"float32\"),\n",
     "            R.Tensor((2,), \"float32\"),\n",
     "            R.Tensor((2,), \"float32\"),\n",
     "            R.Tensor((2,), \"float32\"),\n",
     "            R.Tensor((2,), \"float32\"),\n",
     "        ),\n",
     "    ):\n",
     "        with R.dataflow():\n",
     "            x0 = x[0]\n",
     "            y0 = R.emit_te(topi.add, x0, R.const(1, \"float32\"))\n",
     "            y1 = R.emit_te(topi.divide, y0, R.const(1, \"float32\"))\n",
     "            gv = y1\n",
     "            R.output(gv)\n",
     "        return gv\n",
     "\n",
     "# Expected: the fused function takes only the tuple element actually used\n",
     "# (x_0) plus the two lifted scalar constants — not the whole tuple.\n",
     "@I.ir_module\n",
     "class Expected:\n",
     "    @R.function(private=True)\n",
     "    def fused_add_divide(\n",
     "        x_0: R.Tensor((2,), dtype=\"float32\"),\n",
     "        param_0: R.Tensor((), dtype=\"float32\"),\n",
     "        param_1: R.Tensor((), dtype=\"float32\"),\n",
     "    ) -> R.Tensor((2,), dtype=\"float32\"):\n",
     "        R.func_attr({\"Primitive\": True})\n",
     "        with R.dataflow():\n",
     "            y0 = R.emit_te(topi.add, x_0, param_0)\n",
     "            gv = R.emit_te(topi.divide, y0, param_1)\n",
     "            R.output(gv)\n",
     "        return gv\n",
     "\n",
     "    @R.function\n",
     "    def main(\n",
     "        x: R.Tuple(\n",
     "            R.Tensor((2,), dtype=\"float32\"),\n",
     "            R.Tensor((2,), dtype=\"float32\"),\n",
     "            R.Tensor((2,), dtype=\"float32\"),\n",
     "            R.Tensor((2,), dtype=\"float32\"),\n",
     "            R.Tensor((2,), dtype=\"float32\"),\n",
     "            R.Tensor((2,), dtype=\"float32\"),\n",
     "        ),\n",
     "    ) -> R.Tensor((2,), dtype=\"float32\"):\n",
     "        cls = Expected\n",
     "        with R.dataflow():\n",
     "            lv: R.Tensor((2,), dtype=\"float32\") = x[0]\n",
     "            lv1: R.Tensor((2,), dtype=\"float32\") = cls.fused_add_divide(\n",
     "                lv, R.const(1, \"float32\"), R.const(1, \"float32\")\n",
     "            )\n",
     "            gv: R.Tensor((2,), dtype=\"float32\") = lv1\n",
     "            R.output(gv)\n",
     "        return gv\n",
     "\n",
     "_check(Module, Expected)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "19135f9e",
   "metadata": {},
   "source": [
    "## 测试包含 in-place 算子的算子融合\n",
    "\n",
    "该测试验证当函数中包含原地（in-place）算子时，算子融合是否能正确处理这些算子。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "id": "34f689c0",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Input: an out-of-place add followed by two in-place TIR calls\n",
     "# (call_tir_inplace with inplace_indices=[0]).\n",
     "@I.ir_module\n",
     "class Module:\n",
     "    @T.prim_func(private=True)\n",
     "    def add(\n",
     "        A: T.Buffer((T.int64(10), T.int64(20)), \"float32\"),\n",
     "        B: T.Buffer((), \"float32\"),\n",
     "        Out: T.Buffer((T.int64(10), T.int64(20)), \"float32\"),\n",
     "    ):\n",
     "        T.func_attr({\"tir.noalias\": True})\n",
     "        for ax0, ax1 in T.grid(T.int64(10), T.int64(20)):\n",
     "            with T.block(\"T_add\"):\n",
     "                v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
     "                T.reads(A[v_ax0, v_ax1], B[()])\n",
     "                T.writes(Out[v_ax0, v_ax1])\n",
     "                Out[v_ax0, v_ax1] = A[v_ax0, v_ax1] + B[()]\n",
     "\n",
     "    # In-place: reads and writes the same buffer A.\n",
     "    @T.prim_func(private=True)\n",
     "    def exp_inplace(A: T.Buffer((T.int64(10), T.int64(20)), \"float32\")):\n",
     "        T.func_attr({\"tir.noalias\": True})\n",
     "        for i0, i1 in T.grid(T.int64(10), T.int64(20)):\n",
     "            with T.block(\"compute\"):\n",
     "                v_i0, v_i1 = T.axis.remap(\"SS\", [i0, i1])\n",
     "                T.reads(A[v_i0, v_i1])\n",
     "                T.writes(A[v_i0, v_i1])\n",
     "                A[v_i0, v_i1] = T.exp(A[v_i0, v_i1])\n",
     "\n",
     "    # In-place identity copy over the same buffer A.\n",
     "    @T.prim_func(private=True)\n",
     "    def squeeze_inplace(A: T.Buffer((T.int64(10), T.int64(20)), \"float32\")):\n",
     "        T.func_attr({\"tir.noalias\": True})\n",
     "        for ax0, ax1 in T.grid(T.int64(10), T.int64(20)):\n",
     "            with T.block(\"T_squeeze\"):\n",
     "                v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
     "                T.reads(A[v_ax0, v_ax1])\n",
     "                T.writes(A[v_ax0, v_ax1])\n",
     "                A[v_ax0, v_ax1] = A[v_ax0, v_ax1]\n",
     "\n",
     "    @R.function\n",
     "    def main(\n",
     "        x: R.Tensor((10, 20), dtype=\"float32\"), p0: R.Tensor((), dtype=\"float32\")\n",
     "    ) -> R.Tensor((10, 20), dtype=\"float32\"):\n",
     "        cls = Module\n",
     "        with R.dataflow():\n",
     "            lv = R.call_tir(\n",
     "                cls.add,\n",
     "                (x, p0),\n",
     "                out_sinfo=R.Tensor((10, 20), dtype=\"float32\"),\n",
     "            )\n",
     "            lv1 = R.call_tir_inplace(\n",
     "                cls.exp_inplace,\n",
     "                (lv,),\n",
     "                inplace_indices=[0],\n",
     "                out_sinfo=R.Tensor((10, 20), dtype=\"float32\"),\n",
     "            )\n",
     "            gv = R.call_tir_inplace(\n",
     "                cls.squeeze_inplace,\n",
     "                (lv1,),\n",
     "                inplace_indices=[0],\n",
     "                out_sinfo=R.Tensor((10, 20), dtype=\"float32\"),\n",
     "            )\n",
     "            R.output(gv)\n",
     "        return gv\n",
     "\n",
     "# Expected: the three calls are grouped into one primitive Relax function;\n",
     "# the call_tir / call_tir_inplace bindings themselves are kept as-is inside\n",
     "# the fused function, and the prim_funcs gain \"op_pattern\" annotations.\n",
     "@I.ir_module\n",
     "class Expected:\n",
     "    @T.prim_func(private=True)\n",
     "    def add(\n",
     "        A: T.Buffer((T.int64(10), T.int64(20)), \"float32\"),\n",
     "        B: T.Buffer((), \"float32\"),\n",
     "        Out: T.Buffer((T.int64(10), T.int64(20)), \"float32\"),\n",
     "    ):\n",
     "        T.func_attr({\"tir.noalias\": True, \"op_pattern\": 0})\n",
     "        for ax0, ax1 in T.grid(T.int64(10), T.int64(20)):\n",
     "            with T.block(\"T_add\"):\n",
     "                v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
     "                T.reads(A[v_ax0, v_ax1], B[()])\n",
     "                T.writes(Out[v_ax0, v_ax1])\n",
     "                Out[v_ax0, v_ax1] = A[v_ax0, v_ax1] + B[()]\n",
     "\n",
     "    @T.prim_func(private=True)\n",
     "    def exp_inplace(A: T.Buffer((T.int64(10), T.int64(20)), \"float32\")):\n",
     "        T.func_attr({\"tir.noalias\": True, \"op_pattern\": 0})\n",
     "        for i0, i1 in T.grid(T.int64(10), T.int64(20)):\n",
     "            with T.block(\"compute\"):\n",
     "                v_i0, v_i1 = T.axis.remap(\"SS\", [i0, i1])\n",
     "                T.reads(A[v_i0, v_i1])\n",
     "                T.writes(A[v_i0, v_i1])\n",
     "                A[v_i0, v_i1] = T.exp(A[v_i0, v_i1])\n",
     "\n",
     "    @T.prim_func(private=True)\n",
     "    def squeeze_inplace(A: T.Buffer((T.int64(10), T.int64(20)), \"float32\")):\n",
     "        T.func_attr({\"tir.noalias\": True, \"op_pattern\": 0})\n",
     "        for ax0, ax1 in T.grid(T.int64(10), T.int64(20)):\n",
     "            with T.block(\"T_squeeze\"):\n",
     "                v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
     "                T.reads(A[v_ax0, v_ax1])\n",
     "                T.writes(A[v_ax0, v_ax1])\n",
     "                A[v_ax0, v_ax1] = A[v_ax0, v_ax1]\n",
     "\n",
     "    @R.function(private=True)\n",
     "    def fused_add_exp_inplace_squeeze_inplace(\n",
     "        x: R.Tensor((10, 20), dtype=\"float32\"), p0: R.Tensor((), dtype=\"float32\")\n",
     "    ) -> R.Tensor((10, 20), dtype=\"float32\"):\n",
     "        R.func_attr({\"Primitive\": True})\n",
     "        cls = Expected\n",
     "        with R.dataflow():\n",
     "            lv = R.call_tir(\n",
     "                cls.add,\n",
     "                (x, p0),\n",
     "                out_sinfo=R.Tensor((10, 20), dtype=\"float32\"),\n",
     "            )\n",
     "            lv1 = R.call_tir_inplace(\n",
     "                cls.exp_inplace,\n",
     "                (lv,),\n",
     "                inplace_indices=[0],\n",
     "                out_sinfo=R.Tensor((10, 20), dtype=\"float32\"),\n",
     "            )\n",
     "            gv = R.call_tir_inplace(\n",
     "                cls.squeeze_inplace,\n",
     "                (lv1,),\n",
     "                inplace_indices=[0],\n",
     "                out_sinfo=R.Tensor((10, 20), dtype=\"float32\"),\n",
     "            )\n",
     "            R.output(gv)\n",
     "        return gv\n",
     "\n",
     "    @R.function\n",
     "    def main(\n",
     "        x: R.Tensor((10, 20), dtype=\"float32\"), p0: R.Tensor((), dtype=\"float32\")\n",
     "    ) -> R.Tensor((10, 20), dtype=\"float32\"):\n",
     "        cls = Expected\n",
     "        with R.dataflow():\n",
     "            gv1: R.Tensor(\n",
     "                (10, 20), dtype=\"float32\"\n",
     "            ) = cls.fused_add_exp_inplace_squeeze_inplace(x, p0)\n",
     "            R.output(gv1)\n",
     "        return gv1\n",
     "\n",
     "_check(Module, Expected)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f5d84f82",
   "metadata": {},
   "source": [
    "## 测试包含打包参数的算子融合\n",
    "\n",
    "该测试验证当函数参数是打包的元组时，算子融合是否能正确处理这种情况。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "f26e30e8",
   "metadata": {},
   "outputs": [],
   "source": [
     "# main takes a packed tuple of fp16 parameters plus a \"num_input\" attr;\n",
     "# the test asserts FuseOps leaves this module unchanged (Expected = Before).\n",
     "@I.ir_module\n",
     "class Before:\n",
     "    @T.prim_func(private=True)\n",
     "    def cast(lv: T.Buffer((T.int64(16), T.int64(16)), \"float16\"), compute: T.Buffer((T.int64(16), T.int64(16)), \"float32\")):\n",
     "        T.func_attr({\"tir.noalias\": True})\n",
     "        # with T.block(\"root\"):\n",
     "        for i0, i1 in T.grid(T.int64(16), T.int64(16)):\n",
     "            with T.block(\"compute\"):\n",
     "                v_i0, v_i1 = T.axis.remap(\"SS\", [i0, i1])\n",
     "                T.reads(lv[v_i0, v_i1])\n",
     "                T.writes(compute[v_i0, v_i1])\n",
     "                compute[v_i0, v_i1] = T.Cast(\"float32\", lv[v_i0, v_i1])\n",
     "\n",
     "    @T.prim_func(private=True)\n",
     "    def matmul(x: T.Buffer((T.int64(16), T.int64(16)), \"float32\"), lv2: T.Buffer((T.int64(16), T.int64(16)), \"float32\"), T_matmul: T.Buffer((T.int64(16), T.int64(16)), \"float32\")):\n",
     "        T.func_attr({\"tir.noalias\": True})\n",
     "        # with T.block(\"root\"):\n",
     "        for ax0, ax1, k in T.grid(T.int64(16), T.int64(16), T.int64(16)):\n",
     "            with T.block(\"T_matmul\"):\n",
     "                v_ax0, v_ax1, v_k = T.axis.remap(\"SSR\", [ax0, ax1, k])\n",
     "                T.reads(x[v_ax0, v_k], lv2[v_k, v_ax1])\n",
     "                T.writes(T_matmul[v_ax0, v_ax1])\n",
     "                with T.init():\n",
     "                    T_matmul[v_ax0, v_ax1] = T.float32(0)\n",
     "                T_matmul[v_ax0, v_ax1] = T_matmul[v_ax0, v_ax1] + x[v_ax0, v_k] * lv2[v_k, v_ax1]\n",
     "\n",
     "    @R.function\n",
     "    def main(x: R.Tensor((16, 16), dtype=\"float32\"), packed_params: R.Tuple(R.Tensor((16, 16), dtype=\"float16\"), R.Tensor((16, 16), dtype=\"float16\"))) -> R.Tensor((16, 16), dtype=\"float32\"):\n",
     "        R.func_attr({\"num_input\": 1})\n",
     "        cls = Before\n",
     "        with R.dataflow():\n",
     "            lv: R.Tensor((16, 16), dtype=\"float16\") = packed_params[0]\n",
     "            lv1: R.Tensor((16, 16), dtype=\"float16\") = packed_params[1]\n",
     "            lv2 = R.call_tir(cls.cast, (lv,), out_sinfo=R.Tensor((16, 16), dtype=\"float32\"))\n",
     "            lv3 = R.call_tir(cls.matmul, (x, lv2), out_sinfo=R.Tensor((16, 16), dtype=\"float32\"))\n",
     "            lv4 = R.call_tir(cls.cast, (lv1,), out_sinfo=R.Tensor((16, 16), dtype=\"float32\"))\n",
     "            lv5 = R.call_tir(cls.matmul, (lv3, lv4), out_sinfo=R.Tensor((16, 16), dtype=\"float32\"))\n",
     "            gv: R.Tensor((16, 16), dtype=\"float32\") = lv5\n",
     "            R.output(gv)\n",
     "        return gv\n",
     "\n",
     "# No fusion is expected here, so the pass output must equal the input.\n",
     "Expected = Before\n",
     "_check(Before, Expected)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b592167f",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py313",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
