{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# DPL 模式匹配"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Overwriting demo.py\n"
     ]
    }
   ],
   "source": [
    "%%file demo.py\n",
    "import tvm\n",
    "from tvm.script import relax as R\n",
    "from tvm.script import tir as T\n",
    "\n",
    "@tvm.script.ir_module\n",
    "class Module:\n",
    "    @T.prim_func\n",
    "    def tir_matmul(x: T.handle, y: T.handle, z: T.handle) -> None:\n",
    "        T.func_attr({\"global_symbol\": \"tir_matmul\"})\n",
    "        k = T.int32()\n",
    "        A = T.match_buffer(x, (32, 32))\n",
    "        B = T.match_buffer(y, (32, 32))\n",
    "        C = T.match_buffer(z, (32, 32))\n",
    "\n",
    "        for i0, j0, k0 in T.grid(32, 32, 32):\n",
    "            with T.block():\n",
    "                i, j, k = T.axis.remap(\"SSR\", [i0, j0, k0])\n",
    "                with T.init():\n",
    "                    C[i, j] = 0.0\n",
    "                C[i, j] += A[i, k] * B[j, k]\n",
    "\n",
    "    @T.prim_func\n",
    "    def tir_relu(x: T.handle, y: T.handle):\n",
    "        T.func_attr({\"global_symbol\": \"tir_relu\"})\n",
    "        A = T.match_buffer(x, (32, 32))\n",
    "        B = T.match_buffer(y, (32, 32))\n",
    "        for i, j in T.grid(32, 32):\n",
    "            with T.block():\n",
    "                vi, vj = T.axis.remap(\"SS\", [i, j])\n",
    "                B[vi, vj] = T.max(A[vi, vj], 0.0)\n",
    "\n",
    "    @T.prim_func\n",
    "    def tir_zeros(x: T.handle, n: T.int64):\n",
    "        T.func_attr({\"global_symbol\": \"tir_zeros\"})\n",
    "        A = T.match_buffer(x, [n])\n",
    "        for i in range(n):\n",
    "            with T.block():\n",
    "                vi = T.axis.remap(\"S\", [i])\n",
    "                A[vi] = 1.0  # NOTE: fills with 1.0 despite the name tir_zeros\n",
    "\n",
    "    @R.function\n",
    "    def main(x: R.Tensor((32, 32), \"float32\"), w: R.Tensor((32, 32), \"float32\")) -> R.Tuple:\n",
    "        cls = Module\n",
    "        with R.dataflow():\n",
    "            lv0 = R.call_tir(cls.tir_matmul, (x, w), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv1 = R.call_tir(cls.tir_relu, (lv0,), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv2 = R.call_tir(\n",
    "                cls.tir_zeros, [], R.Tensor((32,), dtype=\"float32\"), tir_vars=R.ShapeExpr([32])\n",
    "            )\n",
    "            gv = (lv1, lv2)\n",
    "            R.output(gv)\n",
    "        return gv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "tags": [
     "hide-output"
    ]
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import tir as T</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #AA22FF\">@I</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #0000FF; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #AA22FF\">@T</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>prim_func\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">tir_matmul</span>(A: T<span style=\"color: #AA22FF; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), <span style=\"color: #BA2121\">&quot;float32&quot;</span>), B: T<span style=\"color: #AA22FF; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), <span style=\"color: #BA2121\">&quot;float32&quot;</span>), C: T<span style=\"color: #AA22FF; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), <span style=\"color: #BA2121\">&quot;float32&quot;</span>)):\n",
       "        <span style=\"color: #007979; font-style: italic\"># with T.block(&quot;root&quot;):</span>\n",
       "        <span style=\"color: #008000; font-weight: bold\">for</span> i0, j0, k0 <span style=\"color: #008000; font-weight: bold\">in</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>grid(<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>):\n",
       "            <span style=\"color: #008000; font-weight: bold\">with</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>block(<span style=\"color: #BA2121\">&quot;&quot;</span>):\n",
       "                i, j, k <span style=\"color: #AA22FF; font-weight: bold\">=</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>axis<span style=\"color: #AA22FF; font-weight: bold\">.</span>remap(<span style=\"color: #BA2121\">&quot;SSR&quot;</span>, [i0, j0, k0])\n",
       "                T<span style=\"color: #AA22FF; font-weight: bold\">.</span>reads(A[i, k], B[j, k])\n",
       "                T<span style=\"color: #AA22FF; font-weight: bold\">.</span>writes(C[i, j])\n",
       "                <span style=\"color: #008000; font-weight: bold\">with</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>init():\n",
       "                    C[i, j] <span style=\"color: #AA22FF; font-weight: bold\">=</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>float32(<span style=\"color: #008000\">0.0</span>)\n",
       "                C[i, j] <span style=\"color: #AA22FF; font-weight: bold\">=</span> C[i, j] <span style=\"color: #AA22FF; font-weight: bold\">+</span> A[i, k] <span style=\"color: #AA22FF; font-weight: bold\">*</span> B[j, k]\n",
       "\n",
       "    <span style=\"color: #AA22FF\">@T</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>prim_func\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">tir_relu</span>(A: T<span style=\"color: #AA22FF; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), <span style=\"color: #BA2121\">&quot;float32&quot;</span>), B: T<span style=\"color: #AA22FF; font-weight: bold\">.</span>Buffer((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), <span style=\"color: #BA2121\">&quot;float32&quot;</span>)):\n",
       "        <span style=\"color: #007979; font-style: italic\"># with T.block(&quot;root&quot;):</span>\n",
       "        <span style=\"color: #008000; font-weight: bold\">for</span> i, j <span style=\"color: #008000; font-weight: bold\">in</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>grid(<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>):\n",
       "            <span style=\"color: #008000; font-weight: bold\">with</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>block(<span style=\"color: #BA2121\">&quot;&quot;</span>):\n",
       "                vi, vj <span style=\"color: #AA22FF; font-weight: bold\">=</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>axis<span style=\"color: #AA22FF; font-weight: bold\">.</span>remap(<span style=\"color: #BA2121\">&quot;SS&quot;</span>, [i, j])\n",
       "                T<span style=\"color: #AA22FF; font-weight: bold\">.</span>reads(A[vi, vj])\n",
       "                T<span style=\"color: #AA22FF; font-weight: bold\">.</span>writes(B[vi, vj])\n",
       "                B[vi, vj] <span style=\"color: #AA22FF; font-weight: bold\">=</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>max(A[vi, vj], T<span style=\"color: #AA22FF; font-weight: bold\">.</span>float32(<span style=\"color: #008000\">0.0</span>))\n",
       "\n",
       "    <span style=\"color: #AA22FF\">@T</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>prim_func\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">tir_zeros</span>(x: T<span style=\"color: #AA22FF; font-weight: bold\">.</span>handle, n: T<span style=\"color: #AA22FF; font-weight: bold\">.</span>int64):\n",
       "        A <span style=\"color: #AA22FF; font-weight: bold\">=</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>match_buffer(x, (n,))\n",
       "        <span style=\"color: #007979; font-style: italic\"># with T.block(&quot;root&quot;):</span>\n",
       "        <span style=\"color: #008000; font-weight: bold\">for</span> i <span style=\"color: #008000; font-weight: bold\">in</span> range(n):\n",
       "            <span style=\"color: #008000; font-weight: bold\">with</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>block(<span style=\"color: #BA2121\">&quot;&quot;</span>):\n",
       "                vi <span style=\"color: #AA22FF; font-weight: bold\">=</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>axis<span style=\"color: #AA22FF; font-weight: bold\">.</span>spatial(n, i)\n",
       "                T<span style=\"color: #AA22FF; font-weight: bold\">.</span>reads()\n",
       "                T<span style=\"color: #AA22FF; font-weight: bold\">.</span>writes(A[vi])\n",
       "                A[vi] <span style=\"color: #AA22FF; font-weight: bold\">=</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>float32(<span style=\"color: #008000\">1.0</span>)\n",
       "\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">main</span>(x: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), w: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple:\n",
       "        cls <span style=\"color: #AA22FF; font-weight: bold\">=</span> Module\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            lv0 <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #AA22FF; font-weight: bold\">.</span>tir_matmul, (x, w), out_sinfo<span style=\"color: #AA22FF; font-weight: bold\">=</span>R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            lv1 <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #AA22FF; font-weight: bold\">.</span>tir_relu, (lv0,), out_sinfo<span style=\"color: #AA22FF; font-weight: bold\">=</span>R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>))\n",
       "            lv2 <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>call_tir(cls<span style=\"color: #AA22FF; font-weight: bold\">.</span>tir_zeros, R<span style=\"color: #AA22FF; font-weight: bold\">.</span>tuple(), out_sinfo<span style=\"color: #AA22FF; font-weight: bold\">=</span>R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">32</span>,), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), tir_vars<span style=\"color: #AA22FF; font-weight: bold\">=</span>R<span style=\"color: #AA22FF; font-weight: bold\">.</span>shape([<span style=\"color: #008000\">32</span>]))\n",
       "            gv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">32</span>,), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #AA22FF; font-weight: bold\">=</span> lv1, lv2\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "from demo import Module\n",
    "\n",
    "main_fn = Module[\"main\"]\n",
    "bindings = main_fn.body.blocks[0].bindings\n",
    "Module.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[x: R.Tensor((32, 32), dtype=\"float32\")\n",
       "w: R.Tensor((32, 32), dtype=\"float32\")\n",
       "lv0 = R.call_tir(tir_matmul, (x, w), out_sinfo=R.Tensor((32, 32), dtype=\"float32\")), lv0: R.Tensor((32, 32), dtype=\"float32\")\n",
       "lv1 = R.call_tir(tir_relu, (lv0,), out_sinfo=R.Tensor((32, 32), dtype=\"float32\")), lv2 = R.call_tir(tir_zeros, R.tuple(), out_sinfo=R.Tensor((32,), dtype=\"float32\"), tir_vars=R.shape([32])), lv1: R.Tensor((32, 32), dtype=\"float32\")\n",
       "lv2: R.Tensor((32,), dtype=\"float32\")\n",
       "gv: R.Tuple(R.Tensor((32, 32), dtype=\"float32\"), R.Tensor((32,), dtype=\"float32\")) = lv1, lv2]"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bindings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tvm.script import relax as R\n",
    "from tvm.script import tir as T\n",
    "from tvm import relax as rx\n",
    "from tvm import relay, tir\n",
    "from tvm.relax.analysis import get_var2val\n",
    "from tvm.relax.dpl import *"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 节点级匹配"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "测试表达式模式："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "ep = is_expr(rx.Var(\"x\"))\n",
    "assert isinstance(ep, ExprPattern)\n",
    "assert isinstance(ep.expr, rx.Var)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "测试变量模式："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "v = is_var(\"x\")\n",
    "assert isinstance(v, VarPattern)\n",
    "assert v.name == \"x\"\n",
    "assert v.match(rx.Var(\"x\"))\n",
    "assert is_var().match(rx.Var(\"x\"))\n",
    "assert is_var().match(rx.DataflowVar(\"x\"))  # DataflowVar 也是 Var\n",
    "assert not v.match(rx.GlobalVar(\"x\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "v = is_dfv(\"x\")\n",
    "assert isinstance(v, DataflowVarPattern)\n",
    "assert v.name == \"x\"\n",
    "assert v.match(rx.DataflowVar(\"x\"))\n",
    "assert not v.match(rx.GlobalVar(\"x\"))\n",
    "assert is_dfv().match(bindings[0].var)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "assert is_gv(\"x\").match(rx.GlobalVar(\"x\"))\n",
    "# TODO: 由于与 PyTorch 的符号冲突，正则表达式功能暂时被禁用\n",
    "# assert is_gv(\"x.*\").match(rx.GlobalVar(\"x_2\"))\n",
    "assert is_gv().match(rx.GlobalVar(\"x\"))\n",
    "assert not is_gv(\"x\").match(rx.GlobalVar(\"y\"))\n",
    "assert not is_gv(\"x\").match(rx.Var(\"x\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配常量："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "c = is_const()\n",
    "assert isinstance(c, ConstantPattern)\n",
    "assert c.match(rx.const([[0.1, 1.1, 2.1], [3.1, 4.1, 5.1]]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "模糊匹配："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "wc = wildcard()\n",
    "assert isinstance(wc, WildcardPattern)\n",
    "assert wc.match(rx.Var(\"x\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "回调匹配："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "wc1 = wildcard()\n",
    "wc2 = wildcard()\n",
    "c = is_op(\"relax.add\")(wc1, wc2)\n",
    "assert isinstance(c, CallPattern)\n",
    "assert isinstance(c.args[0], WildcardPattern)\n",
    "assert isinstance(c.args[1], WildcardPattern)\n",
    "assert c.match(rx.op.add(rx.Var(\"x\"), rx.Var(\"y\")))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配函数："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "wc1 = wildcard()\n",
    "wc2 = wildcard()\n",
    "f = FunctionPattern([wc1, wc2], is_op(\"relax.add\")(wc1, wc2))\n",
    "assert isinstance(f, FunctionPattern)\n",
    "assert isinstance(f.params[0], WildcardPattern)\n",
    "assert isinstance(f.params[1], WildcardPattern)\n",
    "assert isinstance(f.body, CallPattern)\n",
    "assert isinstance(f.body.args[0], WildcardPattern)\n",
    "assert isinstance(f.body.args[1], WildcardPattern)\n",
    "x = rx.Var(\"x\", R.Tensor(\"float32\"))\n",
    "y = rx.Var(\"y\", R.Tensor(\"float32\"))\n",
    "assert f.match(rx.Function([x, y], rx.op.add(x, y), ret_struct_info=R.Tensor(\"float32\")))\n",
    "assert not f.match(\n",
    "    rx.Function([x, y], rx.op.multiply(x, y), ret_struct_info=R.Tensor(\"float32\"))\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "元组匹配："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "wc1 = wildcard()\n",
    "wc2 = is_dfv()\n",
    "t = is_tuple([wc1, wc2])\n",
    "assert isinstance(t, TuplePattern)\n",
    "assert isinstance(t.fields[0], WildcardPattern)\n",
    "assert isinstance(t.fields[1], DataflowVarPattern)\n",
    "assert t.match(rx.Tuple([rx.GlobalVar(\"x\"), rx.DataflowVar(\"y\")]))\n",
    "assert not t.match(rx.Tuple([rx.DataflowVar(\"x\"), rx.GlobalVar(\"y\")]))\n",
    "assert not t.match(rx.Tuple([]))\n",
    "assert t[0].match(rx.TupleGetItem(rx.Tuple([rx.GlobalVar(\"x\"), rx.DataflowVar(\"y\")]), 0))\n",
    "assert t[1].match(rx.TupleGetItem(rx.Tuple([rx.GlobalVar(\"x\"), rx.DataflowVar(\"y\")]), 1))\n",
    "# Negative index is also allowed\n",
    "assert t[-1].match(rx.TupleGetItem(rx.Tuple([rx.GlobalVar(\"x\"), rx.DataflowVar(\"y\")]), 1))\n",
    "# None means any index.\n",
    "assert t[None].match(rx.TupleGetItem(rx.Tuple([rx.GlobalVar(\"x\"), rx.DataflowVar(\"y\")]), 0))\n",
    "assert t[None].match(rx.TupleGetItem(rx.Tuple([rx.GlobalVar(\"x\"), rx.DataflowVar(\"y\")]), 1))\n",
    "import pytest\n",
    "with pytest.raises(IndexError):\n",
    "    t[2]  # index cannot be greater than or equal to the tuple size.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "t = is_tuple([is_const(), is_dfv()], unordered=True)\n",
    "assert isinstance(t, UnorderedTuplePattern)\n",
    "assert isinstance(t.fields[0], ConstantPattern)\n",
    "assert isinstance(t.fields[1], DataflowVarPattern)\n",
    "assert t.match(rx.Tuple([rx.const([]), rx.DataflowVar(\"x\")]))\n",
    "assert t.match(rx.Tuple([rx.DataflowVar(\"x\"), rx.const([])]))\n",
    "assert not t.match(rx.Tuple([rx.DataflowVar(\"x\"), rx.DataflowVar(\"y\")]))\n",
    "assert not t.match(rx.Tuple([]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "assert is_tuple_get_item(is_tuple([is_gv(\"x\"), is_dfv(\"y\")]), 0).match(\n",
    "    rx.TupleGetItem(rx.Tuple([rx.GlobalVar(\"x\"), rx.DataflowVar(\"y\")]), 0)\n",
    ")\n",
    "assert is_tuple_get_item(is_tuple([is_gv(\"x\"), is_dfv(\"y\")]), 1).match(\n",
    "    rx.TupleGetItem(rx.Tuple([rx.GlobalVar(\"x\"), rx.DataflowVar(\"y\")]), 1)\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配 `or`："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "dfv_or_gv = is_dfv(\"x\") | is_gv(\"x\")\n",
    "assert isinstance(dfv_or_gv, OrPattern)\n",
    "assert dfv_or_gv.match(rx.DataflowVar(\"x\"))\n",
    "assert dfv_or_gv.match(rx.GlobalVar(\"x\"))\n",
    "assert not dfv_or_gv.match(rx.Var(\"x\"))\n",
    "assert not dfv_or_gv.match(rx.DataflowVar(\"y\"))\n",
    "assert not dfv_or_gv.match(rx.GlobalVar(\"y\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配 `and`："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# float[2, 3, 3]\n",
    "f32_233 = wildcard().has_shape((2, 3, 3)) & has_dtype(\"float32\")\n",
    "assert isinstance(f32_233, AndPattern)\n",
    "assert f32_233.match(rx.Var(\"x\", R.Tensor((2, 3, 3), \"float32\")))\n",
    "assert not f32_233.match(rx.Var(\"x\", R.Tensor((3, 3, 3), \"float32\")))\n",
    "assert not f32_233.match(rx.Var(\"x\", R.Tensor(\"float32\", ndim=3)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配 `not`："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "no_shape233 = ~wildcard().has_shape((2, 3, 3))\n",
    "assert isinstance(no_shape233, NotPattern)\n",
    "assert no_shape233.match(rx.Var(\"x\", R.Tensor((3, 3, 3), \"float32\")))\n",
    "assert not no_shape233.match(rx.Var(\"x\", R.Tensor((2, 3, 3), \"float32\")))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配 `type`："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "assert wildcard().has_type(rx.DynTensorType(2, \"float32\")).match(bindings[0].var)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配 `dtype`："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "dtype = \"float16\"\n",
    "pattern = has_dtype(dtype)\n",
    "assert isinstance(pattern, DataTypePattern)\n",
    "assert pattern.dtype == dtype\n",
    "assert has_dtype(\"float32\").match(bindings[0].var)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配 `shape`："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "shape = [32, 32]\n",
    "pattern = wildcard().has_shape(shape)\n",
    "assert isinstance(pattern, ShapePattern)\n",
    "tvm.ir.structural_equal(pattern.shape, shape)\n",
    "assert pattern.match(bindings[0].var)\n",
    "assert wildcard().has_shape([32, 32]).match(bindings[0].var)\n",
    "n, m = tir.Var(\"n\", dtype=\"int64\"), tir.Var(\"m\", dtype=\"int64\")\n",
    "symsh_var = rx.Var(\"x\", R.Tensor([n, m, n + m], \"float32\"))\n",
    "assert wildcard().has_shape([n, m, n + m]).match(symsh_var)\n",
    "assert wildcard().has_shape([n, m, m + n]).match(symsh_var)  # + is commutative.\n",
    "assert not wildcard().has_shape([1, 2, 3]).match(symsh_var)\n",
    "assert not wildcard().has_shape([m, n, n + m]).match(symsh_var)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配 `PrimArray`：\n",
    "\n",
    "```{note}\n",
    ":class: margin\n",
    "\n",
    "`is_shape` 和 `has_shape` 的区别在于：\n",
    "\n",
    "1. `is_shape` 直接匹配形状（例如，作为参数）；\n",
    "2. `has_shape` 匹配张量并对该张量的形状做出假设。\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "pattern = is_shape([32, 32])\n",
    "assert pattern[0] == 32\n",
    "assert pattern[1] == 32\n",
    "assert isinstance(pattern, PrimArrPattern)\n",
    "assert pattern.match(rx.get_shape_of(bindings[0].var))\n",
    "n, m = tir.Var(\"n\", dtype=\"int64\"), tir.Var(\"m\", dtype=\"int64\")\n",
    "symbolic_shape = rx.ShapeExpr([n, m, n + m])\n",
    "assert is_shape([n, m, n + m]).match(symbolic_shape)\n",
    "assert not is_shape([n, m, n * m]).match(symbolic_shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配外部函数："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "pattern = ExternFuncPattern(\"test.blockbuilder.nop\")\n",
    "assert pattern.match(rx.ExternFunc(\"test.blockbuilder.nop\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配算子属性："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "x = rx.Var(\"x\", R.Tensor(\"float32\"))\n",
    "y = rx.Var(\"y\", R.Tensor(\"float32\"))\n",
    "conv2d = relay.nn.conv2d(x, y, kernel_size=(3, 3))\n",
    "xp = is_var(\"x\")\n",
    "yp = is_var(\"y\")\n",
    "# TODO(@yuchen): reenable the assert after figuring out why it fails\n",
    "# assert is_op(\"nn.conv2d\")(xp, yp).has_attr({\"kernel_size\": [3, 3]}).match(conv2d)\n",
    "assert not is_op(\"nn.conv2d\")(xp, yp).has_attr({\"kernel_size\": [4, 3]}).match(conv2d)\n",
    "assert not is_op(\"nn.conv2d\")(xp, yp).has_attr({\"kernel_size_\": [3, 3]}).match(conv2d)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配 call 属性："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "x = rx.Var(\"x\", R.Tensor(\"float32\"))\n",
    "y = rx.Var(\"y\", R.Tensor(\"float32\"))\n",
    "fn = rx.Function([x, y], rx.op.add(x, y), ret_struct_info=R.Tensor(\"float32\"))\n",
    "annotated_fn = fn.with_attr({\"Codegen\": \"test-codegen\", \"global_symbol\": \"test-symbol\"})\n",
    "xp = is_var(\"x\")\n",
    "yp = is_var(\"y\")\n",
    "root_pattern = FunctionPattern([xp, yp], is_op(\"relax.add\")(xp, yp))\n",
    "assert root_pattern.has_attr({\"Codegen\": \"test-codegen\", \"global_symbol\": \"test-symbol\"}).match(\n",
    "    annotated_fn\n",
    ")\n",
    "\n",
    "assert root_pattern.has_attr({\"Codegen\": \"test-codegen\"}).match(annotated_fn)\n",
    "assert not root_pattern.has_attr({\"ping\": \"pong\"}).match(annotated_fn)\n",
    "assert root_pattern.has_attr({}).match(annotated_fn)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配 `is_call_tir`："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "lv1_val = bindings[1].value\n",
    "lv2_val = bindings[2].value\n",
    "var2val = get_var2val(Module[\"main\"])\n",
    "assert is_call_tir(\"tir_relu\").match(lv1_val)\n",
    "assert is_call_tir(\"tir_relu\", [is_call_tir(\"tir_matmul\")]).match(lv1_val, var2val=var2val)\n",
    "assert not is_call_tir(\"tir_relu\", [is_call_tir(\"tir_relu\")]).match(lv1_val, var2val=var2val)\n",
    "assert is_call_tir(\"tir_zeros\", wildcard(), wildcard()).match(lv2_val, var2val=var2val)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "匹配 `call_packed`："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "@R.function(pure=False)\n",
    "def simple_call_packed(\n",
    "    x: R.Tensor((32, 32), \"float32\"), w: R.Tensor((32, 32), \"float32\")\n",
    ") -> R.Tensor:\n",
    "    gv0 = R.call_packed(\"test.vm.mul\", x, w, sinfo_args=(R.Tensor(ndim=2, dtype=\"float32\")))\n",
    "    return gv0\n",
    "\n",
    "\n",
    "def test_varg_default_wildcard():\n",
    "    expr = simple_call_packed.body.blocks[0].bindings[0].value\n",
    "    yes_pattern_explicit = ExternFuncPattern(\"test.vm.mul\")(wildcard(), wildcard())\n",
    "    yes_pattern_implicit = ExternFuncPattern(\"test.vm.mul\")(varg_default_wildcard=True)\n",
    "    no_pattern = ExternFuncPattern(\"test.vm.mul\")(wildcard())\n",
    "\n",
    "    assert yes_pattern_explicit.match(expr)\n",
    "    assert yes_pattern_implicit.match(expr)\n",
    "    assert not no_pattern.match(expr)\n",
    "\n",
    "\n",
    "def test_simple_call_packed():\n",
    "    expr = simple_call_packed.body.blocks[0].bindings[0].value\n",
    "    assert is_call_packed(\"test.vm.mul\").match(expr)\n",
    "    assert is_call_packed(\"test.vm.mul\", [is_var(\"x\"), is_var(\"w\")]).match(expr)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 图级匹配"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_simple_used_by():\n",
    "    with PatternContext() as ctx:\n",
    "        n0 = is_var(\"x\")  # x is a free var (fn arg)\n",
    "        n1 = wildcard()\n",
    "        n0 ^ n1\n",
    "        dfb = main_fn.body.blocks[0]\n",
    "        matched = ctx.match_dfb(dfb)\n",
    "        assert matched\n",
    "        assert matched[n0] == main_fn.params[0]\n",
    "        assert matched[n1] == dfb.bindings[0].var\n",
    "\n",
    "\n",
    "def test_simple_call_tir_edge():\n",
    "    with PatternContext() as ctx:\n",
    "        n0 = is_call_tir(\"tir_matmul\")\n",
    "        n1 = is_call_tir(\"tir_relu\")\n",
    "        n0.used_by(n1)\n",
    "        dfb = main_fn.body.blocks[0]\n",
    "        matched = ctx.match_dfb(dfb)\n",
    "        assert matched\n",
    "        assert matched[n0] == dfb.bindings[0].var\n",
    "        assert matched[n1] == dfb.bindings[1].var\n",
    "\n",
    "\n",
    "def test_simple_oub():\n",
    "    with PatternContext() as ctx:\n",
    "        n0 = is_call_tir(\"tir_matmul\")\n",
    "        n1 = is_call_tir(\"tir_relu\")\n",
    "        n0 >> n1\n",
    "        dfb = main_fn.body.blocks[0]\n",
    "        matched = ctx.match_dfb(dfb)\n",
    "        assert matched\n",
    "        assert matched[n0] == dfb.bindings[0].var\n",
    "        assert matched[n1] == dfb.bindings[1].var\n",
    "\n",
    "\n",
    "def test_counter_syntax_match():\n",
    "    with PatternContext() as ctx:\n",
    "        n0 = is_call_dps_packed(\"extern_matmul\")\n",
    "        n1 = is_call_dps_packed(\"extern_impossible\")\n",
    "        n0 >> n1\n",
    "        dfb = main_fn.body.blocks[0]\n",
    "        assert not ctx.match_dfb(dfb)\n",
    "\n",
    "    with PatternContext() as ctx:\n",
    "        n0 = is_call_dps_packed(\"extern_matmul\")\n",
    "        n1 = is_call_dps_packed(\"extern_impossible\")\n",
    "        n0 ^ n1\n",
    "        dfb = main_fn.body.blocks[0]\n",
    "        assert not ctx.match_dfb(dfb)\n",
    "\n",
    "\n",
    "@tvm.script.ir_module\n",
    "class Diamond:\n",
    "    @R.function\n",
    "    def main(x: R.Tensor((32, 32), \"float32\"), w: R.Tensor((32, 32), \"float32\")) -> R.Tensor:\n",
    "        with R.dataflow():\n",
    "            #   matmul\n",
    "            #  /      \\\n",
    "            # relu  sigmoid\n",
    "            #  \\      /\n",
    "            #    add\n",
    "            lv0 = R.call_dps_packed(\"extern_matmul\", (x, w), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv1 = R.call_dps_packed(\"extern_relu\", (lv0,), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv2 = R.call_dps_packed(\"extern_sigmoid\", (lv0), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv3 = R.call_dps_packed(\"extern_add\", (lv1, lv2), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            R.output(lv3)\n",
    "        return lv3\n",
    "\n",
    "\n",
    "def test_diamond():\n",
    "    with PatternContext() as ctx:\n",
    "        n0 = is_call_dps_packed(\"extern_matmul\")\n",
    "        n1 = is_call_dps_packed(\"extern_relu\")\n",
    "        n2 = is_call_dps_packed(\"extern_sigmoid\")\n",
    "        n3 = is_call_dps_packed(\"extern_add\")\n",
    "\n",
    "        n0 ^ n1\n",
    "        n0 ^ n2\n",
    "        n1 >> n3\n",
    "        n2 >> n3\n",
    "\n",
    "        dfb = Diamond[\"main\"].body.blocks[0]\n",
    "\n",
    "        assert ctx.match_dfb(dfb)\n",
    "    # simplify it with fork_to\n",
    "    with PatternContext() as ctx:\n",
    "        n1 = is_call_dps_packed(\"extern_relu\")\n",
    "        n2 = is_call_dps_packed(\"extern_sigmoid\")\n",
    "        n3 = is_call_dps_packed(\"extern_add\")\n",
    "\n",
    "        is_call_dps_packed(\"extern_matmul\").fork_to(n1, n2)\n",
    "        n1 >> n3\n",
    "        n2 >> n3\n",
    "\n",
    "        dfb = Diamond[\"main\"].body.blocks[0]\n",
    "        assert ctx.match_dfb(dfb)\n",
    "\n",
    "\n",
    "def test_diamond_counter_oub():\n",
    "    with PatternContext() as ctx:\n",
    "        n0 = is_call_dps_packed(\"extern_matmul\")\n",
    "        n1 = is_call_dps_packed(\"extern_relu\")\n",
    "        n2 = is_call_dps_packed(\"extern_sigmoid\")\n",
    "        n3 = is_call_dps_packed(\"extern_add\")\n",
    "\n",
    "        n0 >> n1\n",
    "        n0 >> n2\n",
    "        n1 >> n3\n",
    "        n2 >> n3\n",
    "\n",
    "        dfb = Diamond[\"main\"].body.blocks[0]\n",
    "        assert not ctx.match_dfb(dfb)\n",
    "\n",
    "\n",
    "@tvm.script.ir_module\n",
    "class SmallDiamond:\n",
    "    @R.function\n",
    "    def main(x: R.Tensor((32, 32), \"float32\")) -> R.Tensor:\n",
    "        with R.dataflow():\n",
    "            #    relu\n",
    "            #  /      \\\n",
    "            #  \\      /\n",
    "            #    add\n",
    "            lv0 = R.call_dps_packed(\"my_relu\", (x,), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv1 = R.call_dps_packed(\"my_add\", (lv0, lv0), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            R.output(lv1)\n",
    "        return lv1\n",
    "\n",
    "\n",
    "@tvm.script.ir_module\n",
    "class SmallParallel:\n",
    "    @R.function\n",
    "    def main(x: R.Tensor((32, 32), \"float32\")) -> R.Tensor:\n",
    "        with R.dataflow():\n",
    "            # relu   relu\n",
    "            #   \\    /\n",
    "            #    add\n",
    "            lv0 = R.call_dps_packed(\"my_relu\", (x,), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv1 = R.call_dps_packed(\"my_relu\", (x,), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv2 = R.call_dps_packed(\"my_add\", (lv0, lv1), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            R.output(lv2)\n",
    "        return lv2\n",
    "\n",
    "\n",
    "def test_distinguish_diamond_and_parallel():\n",
    "    # relay pattern lang cannot distinguish the two cases above.\n",
    "    diamond = SmallDiamond[\"main\"].body.blocks[0]\n",
    "    parallel = SmallParallel[\"main\"].body.blocks[0]\n",
    "\n",
    "    with PatternContext() as ctx:\n",
    "        # describe a diamond pattern\n",
    "        fork = is_call_dps_packed(\"my_relu\")\n",
    "        join = is_call_dps_packed(\"my_add\")\n",
    "        fork.only_used_by(join, index=0)\n",
    "        fork.only_used_by(join, index=1)\n",
    "\n",
    "        assert ctx.match_dfb(diamond)\n",
    "        assert not ctx.match_dfb(parallel)\n",
    "\n",
    "    with PatternContext() as ctx:\n",
    "        # describe a parallel pattern\n",
    "        join = is_call_dps_packed(\"my_add\")\n",
    "        # Due to one-one matching:\n",
    "        # is_call_dps_packed(\"my_relu\") creates the 1st relu\n",
    "        is_call_dps_packed(\"my_relu\") >> join\n",
    "        # is_call_dps_packed(\"my_relu\")\n",
    "        # creates the another different relu (obj address is different)\n",
    "        is_call_dps_packed(\"my_relu\") >> join\n",
    "\n",
    "        assert ctx.match_dfb(parallel)\n",
    "        assert not ctx.match_dfb(diamond)\n",
    "\n",
    "\n",
    "@tvm.script.ir_module\n",
    "class CBRx2:\n",
    "    @R.function\n",
    "    def main(\n",
    "        x: R.Tensor((32, 32), \"float32\"),\n",
    "        w0: R.Tensor((1, 1), \"float32\"),\n",
    "        bias0: R.Tensor((32, 32), \"float32\"),\n",
    "        w1: R.Tensor((1, 1), \"float32\"),\n",
    "        bias1: R.Tensor((32, 32), \"float32\"),\n",
    "    ) -> R.Tensor:\n",
    "        # R.TensorRT's CBR Optimization Pattern\n",
    "        #     input\n",
    "        #     /   \\\n",
    "        #  cbr0   cbr1\n",
    "        #     \\   /\n",
    "        #     concat\n",
    "        with R.dataflow():\n",
    "            lv0 = R.call_dps_packed(\"conv1x1\", (x, w0), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv1 = R.call_dps_packed(\"bias_add\", (lv0, bias0), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv2 = R.call_dps_packed(\"my_relu\", (lv1), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv3 = R.call_dps_packed(\"conv1x1\", (x, w1), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv4 = R.call_dps_packed(\"bias_add\", (lv3, bias1), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv5 = R.call_dps_packed(\"my_relu\", (lv4), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv6 = R.call_dps_packed(\"concat\", (lv2, lv5), R.Tensor((32, 64), dtype=\"float32\"))\n",
    "            R.output(lv6)\n",
    "        return lv6\n",
    "\n",
    "\n",
    "def test_nested_context():\n",
    "    dfb = CBRx2[\"main\"].body.blocks[0]\n",
    "    with PatternContext() as ctx0:\n",
    "        (\n",
    "            is_call_dps_packed(\"conv1x1\")\n",
    "            >> is_call_dps_packed(\"bias_add\")\n",
    "            >> is_call_dps_packed(\"my_relu\")\n",
    "        )\n",
    "        with PatternContext() as ctx1:\n",
    "            is_call_dps_packed(\"conv1x1\") >> is_call_dps_packed(\"my_relu\")  # pattern to miss\n",
    "            with PatternContext() as ctx2:\n",
    "                is_call_dps_packed(\"bias_add\") >> is_call_dps_packed(\"my_relu\")\n",
    "                assert ctx2.match_dfb(dfb)\n",
    "                assert PatternContext.current() == ctx2\n",
    "            assert not ctx1.match_dfb(dfb)\n",
    "            assert PatternContext.current() == ctx1\n",
    "        assert ctx0.match_dfb(dfb)\n",
    "        assert PatternContext.current() == ctx0\n",
    "\n",
    "\n",
    "def test_two_cbr():\n",
    "    with PatternContext() as ctx:\n",
    "        cbr0 = (\n",
    "            is_call_dps_packed(\"conv1x1\")\n",
    "            >> is_call_dps_packed(\"bias_add\")\n",
    "            >> is_call_dps_packed(\"my_relu\")\n",
    "        )\n",
    "        cbr1 = cbr0.dup()\n",
    "\n",
    "        assert cbr0.patterns[0] != cbr1.patterns[0]\n",
    "        assert cbr0.patterns[1] != cbr1.patterns[1]\n",
    "        assert cbr0.patterns[2] != cbr1.patterns[2]\n",
    "\n",
    "        is_var(\"x\").fork_to(cbr0, cbr1)\n",
    "        dfb = CBRx2[\"main\"].body.blocks[0]\n",
    "        assert ctx.match_dfb(dfb)\n",
    "\n",
    "    with PatternContext() as ctx:\n",
    "        # Deny the pattern\n",
    "        cbr0 = (\n",
    "            is_call_dps_packed(\"conv1x1\")\n",
    "            >> is_call_dps_packed(\"bias_add\")\n",
    "            >> is_call_dps_packed(\"my_relu\")\n",
    "        )\n",
    "        cbr1 = cbr0.dup()\n",
    "\n",
    "        # input has no fork at y.\n",
    "        is_var(\"y\").fork_to(cbr0, cbr1)\n",
    "        dfb = CBRx2[\"main\"].body.blocks[0]\n",
    "        assert not ctx.match_dfb(dfb)\n",
    "\n",
    "\n",
    "def test_two_matmul():\n",
    "    # Same as Figure 2(a) in TASO paper.\n",
    "    @tvm.script.ir_module\n",
    "    class MatMul2:\n",
    "        @R.function\n",
    "        def main(\n",
    "            a: R.Tensor((32, 16), \"float32\"),\n",
    "            b: R.Tensor((16, 48), \"float32\"),\n",
    "            c: R.Tensor((48, 32), \"float32\"),\n",
    "        ) -> R.Tensor:\n",
    "            with R.dataflow():\n",
    "                lv0 = R.call_dps_packed(\"matmul\", (a, b), R.Tensor((32, 48), dtype=\"float32\"))\n",
    "                lv1 = R.call_dps_packed(\"matmul\", (lv0, c), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "                R.output(lv1)\n",
    "            return lv1\n",
    "\n",
    "    with PatternContext() as ctx:\n",
    "        is_call_dps_packed(\"matmul\") >> is_call_dps_packed(\"matmul\")\n",
    "        dfb = MatMul2[\"main\"].body.blocks[0]\n",
    "        assert ctx.match_dfb(dfb)\n",
    "\n",
    "    with PatternContext() as ctx:\n",
    "        is_call_dps_packed(\"matmul\").has_shape([32, 48]) >> is_call_dps_packed(\"matmul\").has_shape(\n",
    "            [32, 32]\n",
    "        )\n",
    "        dfb = MatMul2[\"main\"].body.blocks[0]\n",
    "        assert ctx.match_dfb(dfb)\n",
    "\n",
    "    with PatternContext() as ctx:\n",
    "        is_call_dps_packed(\"matmul\") >> is_call_dps_packed(\"matmul\") >> is_call_dps_packed(\"matmul\")\n",
    "        dfb = MatMul2[\"main\"].body.blocks[0]\n",
    "        # Three MatMul cannot match\n",
    "        assert not ctx.match_dfb(dfb)\n",
    "\n",
    "\n",
    "def test_concat_mm_split():\n",
    "    # Same as Figure 2(b) in TASO paper.\n",
    "    @tvm.script.ir_module\n",
    "    class CMS:\n",
    "        @R.function\n",
    "        def main(\n",
    "            a: R.Tensor((32, 32), \"float32\"),\n",
    "            b: R.Tensor((16, 32), \"float32\"),\n",
    "            c: R.Tensor((16, 32), \"float32\"),\n",
    "        ) -> R.Tensor:\n",
    "            with R.dataflow():\n",
    "                lv0 = R.call_dps_packed(\"my_concat\", (b, c), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "                lv1 = R.call_dps_packed(\"my_matmul\", (a, lv0), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "                lv2 = R.call_dps_packed(\n",
    "                    \"my_split\",\n",
    "                    (lv1,),\n",
    "                    [R.Tensor((16, 32), dtype=\"float32\"), R.Tensor((16, 32), dtype=\"float32\")],\n",
    "                )\n",
    "                lv3 = R.TupleGetItem(lv2, 0)\n",
    "                lv4 = R.TupleGetItem(lv2, 1)\n",
    "                lv5 = R.add(lv3, lv4)\n",
    "                R.output(lv5)\n",
    "            return lv5\n",
    "\n",
    "    with PatternContext() as ctx:\n",
    "        (\n",
    "            is_call_dps_packed(\"my_concat\")\n",
    "            >> is_call_dps_packed(\"my_matmul\")\n",
    "            >> is_call_dps_packed(\"my_split\")\n",
    "        )\n",
    "        dfb = CMS[\"main\"].body.blocks[0]\n",
    "        assert ctx.match_dfb(dfb)\n",
    "\n",
    "    with PatternContext() as ctx:\n",
    "        split = is_call_dps_packed(\"my_split\")\n",
    "        lv3 = TupleGetItemPattern(split, 0).has_shape([16, 32])\n",
    "        lv4 = TupleGetItemPattern(split, 1).has_shape([16, 32])\n",
    "        split.fork_to(lv3, lv4)\n",
    "        add = is_op(\"relax.add\")(lv3, lv4)\n",
    "        # TODO(@ganler): simplify this through implicit graph pattern.\n",
    "        lv3 >> add\n",
    "        lv4 >> add\n",
    "\n",
    "        dfb = CMS[\"main\"].body.blocks[0]\n",
    "        assert ctx.match_dfb(dfb)\n",
    "\n",
    "\n",
    "def test_self_attention():\n",
    "    # The example comes from.\n",
    "    # https://developer.nvidia.com/blog/nlu-with-tensorrt-bert/\n",
    "    @tvm.script.ir_module\n",
    "    class SelfAttention:\n",
    "        @R.function\n",
    "        def main(\n",
    "            x: R.Tensor((\"b\", \"s\", \"n\", \"h\"), \"float32\"),\n",
    "            wq: R.Tensor((\"h\", \"h\"), \"float32\"),\n",
    "            wk: R.Tensor((\"h\", \"h\"), \"float32\"),\n",
    "            wv: R.Tensor((\"h\", \"h\"), \"float32\"),\n",
    "        ) -> R.Tensor:\n",
    "            b, s, n, h = T.int64(), T.int64(), T.int64(), T.int64()\n",
    "            with R.dataflow():\n",
    "                fcq = R.call_dps_packed(\"my_fc\", (x, wq), R.Tensor((b, s, n, h), dtype=\"float32\"))\n",
    "                tpq = R.call_dps_packed(\n",
    "                    \"my_transpose\", (fcq,), R.Tensor((b, s, h, n), dtype=\"float32\")\n",
    "                )\n",
    "\n",
    "                fck = R.call_dps_packed(\"my_fc\", (x, wk), R.Tensor((b, s, n, h), dtype=\"float32\"))\n",
    "                tpk = R.call_dps_packed(\n",
    "                    \"my_transpose\", (fck,), R.Tensor((b, s, h, n), dtype=\"float32\")\n",
    "                )\n",
    "\n",
    "                mul = R.multiply(tpq, tpk)\n",
    "                scale = R.multiply(mul, R.const(1.1, \"float32\"))\n",
    "                softmax = R.call_dps_packed(\n",
    "                    \"softmax\", (scale,), R.Tensor((b, s, n, h), dtype=\"float32\")\n",
    "                )\n",
    "\n",
    "                fcv = R.call_dps_packed(\"my_fc\", (x, wv), R.Tensor((b, s, n, h), dtype=\"float32\"))\n",
    "                tpv = R.call_dps_packed(\n",
    "                    \"my_transpose\", (fcv,), R.Tensor((b, s, h, n), dtype=\"float32\")\n",
    "                )\n",
    "\n",
    "                out = R.multiply(softmax, tpv)\n",
    "                R.output(out)\n",
    "\n",
    "            return out\n",
    "\n",
    "    with PatternContext() as ctx:\n",
    "        fc_trans_q = is_call_dps_packed(\"my_fc\") >> is_call_dps_packed(\"my_transpose\")\n",
    "        fc_trans_k = fc_trans_q.dup()\n",
    "        fc_trans_v = fc_trans_q.dup()\n",
    "\n",
    "        is_var(\"x\").fork_to(fc_trans_q, fc_trans_k, fc_trans_v)\n",
    "        dfb = SelfAttention[\"main\"].body.blocks[0]\n",
    "        assert ctx.match_dfb(dfb)\n",
    "\n",
    "\n",
    "def test_nested_diamond():\n",
    "    @tvm.script.ir_module\n",
    "    class DiamondInDiamond:\n",
    "        @R.function\n",
    "        def main(x: R.Tensor((32, 32), \"float32\"), w: R.Tensor((32, 32), \"float32\")) -> R.Tensor:\n",
    "            with R.dataflow():\n",
    "                #   matmul0      matmul1\n",
    "                #     /    \\    /    \\\n",
    "                # sigmoid2  add4  sigmoid3\n",
    "                #     \\    /    \\    /\n",
    "                #      add5      add6\n",
    "                #          \\    /\n",
    "                #           add7\n",
    "                lv0 = R.call_dps_packed(\n",
    "                    \"extern_matmul\", (x, w), R.Tensor((32, 32), dtype=\"float32\")\n",
    "                )\n",
    "                lv1 = R.call_dps_packed(\n",
    "                    \"extern_matmul\", (x, w), R.Tensor((32, 32), dtype=\"float32\")\n",
    "                )\n",
    "                lv2 = R.call_dps_packed(\n",
    "                    \"extern_sigmoid\", (lv0), R.Tensor((32, 32), dtype=\"float32\")\n",
    "                )\n",
    "                lv3 = R.call_dps_packed(\n",
    "                    \"extern_sigmoid\", (lv1), R.Tensor((32, 32), dtype=\"float32\")\n",
    "                )\n",
    "                lv4 = R.call_dps_packed(\n",
    "                    \"extern_add\", (lv0, lv1), R.Tensor((32, 32), dtype=\"float32\")\n",
    "                )\n",
    "                lv5 = R.call_dps_packed(\n",
    "                    \"extern_add\", (lv2, lv4), R.Tensor((32, 32), dtype=\"float32\")\n",
    "                )\n",
    "                lv6 = R.call_dps_packed(\n",
    "                    \"extern_add\", (lv3, lv4), R.Tensor((32, 32), dtype=\"float32\")\n",
    "                )\n",
    "                lv7 = R.call_dps_packed(\n",
    "                    \"extern_add\", (lv5, lv6), R.Tensor((32, 32), dtype=\"float32\")\n",
    "                )\n",
    "                R.output(lv7)\n",
    "            return lv7\n",
    "\n",
    "    # match matmul0 diamond\n",
    "    with PatternContext() as ctx:\n",
    "        sigmoid2 = is_call_dps_packed(\"extern_sigmoid\")\n",
    "        add4 = is_call_dps_packed(\"extern_add\")\n",
    "        is_call_dps_packed(\"extern_matmul\").fork_to(sigmoid2, add4)\n",
    "        add5 = is_call_dps_packed(\"extern_add\")\n",
    "        sigmoid2 >> add5\n",
    "        add4 ^ add5\n",
    "        assert ctx.match_dfb(DiamondInDiamond[\"main\"].body.blocks[0])\n",
    "\n",
    "    # counter case: mis-match matmul0 diamond\n",
    "    with PatternContext() as ctx:\n",
    "        sigmoid2 = is_call_dps_packed(\"extern_sigmoid\")\n",
    "        add4 = is_call_dps_packed(\"extern_add\")\n",
    "        is_call_dps_packed(\"extern_matmul\").fork_to(sigmoid2, add4)\n",
    "        add5 = is_call_dps_packed(\"extern_add\")\n",
    "        sigmoid2 >> add5\n",
    "        add4 >> add5  # not only-used-by relation\n",
    "        assert not ctx.match_dfb(DiamondInDiamond[\"main\"].body.blocks[0])\n",
    "\n",
    "    # match matmul1 diamond\n",
    "    with PatternContext() as ctx:\n",
    "        sigmoid3 = is_call_dps_packed(\"extern_sigmoid\")\n",
    "        add4 = is_call_dps_packed(\"extern_add\")\n",
    "        is_call_dps_packed(\"extern_matmul\").fork_to(sigmoid3, add4)\n",
    "        add6 = is_call_dps_packed(\"extern_add\")\n",
    "        sigmoid3 >> add6\n",
    "        add4 ^ add6\n",
    "        assert ctx.match_dfb(DiamondInDiamond[\"main\"].body.blocks[0])\n",
    "\n",
    "    # match add-4-5-6-7\n",
    "    with PatternContext() as ctx:\n",
    "        add5, add6, add7 = (\n",
    "            is_call_dps_packed(\"extern_add\"),\n",
    "            is_call_dps_packed(\"extern_add\"),\n",
    "            is_call_dps_packed(\"extern_add\"),\n",
    "        )\n",
    "        is_call_dps_packed(\"extern_add\").fork_to(add5, add6)  # add4\n",
    "        add5 >> add7\n",
    "        add6 >> add7\n",
    "        assert ctx.match_dfb(DiamondInDiamond[\"main\"].body.blocks[0])\n",
    "\n",
    "\n",
    "def test_incremental_solving():\n",
    "    @R.function\n",
    "    def simple_chain(x: R.Tensor((32, 32), \"float32\")) -> R.Tensor:\n",
    "        with R.dataflow():\n",
    "            # relu -> sigmoid -> neg\n",
    "            lv0 = R.call_dps_packed(\"extern_relu\", (x), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv1 = R.call_dps_packed(\"extern_sigmoid\", (lv0), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv2 = R.call_dps_packed(\"extern_neg\", (lv1), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            R.output(lv2)\n",
    "        return lv2\n",
    "\n",
    "    relu = is_call_dps_packed(\"extern_relu\")\n",
    "    sigmoid = is_call_dps_packed(\"extern_sigmoid\")\n",
    "    neg = is_call_dps_packed(\"extern_neg\")\n",
    "\n",
    "    with PatternContext() as ctx0:\n",
    "        relu >> sigmoid\n",
    "        with PatternContext(incremental=True) as ctx1:\n",
    "            # because we are doing incremental solving\n",
    "            # relu >> sigmoid is still a constraint in this context.\n",
    "            # that said the total constraint is:\n",
    "            # relu >> sigmoid >> neg\n",
    "            sigmoid >> neg\n",
    "            assert ctx1.match_dfb(simple_chain.body.blocks[0])\n",
    "\n",
    "        # match relue -> sigmoid\n",
    "        assert ctx0.match_dfb(simple_chain.body.blocks[0])\n",
    "\n",
    "\n",
    "def test_incremental_solving_counter():\n",
    "    @R.function\n",
    "    def simple_chain(x: R.Tensor((32, 32), \"float32\")) -> R.Tensor:\n",
    "        with R.dataflow():\n",
    "            # sigmoid -> neg\n",
    "            lv0 = R.call_dps_packed(\"extern_sigmoid\", (x), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            lv1 = R.call_dps_packed(\"extern_neg\", (lv0), R.Tensor((32, 32), dtype=\"float32\"))\n",
    "            R.output(lv1)\n",
    "        return lv1\n",
    "\n",
    "    relu = is_call_dps_packed(\"extern_relu\")\n",
    "    sigmoid = is_call_dps_packed(\"extern_sigmoid\")\n",
    "    neg = is_call_dps_packed(\"extern_neg\")\n",
    "\n",
    "    with PatternContext() as ctx0:\n",
    "        relu >> sigmoid  # cannot match\n",
    "\n",
    "        with PatternContext(incremental=False) as ctx1:\n",
    "            # total constraint: sigmoid >> neg\n",
    "            sigmoid >> neg\n",
    "            assert ctx1.match_dfb(simple_chain.body.blocks[0])\n",
    "\n",
    "        with PatternContext(incremental=True) as ctx1:\n",
    "            # total constraint: relu >> sigmoid >> neg\n",
    "            sigmoid >> neg\n",
    "            assert not ctx1.match_dfb(simple_chain.body.blocks[0])\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ai",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
