{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "2d573ca5",
   "metadata": {},
   "source": [
    "# Conv2D 测试"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f658c822",
   "metadata": {},
   "source": [
    "定义前端网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b5fe7840",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "\n",
    "class M(torch.nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.conv2d = torch.nn.Conv2d(3, 16, 3, 1, 1, bias=False)\n",
    "        self.relu = torch.nn.ReLU()\n",
    "        self.conv2d2 = torch.nn.Conv2d(16, 16, 3, 1, 1, bias=True)\n",
    "        self.relu2 = torch.nn.ReLU()\n",
    "        self.conv2d3 = torch.nn.Conv2d(16, 16, 3, 1, 1, bias=True)\n",
    "        self.conv2d4 = torch.nn.Conv2d(16, 16, 3, 1, 1, bias=True)\n",
    "    def forward(self, x):\n",
    "        x = self.conv2d(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.conv2d2(x)\n",
    "        x = self.relu2(x)\n",
    "        x = self.conv2d3(x)\n",
    "        x = self.conv2d4(x)\n",
    "        return x\n",
    "input_shape = [1, 3, 32, 32]\n",
    "torch_model = M().eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "3a6dac03",
   "metadata": {},
   "outputs": [],
   "source": [
    "import tvm\n",
    "from tvm import relax\n",
    "from tvm.relax.frontend.torch import from_exported_program\n",
    "from torch.export import export\n",
    "\n",
    "# Give an example argument to torch.export\n",
    "example_args = (torch.randn(1, 3, 32, 32, dtype=torch.float32),)\n",
    "\n",
    "# Convert the model to IRModule\n",
    "with torch.no_grad():\n",
    "    exported_program = export(torch_model, example_args)\n",
    "    run_mod = from_exported_program(exported_program, keep_params_as_input=False)\n",
    "# run_mod, params = relax.frontend.detach_params(run_mod)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "1de21295",
   "metadata": {},
   "outputs": [],
   "source": [
    "from tvm.ir import IRModule\n",
    "from tvm.relax import transform\n",
    "from tvm.relax.dpl.pattern import (\n",
    "    DFPattern,\n",
    "    wildcard,\n",
    "    is_op,\n",
    ")\n",
    "from tvm.relax.transform import PatternCheckContext\n",
    "from tvm.relax.backend.patterns import (\n",
    "    make_conv2d_pattern, \n",
    "    # make_attention_pattern,\n",
    "    # make_stacked_attention_pattern,\n",
    "    # make_layer_norm_pattern,\n",
    "    # make_rms_norm_pattern,\n",
    "    # make_matmul_dequantize_pattern,\n",
    "    # make_matmul_multiply_pattern,\n",
    "    # make_attention_rewrite_pattern,\n",
    "    # make_fused_bias_activation_pattern,\n",
    "    # make_residual_block_pattern,\n",
    ")\n",
    "from tvm.relax.backend.pattern_registry import get_patterns_with_prefix, register_patterns\n",
    "from tvm.relax import expr as _expr\n",
    "def _check_conv2d(context: PatternCheckContext) -> bool:\n",
    "    # lhs = context.annotated_expr[\"lhs\"]\n",
    "    # rhs = context.annotated_expr[\"rhs\"]\n",
    "    # expr = context.annotated_expr[\"root\"]\n",
    "    # assert isinstance(lhs, relax.expr.Var) and lhs.name_hint == \"data\"\n",
    "    # assert isinstance(rhs, relax.expr.Var) and rhs.name_hint == \"weight1\"\n",
    "    # assert isinstance(expr, relax.expr.Call) and expr.op.name == \"relax.nn.conv2d\"\n",
    "    # return False\n",
    "    return True\n",
    "\n",
    "register_patterns(\n",
    "    [\n",
    "        (\n",
    "            \"vta.conv2d\",\n",
    "            *make_conv2d_pattern(\n",
    "                with_bias=False,\n",
    "            ),\n",
    "            _check_conv2d,\n",
    "        ),\n",
    "        (\n",
    "            \"vta.conv2d_bias\",\n",
    "            *make_conv2d_pattern(\n",
    "                with_bias=True,\n",
    "            ),\n",
    "            _check_conv2d,\n",
    "        ),\n",
    "        (\n",
    "            \"vta.conv2d_bias_relu\",\n",
    "            *make_conv2d_pattern(\n",
    "                with_bias=True,\n",
    "                activation=\"relax.nn.relu\",\n",
    "            ),\n",
    "            _check_conv2d,\n",
    "        ),\n",
    "        (\n",
    "            \"vta.conv2d_relu\",\n",
    "            *make_conv2d_pattern(\n",
    "                with_bias=False,\n",
    "                activation=\"relax.nn.relu\",\n",
    "            ),\n",
    "            _check_conv2d,\n",
    "        ),\n",
    "        # (\n",
    "        #     \"vta.attention.BS3NH\",\n",
    "        #     *make_stacked_attention_pattern(start_op=\"split\", layout=\"BS3NH\"),\n",
    "        #     partial(_check_stacked_attention, layout=\"BS3NH\"),\n",
    "        # ),\n",
    "        # (\n",
    "        #     \"vta.attention.SBN3H\",\n",
    "        #     *make_stacked_attention_pattern(start_op=\"split\", layout=\"SBN3H\"),\n",
    "        #     partial(_check_stacked_attention, layout=\"SBN3H\"),\n",
    "        # ),\n",
    "    ]\n",
    ")\n",
    "patterns = get_patterns_with_prefix(\"vta\")\n",
    "seq = tvm.transform.Sequential(\n",
    "    [\n",
    "        # transform.LegalizeOps(enable_warning=enable_warning),\n",
    "        # transform.RewriteDataflowReshape(),\n",
    "        transform.AnnotateTIROpPattern(),\n",
    "        transform.FoldConstant(),\n",
    "        transform.FuseOps(),\n",
    "        transform.FuseTIR(),\n",
    "        transform.FuseOpsByPattern(patterns, bind_constants=False, annotate_codegen=False)\n",
    "    ]\n",
    ")\n",
    "mod = seq(run_mod)\n",
    "mod = (mod)\n",
    "# mod = MergeCompositeFunctions()(mod)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "f3a80e5f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #A2F\">@I</span><span style=\"color: #A2F; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #00F; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">fused_relax_nn_conv2d_relax_add</span>(lv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), param_0: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), param_1: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        R<span style=\"color: #A2F; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;Composite&quot;</span>: <span style=\"color: #BA2121\">&quot;vta.conv2d_bias&quot;</span>, <span style=\"color: #BA2121\">&quot;Primitive&quot;</span>: <span style=\"color: #008000; font-weight: bold\">True</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>dataflow():\n",
       "            lv6: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>nn<span style=\"color: #A2F; font-weight: bold\">.</span>conv2d(lv, param_0, strides<span style=\"color: #A2F; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>], padding<span style=\"color: #A2F; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>], dilation<span style=\"color: #A2F; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>], groups<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000\">1</span>, data_layout<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;NCHW&quot;</span>, kernel_layout<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;OIHW&quot;</span>, out_layout<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;NCHW&quot;</span>, out_dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)\n",
       "            gv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>add(lv6, param_1)\n",
       "            R<span style=\"color: #A2F; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">fused_relax_nn_conv2d_relax_add_relax_nn_relu</span>(lv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), param_0: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), param_1: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        R<span style=\"color: #A2F; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;Composite&quot;</span>: <span style=\"color: #BA2121\">&quot;vta.conv2d_bias_relu&quot;</span>, <span style=\"color: #BA2121\">&quot;Primitive&quot;</span>: <span style=\"color: #008000; font-weight: bold\">True</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>dataflow():\n",
       "            lv2: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>nn<span style=\"color: #A2F; font-weight: bold\">.</span>conv2d(lv, param_0, strides<span style=\"color: #A2F; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>], padding<span style=\"color: #A2F; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>], dilation<span style=\"color: #A2F; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>], groups<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000\">1</span>, data_layout<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;NCHW&quot;</span>, kernel_layout<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;OIHW&quot;</span>, out_layout<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;NCHW&quot;</span>, out_dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)\n",
       "            lv4: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>add(lv2, param_1)\n",
       "            gv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>nn<span style=\"color: #A2F; font-weight: bold\">.</span>relu(lv4)\n",
       "            R<span style=\"color: #A2F; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">fused_relax_nn_conv2d_relax_nn_relu</span>(x: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), param_0: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        R<span style=\"color: #A2F; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;Composite&quot;</span>: <span style=\"color: #BA2121\">&quot;vta.conv2d_relu&quot;</span>, <span style=\"color: #BA2121\">&quot;Primitive&quot;</span>: <span style=\"color: #008000; font-weight: bold\">True</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>dataflow():\n",
       "            lv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>nn<span style=\"color: #A2F; font-weight: bold\">.</span>conv2d(x, param_0, strides<span style=\"color: #A2F; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>], padding<span style=\"color: #A2F; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>], dilation<span style=\"color: #A2F; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>], groups<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000\">1</span>, data_layout<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;NCHW&quot;</span>, kernel_layout<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;OIHW&quot;</span>, out_layout<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;NCHW&quot;</span>, out_dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)\n",
       "            gv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>nn<span style=\"color: #A2F; font-weight: bold\">.</span>relu(lv)\n",
       "            R<span style=\"color: #A2F; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">main</span>(x: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tuple(R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)):\n",
       "        cls <span style=\"color: #A2F; font-weight: bold\">=</span> Module\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>dataflow():\n",
       "            lv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> cls<span style=\"color: #A2F; font-weight: bold\">.</span>fused_relax_nn_conv2d_relax_nn_relu(x, metadata[<span style=\"color: #BA2121\">&quot;relax.expr.Constant&quot;</span>][<span style=\"color: #008000\">0</span>])\n",
       "            lv_1: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> cls<span style=\"color: #A2F; font-weight: bold\">.</span>fused_relax_nn_conv2d_relax_add_relax_nn_relu(lv, metadata[<span style=\"color: #BA2121\">&quot;relax.expr.Constant&quot;</span>][<span style=\"color: #008000\">1</span>], metadata[<span style=\"color: #BA2121\">&quot;relax.expr.Constant&quot;</span>][<span style=\"color: #008000\">2</span>])\n",
       "            lv_2: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> cls<span style=\"color: #A2F; font-weight: bold\">.</span>fused_relax_nn_conv2d_relax_add(lv_1, metadata[<span style=\"color: #BA2121\">&quot;relax.expr.Constant&quot;</span>][<span style=\"color: #008000\">3</span>], metadata[<span style=\"color: #BA2121\">&quot;relax.expr.Constant&quot;</span>][<span style=\"color: #008000\">4</span>])\n",
       "            lv1: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> cls<span style=\"color: #A2F; font-weight: bold\">.</span>fused_relax_nn_conv2d_relax_add(lv_2, metadata[<span style=\"color: #BA2121\">&quot;relax.expr.Constant&quot;</span>][<span style=\"color: #008000\">5</span>], metadata[<span style=\"color: #BA2121\">&quot;relax.expr.Constant&quot;</span>][<span style=\"color: #008000\">6</span>])\n",
       "            gv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tuple(R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">16</span>, <span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">=</span> (lv1,)\n",
       "            R<span style=\"color: #A2F; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "\n",
       "<span style=\"color: #007979; font-style: italic\"># Metadata omitted. Use show_meta=True in script() method to show it.</span>\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "mod.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "122f70c0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{\"Composite\": \"vta.conv2d_relu\", \"Primitive\": True, \"op_pattern\": 2} False\n",
      "{\"Composite\": \"vta.conv2d_bias_relu\", \"Primitive\": True, \"op_pattern\": 2} False\n",
      "{\"Composite\": \"vta.conv2d_bias\", \"Primitive\": True, \"op_pattern\": 2} False\n",
      "{\"Composite\": \"vta.conv2d_bias\", \"Primitive\": True, \"op_pattern\": 2} False\n"
     ]
    }
   ],
   "source": [
    "def fvisit(expr: tvm.relax.Expr):\n",
    "    # print(type(expr), expr)\n",
    "    if isinstance(expr, _expr.GlobalVar):\n",
    "        func = mod[expr]\n",
    "        func = func.with_attr(\"op_pattern\", 2)\n",
    "        print(func.attrs, isinstance(func, tvm.tir.PrimExpr))\n",
    "    # if isinstance(expr, _expr.Call):\n",
    "    #     # print(expr.attrs)\n",
    "    #     print(expr.op)\n",
    "expr = mod[\"main\"]\n",
    "relax.analysis.post_order_visit(expr, fvisit)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "4926b827",
   "metadata": {},
   "outputs": [],
   "source": [
    "# op = tvm.ir.Op.get(\"relax.add\")\n",
    "# op.set_attr(\"op_pattern\", OpPatternKind.kElemWise)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py313",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
