{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "53598b9c",
   "metadata": {},
   "source": [
    "# exporter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "60220063",
   "metadata": {},
   "outputs": [],
   "source": [
    "import tvm\n",
    "import tvm.testing\n",
    "\n",
    "from tvm import relax, tir\n",
    "from tvm.ir import assert_structural_equal\n",
    "from tvm.relax.frontend import nn\n",
    "from tvm.script import ir as I, relax as R, tir as T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "5632d497",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #AA22FF\">@I</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #0000FF; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">forward</span>(x: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        R<span style=\"color: #AA22FF; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;num_input&quot;</span>: <span style=\"color: #008000\">1</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            relu: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>relu(x)\n",
       "            gv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> relu\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "\"\"\"The nn.modules.* may be exported from nn.Module to Relax\"\"\"\n",
    "\n",
    "slm_mod = nn.modules.ReLU()\n",
    "exported_mod, _ = slm_mod.export_tvm(\n",
    "    spec={\"forward\": {\"x\": nn.spec.Tensor((3, 3), \"float32\")}},\n",
    "    debug=False,\n",
    ")\n",
    "exported_mod.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e8888c56",
   "metadata": {},
   "source": [
    "A user can define their own nn.Module subclasses\n",
    "\n",
    "Like the built-in subclasses, these can be exported from nn.Module\n",
    "to Relax."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "79ec09eb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #AA22FF\">@I</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #0000FF; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">forward</span>(x: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        R<span style=\"color: #AA22FF; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;num_input&quot;</span>: <span style=\"color: #008000\">1</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            relu: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>relu(x)\n",
       "            gv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> relu\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "class Before(nn.Module):\n",
    "    def forward(self, x: R.Tensor):\n",
    "        return nn.op.relu(x)\n",
    "\n",
    "slm_mod = Before()\n",
    "exported_mod, _ = slm_mod.export_tvm(\n",
    "    spec={\"forward\": {\"x\": nn.spec.Tensor((3, 3), \"float32\")}},\n",
    "    debug=False,\n",
    ")\n",
    "exported_mod.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "17de6368",
   "metadata": {},
   "source": [
    "Passing debug=True provides an argument for IO effects"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "9033e15b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #AA22FF\">@I</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #0000FF; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">_initialize_effect</span>() <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object):\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            _io: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>null_value()\n",
       "            lv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object) <span style=\"color: #AA22FF; font-weight: bold\">=</span> (_io,)\n",
       "            gv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object) <span style=\"color: #AA22FF; font-weight: bold\">=</span> lv\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">forward</span>(x: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), _io: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object)):\n",
       "        R<span style=\"color: #AA22FF; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;num_input&quot;</span>: <span style=\"color: #008000\">2</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            relu: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>relu(x)\n",
       "            gv1: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">3</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object)) <span style=\"color: #AA22FF; font-weight: bold\">=</span> relu, (_io,)\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv1)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv1\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "slm_mod = nn.modules.ReLU()\n",
    "exported_mod, _ = slm_mod.export_tvm(\n",
    "    spec={\"forward\": {\"x\": nn.spec.Tensor((3, 3), \"float32\")}},\n",
    "    debug=True,\n",
    ")\n",
    "exported_mod.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "400e32c1",
   "metadata": {},
   "source": [
    "An argument may have a dynamic shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "04d8afc8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import tir as T</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #AA22FF\">@I</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #0000FF; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">forward</span>(x: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">8</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">8</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        batch_size <span style=\"color: #AA22FF; font-weight: bold\">=</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>int64()\n",
       "        R<span style=\"color: #AA22FF; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;num_input&quot;</span>: <span style=\"color: #008000\">1</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            relu: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">8</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>relu(x)\n",
       "            gv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">8</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> relu\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "slm_mod = nn.modules.ReLU()\n",
    "exported_mod, _ = slm_mod.export_tvm(\n",
    "    spec={\"forward\": {\"x\": nn.spec.Tensor([tir.Var(\"batch_size\", \"int64\"), 8], \"float32\")}},\n",
    "    debug=False,\n",
    ")\n",
    "exported_mod.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6ab718da",
   "metadata": {},
   "source": [
    "A dynamic shape may be used in multiple functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "6f611c51",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import tir as T</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #AA22FF\">@I</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #0000FF; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">forward_relu</span>(x: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">8</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">8</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        batch_size <span style=\"color: #AA22FF; font-weight: bold\">=</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>int64()\n",
       "        R<span style=\"color: #AA22FF; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;num_input&quot;</span>: <span style=\"color: #008000\">1</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            relu: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">8</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>relu(x)\n",
       "            gv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">8</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> relu\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">forward_silu</span>(x: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">8</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">8</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        batch_size <span style=\"color: #AA22FF; font-weight: bold\">=</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>int64()\n",
       "        R<span style=\"color: #AA22FF; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;num_input&quot;</span>: <span style=\"color: #008000\">1</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            silu: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">8</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>silu(x)\n",
       "            gv1: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">8</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> silu\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv1)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv1\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "class Before(nn.Module):\n",
    "    def forward_relu(self, x: nn.Tensor):\n",
    "        return nn.relu(x)\n",
    "\n",
    "    def forward_silu(self, x: nn.Tensor):\n",
    "        return nn.silu(x)\n",
    "\n",
    "slm_mod = Before()\n",
    "exported_mod, _ = slm_mod.export_tvm(\n",
    "    spec={\n",
    "        \"forward_relu\": {\"x\": nn.spec.Tensor((tir.Var(\"batch_size\", \"int64\"), 8), \"float32\")},\n",
    "        \"forward_silu\": {\"x\": nn.spec.Tensor((tir.Var(\"batch_size\", \"int64\"), 8), \"float32\")},\n",
    "    },\n",
    "    debug=False,\n",
    ")\n",
    "exported_mod.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5103e9b1",
   "metadata": {},
   "source": [
    "nn.Module instances may contain other nn.Module\n",
    "\n",
    "When exporting to a Relax IRModule, all `nn.Parameter` instances\n",
    "within the `nn.Module` become Relax function parameters.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "d08f61b8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import tir as T</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #AA22FF\">@I</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #0000FF; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">forward</span>(x: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">4096</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>), gate_proj_weight: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">11008</span>, <span style=\"color: #008000\">4096</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>), up_proj_weight: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">11008</span>, <span style=\"color: #008000\">4096</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>), down_proj_weight: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4096</span>, <span style=\"color: #008000\">11008</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>)) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">4096</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>):\n",
       "        batch_size <span style=\"color: #AA22FF; font-weight: bold\">=</span> T<span style=\"color: #AA22FF; font-weight: bold\">.</span>int64()\n",
       "        R<span style=\"color: #AA22FF; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;num_input&quot;</span>: <span style=\"color: #008000\">1</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            permute_dims: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4096</span>, <span style=\"color: #008000\">11008</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>permute_dims(gate_proj_weight, axes<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">None</span>)\n",
       "            matmul: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">11008</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>matmul(x, permute_dims, out_dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;void&quot;</span>)\n",
       "            permute_dims1: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">4096</span>, <span style=\"color: #008000\">11008</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>permute_dims(up_proj_weight, axes<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">None</span>)\n",
       "            matmul1: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">11008</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>matmul(x, permute_dims1, out_dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;void&quot;</span>)\n",
       "            silu: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">11008</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>nn<span style=\"color: #AA22FF; font-weight: bold\">.</span>silu(matmul)\n",
       "            mul: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">11008</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>multiply(silu, matmul1)\n",
       "            permute_dims2: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">11008</span>, <span style=\"color: #008000\">4096</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>permute_dims(down_proj_weight, axes<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">None</span>)\n",
       "            matmul2: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">4096</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>matmul(mul, permute_dims2, out_dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;void&quot;</span>)\n",
       "            gv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">4096</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float16&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> matmul2\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "class LlamaMLP(nn.Module):\n",
    "    def __init__(self, hidden_size: int, intermediate_size: int):\n",
    "        super().__init__()\n",
    "        self.gate_proj = nn.Linear(\n",
    "            in_features=hidden_size,\n",
    "            out_features=intermediate_size,\n",
    "            dtype=\"float16\",\n",
    "            bias=False,\n",
    "        )\n",
    "        self.up_proj = nn.Linear(\n",
    "            in_features=hidden_size,\n",
    "            out_features=intermediate_size,\n",
    "            dtype=\"float16\",\n",
    "            bias=False,\n",
    "        )\n",
    "        self.down_proj = nn.Linear(\n",
    "            intermediate_size,\n",
    "            hidden_size,\n",
    "            dtype=\"float16\",\n",
    "            bias=False,\n",
    "        )\n",
    "\n",
    "    def forward(self, x: nn.Tensor):\n",
    "        gate = self.gate_proj(x)\n",
    "        up = self.up_proj(x)\n",
    "        return self.down_proj(nn.op.silu(gate) * up)\n",
    "\n",
    "hidden_size = 4096\n",
    "intermediate_size = 11008\n",
    "slm_mod = LlamaMLP(hidden_size=hidden_size, intermediate_size=intermediate_size)\n",
    "exported_mod, _ = slm_mod.export_tvm(\n",
    "    spec={\n",
    "        \"forward\": {\n",
    "            \"x\": nn.spec.Tensor((tir.Var(\"batch_size\", \"int64\"), hidden_size), \"float16\")\n",
    "        },\n",
    "    },\n",
    "    debug=False,\n",
    ")\n",
    "exported_mod.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "89b2c6fe",
   "metadata": {},
   "outputs": [],
   "source": [
     "import pytest\n",
     "\n",
     "@pytest.mark.xfail(reason=\"Not yet supported.  See revert https://github.com/apache/tvm/pull/16777\")\n",
    "def test_generate_parameters():\n",
    "    \"\"\"Weights may be expressions in terms of other parameters\n",
    "\n",
    "    Optimizations often require preprocessing of the model weights.\n",
    "\n",
    "    1. Declare the `nn.Module` members that contain the original model\n",
    "       weights.  These are used to define the parameter names when\n",
    "       reading from a Pytorch or Safetensors file.\n",
    "\n",
    "    2. Declare the `nn.Module` members, with the `weight` field\n",
     "       in terms of the un-optimized weights.  These `nn.Module`\n",
     "       members do not generate any parameters in the Relax function.\n",
    "\n",
    "    3. Define the `forward` function in terms of the `nn.Module`\n",
    "       members for the updated weight tensors.\n",
    "\n",
    "    The exported Relax function accepts the original model parameters,\n",
    "    computes the pre-processed weights, and then performs computations\n",
    "    using the pre-processed weights.\n",
    "\n",
    "    In this example, the `LiftTransformParams` transform is applied\n",
    "    immediately, splitting the Relax function into a pre-processing\n",
    "    step and an execution step.  In practice, this transform would be\n",
    "    applied much later in an optimization pipeline, to allow optimized\n",
    "    compute kernels to be recognized.  For example, in some cases\n",
    "    `R.matmul(x, R.permute_dims(weight))` may be computed more\n",
    "    efficiently than `R.matmul(x, weight_transpose)`.  For this\n",
    "    reason, we do *not* apply `LiftTransformParams` as part of the\n",
    "    export from `nn.Module` to Relax.\n",
    "\n",
    "    \"\"\"\n",
    "\n",
    "    class LlamaMLP(nn.Module):\n",
    "        def __init__(self, hidden_size: int, intermediate_size: int):\n",
    "            super().__init__()\n",
     "            # The nn.Linear layers for the original parameters are present in\n",
    "            # the model definition, and are still found when\n",
    "            # collecting a function's parameters.\n",
    "            self.gate_proj = nn.Linear(\n",
    "                in_features=hidden_size,\n",
    "                out_features=intermediate_size,\n",
    "                dtype=\"float16\",\n",
    "                bias=False,\n",
    "            )\n",
    "            self.up_proj = nn.Linear(\n",
    "                in_features=hidden_size,\n",
    "                out_features=intermediate_size,\n",
    "                dtype=\"float16\",\n",
    "                bias=False,\n",
    "            )\n",
    "            self.down_proj = nn.Linear(\n",
    "                intermediate_size,\n",
    "                hidden_size,\n",
    "                dtype=\"float16\",\n",
    "                bias=False,\n",
    "            )\n",
    "\n",
    "            # At runtime, we'd like to have a single concatenated\n",
    "            # tensor containing both the gate and up projection\n",
    "            # weights.  We also want to use it in the `forward`\n",
    "            # function as if it owned its own weights.\n",
    "            self.gate_up_proj = nn.Linear(\n",
    "                in_features=hidden_size,\n",
    "                out_features=intermediate_size,\n",
    "                dtype=\"float16\",\n",
    "                bias=False,\n",
    "            )\n",
    "\n",
    "            # The weight tensor of `gate_up_proj` can be overwritten\n",
    "            # in terms of the original `gate_proj` and `up_proj`\n",
    "            # tensors.\n",
    "            self.gate_up_proj.weight = nn.op.concat(\n",
    "                [self.gate_proj.weight, self.up_proj.weight], dim=0, name=\"gate_up_proj_weights\"\n",
    "            )\n",
    "\n",
    "        def forward(self, x: nn.Tensor):\n",
    "            # Even though the `gate_up_proj` weights are defined as an\n",
    "            # expression rather than a `nn.Parameter`, the `forward`\n",
    "            # function does not require any special handling for it.\n",
    "            concat_gate_up = self.gate_up_proj(x)\n",
    "            gate, up = nn.op.split(concat_gate_up, 2, axis=-1)\n",
    "            return self.down_proj(nn.op.silu(gate) * up)\n",
    "\n",
    "    hidden_size = 4096\n",
    "    intermediate_size = 11008\n",
    "    slm_mod = LlamaMLP(hidden_size=hidden_size, intermediate_size=intermediate_size)\n",
    "    exported_mod, _ = slm_mod.export_tvm(\n",
    "        spec={\n",
    "            \"forward\": {\n",
    "                \"x\": nn.spec.Tensor((tir.Var(\"batch_size\", \"int64\"), hidden_size), \"float16\")\n",
    "            },\n",
    "        },\n",
    "        debug=False,\n",
    "    )\n",
    "\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @R.function\n",
    "        def forward(\n",
    "            x: R.Tensor([\"batch_size\", hidden_size], \"float16\"),\n",
    "            # The function's parameters are defined by the\n",
    "            # `nn.Parameter` instances, and still reference the\n",
    "            # original `gate_proj` and `up_proj` weights.  This\n",
    "            # maintains compatibility with named model weights in a\n",
    "            # Pytorch or Safetensors file.\n",
    "            gate_proj_weights: R.Tensor([intermediate_size, hidden_size], \"float16\"),\n",
    "            up_proj_weights: R.Tensor([intermediate_size, hidden_size], \"float16\"),\n",
    "            down_proj_weights: R.Tensor([hidden_size, intermediate_size], \"float16\"),\n",
    "        ):\n",
    "            R.func_attr({\"num_input\": 1})\n",
    "            batch_size = T.int64()\n",
    "            with R.dataflow():\n",
    "                # At this stage of compilation, the concatenation is\n",
    "                # written within the body of the function.  This will\n",
    "                # later be extracted into a pre-processing step using\n",
    "                # `relax.transform.LiftTransformParams`.\n",
    "                gate_up_proj_weights: R.Tensor(\n",
    "                    [intermediate_size * 2, hidden_size], \"float16\"\n",
    "                ) = R.concat([gate_proj_weights, up_proj_weights], axis=0)\n",
    "                gate_up: R.Tensor([batch_size, intermediate_size * 2], \"float16\") = R.matmul(\n",
    "                    x, R.permute_dims(gate_up_proj_weights)\n",
    "                )\n",
    "                gate_up_split = R.split(gate_up, 2, axis=-1)\n",
    "                gate = gate_up_split[0]\n",
    "                up = gate_up_split[1]\n",
    "                down: R.Tensor([batch_size, hidden_size], \"float16\") = R.matmul(\n",
    "                    R.nn.silu(gate) * up, R.permute_dims(down_proj_weights)\n",
    "                )\n",
    "                R.output(down)\n",
    "            return down\n",
    "\n",
    "    assert_structural_equal(exported_mod, Expected)\n",
    "\n",
    "    @I.ir_module\n",
    "    class ExpectedAfterLift:\n",
    "        @R.function\n",
    "        def forward(\n",
    "            x: R.Tensor([\"batch_size\", hidden_size], \"float16\"),\n",
    "            # After `relax.transform.LiftTransformParams`, the\n",
    "            # `gate_proj` and `up_proj` weights have been concatenated\n",
    "            # together.\n",
    "            gate_up_proj_weights_transpose: R.Tensor(\n",
    "                [hidden_size, intermediate_size * 2], \"float16\"\n",
    "            ),\n",
    "            down_proj_weights_transpose: R.Tensor([intermediate_size, hidden_size], \"float16\"),\n",
    "        ):\n",
    "            R.func_attr({\"num_input\": 1})\n",
    "            batch_size = T.int64()\n",
    "            with R.dataflow():\n",
    "                gate_up: R.Tensor([batch_size, intermediate_size * 2], \"float16\") = R.matmul(\n",
    "                    x, gate_up_proj_weights_transpose\n",
    "                )\n",
    "                gate_up_split = R.split(gate_up, 2, axis=-1)\n",
    "                gate = gate_up_split[0]\n",
    "                up = gate_up_split[1]\n",
    "                down: R.Tensor([batch_size, hidden_size], \"float16\") = R.matmul(\n",
    "                    R.nn.silu(gate) * up, down_proj_weights_transpose\n",
    "                )\n",
    "                R.output(down)\n",
    "            return down\n",
    "\n",
    "        @R.function\n",
    "        def transform_params(\n",
    "            model_params: R.Tuple(\n",
    "                R.Tensor([intermediate_size, hidden_size], \"float16\"),\n",
    "                R.Tensor([intermediate_size, hidden_size], \"float16\"),\n",
    "                R.Tensor([hidden_size, intermediate_size], \"float16\"),\n",
    "            )\n",
    "        ):\n",
    "            R.func_attr({\"num_input\": 0})\n",
    "            with R.dataflow():\n",
    "                gate_proj_weights: R.Tensor(\n",
    "                    [intermediate_size, hidden_size], \"float16\"\n",
    "                ) = model_params[0]\n",
    "                up_proj_weights: R.Tensor(\n",
    "                    [intermediate_size, hidden_size], \"float16\"\n",
    "                ) = model_params[1]\n",
    "                gate_up_proj_weights: R.Tensor(\n",
    "                    [intermediate_size * 2, hidden_size], \"float16\"\n",
    "                ) = R.concat([gate_proj_weights, up_proj_weights], axis=0)\n",
    "                gate_up_proj_weights_transpose: R.Tensor(\n",
    "                    [hidden_size, intermediate_size * 2], \"float16\"\n",
    "                ) = R.permute_dims(gate_up_proj_weights)\n",
    "                down_proj_weights: R.Tensor(\n",
    "                    [hidden_size, intermediate_size], \"float16\"\n",
    "                ) = model_params[2]\n",
    "                down_proj_weights_transpose: R.Tensor(\n",
    "                    [intermediate_size, hidden_size], \"float16\"\n",
    "                ) = R.permute_dims(down_proj_weights)\n",
    "                output = (gate_up_proj_weights_transpose, down_proj_weights_transpose)\n",
    "                R.output(output)\n",
    "            return output\n",
    "\n",
    "    lifted_mod = relax.transform.LiftTransformParams(shared_transform=True)(exported_mod)\n",
    "    assert_structural_equal(lifted_mod, ExpectedAfterLift)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ee27bd3b",
   "metadata": {},
   "outputs": [],
   "source": [
     "import pytest\n",
     "\n",
     "def test_linear_dynamic_shape():\n",
    "    \"\"\"The weight and bias of nn.Linear have the same out_features\n",
    "\n",
    "    Even if dynamic, the weight/bias must be the same value.\n",
    "    \"\"\"\n",
    "\n",
    "    @R.function\n",
    "    def forward(\n",
    "        x: R.Tensor((1, 4), dtype=\"float32\"),\n",
    "        _io: R.Object,\n",
    "        weight: R.Tensor((\"n\", 4), dtype=\"float32\"),\n",
    "        bias: R.Tensor((\"n\",), dtype=\"float32\"),\n",
    "    ) -> R.Tuple(R.Tensor((1, \"n\"), dtype=\"float32\"), R.Tuple(R.Object)):\n",
    "        n = T.int64()\n",
    "        R.func_attr({\"num_input\": 2})\n",
    "        with R.dataflow():\n",
    "            permute_dims: R.Tensor((4, n), dtype=\"float32\") = R.permute_dims(weight, axes=None)\n",
    "            matmul: R.Tensor((1, n), dtype=\"float32\") = R.matmul(x, permute_dims, out_dtype=\"void\")\n",
    "            add: R.Tensor((1, n), dtype=\"float32\") = R.add(matmul, bias)\n",
    "            gv1: R.Tuple(R.Tensor((1, n), dtype=\"float32\"), R.Tuple(R.Object)) = add, (_io,)\n",
    "            R.output(gv1)\n",
    "        return gv1\n",
    "\n",
    "    mod = nn.modules.Linear(in_features=4, out_features=\"n\", bias=True)\n",
    "    tvm_mod, _ = mod.export_tvm(\n",
    "        spec={\"forward\": {\"x\": nn.spec.Tensor((1, 4), \"float32\")}}, debug=True\n",
    "    )\n",
    "    assert_structural_equal(tvm_mod[\"forward\"], forward, True)\n",
    "\n",
    "\n",
    "@pytest.mark.parametrize(\n",
    "    \"dynamic_type\",\n",
    "    [\n",
    "        \"same_python_string\",\n",
    "        \"different_python_string\",\n",
    "        \"same_tir_var\",\n",
    "        \"distinct_tir_vars_with_distinct_names\",\n",
    "        pytest.param(\n",
    "            \"distinct_tir_vars_with_same_name\",\n",
    "            marks=pytest.mark.xfail(\n",
    "                reason=\"Not yet supported.  See revert https://github.com/apache/tvm/pull/16777\"\n",
    "            ),\n",
    "        ),\n",
    "    ],\n",
    ")\n",
    "def test_duplicate_names(dynamic_type):\n",
    "    class Linear(nn.Module):\n",
    "        def __init__(self, input_size, output_size):\n",
    "            self.weights = nn.Parameter([output_size, input_size], dtype=\"float32\")\n",
    "\n",
    "        def forward(self, state: nn.Tensor):\n",
    "            matmul_weights = nn.op.permute_dims(self.weights)\n",
    "            return nn.op.matmul(state, matmul_weights)\n",
    "\n",
    "    class Model(nn.Module):\n",
    "        def __init__(self, hidden_size, intermediate_size):\n",
    "            self.embedding = Linear(1024, hidden_size)\n",
    "            self.up = Linear(hidden_size, intermediate_size)\n",
    "            self.down = Linear(intermediate_size, hidden_size)\n",
    "\n",
    "        def forward(self, state: nn.Tensor):\n",
    "            state = self.embedding(state)\n",
    "            state = self.up(state)\n",
    "            state = nn.op.silu(state)\n",
    "            assert state.dtype == \"float32\"\n",
    "            state = self.down(state)\n",
    "            return state\n",
    "\n",
    "    if dynamic_type == \"same_python_string\":\n",
    "        # Python strings have value equality.  Providing the same name\n",
    "        # for two different shape parameters results in a single\n",
    "        # symbolic variable.\n",
    "        args = [\"hidden_size\", \"hidden_size\"]\n",
    "        expected_num_symbolic_vars = 1\n",
    "    elif dynamic_type == \"different_python_string\":\n",
    "        # Providing two distinct variable names for the two different\n",
    "        # shape parameters results in two distinct symbolic variables.\n",
    "        args = [\"hidden_size\", \"intermediate_size\"]\n",
    "        expected_num_symbolic_vars = 2\n",
    "    elif dynamic_type == \"same_tir_var\":\n",
    "        # Symbolic variables can be specified as tir.Var instances.\n",
    "        # Providing the same variable for the two different shape\n",
    "        # parameters uses the symbolic variable in both locations.\n",
    "        dim = tir.Var(\"hidden_size\", \"int64\")\n",
    "        args = [dim, dim]\n",
    "        expected_num_symbolic_vars = 1\n",
    "    elif dynamic_type == \"distinct_tir_vars_with_distinct_names\":\n",
    "        # Providing distinct TIR variables for the two different shape\n",
    "        # parameters uses each TIR variable in the specified location.\n",
    "        args = [tir.Var(\"hidden_size\", \"int64\"), tir.Var(\"intermediate_size\", \"int64\")]\n",
    "        expected_num_symbolic_vars = 2\n",
    "    elif dynamic_type == \"distinct_tir_vars_with_same_name\":\n",
     "        # TIR variables have reference equality.  Even if two different\n",
    "        # TIR variables have the same name, providing two distinct TIR\n",
    "        # variables still results in two distinct symbolic variables.\n",
    "        args = [tir.Var(\"hidden_size\", \"int64\"), tir.Var(\"hidden_size\", \"int64\")]\n",
    "        expected_num_symbolic_vars = 2\n",
    "    else:\n",
    "        raise ValueError(f\"Unexpected dynamic_type: {dynamic_type}\")\n",
    "\n",
    "    slm_mod = Model(*args)\n",
    "\n",
    "    exported_mod, _ = slm_mod.export_tvm(\n",
    "        spec={\n",
    "            \"forward\": {\"state\": nn.spec.Tensor([\"batch_size\", 1024], dtype=\"float32\")},\n",
    "        },\n",
    "        debug=False,\n",
    "    )\n",
    "\n",
    "    def get_expected_with_intermediate_size():\n",
    "        @I.ir_module\n",
    "        class Expected:\n",
    "            @R.function\n",
    "            def forward(\n",
    "                state: R.Tensor([\"batch_size\", 1024], \"float32\"),\n",
    "                embedding_weights: R.Tensor([\"hidden_size\", 1024], \"float32\"),\n",
    "                up_weights: R.Tensor([\"intermediate_size\", \"hidden_size\"], \"float32\"),\n",
    "                down_weights: R.Tensor([\"hidden_size\", \"intermediate_size\"], \"float32\"),\n",
    "            ):\n",
    "                R.func_attr({\"num_input\": 1})\n",
    "                batch_size = T.int64()\n",
    "                hidden_size = T.int64()\n",
    "                intermediate_size = T.int64()\n",
    "                with R.dataflow():\n",
    "                    state: R.Tensor([batch_size, hidden_size], \"float32\") = R.matmul(\n",
    "                        state, R.permute_dims(embedding_weights)\n",
    "                    )\n",
    "                    state: R.Tensor([batch_size, intermediate_size], \"float32\") = R.matmul(\n",
    "                        state, R.permute_dims(up_weights)\n",
    "                    )\n",
    "                    state: R.Tensor([batch_size, intermediate_size], \"float32\") = R.nn.silu(state)\n",
    "                    state: R.Tensor([batch_size, hidden_size], \"float32\") = R.matmul(\n",
    "                        state, R.permute_dims(down_weights)\n",
    "                    )\n",
    "                    state = state\n",
    "                    R.output(state)\n",
    "                return state\n",
    "\n",
    "        return Expected\n",
    "\n",
    "    def get_expected_without_intermediate_size():\n",
    "        @I.ir_module\n",
    "        class Expected:\n",
    "            @R.function\n",
    "            def forward(\n",
    "                state: R.Tensor([\"batch_size\", 1024], \"float32\"),\n",
    "                embedding_weights: R.Tensor([\"hidden_size\", 1024], \"float32\"),\n",
    "                up_weights: R.Tensor([\"hidden_size\", \"hidden_size\"], \"float32\"),\n",
    "                down_weights: R.Tensor([\"hidden_size\", \"hidden_size\"], \"float32\"),\n",
    "            ):\n",
    "                R.func_attr({\"num_input\": 1})\n",
    "                batch_size = T.int64()\n",
    "                hidden_size = T.int64()\n",
    "                with R.dataflow():\n",
    "                    state: R.Tensor([batch_size, hidden_size], \"float32\") = R.matmul(\n",
    "                        state, R.permute_dims(embedding_weights)\n",
    "                    )\n",
    "                    state: R.Tensor([batch_size, hidden_size], \"float32\") = R.matmul(\n",
    "                        state, R.permute_dims(up_weights)\n",
    "                    )\n",
    "                    state: R.Tensor([batch_size, hidden_size], \"float32\") = R.nn.silu(state)\n",
    "                    state: R.Tensor([batch_size, hidden_size], \"float32\") = R.matmul(\n",
    "                        state, R.permute_dims(down_weights)\n",
    "                    )\n",
    "                    state = state\n",
    "                    R.output(state)\n",
    "                return state\n",
    "\n",
    "        return Expected\n",
    "\n",
    "    if expected_num_symbolic_vars == 1:\n",
    "        expected = get_expected_without_intermediate_size()\n",
    "    elif expected_num_symbolic_vars == 2:\n",
    "        expected = get_expected_with_intermediate_size()\n",
    "    else:\n",
    "        raise ValueError(f\"Unexpected number of symbolic vars: {expected_num_symbolic_vars}\")\n",
    "\n",
     "    assert_structural_equal(exported_mod[\"forward\"], expected[\"forward\"], True)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ai",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
