{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "26cf1c27",
   "metadata": {},
   "source": [
    "# {mod}`tvm.relax.testing.nn`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3944da2a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import tvm\n",
    "from tvm import relax\n",
    "from tvm.relax.testing import nn\n",
    "from tvm.script import ir as I, relax as R, tir as T"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "da34ebe9",
   "metadata": {},
   "source": [
    "## `emit`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "36d72da9",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ReLU(nn.Module):\n",
    "    \"\"\"Apply ReLU elementwise to the input expression.\"\"\"\n",
    "\n",
    "    def forward(self, x: relax.Expr) -> relax.Var:\n",
    "        # nn.emit binds the relu expression to a fresh Var in the current\n",
    "        # BlockBuilder scope; name_hint asks for the name \"x\" (the printed\n",
    "        # module shows it de-duplicated to x1 since \"x\" is already taken\n",
    "        # by the function parameter).\n",
    "        return nn.emit(R.nn.relu(x), name_hint=\"x\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "071a8723",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build an IRModule whose \"main\" function wraps the ReLU module.\n",
    "bb = relax.BlockBuilder()\n",
    "with bb.function(\"main\"):\n",
    "    model = ReLU()\n",
    "    # Symbolic (32, 32) float32 input placeholder named \"x\".\n",
    "    x = nn.Placeholder((32, 32), dtype=\"float32\", name=\"x\")\n",
    "    output = model(x)\n",
    "    # Function parameters: the input plus any parameters collected from\n",
    "    # the module (ReLU defines none).\n",
    "    params = [x] + model.parameters()\n",
    "    bb.emit_func_output(output, params)\n",
    "mod = bb.get()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "f204d700",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #A2F\">@I</span><span style=\"color: #A2F; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #00F; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">main</span>(x: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        x1: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">32</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>nn<span style=\"color: #A2F; font-weight: bold\">.</span>relu(x)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> x1\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Pretty-print the built module as TVMScript.\n",
    "mod.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d2ea04d2",
   "metadata": {},
   "source": [
    "## 参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "b5321286",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Plus1(nn.Module):\n",
    "    \"\"\"Add the constant 1.0 (float32) to the input tensor.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # A relax constant is baked into the graph; it is not a trainable\n",
    "        # nn.Parameter, so it does not appear in model.parameters().\n",
    "        self.const_1 = relax.const(1, \"float32\")\n",
    "\n",
    "    def forward(self, x: relax.Expr) -> relax.Var:\n",
    "        return nn.emit(R.add(x, self.const_1))\n",
    "\n",
    "model = Plus1()\n",
    "# Constants created with relax.const are not collected as parameters.\n",
    "assert model.parameters() == []"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18dcdcdd",
   "metadata": {},
   "source": [
    "## {data}`tvm.relax.testing.nn.Module.define_subroutine`"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a57bf309",
   "metadata": {},
   "source": [
    "当 `nn.Module` 类的 `define_subroutine` 属性设置为 `True` 时，模块会被正确定义为子例程（`subroutine`），而不是内联到调用者代码中。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "6353da72",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Activation(nn.Module):\n",
    "    \"\"\"ReLU layer, emitted as its own private Relax subroutine.\"\"\"\n",
    "\n",
    "    # Lower this module as a separate function instead of inlining it.\n",
    "    define_subroutine = True\n",
    "\n",
    "    def forward(self, state: relax.Expr) -> relax.Var:\n",
    "        return R.nn.relu(state)\n",
    "\n",
    "class Layer(nn.Module):\n",
    "    \"\"\"Dense layer (matmul followed by activation), also a subroutine.\"\"\"\n",
    "\n",
    "    define_subroutine = True\n",
    "\n",
    "    def __init__(self, in_features, out_features):\n",
    "        # Trainable weight matrix; surfaces through model.parameters().\n",
    "        self.weights = nn.Parameter(\n",
    "            (in_features, out_features), dtype=\"float32\", name=\"weights\"\n",
    "        )\n",
    "        self.activation = Activation()\n",
    "\n",
    "    def forward(self, x: relax.Expr) -> relax.Var:\n",
    "        state = R.matmul(x, self.weights)\n",
    "        # Calling the sub-module produces a call to its subroutine in the\n",
    "        # generated IR (cls.activation in the printed module).\n",
    "        return self.activation(state)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d9591348",
   "metadata": {},
   "source": [
    "- `Activation` 类：简单的 `ReLU` 激活函数层，设置 `define_subroutine = True`\n",
    "- `Layer` 类：包含权重参数和激活函数的神经网络层，同样设置 `define_subroutine = True`"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "26d7f5db",
   "metadata": {},
   "source": [
    "通过 `define_subroutine = True` 设置，确保：\n",
    "\n",
    "1. `Activation` 和 `Layer` 类被编译为独立的子例程函数\n",
    "2. 生成的 IR 模块中包含私有函数 `layer` 和 `activation`\n",
    "3. 主函数 `main` 正确调用这些子例程"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f0f59e9b",
   "metadata": {},
   "source": [
    "该功能允许开发者将复杂的模型分解为可重用的子例程，提高代码的模块化和可维护性，同时有助于 TVM 优化器进行更有效的优化。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "9ebc9bfb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import tir as T</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #A2F\">@I</span><span style=\"color: #A2F; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #00F; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">activation</span>(state: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        batch_size <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>int64()\n",
       "        gv1: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>nn<span style=\"color: #A2F; font-weight: bold\">.</span>relu(state)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv1\n",
       "\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function(private<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">layer</span>(x: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">64</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), weights: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">64</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        batch_size <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>int64()\n",
       "        cls <span style=\"color: #A2F; font-weight: bold\">=</span> Module\n",
       "        gv: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>matmul(x, weights, out_dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;void&quot;</span>)\n",
       "        gv2: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> cls<span style=\"color: #A2F; font-weight: bold\">.</span>activation(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv2\n",
       "\n",
       "    <span style=\"color: #A2F\">@R</span><span style=\"color: #A2F; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #00F\">main</span>(x: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">64</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), weights: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">64</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)) <span style=\"color: #A2F; font-weight: bold\">-&gt;</span> R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((<span style=\"color: #BA2121\">&quot;batch_size&quot;</span>, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>):\n",
       "        batch_size <span style=\"color: #A2F; font-weight: bold\">=</span> T<span style=\"color: #A2F; font-weight: bold\">.</span>int64()\n",
       "        cls <span style=\"color: #A2F; font-weight: bold\">=</span> Module\n",
       "        gv3: R<span style=\"color: #A2F; font-weight: bold\">.</span>Tensor((batch_size, <span style=\"color: #008000\">32</span>), dtype<span style=\"color: #A2F; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #A2F; font-weight: bold\">=</span> cls<span style=\"color: #A2F; font-weight: bold\">.</span>layer(x, weights)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv3\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "model = Layer(64, 32)\n",
    "# Symbolic batch dimension: the generated functions are shape-polymorphic\n",
    "# in batch_size (printed as T.int64() in the TVMScript output).\n",
    "batch_size = tvm.tir.Var(\"batch_size\", \"int64\")\n",
    "x = nn.Placeholder((batch_size, 64), dtype=\"float32\", name=\"x\")\n",
    "\n",
    "bb = relax.BlockBuilder()\n",
    "# Parameters of \"main\": the input plus the Layer's weights.\n",
    "with bb.function(\"main\", params=[x, *model.parameters()]):\n",
    "    output = model(x)\n",
    "    bb.emit_func_output(output)\n",
    "mod = bb.get()\n",
    "mod.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py313",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
