{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "5b8f1dec",
   "metadata": {},
   "source": [
    "# 算子"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "d874924d",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tvm\n",
    "import tvm.testing\n",
    "from tvm import relax, tir\n",
    "from tvm.relax.frontend.nn import Module, Tensor, op, spec\n",
    "from tvm.script import ir as I\n",
    "from tvm.script import relax as R\n",
    "from tvm.script import tir as T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "f3500136",
   "metadata": {
    "tags": [
     "hide-output"
    ]
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #AA22FF\">@I</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #0000FF; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">_initialize_effect</span>() <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object):\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            _io: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>null_value()\n",
       "            lv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object) <span style=\"color: #AA22FF; font-weight: bold\">=</span> (_io,)\n",
       "            gv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object) <span style=\"color: #AA22FF; font-weight: bold\">=</span> lv\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">test</span>(x: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), _io: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object)):\n",
       "        R<span style=\"color: #AA22FF; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;num_input&quot;</span>: <span style=\"color: #008000\">2</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            square: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>square(x)\n",
       "            sqrt: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>sqrt(x)\n",
       "            gv1: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>)), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object)) <span style=\"color: #AA22FF; font-weight: bold\">=</span> (square, sqrt), (_io,)\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv1)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv1\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "class Model(Module):\n",
    "    def test(self, x: Tensor):\n",
    "        z0 = op.square(x)\n",
    "        z1 = op.sqrt(x)\n",
    "        return (z0, z1)\n",
    "\n",
    "m = Model()\n",
    "irmodule, _ = m.export_tvm(\n",
    "    spec={\"test\": {\"x\": spec.Tensor([1, 10], \"float32\")}},\n",
    "    debug=True,\n",
    ")\n",
    "irmodule.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "1843b8af",
   "metadata": {
    "tags": [
     "hide-output"
    ]
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #AA22FF\">@I</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #0000FF; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">_initialize_effect</span>() <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object):\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            _io: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>null_value()\n",
       "            lv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object) <span style=\"color: #AA22FF; font-weight: bold\">=</span> (_io,)\n",
       "            gv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object) <span style=\"color: #AA22FF; font-weight: bold\">=</span> lv\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">test</span>(x: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), y: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">1</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), _io: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: 
#BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>), R<span style=\"color: #AA22FF; font-weight: 
bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>)), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object)):\n",
       "        R<span style=\"color: #AA22FF; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;num_input&quot;</span>: <span style=\"color: #008000\">3</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            add: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>add(x, y)\n",
       "            mul: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>multiply(x, y)\n",
       "            divide: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>divide(x, y)\n",
       "            matmul: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>matmul(x, y, out_dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;void&quot;</span>)\n",
       "            maximum: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>maximum(x, y)\n",
       "            minimum: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>minimum(x, y)\n",
       "            subtract: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>subtract(x, y)\n",
       "            greater: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>greater(x, y)\n",
       "            greater_equal: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>greater_equal(x, y)\n",
       "            less: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>less(x, y)\n",
       "            less_equal: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>less_equal(x, y)\n",
       "            equal: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>equal(x, y)\n",
       "            not_equal: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>not_equal(x, y)\n",
       "            gv1: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; 
font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">10</span>, <span style=\"color: #008000\">10</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;bool&quot;</span>)), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object)) <span style=\"color: #AA22FF; font-weight: bold\">=</span> (add, mul, divide, matmul, maximum, minimum, subtract, greater, greater_equal, less, 
less_equal, equal, not_equal), (_io,)\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv1)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv1\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "class Model(Module):\n",
    "    def test(self, x: Tensor, y: Tensor):\n",
    "        z0 = op.add(x, y)\n",
    "        z1 = op.multiply(x, y)\n",
    "        z2 = op.divide(x, y)\n",
    "        z3 = op.matmul(x, y)\n",
    "        z4 = op.maximum(x, y)\n",
    "        z5 = op.minimum(x, y)\n",
    "        z6 = op.subtract(x, y)\n",
    "        z7 = op.greater(x, y)\n",
    "        z8 = op.greater_equal(x, y)\n",
    "        z9 = op.less(x, y)\n",
    "        z10 = op.less_equal(x, y)\n",
    "        z11 = op.equal(x, y)\n",
    "        z12 = op.not_equal(x, y)\n",
    "\n",
    "        return (z0, z1, z2, z3, z4, z5, z6, z7, z8, z9, z10, z11, z12)\n",
    "\n",
    "m = Model()\n",
    "irmodule, _ = m.export_tvm(\n",
    "    spec={\"test\": {\"x\": spec.Tensor([1, 10], \"float32\"), \"y\": spec.Tensor([10, 1], \"float32\")}},\n",
    "    debug=True,\n",
    ")\n",
    "irmodule.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "b0123a18",
   "metadata": {
    "tags": [
     "hide-output"
    ]
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div class=\"highlight\" style=\"background: \"><pre style=\"line-height: 125%;\"><span></span><span style=\"color: #007979; font-style: italic\"># from tvm.script import ir as I</span>\n",
       "<span style=\"color: #007979; font-style: italic\"># from tvm.script import relax as R</span>\n",
       "\n",
       "<span style=\"color: #AA22FF\">@I</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>ir_module\n",
       "<span style=\"color: #008000; font-weight: bold\">class</span> <span style=\"color: #0000FF; font-weight: bold\">Module</span>:\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">_initialize_effect</span>() <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object):\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            _io: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>null_value()\n",
       "            lv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object) <span style=\"color: #AA22FF; font-weight: bold\">=</span> (_io,)\n",
       "            gv: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object) <span style=\"color: #AA22FF; font-weight: bold\">=</span> lv\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv\n",
       "\n",
       "    <span style=\"color: #AA22FF\">@R</span><span style=\"color: #AA22FF; font-weight: bold\">.</span>function\n",
       "    <span style=\"color: #008000; font-weight: bold\">def</span> <span style=\"color: #0000FF\">test</span>(x: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">5</span>, <span style=\"color: #008000\">2</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), _io: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object) <span style=\"color: #AA22FF; font-weight: bold\">-&gt;</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object)):\n",
       "        R<span style=\"color: #AA22FF; font-weight: bold\">.</span>func_attr({<span style=\"color: #BA2121\">&quot;num_input&quot;</span>: <span style=\"color: #008000\">2</span>})\n",
       "        <span style=\"color: #008000; font-weight: bold\">with</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>dataflow():\n",
       "            sum: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>) <span style=\"color: #AA22FF; font-weight: bold\">=</span> R<span style=\"color: #AA22FF; font-weight: bold\">.</span>sum(x, axis<span style=\"color: #AA22FF; font-weight: bold\">=</span>[<span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">2</span>], keepdims<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #008000; font-weight: bold\">True</span>)\n",
       "            gv1: R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tensor((<span style=\"color: #008000\">3</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">1</span>, <span style=\"color: #008000\">4</span>), dtype<span style=\"color: #AA22FF; font-weight: bold\">=</span><span style=\"color: #BA2121\">&quot;float32&quot;</span>), R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Tuple(R<span style=\"color: #AA22FF; font-weight: bold\">.</span>Object)) <span style=\"color: #AA22FF; font-weight: bold\">=</span> sum, (_io,)\n",
       "            R<span style=\"color: #AA22FF; font-weight: bold\">.</span>output(gv1)\n",
       "        <span style=\"color: #008000; font-weight: bold\">return</span> gv1\n",
       "</pre></div>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "class Model(Module):\n",
    "    def test(self, x: Tensor):\n",
    "        z0 = op.sum(x, axis=[1, 2], keepdims=True)\n",
    "        return z0\n",
    "\n",
    "m = Model()\n",
    "irmodule, _ = m.export_tvm(\n",
    "    spec={\"test\": {\"x\": spec.Tensor([3, 5, 2, 4], \"float32\")}}, debug=True\n",
    ")\n",
    "irmodule.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eea24ee5",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_min():\n",
    "    class Model(Module):\n",
    "        def test(self, x: Tensor):\n",
    "            z0 = op.min(x, axis=[1, 2], keepdims=True)\n",
    "            return z0\n",
    "\n",
    "    # fmt: off\n",
    "    @R.function\n",
    "    def test(x: R.Tensor((3, 5, 2, 4), dtype=\"float32\"), _io: R.Object) -> R.Tuple(R.Tensor((3, 1, 1, 4), dtype=\"float32\"), R.Tuple(R.Object)):\n",
    "        R.func_attr({\"num_input\": 2})\n",
    "        with R.dataflow():\n",
    "            min: R.Tensor((3, 1, 1, 4), dtype=\"float32\") = R.min(x, axis=[1, 2], keepdims=True)\n",
    "            gv1: R.Tuple(R.Tensor((3, 1, 1, 4), dtype=\"float32\"), R.Tuple(R.Object)) = min, (_io,)\n",
    "            R.output(gv1)\n",
    "        return gv1\n",
    "    # fmt: on\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, _ = m.export_tvm(\n",
    "        spec={\"test\": {\"x\": spec.Tensor([3, 5, 2, 4], \"float32\")}}, debug=True\n",
    "    )\n",
    "    tvm.ir.assert_structural_equal(irmodule[\"test\"], test)\n",
    "\n",
    "\n",
    "def test_manipulate():\n",
    "    class Model(Module):\n",
    "        def test(self, x: Tensor):\n",
    "            z0 = op.broadcast_to(x, [2, 5, 2])\n",
    "            z1 = op.permute_dims(x, [2, 1, 0])\n",
    "            z2 = op.reshape(x, [1, 10])\n",
    "            z3 = op.repeat(x, repeats=2, axis=1)\n",
    "            z4 = op.squeeze(x, 0)\n",
    "            z5 = op.unsqueeze(x, 0)\n",
    "            z6 = op.concat([x, x], dim=0)\n",
    "            return (z0, z1, z2, z3, z4, z5, z6)\n",
    "\n",
    "    # fmt: off\n",
    "    @R.function\n",
    "    def test(x: R.Tensor((1, 5, 2), dtype=\"float32\"), _io: R.Object) -> R.Tuple(R.Tuple(R.Tensor((2, 5, 2), dtype=\"float32\"), R.Tensor((2, 5, 1), dtype=\"float32\"), R.Tensor((1, 10), dtype=\"float32\"), R.Tensor((1, 10, 2), dtype=\"float32\"), R.Tensor((5, 2), dtype=\"float32\"), R.Tensor((1, 1, 5, 2), dtype=\"float32\"), R.Tensor((2, 5, 2), dtype=\"float32\")), R.Tuple(R.Object)):\n",
    "        R.func_attr({\"num_input\": 2})\n",
    "        with R.dataflow():\n",
    "            broadcast_to: R.Tensor((2, 5, 2), dtype=\"float32\") = R.broadcast_to(x, R.shape([2, 5, 2]))\n",
    "            permute_dims: R.Tensor((2, 5, 1), dtype=\"float32\") = R.permute_dims(x, axes=[2, 1, 0])\n",
    "            reshape: R.Tensor((1, 10), dtype=\"float32\") = R.reshape(x, R.shape([1, 10]))\n",
    "            repeat: R.Tensor((1, 10, 2), dtype=\"float32\") = R.repeat(x, repeats=2, axis=1)\n",
    "            squeeze: R.Tensor((5, 2), dtype=\"float32\") = R.squeeze(x, axis=[0])\n",
    "            unsqueeze: R.Tensor((1, 1, 5, 2), dtype=\"float32\") = R.expand_dims(x, axis=0)\n",
    "            concat: R.Tensor((2, 5, 2), dtype=\"float32\") = R.concat([x, x], axis=0)\n",
    "            gv1: R.Tuple(R.Tuple(R.Tensor((2, 5, 2), dtype=\"float32\"), R.Tensor((2, 5, 1), dtype=\"float32\"), R.Tensor((1, 10), dtype=\"float32\"), R.Tensor((1, 10, 2), dtype=\"float32\"), R.Tensor((5, 2), dtype=\"float32\"), R.Tensor((1, 1, 5, 2), dtype=\"float32\"), R.Tensor((2, 5, 2), dtype=\"float32\")), R.Tuple(R.Object)) = (broadcast_to, permute_dims, reshape, repeat, squeeze, unsqueeze, concat), (_io,)\n",
    "            R.output(gv1)\n",
    "        return gv1\n",
    "    # fmt: on\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, _ = m.export_tvm(spec={\"test\": {\"x\": spec.Tensor([1, 5, 2], \"float32\")}}, debug=True)\n",
    "\n",
    "    tvm.ir.assert_structural_equal(irmodule[\"test\"], test)\n",
    "\n",
    "\n",
    "def test_index():\n",
    "    class Model(Module):\n",
    "        def test(self, x: Tensor, y: Tensor):\n",
    "            z0 = op.take(x, y, axis=2)\n",
    "            return z0\n",
    "\n",
    "    # fmt: off\n",
    "    @R.function\n",
    "    def test(x: R.Tensor((2, 1, 10), dtype=\"float32\"), y: R.Tensor((5,), dtype=\"int32\"), _io: R.Object) -> R.Tuple(R.Tensor((2, 1, 5), dtype=\"float32\"), R.Tuple(R.Object)):\n",
    "        R.func_attr({\"num_input\": 3})\n",
    "        with R.dataflow():\n",
    "            take: R.Tensor((2, 1, 5), dtype=\"float32\") = R.take(x, y, axis=2)\n",
    "            gv1: R.Tuple(R.Tensor((2, 1, 5), dtype=\"float32\"), R.Tuple(R.Object)) = take, (_io,)\n",
    "            R.output(gv1)\n",
    "        return gv1\n",
    "    # fmt: on\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, params = m.export_tvm(\n",
    "        spec={\"test\": {\"x\": spec.Tensor([2, 1, 10], \"float32\"), \"y\": spec.Tensor([5], \"int32\")}},\n",
    "        debug=True,\n",
    "    )\n",
    "\n",
    "    tvm.ir.assert_structural_equal(irmodule[\"test\"], test)\n",
    "\n",
    "\n",
    "def test_datatype():\n",
    "    class Model(Module):\n",
    "        def test(self, x: Tensor):\n",
    "            z0 = op.astype(x, \"float16\")\n",
    "            return z0\n",
    "\n",
    "    # fmt: off\n",
    "    @R.function\n",
    "    def test(x: R.Tensor((2, 1, 10), dtype=\"float32\"), _io: R.Object) -> R.Tuple(R.Tensor((2, 1, 10), dtype=\"float16\"), R.Tuple(R.Object)):\n",
    "        R.func_attr({\"num_input\": 2})\n",
    "        with R.dataflow():\n",
    "            astype: R.Tensor((2, 1, 10), dtype=\"float16\") = R.astype(x, dtype=\"float16\")\n",
    "            gv1: R.Tuple(R.Tensor((2, 1, 10), dtype=\"float16\"), R.Tuple(R.Object)) = astype, (_io,)\n",
    "            R.output(gv1)\n",
    "        return gv1\n",
    "    # fmt: on\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, _ = m.export_tvm(spec={\"test\": {\"x\": spec.Tensor([2, 1, 10], \"float32\")}}, debug=True)\n",
    "\n",
    "    tvm.ir.assert_structural_equal(irmodule[\"test\"], test)\n",
    "\n",
    "\n",
    "def test_image():\n",
    "    class Model(Module):\n",
    "        def test(self, x: Tensor, weight: Tensor, bias: Tensor):\n",
    "            padded = op.pad(x, [0, 0, 0, 0, 1, 1, 1, 1])\n",
    "            conv2d = op.conv2d(padded, weight, bias)\n",
    "            interpolate = op.interpolate(x, size=[40, 40])  # type: ignore\n",
    "            return (conv2d, interpolate)\n",
    "\n",
    "    @R.function\n",
    "    def test(\n",
    "        x: R.Tensor((1, 3, 32, 32), dtype=\"float32\"),\n",
    "        weight: R.Tensor((32, 3, 3, 3), dtype=\"float32\"),\n",
    "        bias: R.Tensor((32,), dtype=\"float32\"),\n",
    "        _io: R.Object,\n",
    "    ) -> R.Tuple(\n",
    "        R.Tuple(\n",
    "            R.Tensor((1, 32, 32, 32), dtype=\"float32\"), R.Tensor((1, 3, 40, 40), dtype=\"float32\")\n",
    "        ),\n",
    "        R.Tuple(R.Object),\n",
    "    ):\n",
    "        R.func_attr({\"num_input\": 4})\n",
    "        with R.dataflow():\n",
    "            lv0: R.Tensor((1, 3, 34, 34), dtype=\"float32\") = R.nn.pad(x, (0, 0, 0, 0, 1, 1, 1, 1))\n",
    "            lv1: R.Tensor((1, 32, 32, 32), dtype=\"float32\") = R.nn.conv2d(\n",
    "                lv0,\n",
    "                weight,\n",
    "                strides=[1, 1],\n",
    "                padding=[0, 0, 0, 0],\n",
    "                dilation=[1, 1],\n",
    "                groups=1,\n",
    "                data_layout=\"NCHW\",\n",
    "                kernel_layout=\"OIHW\",\n",
    "                out_layout=\"NCHW\",\n",
    "                out_dtype=\"void\",\n",
    "            )\n",
    "            lv2: R.Tensor((1, 32, 1, 1), dtype=\"float32\") = R.reshape(bias, R.shape([1, 32, 1, 1]))\n",
    "            conv2d: R.Tensor((1, 32, 32, 32), dtype=\"float32\") = R.add(lv1, lv2)\n",
    "            interpolate: R.Tensor((1, 3, 40, 40), dtype=\"float32\") = R.image.resize2d(\n",
    "                x,\n",
    "                R.shape([40, 40]),\n",
    "                roi=[T.float32(0), T.float32(0), T.float32(0), T.float32(0)],\n",
    "                layout=\"NCHW\",\n",
    "                method=\"nearest_neighbor\",\n",
    "                coordinate_transformation_mode=\"asymmetric\",\n",
    "                rounding_method=\"round\",\n",
    "                cubic_alpha=-0.5,\n",
    "                cubic_exclude=0,\n",
    "                extrapolation_value=0,\n",
    "                out_dtype=\"void\",\n",
    "            )\n",
    "            gv1: R.Tuple(\n",
    "                R.Tuple(\n",
    "                    R.Tensor((1, 32, 32, 32), dtype=\"float32\"),\n",
    "                    R.Tensor((1, 3, 40, 40), dtype=\"float32\"),\n",
    "                ),\n",
    "                R.Tuple(R.Object),\n",
    "            ) = (conv2d, interpolate), (_io,)\n",
    "            R.output(gv1)\n",
    "        return gv1\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, _ = m.export_tvm(\n",
    "        spec={\n",
    "            \"test\": {\n",
    "                \"x\": spec.Tensor([1, 3, 32, 32], \"float32\"),\n",
    "                \"weight\": spec.Tensor([32, 3, 3, 3], \"float32\"),\n",
    "                \"bias\": spec.Tensor([32], \"float32\"),\n",
    "            }\n",
    "        },\n",
    "        debug=True,\n",
    "    )\n",
    "    tvm.ir.assert_structural_equal(irmodule[\"test\"], test)\n",
    "\n",
    "\n",
    "def test_chunk():\n",
    "    class Model(Module):\n",
    "        def test(self, x: Tensor):\n",
    "            chunk = op.chunk(x, chunks=4)\n",
    "            return chunk\n",
    "\n",
    "    @R.function\n",
    "    def test(\n",
    "        x: R.Tensor((8,), dtype=\"float32\"), _io: R.Object\n",
    "    ) -> R.Tuple(\n",
    "        R.Tuple(\n",
    "            R.Tensor((2,), dtype=\"float32\"),\n",
    "            R.Tensor((2,), dtype=\"float32\"),\n",
    "            R.Tensor((2,), dtype=\"float32\"),\n",
    "            R.Tensor((2,), dtype=\"float32\"),\n",
    "        ),\n",
    "        R.Tuple(R.Object),\n",
    "    ):\n",
    "        R.func_attr({\"num_input\": 2})\n",
    "        with R.dataflow():\n",
    "            chunk: R.Tuple(\n",
    "                R.Tensor((2,), dtype=\"float32\"),\n",
    "                R.Tensor((2,), dtype=\"float32\"),\n",
    "                R.Tensor((2,), dtype=\"float32\"),\n",
    "                R.Tensor((2,), dtype=\"float32\"),\n",
    "            ) = R.split(x, indices_or_sections=4, axis=0)\n",
    "            chunk_0: R.Tensor((2,), dtype=\"float32\") = chunk[0]\n",
    "            chunk_1: R.Tensor((2,), dtype=\"float32\") = chunk[1]\n",
    "            chunk_2: R.Tensor((2,), dtype=\"float32\") = chunk[2]\n",
    "            chunk_3: R.Tensor((2,), dtype=\"float32\") = chunk[3]\n",
    "            gv1: R.Tuple(\n",
    "                R.Tuple(\n",
    "                    R.Tensor((2,), dtype=\"float32\"),\n",
    "                    R.Tensor((2,), dtype=\"float32\"),\n",
    "                    R.Tensor((2,), dtype=\"float32\"),\n",
    "                    R.Tensor((2,), dtype=\"float32\"),\n",
    "                ),\n",
    "                R.Tuple(R.Object),\n",
    "            ) = (chunk_0, chunk_1, chunk_2, chunk_3), (_io,)\n",
    "            R.output(gv1)\n",
    "        return gv1\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, _ = m.export_tvm(spec={\"test\": {\"x\": spec.Tensor([8], \"float32\")}}, debug=True)\n",
    "    tvm.ir.assert_structural_equal(irmodule[\"test\"], test)\n",
    "\n",
    "\n",
    "def test_nn():\n",
    "    class Model(Module):\n",
    "        def test(self, x: Tensor, weight: Tensor, bias: Tensor):\n",
    "            relu_out = op.relu(x)\n",
    "            silu_out = op.silu(x)\n",
    "            gelu_out = op.gelu(x)\n",
    "            sigmoid_out = op.sigmoid(x)\n",
    "            tanh_out = op.tanh(x)\n",
    "            exp_out = op.exp(x)\n",
    "            negative_out = op.negative(x)\n",
    "            softplus_out = op.softplus(x, beta=1.0, threshold=20.0)\n",
    "            softmax_out = op.softmax(x, axis=2)\n",
    "            prelu_out = op.prelu(x, alpha=bias)\n",
    "            rms_norm_out = op.rms_norm(x, weight, axes=[-2, -1])\n",
    "            rms_norm_with_bias_out = op.rms_norm(x, weight, axes=[-2, -1])\n",
    "            group_norm_out = op.group_norm(x, num_groups=1, weight=bias, bias=bias)\n",
    "            return x\n",
    "\n",
    "    @R.function\n",
    "    def test(\n",
    "        x: R.Tensor((2, 3, 4, 5), dtype=\"float32\"),\n",
    "        weight: R.Tensor((4, 5), dtype=\"float32\"),\n",
    "        bias: R.Tensor((3,), dtype=\"float32\"),\n",
    "        _io: R.Object,\n",
    "    ) -> R.Tuple(R.Tensor((2, 3, 4, 5), dtype=\"float32\"), R.Tuple(R.Object)):\n",
    "        R.func_attr({\"num_input\": 4})\n",
    "        with R.dataflow():\n",
    "            relu: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.nn.relu(x)\n",
    "            silu: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.nn.silu(x)\n",
    "            gelu: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.nn.gelu(x)\n",
    "            sigmoid: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.sigmoid(x)\n",
    "            tanh: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.tanh(x)\n",
    "            exp: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.exp(x)\n",
    "            negative: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.negative(x)\n",
    "            softplus: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.nn.softplus(\n",
    "                x, beta=1.0, threshold=20.0\n",
    "            )\n",
    "            softmax: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.nn.softmax(x, axis=2)\n",
    "            prelu: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.nn.prelu(x, bias)\n",
    "            rms_norm: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.nn.rms_norm(\n",
    "                x, weight, axes=[-2, -1], epsilon=1.0000000000000001e-05\n",
    "            )\n",
    "            rms_norm1: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.nn.rms_norm(\n",
    "                x, weight, axes=[-2, -1], epsilon=1.0000000000000001e-05\n",
    "            )\n",
    "            group_norm: R.Tensor((2, 3, 4, 5), dtype=\"float32\") = R.nn.group_norm(\n",
    "                x, bias, bias, num_groups=1, channel_axis=1, axes=[2, 3]\n",
    "            )\n",
    "            gv1: R.Tuple(R.Tensor((2, 3, 4, 5), dtype=\"float32\"), R.Tuple(R.Object)) = x, (_io,)\n",
    "            R.output(gv1)\n",
    "        return gv1\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, params = m.export_tvm(\n",
    "        spec={\n",
    "            \"test\": {\n",
    "                \"x\": spec.Tensor([2, 3, 4, 5], \"float32\"),\n",
    "                \"weight\": spec.Tensor([4, 5], \"float32\"),\n",
    "                \"bias\": spec.Tensor([3], \"float32\"),\n",
    "            }\n",
    "        },\n",
    "        debug=True,\n",
    "    )\n",
    "\n",
    "    tvm.ir.assert_structural_equal(irmodule[\"test\"], test)\n",
    "\n",
    "\n",
    "def test_create():\n",
    "    class Model(Module):\n",
    "        def test(self, x: Tensor):\n",
    "            triu_out = op.triu(x)\n",
    "            full_with_scalar_out = op.full([10, 10], fill_value=10)  # type: ignore\n",
    "            full_with_FloatImm_out = op.full(\n",
    "                [10, 10], fill_value=tir.FloatImm(dtype=\"float32\", value=10)\n",
    "            )\n",
    "            full_with_Tensor_out = op.full(\n",
    "                [10, 10], fill_value=Tensor.from_scalar(10, dtype=\"float32\")\n",
    "            )\n",
    "            zeros_out = op.zeros([10, 10])\n",
    "            zeros_fp16_out = op.zeros([10, 10], dtype=\"float16\")\n",
    "            return x\n",
    "\n",
    "    # fmt: off\n",
    "    @R.function\n",
    "    def test(x: R.Tensor((10, 10), dtype=\"float32\"), _io: R.Object) -> R.Tuple(R.Tensor((10, 10), dtype=\"float32\"), R.Tuple(R.Object)):\n",
    "        R.func_attr({\"num_input\": 2})\n",
    "        with R.dataflow():\n",
    "            triu: R.Tensor((10, 10), dtype=\"float32\") = R.triu(x, k=0)\n",
    "            full: R.Tensor((10, 10), dtype=\"float32\") = R.full(R.shape([10, 10]), R.const(10, \"float32\"), dtype=\"float32\")\n",
    "            full1: R.Tensor((10, 10), dtype=\"float32\") = R.full(R.shape([10, 10]), R.const(10, \"float32\"), dtype=\"float32\")\n",
    "            full2: R.Tensor((10, 10), dtype=\"float32\") = R.full(R.shape([10, 10]), R.const(10, \"float32\"), dtype=\"float32\")\n",
    "            zeros: R.Tensor((10, 10), dtype=\"float32\") = R.zeros(R.shape([10, 10]), dtype=\"float32\")\n",
    "            zeros1: R.Tensor((10, 10), dtype=\"float16\") = R.zeros(R.shape([10, 10]), dtype=\"float16\")\n",
    "            gv1: R.Tuple(R.Tensor((10, 10), dtype=\"float32\"), R.Tuple(R.Object)) = x, (_io,)\n",
    "            R.output(gv1)\n",
    "        return gv1\n",
    "    # fmt: on\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, params = m.export_tvm(\n",
    "        spec={\"test\": {\"x\": spec.Tensor([10, 10], \"float32\")}}, debug=True\n",
    "    )\n",
    "\n",
    "    tvm.ir.assert_structural_equal(irmodule[\"test\"], test)\n",
    "\n",
    "\n",
    "def test_timestep_embedding():\n",
    "    class Model(Module):\n",
    "        def test(self, x: Tensor):\n",
    "            get_timestep_out = op.get_timestep_embedding(x, 10)\n",
    "            return get_timestep_out\n",
    "\n",
    "    @R.function\n",
    "    def test(\n",
    "        x: R.Tensor((3,), dtype=\"float32\"), _io: R.Object\n",
    "    ) -> R.Tuple(R.Tensor((3, 10), dtype=\"float32\"), R.Tuple(R.Object)):\n",
    "        R.func_attr({\"num_input\": 2})\n",
    "        with R.dataflow():\n",
    "            lv1: R.Tensor((3,), dtype=\"float32\") = R.astype(x, dtype=\"float32\")\n",
    "            lv2: R.Tensor((3, 1), dtype=\"float32\") = R.expand_dims(lv1, axis=[1])\n",
    "            lv3: R.Tensor((5,), dtype=\"float32\") = R.arange(\n",
    "                R.prim_value(0), R.prim_value(5), R.prim_value(1), dtype=\"float32\"\n",
    "            )\n",
    "            lv4: R.Tensor((5,), dtype=\"float32\") = R.multiply(\n",
    "                R.const(-9.2103404998779297, \"float32\"), lv3\n",
    "            )\n",
    "            lv5: R.Tensor((5,), dtype=\"float32\") = R.divide(lv4, R.const(4, \"float32\"))\n",
    "            lv6: R.Tensor((5,), dtype=\"float32\") = R.exp(lv5)\n",
    "            lv7: R.Tensor((1, 5), dtype=\"float32\") = R.expand_dims(lv6, axis=[0])\n",
    "            lv8: R.Tensor((3, 5), dtype=\"float32\") = R.multiply(lv2, lv7)\n",
    "            lv9: R.Tensor((3, 5), dtype=\"float32\") = R.sin(lv8)\n",
    "            lv10: R.Tensor((3, 5), dtype=\"float32\") = R.cos(lv8)\n",
    "            lv11: R.Tensor((3, 10), dtype=\"float32\") = R.concat((lv9, lv10), axis=-1)\n",
    "            get_timestep_embedding: R.Tensor((3, 10), dtype=\"float32\") = R.astype(\n",
    "                lv11, dtype=\"float32\"\n",
    "            )\n",
    "            gv1: R.Tuple(R.Tensor((3, 10), dtype=\"float32\"), R.Tuple(R.Object)) = (\n",
    "                get_timestep_embedding,\n",
    "                (_io,),\n",
    "            )\n",
    "            R.output(gv1)\n",
    "        return gv1\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, _ = m.export_tvm(spec={\"test\": {\"x\": spec.Tensor([3], \"float32\")}}, debug=True)\n",
    "    tvm.ir.assert_structural_equal(irmodule[\"test\"], test)\n",
    "\n",
    "\n",
    "def test_scaled_dot_product_attention():\n",
    "    class Model(Module):\n",
    "        def test(self, query: Tensor, key: Tensor, value: Tensor):\n",
    "            scaled_dot_product_attention = op.scaled_dot_product_attention(query, key, value)\n",
    "            return scaled_dot_product_attention\n",
    "\n",
    "    @R.function\n",
    "    def test(\n",
    "        query: R.Tensor((1, 32, 32, 32), dtype=\"float32\"),\n",
    "        key: R.Tensor((1, 32, 32, 32), dtype=\"float32\"),\n",
    "        value: R.Tensor((1, 32, 32, 32), dtype=\"float32\"),\n",
    "        _io: R.Object,\n",
    "    ) -> R.Tuple(R.Tensor((1, 32, 32, 32), dtype=\"float32\"), R.Tuple(R.Object)):\n",
    "        R.func_attr({\"num_input\": 4})\n",
    "        with R.dataflow():\n",
    "            scaled_dot_product_attention: R.Tensor(\n",
    "                (1, 32, 32, 32), dtype=\"float32\"\n",
    "            ) = R.nn.attention(query, key, value, scale=None, causal_mask=None)\n",
    "            gv1: R.Tuple(R.Tensor((1, 32, 32, 32), dtype=\"float32\"), R.Tuple(R.Object)) = (\n",
    "                scaled_dot_product_attention,\n",
    "                (_io,),\n",
    "            )\n",
    "            R.output(gv1)\n",
    "        return gv1\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, _ = m.export_tvm(\n",
    "        spec={\n",
    "            \"test\": {\n",
    "                \"query\": spec.Tensor([1, 32, 32, 32], \"float32\"),\n",
    "                \"key\": spec.Tensor([1, 32, 32, 32], \"float32\"),\n",
    "                \"value\": spec.Tensor([1, 32, 32, 32], \"float32\"),\n",
    "            }\n",
    "        },\n",
    "        debug=True,\n",
    "    )\n",
    "    tvm.ir.assert_structural_equal(irmodule[\"test\"], test)\n",
    "\n",
    "\n",
    "def test_tensor_expr_op():\n",
    "    class Model(Module):\n",
    "        def test(self, x: Tensor):\n",
    "            tensor_expr_op_out = op.tensor_expr_op(\n",
    "                tensor_expr_func=lambda x: x + 1, name_hint=\"add_one\", args=[x]\n",
    "            )\n",
    "            return tensor_expr_op_out\n",
    "\n",
    "    # fmt: off\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @T.prim_func(private=True)\n",
    "        def add_one(A: T.Buffer((T.int64(10), T.int64(10)), \"float32\"), T_add: T.Buffer((T.int64(10), T.int64(10)), \"float32\")):\n",
    "            T.func_attr({\"tir.noalias\": T.bool(True)})\n",
    "            # with T.block(\"root\"):\n",
    "            for ax0, ax1 in T.grid(T.int64(10), T.int64(10)):\n",
    "                with T.block(\"T_add\"):\n",
    "                    v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                    T.reads(A[v_ax0, v_ax1])\n",
    "                    T.writes(T_add[v_ax0, v_ax1])\n",
    "                    T_add[v_ax0, v_ax1] = A[v_ax0, v_ax1] + T.float32(1)\n",
    "\n",
    "        @R.function\n",
    "        def _initialize_effect() -> R.Tuple(R.Object):\n",
    "            with R.dataflow():\n",
    "                _io: R.Object = R.null_value()\n",
    "                lv: R.Tuple(R.Object) = (_io,)\n",
    "                gv: R.Tuple(R.Object) = lv\n",
    "                R.output(gv)\n",
    "            return gv\n",
    "\n",
    "        @R.function\n",
    "        def test(x: R.Tensor((10, 10), dtype=\"float32\"), _io: R.Object) -> R.Tuple(R.Tensor((10, 10), dtype=\"float32\"), R.Tuple(R.Object)):\n",
    "            cls = Expected\n",
    "            R.func_attr({\"num_input\": 2})\n",
    "            with R.dataflow():\n",
    "                lv1 = R.call_tir(cls.add_one, (x,), out_sinfo=R.Tensor((10, 10), dtype=\"float32\"))\n",
    "                gv1: R.Tuple(R.Tensor((10, 10), dtype=\"float32\"), R.Tuple(R.Object)) = lv1, (_io,)\n",
    "                R.output(gv1)\n",
    "            return gv1\n",
    "    # fmt: on\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, _ = m.export_tvm(spec={\"test\": {\"x\": spec.Tensor([10, 10], \"float32\")}}, debug=True)\n",
    "\n",
    "    tvm.ir.assert_structural_equal(irmodule, Expected)\n",
    "\n",
    "\n",
    "def test_tensor_ir_op():\n",
    "    num_q_heads, num_kv_heads, head_dim = 8, 8, 16\n",
    "    fused_heads = num_q_heads + num_kv_heads * 2\n",
    "    dtype = \"float16\"\n",
    "\n",
    "    @T.prim_func(private=True)\n",
    "    def fused_rope(  # pylint: disable=too-many-locals\n",
    "        var_qkv: T.handle,\n",
    "        var_q: T.handle,\n",
    "        var_k: T.handle,\n",
    "        var_v: T.handle,\n",
    "        # Scalar arguments must be declared after all tensor arguments,\n",
    "        # including the output tensor arguments.\n",
    "        #\n",
    "        # TODO(Lunderberg): Update\n",
    "        # `tvm.relax.frontend.nn.op.tensor_ir_op` to accept `PrimValue`\n",
    "        # instead of `tir_vars`, so that the argument order can stay\n",
    "        # consistent between this function definition and the arguments\n",
    "        # passed to `op.tensor_ir_op`.\n",
    "        offset: T.int64,\n",
    "    ):\n",
    "        batch_size = T.int64()\n",
    "        seq_len = T.int64()\n",
    "        qkv = T.match_buffer(var_qkv, (batch_size, seq_len, fused_heads, head_dim), dtype)\n",
    "        q = T.match_buffer(var_q, (batch_size, seq_len, num_q_heads, head_dim), dtype)\n",
    "        k = T.match_buffer(var_k, (batch_size, seq_len, num_kv_heads, head_dim), dtype)\n",
    "        v = T.match_buffer(var_v, (batch_size, seq_len, num_kv_heads, head_dim), dtype)\n",
    "        T.evaluate(offset)\n",
    "\n",
    "    class Model(Module):\n",
    "        def test(self, qkv: Tensor, offset: tir.Var):\n",
    "            tensor_expr_op_out = op.tensor_ir_op(\n",
    "                fused_rope,\n",
    "                \"llama_fused_rope\",\n",
    "                args=[qkv, offset],\n",
    "                out=[\n",
    "                    Tensor.placeholder((1, 1, num_q_heads, head_dim), dtype),\n",
    "                    Tensor.placeholder((1, 1, num_kv_heads, head_dim), dtype),\n",
    "                    Tensor.placeholder((1, 1, num_kv_heads, head_dim), dtype),\n",
    "                ],\n",
    "            )\n",
    "            return tensor_expr_op_out\n",
    "\n",
    "    # fmt: off\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @T.prim_func(private=True)\n",
    "        def llama_fused_rope(var_qkv: T.handle, var_q: T.handle, var_k: T.handle, var_v: T.handle, offset: T.int64):\n",
    "            batch_size, seq_len = T.int64(), T.int64()\n",
    "            qkv = T.match_buffer(var_qkv, (batch_size, seq_len, 24, 16), \"float16\")\n",
    "            q = T.match_buffer(var_q, (batch_size, seq_len, 8, 16), \"float16\")\n",
    "            k = T.match_buffer(var_k, (batch_size, seq_len, 8, 16), \"float16\")\n",
    "            v = T.match_buffer(var_v, (batch_size, seq_len, 8, 16), \"float16\")\n",
    "            T.evaluate(offset)\n",
    "\n",
    "        @R.function\n",
    "        def _initialize_effect() -> R.Tuple(R.Object):\n",
    "            with R.dataflow():\n",
    "                _io: R.Object = R.null_value()\n",
    "                lv: R.Tuple(R.Object) = (_io,)\n",
    "                gv: R.Tuple(R.Object) = lv\n",
    "                R.output(gv)\n",
    "            return gv\n",
    "\n",
    "        @R.function\n",
    "        def test(qkv: R.Tensor((1, 1, 24, 16), dtype=\"float16\"), offset: R.Shape([\"offset_1\"]), _io: R.Object) -> R.Tuple(R.Tuple(R.Tensor((1, 1, 8, 16), dtype=\"float16\"), R.Tensor((1, 1, 8, 16), dtype=\"float16\"), R.Tensor((1, 1, 8, 16), dtype=\"float16\")), R.Tuple(R.Object)):\n",
    "            offset_1 = T.int64()\n",
    "            R.func_attr({\"num_input\": 3})\n",
    "            cls = Expected\n",
    "            with R.dataflow():\n",
    "                lv1 = R.call_tir(cls.llama_fused_rope, (qkv,), out_sinfo=[R.Tensor((1, 1, 8, 16), dtype=\"float16\"), R.Tensor((1, 1, 8, 16), dtype=\"float16\"), R.Tensor((1, 1, 8, 16), dtype=\"float16\")], tir_vars=R.shape([offset_1]))\n",
    "                llama_fused_rope_0: R.Tensor((1, 1, 8, 16), dtype=\"float16\") = lv1[0]\n",
    "                llama_fused_rope_1: R.Tensor((1, 1, 8, 16), dtype=\"float16\") = lv1[1]\n",
    "                llama_fused_rope_2: R.Tensor((1, 1, 8, 16), dtype=\"float16\") = lv1[2]\n",
    "                gv1: R.Tuple(R.Tuple(R.Tensor((1, 1, 8, 16), dtype=\"float16\"), R.Tensor((1, 1, 8, 16), dtype=\"float16\"), R.Tensor((1, 1, 8, 16), dtype=\"float16\")), R.Tuple(R.Object)) = (llama_fused_rope_0, llama_fused_rope_1, llama_fused_rope_2), (_io,)\n",
    "                R.output(gv1)\n",
    "            return gv1\n",
    "    # fmt: on\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, _ = m.export_tvm(\n",
    "        spec={\n",
    "            \"test\": {\"qkv\": spec.Tensor([1, 1, fused_heads, head_dim], \"float16\"), \"offset\": int}\n",
    "        },\n",
    "        debug=True,\n",
    "    )\n",
    "    tvm.ir.assert_structural_equal(irmodule, Expected)\n",
    "\n",
    "\n",
    "def test_tensor_ir_inplace_op():\n",
    "    hidden_size = 4096\n",
    "    dtype = \"float16\"\n",
    "\n",
    "    @T.prim_func\n",
    "    def inplace_take(\n",
    "        var_weight: T.handle, var_pos: T.handle, var_embeddings: T.handle, offset: T.int64\n",
    "    ):\n",
    "        T.func_attr({\"tir.noalias\": T.bool(True)})\n",
    "        vocab_size = T.int64()\n",
    "        weight = T.match_buffer(var_weight, (vocab_size, hidden_size), dtype)\n",
    "        seq_len = T.int64()\n",
    "        total_seq_len = T.int64()\n",
    "        pos = T.match_buffer(var_pos, (seq_len,), \"int32\")\n",
    "        embeddings = T.match_buffer(var_embeddings, (total_seq_len, hidden_size), dtype)\n",
    "        for ax0, ax1 in T.grid(seq_len, hidden_size):\n",
    "            with T.block(\"T_take\"):\n",
    "                v0, v1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                T.reads(weight[pos[v0], v1], pos[v0])\n",
    "                T.writes(embeddings[v0, v1])\n",
    "                embeddings[v0 + offset, v1] = weight[pos[v0], v1]\n",
    "\n",
    "    class Model(Module):\n",
    "        def test(\n",
    "            self, embedding_table: Tensor, input_ids: Tensor, embedding_dst: Tensor, offset: int\n",
    "        ):\n",
    "            tensor_expr_op_out = op.tensor_ir_inplace_op(\n",
    "                inplace_take,\n",
    "                \"inplace_take\",\n",
    "                args=[embedding_table, input_ids, embedding_dst, offset],\n",
    "                inplace_indices=[2],\n",
    "                out=Tensor.placeholder(embedding_dst.shape, embedding_dst.dtype),\n",
    "            )\n",
    "            return tensor_expr_op_out\n",
    "\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @T.prim_func\n",
    "        def inplace_take(\n",
    "            var_weight: T.handle, var_pos: T.handle, var_embeddings: T.handle, offset: T.int64\n",
    "        ):\n",
    "            T.func_attr({\"tir.noalias\": T.bool(True)})\n",
    "            vocab_size = T.int64()\n",
    "            weight = T.match_buffer(var_weight, (vocab_size, hidden_size), dtype)\n",
    "            seq_len = T.int64()\n",
    "            total_seq_len = T.int64()\n",
    "            pos = T.match_buffer(var_pos, (seq_len,), \"int32\")\n",
    "            embeddings = T.match_buffer(var_embeddings, (total_seq_len, hidden_size), dtype)\n",
    "            for ax0, ax1 in T.grid(seq_len, hidden_size):\n",
    "                with T.block(\"T_take\"):\n",
    "                    v0, v1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                    T.reads(weight[pos[v0], v1], pos[v0])\n",
    "                    T.writes(embeddings[v0, v1])\n",
    "                    embeddings[v0 + offset, v1] = weight[pos[v0], v1]\n",
    "\n",
    "        @R.function\n",
    "        def _initialize_effect() -> R.Tuple(R.Object):\n",
    "            with R.dataflow():\n",
    "                _io: R.Object = R.null_value()\n",
    "                lv: R.Tuple(R.Object) = (_io,)\n",
    "                gv: R.Tuple(R.Object) = lv\n",
    "                R.output(gv)\n",
    "            return gv\n",
    "\n",
    "        @R.function\n",
    "        def test(\n",
    "            embedding_table: R.Tensor((\"vocab_size\", hidden_size), dtype),\n",
    "            input_ids: R.Tensor((\"seq_len\",), \"int32\"),\n",
    "            embedding_dst: R.Tensor((\"total_seq_len\", hidden_size), dtype),\n",
    "            offset: R.Shape([\"offset_1\"]),\n",
    "            packed_params: R.Tuple,\n",
    "        ) -> R.Tensor((\"total_seq_len\", hidden_size), dtype):\n",
    "            total_seq_len = T.int64()\n",
    "            offset_1 = T.int64()\n",
    "            R.func_attr({\"num_input\": 4})\n",
    "            cls = Expected\n",
    "            with R.dataflow():\n",
    "                lv1 = R.call_tir_inplace(\n",
    "                    cls.inplace_take,\n",
    "                    (embedding_table, input_ids, embedding_dst),\n",
    "                    out_sinfo=R.Tensor((total_seq_len, hidden_size), dtype),\n",
    "                    inplace_indices=[2],\n",
    "                    tir_vars=R.shape([offset_1]),\n",
    "                )\n",
    "                gv1: R.Tensor((total_seq_len, hidden_size), dtype) = lv1\n",
    "                R.output(gv1)\n",
    "            return gv1\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, _ = m.export_tvm(\n",
    "        spec={\n",
    "            \"test\": {\n",
    "                \"embedding_table\": spec.Tensor([\"vocab_size\", hidden_size], dtype),\n",
    "                \"input_ids\": spec.Tensor([\"seq_len\"], \"int32\"),\n",
    "                \"embedding_dst\": spec.Tensor([\"total_seq_len\", hidden_size], dtype),\n",
    "                \"offset\": int,\n",
    "                \"$\": {\n",
    "                    \"param_mode\": \"packed\",\n",
    "                    \"effect_mode\": \"none\",\n",
    "                },\n",
    "            },\n",
    "        },\n",
    "        debug=True,\n",
    "    )\n",
    "    tvm.ir.assert_structural_equal(irmodule, Expected)\n",
    "\n",
    "\n",
    "def test_tensor_ir_op_no_tir_var():\n",
    "    \"\"\"op.tensor_ir_op with no symbolic shape variables exports as a plain R.call_tir.\n",
    "\n",
    "    Unlike the inplace/offset case above, the resulting call carries no\n",
    "    ``tir_vars`` argument, and the private PrimFunc is copied into the\n",
    "    exported module verbatim.\n",
    "    \"\"\"\n",
    "    @T.prim_func(private=True)\n",
    "    def tir_func(A: T.Buffer((16, 16), \"float32\"), B: T.Buffer((16, 16), \"float32\")):\n",
    "        T.evaluate(0)\n",
    "\n",
    "    class Model(Module):\n",
    "        def test(self, A: Tensor):\n",
    "            # The placeholder fixes the output's static shape and dtype.\n",
    "            tensor_expr_op_out = op.tensor_ir_op(\n",
    "                tir_func,\n",
    "                \"tir_func\",\n",
    "                args=[A],\n",
    "                out=[Tensor.placeholder((16, 16), \"float32\")],\n",
    "            )\n",
    "            return tensor_expr_op_out\n",
    "\n",
    "    # Exact structural-equality target for the export below.\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @T.prim_func(private=True)\n",
    "        def tir_func(A: T.Buffer((16, 16), \"float32\"), B: T.Buffer((16, 16), \"float32\")):\n",
    "            T.evaluate(0)\n",
    "\n",
    "        @R.function\n",
    "        def test(A: R.Tensor((16, 16), dtype=\"float32\")) -> R.Tensor((16, 16), dtype=\"float32\"):\n",
    "            R.func_attr({\"num_input\": 1})\n",
    "            cls = Expected\n",
    "            with R.dataflow():\n",
    "                lv = R.call_tir(cls.tir_func, (A,), out_sinfo=R.Tensor((16, 16), dtype=\"float32\"))\n",
    "                gv: R.Tensor((16, 16), dtype=\"float32\") = lv\n",
    "                R.output(gv)\n",
    "            return gv\n",
    "\n",
    "    m = Model()\n",
    "    irmodule, _ = m.export_tvm(spec={\"test\": {\"A\": spec.Tensor([16, 16], \"float32\")}})\n",
    "    tvm.ir.assert_structural_equal(irmodule, Expected)\n",
    "\n",
    "\n",
    "def test_extern():\n",
    "    \"\"\"op.extern exports as R.call_dps_packed on the named external function.\n",
    "\n",
    "    Scalar arguments (the ints and floats in ``args``) are wrapped as\n",
    "    R.prim_value operands in the exported call.\n",
    "    \"\"\"\n",
    "    class Model(Module):\n",
    "        def test(self, q: Tensor, k: Tensor, v: Tensor):\n",
    "            b, s, h_q, d = q.shape\n",
    "            tensor_expr_op_out = op.extern(\n",
    "                name=\"flashinfer.single_decode\",\n",
    "                args=[q, k, v, 0, 0, 1.0, 10000.0],\n",
    "                out=Tensor.placeholder((b, s, h_q * d), dtype=\"float16\"),\n",
    "            )\n",
    "            return tensor_expr_op_out\n",
    "\n",
    "    # Exact structural-equality target for the export below.\n",
    "    # fmt: off\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @R.function\n",
    "        def _initialize_effect() -> R.Tuple(R.Object):\n",
    "            with R.dataflow():\n",
    "                _io: R.Object = R.null_value()\n",
    "                lv: R.Tuple(R.Object) = (_io,)\n",
    "                gv: R.Tuple(R.Object) = lv\n",
    "                R.output(gv)\n",
    "            return gv\n",
    "\n",
    "        @R.function\n",
    "        def test(q: R.Tensor((1, 1, 16, 8), dtype=\"float32\"), k: R.Tensor((64, 16, 8), dtype=\"float32\"), v: R.Tensor((64, 16, 8), dtype=\"float32\"), _io: R.Object) -> R.Tuple(R.Tensor((1, 1, 128), dtype=\"float16\"), R.Tuple(R.Object)):\n",
    "            R.func_attr({\"num_input\": 4})\n",
    "            with R.dataflow():\n",
    "                flashinfer_single_decode = R.call_dps_packed(\"flashinfer.single_decode\", (q, k, v, R.prim_value(0), R.prim_value(0), R.prim_value(T.float64(1)), R.prim_value(T.float64(10000))), out_sinfo=R.Tensor((1, 1, 128), dtype=\"float16\"))\n",
    "                gv1: R.Tuple(R.Tensor((1, 1, 128), dtype=\"float16\"), R.Tuple(R.Object)) = flashinfer_single_decode, (_io,)\n",
    "                R.output(gv1)\n",
    "            return gv1\n",
    "    # fmt: on\n",
    "\n",
    "    batch, seq, t, d, h_q, h_kv = 1, 1, 64, 8, 16, 16\n",
    "    m = Model()\n",
    "    irmodule, _ = m.export_tvm(\n",
    "        spec={\n",
    "            \"test\": {\n",
    "                \"q\": spec.Tensor([batch, seq, h_q, d], \"float32\"),\n",
    "                \"k\": spec.Tensor([t, h_kv, d], \"float32\"),\n",
    "                \"v\": spec.Tensor([t, h_kv, d], \"float32\"),\n",
    "            }\n",
    "        },\n",
    "        debug=True,\n",
    "    )\n",
    "    tvm.ir.assert_structural_equal(irmodule, Expected)\n",
    "\n",
    "\n",
    "def test_empty():\n",
    "    \"\"\"op.empty allocates a tensor with the requested shape/dtype.\n",
    "\n",
    "    The shape/dtype check runs inside the VM via op.debug_func, which invokes\n",
    "    the registered packed function with an extra leading argument before the\n",
    "    tensor (unused here).\n",
    "    \"\"\"\n",
    "    @tvm.register_func(\"test_empty_assert\", override=True)\n",
    "    def test_empty_assert(_lineno, x):\n",
    "        # Fixed typo: parameter was previously spelled \"_lineo\".\n",
    "        assert x.shape == (10, 10)\n",
    "        assert x.dtype == \"float32\"\n",
    "\n",
    "    class Model(Module):\n",
    "        def test(self):\n",
    "            result = op.empty([10, 10], dtype=\"float32\")\n",
    "            op.debug_func(\"test_empty_assert\", result)\n",
    "            return result\n",
    "\n",
    "    irmodule, _ = Model().export_tvm(spec={\"test\": {}}, debug=True)\n",
    "    ex = tvm.compile(irmodule, \"llvm\")\n",
    "    vm = relax.VirtualMachine(ex, tvm.cpu())\n",
    "    effects = vm[\"_initialize_effect\"]()\n",
    "    vm[\"test\"](*effects)\n",
    "\n",
    "\n",
    "@tvm.testing.requires_cuda\n",
    "def test_multinomial_from_uniform():\n",
    "    \"\"\"op.multinomial_from_uniform exports to R.multinomial_from_uniform and\n",
    "    yields the expected deterministic samples when compiled and run on CUDA.\"\"\"\n",
    "\n",
    "    prob_shape = (3, 5)\n",
    "    sample_shape = (6, 1)\n",
    "\n",
    "    class Model(Module):\n",
    "        def foo(self, prob: Tensor, uniform_sample: Tensor, sample_indices: Tensor):\n",
    "            z0 = op.multinomial_from_uniform(prob, uniform_sample, sample_indices)\n",
    "            return z0\n",
    "\n",
    "    # Exact structural-equality target for the export below.\n",
    "    # fmt: off\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @R.function\n",
    "        def _initialize_effect() -> R.Tuple(R.Object):\n",
    "            with R.dataflow():\n",
    "                _io: R.Object = R.null_value()\n",
    "                lv: R.Tuple(R.Object) = (_io,)\n",
    "                gv: R.Tuple(R.Object) = lv\n",
    "                R.output(gv)\n",
    "            return gv\n",
    "\n",
    "        @R.function\n",
    "        def foo(prob: R.Tensor((3, 5), dtype=\"float32\"), uniform_sample: R.Tensor((6, 1), dtype=\"float32\"), sample_indices: R.Tensor((6, 1), dtype=\"int64\"), _io: R.Object) -> R.Tuple(R.Tensor((6, 1), dtype=\"int64\"), R.Tuple(R.Object)):\n",
    "            R.func_attr({\"num_input\": 4})\n",
    "            with R.dataflow():\n",
    "                multinomial_from_uniform: R.Tensor((6, 1), dtype=\"int64\") = R.multinomial_from_uniform(prob, uniform_sample, sample_indices, dtype=\"int64\")\n",
    "                gv1: R.Tuple(R.Tensor((6, 1), dtype=\"int64\"), R.Tuple(R.Object)) = multinomial_from_uniform, (_io,)\n",
    "                R.output(gv1)\n",
    "            return gv1\n",
    "    # fmt: on\n",
    "\n",
    "    m = Model()\n",
    "    mod, _ = m.export_tvm(\n",
    "        spec={\n",
    "            \"foo\": {\n",
    "                \"prob\": spec.Tensor(prob_shape, \"float32\"),\n",
    "                \"uniform_sample\": spec.Tensor(sample_shape, \"float32\"),\n",
    "                \"sample_indices\": spec.Tensor(sample_shape, \"int64\"),\n",
    "            }\n",
    "        },\n",
    "        debug=True,\n",
    "    )\n",
    "\n",
    "    tvm.ir.assert_structural_equal(mod, Expected)\n",
    "\n",
    "    # Dispatch the sampling op and apply a default GPU schedule before compiling.\n",
    "    target = tvm.target.Target(\"cuda\", host=\"llvm\")\n",
    "    with target:\n",
    "        mod = relax.backend.DispatchSampling()(mod)\n",
    "        mod = tir.transform.DefaultGPUSchedule()(mod)\n",
    "    ex = tvm.compile(mod, target)\n",
    "    dev = tvm.device(str(target), 0)\n",
    "    vm = relax.VirtualMachine(ex, dev)\n",
    "\n",
    "    effects = vm[\"_initialize_effect\"]()\n",
    "\n",
    "    np_rand = np.random.rand(*prob_shape).astype(np.float32)\n",
    "    # normalize it to get the random prob\n",
    "    np_prob = np_rand / np_rand.sum(axis=1, keepdims=True)\n",
    "    nd_prob = tvm.nd.array(np_prob, dev)\n",
    "    # special sample to get deterministic results\n",
    "    nd_sample = tvm.nd.array(np.array([[1], [0], [1], [1], [0], [1]]).astype(np.float32), dev)\n",
    "    nd_sample_indices = tvm.nd.array(np.array([[0], [1], [1], [2], [2], [2]]).astype(np.int64), dev)\n",
    "    inputs = [nd_prob, nd_sample, nd_sample_indices, effects]\n",
    "    res = vm[\"foo\"](*inputs)\n",
    "    tvm.testing.assert_allclose(\n",
    "        res[0].numpy(), np.array([[4], [0], [4], [4], [0], [4]]).astype(np.int64)\n",
    "    )\n",
    "\n",
    "\n",
    "@tvm.testing.requires_gpu\n",
    "def test_sample_top_p_top_k_from_sorted_prob():\n",
    "    \"\"\"op.sample_top_p_top_k_from_sorted_prob exports as R.cumsum plus two TIR\n",
    "    kernels (get_renorm_prob, get_index_from_sorted) and samples correctly on GPU.\n",
    "\n",
    "    The Expected TIR below must match the generated kernels token-for-token for\n",
    "    the structural-equality assertion to pass.\n",
    "    \"\"\"\n",
    "    prob_shape = (2, 3)\n",
    "    sample_shape = (3, 1)\n",
    "\n",
    "    class Model(Module):\n",
    "        def foo(\n",
    "            self,\n",
    "            prob: Tensor,\n",
    "            index: Tensor,\n",
    "            top_p: Tensor,\n",
    "            top_k: Tensor,\n",
    "            uniform_sample: Tensor,\n",
    "            sample_indices: Tensor,\n",
    "        ):\n",
    "            z0 = op.sample_top_p_top_k_from_sorted_prob(\n",
    "                prob, index, top_p, top_k, uniform_sample, sample_indices\n",
    "            )\n",
    "            return z0\n",
    "\n",
    "    # fmt: off\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @T.prim_func(private=True)\n",
    "        def get_index_from_sorted(A: T.handle, B: T.handle, C: T.handle, D: T.handle, E: T.handle, F: T.handle):\n",
    "            batch, vocab_size = T.int64(is_size_var=True), T.int64(is_size_var=True)\n",
    "            cumsum_sorted = T.match_buffer(A, (batch, vocab_size))\n",
    "            indices = T.match_buffer(B, (batch, vocab_size), \"int64\")\n",
    "            renorm_prob = T.match_buffer(C, (batch, 1))\n",
    "            out_batch = T.int64(is_size_var=True)\n",
    "            usample = T.match_buffer(D, (out_batch, 1))\n",
    "            sample_indices = T.match_buffer(E, (out_batch, 1), \"int64\")\n",
    "            output_index = T.match_buffer(F, (out_batch, 1), \"int64\")\n",
    "            # with T.block(\"root\"):\n",
    "            for ax0, ax1 in T.grid(out_batch, vocab_size):\n",
    "                with T.block(\"T_get_index_from_sorted\"):\n",
    "                    v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                    T.reads(usample[v_ax0, T.int64(0)], cumsum_sorted[sample_indices[v_ax0, T.int64(0)], v_ax1 - T.int64(1):v_ax1 - T.int64(1) + T.int64(2)], sample_indices[v_ax0, T.int64(0)], renorm_prob[sample_indices[v_ax0, T.int64(0)], 0], indices[sample_indices[v_ax0, T.int64(0)], T.min(T.int64(0), v_ax1):T.min(T.int64(0), v_ax1) + (T.max(T.int64(0), v_ax1) + T.int64(1) - T.min(T.int64(0), v_ax1))])\n",
    "                    T.writes(output_index[v_ax0, 0])\n",
    "                    if usample[v_ax0, T.int64(0)] < cumsum_sorted[sample_indices[v_ax0, T.int64(0)], v_ax1] / renorm_prob[sample_indices[v_ax0, T.int64(0)], 0] or v_ax1 + T.int64(1) == vocab_size:\n",
    "                        if v_ax1 == T.int64(0):\n",
    "                            output_index[v_ax0, 0] = indices[sample_indices[v_ax0, T.int64(0)], 0]\n",
    "                        else:\n",
    "                            if usample[v_ax0, T.int64(0)] >= cumsum_sorted[sample_indices[v_ax0, T.int64(0)], v_ax1 - T.int64(1)] / renorm_prob[sample_indices[v_ax0, T.int64(0)], 0]:\n",
    "                                output_index[v_ax0, 0] = indices[sample_indices[v_ax0, T.int64(0)], v_ax1]\n",
    "\n",
    "        @T.prim_func(private=True)\n",
    "        def get_renorm_prob(A: T.handle, B: T.handle, C: T.handle, D: T.handle):\n",
    "            batch, vocab_size = T.int64(is_size_var=True), T.int64(is_size_var=True)\n",
    "            cumsum_sorted = T.match_buffer(A, (batch, vocab_size))\n",
    "            top_p = T.match_buffer(B, (batch, 1))\n",
    "            top_k = T.match_buffer(C, (batch, 1), \"int64\")\n",
    "            renorm_prob = T.match_buffer(D, (batch, 1))\n",
    "            # with T.block(\"root\"):\n",
    "            for ax0, ax1 in T.grid(batch, vocab_size):\n",
    "                with T.block(\"T_get_renorm_prob\"):\n",
    "                    v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                    T.reads(cumsum_sorted[v_ax0, T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)):T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)) + (T.max(T.max(T.int64(0), v_ax1), v_ax1 + T.int64(1)) + T.int64(1) - T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)))], top_p[v_ax0, 0], top_k[v_ax0, 0])\n",
    "                    T.writes(renorm_prob[v_ax0, 0])\n",
    "                    if not (cumsum_sorted[v_ax0, 0] < top_p[v_ax0, 0] and top_k[v_ax0, 0] > T.int64(1)):\n",
    "                        renorm_prob[v_ax0, 0] = cumsum_sorted[v_ax0, 0]\n",
    "                    else:\n",
    "                        if cumsum_sorted[v_ax0, v_ax1] < top_p[v_ax0, 0] and v_ax1 + T.int64(1) < top_k[v_ax0, 0]:\n",
    "                            if v_ax1 + T.int64(1) == vocab_size:\n",
    "                                renorm_prob[v_ax0, 0] = cumsum_sorted[v_ax0, v_ax1]\n",
    "                            else:\n",
    "                                if not (cumsum_sorted[v_ax0, v_ax1 + T.int64(1)] < top_p[v_ax0, 0] and v_ax1 + T.int64(1) + T.int64(1) < top_k[v_ax0, 0]):\n",
    "                                    renorm_prob[v_ax0, 0] = cumsum_sorted[v_ax0, v_ax1 + T.int64(1)]\n",
    "\n",
    "        @R.function\n",
    "        def _initialize_effect() -> R.Tuple(R.Object):\n",
    "            with R.dataflow():\n",
    "                _io: R.Object = R.null_value()\n",
    "                lv: R.Tuple(R.Object) = (_io,)\n",
    "                gv: R.Tuple(R.Object) = lv\n",
    "                R.output(gv)\n",
    "            return gv\n",
    "\n",
    "        @R.function\n",
    "        def foo(prob: R.Tensor((2, 3), dtype=\"float32\"), index: R.Tensor((2, 3), dtype=\"int64\"), top_p: R.Tensor((2, 1), dtype=\"float32\"), top_k: R.Tensor((2, 1), dtype=\"int64\"), uniform_sample: R.Tensor((3, 1), dtype=\"float32\"), sample_indices: R.Tensor((3, 1), dtype=\"int64\"), _io: R.Object,) -> R.Tuple(R.Tensor((3, 1), dtype=\"int64\"), R.Tuple(R.Object)):\n",
    "            R.func_attr({\"num_input\": 7})\n",
    "            cls = Expected\n",
    "            with R.dataflow():\n",
    "                cumsum: R.Tensor((2, 3), dtype=\"float32\") = R.cumsum(prob, axis=1, dtype=\"void\", exclusive=None)\n",
    "                lv1 = R.call_tir(cls.get_renorm_prob, (cumsum, top_p, top_k), out_sinfo=R.Tensor((2, 1), dtype=\"float32\"))\n",
    "                lv2 = R.call_tir(cls.get_index_from_sorted, (cumsum, index, lv1, uniform_sample, sample_indices), out_sinfo=R.Tensor((3, 1), dtype=\"int64\"))\n",
    "                gv1: R.Tuple(R.Tensor((3, 1), dtype=\"int64\"), R.Tuple(R.Object)) = lv2, (_io,)\n",
    "                R.output(gv1)\n",
    "            return gv1\n",
    "    # fmt: on\n",
    "\n",
    "    m = Model()\n",
    "    mod, _ = m.export_tvm(\n",
    "        spec={\n",
    "            \"foo\": {\n",
    "                \"prob\": spec.Tensor(prob_shape, \"float32\"),\n",
    "                \"index\": spec.Tensor(prob_shape, \"int64\"),\n",
    "                \"top_p\": spec.Tensor((prob_shape[0], 1), \"float32\"),\n",
    "                \"top_k\": spec.Tensor((prob_shape[0], 1), \"int64\"),\n",
    "                \"uniform_sample\": spec.Tensor(sample_shape, \"float32\"),\n",
    "                \"sample_indices\": spec.Tensor(sample_shape, \"int64\"),\n",
    "            }\n",
    "        },\n",
    "        debug=True,\n",
    "    )\n",
    "\n",
    "    tvm.ir.assert_structural_equal(mod, Expected)\n",
    "\n",
    "    target = tvm.target.Target(\"cuda -libs=thrust\", host=\"llvm\")\n",
    "    with target:\n",
    "        mod = tir.transform.DefaultGPUSchedule()(mod)\n",
    "\n",
    "    ex = tvm.compile(mod, target)\n",
    "    dev = tvm.cuda(0)\n",
    "    vm = relax.VirtualMachine(ex, dev)\n",
    "\n",
    "    effects = vm[\"_initialize_effect\"]()\n",
    "    # Hand-picked probabilities and samples so the expected indices are deterministic.\n",
    "    sorted_prob = tvm.nd.array(np.array([[0.5, 0.4, 0.1], [0.4, 0.3, 0.3]]).astype(np.float32), dev)\n",
    "    indices = tvm.nd.array(np.array([[2, 1, 0], [2, 0, 1]]).astype(np.int64), dev)\n",
    "    top_p = tvm.nd.array(np.array([[0.6], [0.9]]).astype(np.float32), dev)\n",
    "    top_k = tvm.nd.array(np.array([[3], [2]]).astype(np.int64), dev)\n",
    "    usample = tvm.nd.array(np.array([[0.5], [0.6], [0.7]]).astype(np.float32), dev)\n",
    "    sample_indices = tvm.nd.array(np.array([[0], [1], [1]]).astype(np.int64), dev)\n",
    "\n",
    "    inputs = [sorted_prob, indices, top_p, top_k, usample, sample_indices, effects]\n",
    "\n",
    "    res = vm[\"foo\"](*inputs)\n",
    "    tvm.testing.assert_allclose(res[0].numpy(), np.array([[2], [0], [0]]).astype(np.int64))\n",
    "\n",
    "\n",
    "@tvm.testing.requires_gpu\n",
    "def test_renormalize_top_p_top_k_prob():\n",
    "    \"\"\"op.renormalize_top_p_top_k_prob exports as cumsum + get_renorm_cutoff +\n",
    "    filter_with_top_p_top_k followed by sum/divide renormalization, and the\n",
    "    compiled module produces the expected renormalized probabilities on GPU.\n",
    "    \"\"\"\n",
    "    prob_shape = (2, 3)\n",
    "    sample_shape = (2, 1)\n",
    "\n",
    "    class Model(Module):\n",
    "        def foo(\n",
    "            self,\n",
    "            prob: Tensor,\n",
    "            sorted_prob: Tensor,\n",
    "            top_p: Tensor,\n",
    "            top_k: Tensor,\n",
    "        ):\n",
    "            z0 = op.renormalize_top_p_top_k_prob(prob, sorted_prob, top_p, top_k)\n",
    "            return z0\n",
    "\n",
    "    # fmt: off\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @T.prim_func(private=True)\n",
    "        def filter_with_top_p_top_k(A: T.Buffer((T.int64(2), T.int64(3)), \"float32\"), B: T.Buffer((T.int64(2), T.int64(1)), \"float32\"), filter_with_top_p_top_k: T.Buffer((T.int64(2), T.int64(3)), \"float32\")):\n",
    "            T.func_attr({\"tir.noalias\": T.bool(True)})\n",
    "            # with T.block(\"root\"):\n",
    "            for i, j in T.grid(T.int64(2), T.int64(3)):\n",
    "                with T.block(\"filter_with_top_p_top_k\"):\n",
    "                    v_i, v_j = T.axis.remap(\"SS\", [i, j])\n",
    "                    T.reads(B[v_i, T.int64(0)], A[v_i, v_j])\n",
    "                    T.writes(filter_with_top_p_top_k[v_i, v_j])\n",
    "                    filter_with_top_p_top_k[v_i, v_j] = T.Select(B[v_i, T.int64(0)] <= A[v_i, v_j], A[v_i, v_j], T.float32(0))\n",
    "\n",
    "        @T.prim_func(private=True)\n",
    "        def get_renorm_cutoff(A: T.handle, B: T.handle, C: T.handle, D: T.handle, E: T.handle):\n",
    "            batch, vocab_size = T.int64(), T.int64()\n",
    "            sorted_prob = T.match_buffer(A, (batch, vocab_size))\n",
    "            cumsum_sorted = T.match_buffer(B, (batch, vocab_size))\n",
    "            top_p = T.match_buffer(C, (batch, 1))\n",
    "            top_k = T.match_buffer(D, (batch, 1), \"int64\")\n",
    "            cutoff = T.match_buffer(E, (batch, 1))\n",
    "            # with T.block(\"root\"):\n",
    "            for ax0, ax1 in T.grid(batch, vocab_size):\n",
    "                with T.block(\"T_get_renorm_prob\"):\n",
    "                    v_ax0, v_ax1 = T.axis.remap(\"SS\", [ax0, ax1])\n",
    "                    T.reads(cumsum_sorted[v_ax0, T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)):T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)) + (T.max(T.max(T.int64(0), v_ax1), v_ax1 + T.int64(1)) + T.int64(1) - T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)))], top_p[v_ax0, 0], top_k[v_ax0, 0], sorted_prob[v_ax0, T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)):T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)) + (T.max(T.max(T.int64(0), v_ax1), v_ax1 + T.int64(1)) + T.int64(1) - T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)))])\n",
    "                    T.writes(cutoff[v_ax0, 0])\n",
    "                    if (cumsum_sorted[v_ax0, 0] < top_p[v_ax0, 0] and top_k[v_ax0, 0] > T.int64(1)) == T.bool(False):\n",
    "                        cutoff[v_ax0, 0] = sorted_prob[v_ax0, 0]\n",
    "                    else:\n",
    "                        if (cumsum_sorted[v_ax0, v_ax1] < top_p[v_ax0, 0] and v_ax1 + T.int64(1) < top_k[v_ax0, 0]) == T.bool(True):\n",
    "                            if v_ax1 + T.int64(1) == vocab_size:\n",
    "                                cutoff[v_ax0, 0] = sorted_prob[v_ax0, v_ax1]\n",
    "                            else:\n",
    "                                if (cumsum_sorted[v_ax0, v_ax1 + T.int64(1)] < top_p[v_ax0, 0] and v_ax1 + T.int64(1) + T.int64(1) < top_k[v_ax0, 0]) == T.bool(False):\n",
    "                                    cutoff[v_ax0, 0] = sorted_prob[v_ax0, v_ax1 + T.int64(1)]\n",
    "\n",
    "        @R.function\n",
    "        def _initialize_effect() -> R.Tuple(R.Object):\n",
    "            with R.dataflow():\n",
    "                _io: R.Object = R.null_value()\n",
    "                lv: R.Tuple(R.Object) = (_io,)\n",
    "                gv: R.Tuple(R.Object) = lv\n",
    "                R.output(gv)\n",
    "            return gv\n",
    "\n",
    "        @R.function\n",
    "        def foo(prob: R.Tensor((2, 3), dtype=\"float32\"), sorted_prob: R.Tensor((2, 3), dtype=\"float32\"), top_p: R.Tensor((2, 1), dtype=\"float32\"), top_k: R.Tensor((2, 1), dtype=\"int64\"), _io: R.Object) -> R.Tuple(R.Tensor((2, 3), dtype=\"float32\"), R.Tuple(R.Object)):\n",
    "            R.func_attr({\"num_input\": 5})\n",
    "            cls = Expected\n",
    "            with R.dataflow():\n",
    "                cumsum: R.Tensor((2, 3), dtype=\"float32\") = R.cumsum(sorted_prob, axis=1, dtype=\"void\", exclusive=None)\n",
    "                lv1 = R.call_tir(cls.get_renorm_cutoff, (sorted_prob, cumsum, top_p, top_k), out_sinfo=R.Tensor((2, 1), dtype=\"float32\"))\n",
    "                lv2 = R.call_tir(cls.filter_with_top_p_top_k, (prob, lv1), out_sinfo=R.Tensor((2, 3), dtype=\"float32\"))\n",
    "                sum: R.Tensor((2, 1), dtype=\"float32\") = R.sum(lv2, axis=[1], keepdims=True)\n",
    "                divide: R.Tensor((2, 3), dtype=\"float32\") = R.divide(lv2, sum)\n",
    "                gv1: R.Tuple(R.Tensor((2, 3), dtype=\"float32\"), R.Tuple(R.Object)) = divide, (_io,)\n",
    "                R.output(gv1)\n",
    "            return gv1\n",
    "    # fmt: on\n",
    "\n",
    "    m = Model()\n",
    "    mod, _ = m.export_tvm(\n",
    "        spec={\n",
    "            \"foo\": {\n",
    "                \"prob\": spec.Tensor(prob_shape, \"float32\"),\n",
    "                \"sorted_prob\": spec.Tensor(prob_shape, \"float32\"),\n",
    "                \"top_p\": spec.Tensor(sample_shape, \"float32\"),\n",
    "                \"top_k\": spec.Tensor(sample_shape, \"int64\"),\n",
    "            }\n",
    "        },\n",
    "        debug=True,\n",
    "    )\n",
    "\n",
    "    tvm.ir.assert_structural_equal(mod, Expected)\n",
    "\n",
    "    target = tvm.target.Target(\"cuda -libs=thrust\", host=\"llvm\")\n",
    "    with target:\n",
    "        mod = relax.transform.LegalizeOps()(mod)\n",
    "        mod = tir.transform.DefaultGPUSchedule()(mod)\n",
    "\n",
    "    ex = tvm.compile(mod, target)\n",
    "    dev = tvm.cuda(0)\n",
    "    vm = relax.VirtualMachine(ex, dev)\n",
    "\n",
    "    effects = vm[\"_initialize_effect\"]()\n",
    "    # Hand-picked inputs so the renormalized output can be stated exactly below.\n",
    "    prob = tvm.nd.array(np.array([[0.2, 0.3, 0.5], [0.3, 0.3, 0.4]]).astype(np.float32), dev)\n",
    "    sorted_prob = tvm.nd.array(np.array([[0.5, 0.3, 0.2], [0.4, 0.3, 0.3]]).astype(np.float32), dev)\n",
    "    top_p = tvm.nd.array(np.array([[0.6], [0.9]]).astype(np.float32), dev)\n",
    "    top_k = tvm.nd.array(np.array([[3], [2]]).astype(np.int64), dev)\n",
    "\n",
    "    inputs = [prob, sorted_prob, top_p, top_k, effects]\n",
    "\n",
    "    res = vm[\"foo\"](*inputs)\n",
    "    tvm.testing.assert_allclose(\n",
    "        res[0].numpy(), np.array([[0, 0.375, 0.625], [0.3, 0.3, 0.4]]).astype(np.float32)\n",
    "    )\n",
    "\n",
    "\n",
    "def test_sort_argsort_topk():\n",
    "    \"\"\"op.sort/op.argsort/op.topk export to R.sort/R.argsort/R.topk with the\n",
    "    given axis/descending/k attributes; topk returns a (values, indices) pair.\"\"\"\n",
    "    class Model(Module):\n",
    "        def foo(self, x: Tensor):\n",
    "            z0 = op.sort(x, axis=-1, descending=True)\n",
    "            z1 = op.argsort(x, axis=-1, descending=False)\n",
    "            z2 = op.topk(x, k=2, axis=-1)\n",
    "            return z0, z1, z2\n",
    "\n",
    "    # Exact structural-equality target for the export below.\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @R.function\n",
    "        def foo(x: R.Tensor((\"seq_len\", 64), dtype=\"float16\")):\n",
    "            R.func_attr({\"num_input\": 1})\n",
    "            with R.dataflow():\n",
    "                sort = R.sort(x, axis=-1, descending=True)\n",
    "                argsort = R.argsort(x, axis=-1, descending=False, dtype=\"int32\")\n",
    "                topk = R.topk(x, k=2, axis=-1, ret_type=\"both\", largest=True, dtype=\"int32\")\n",
    "                topk_0 = topk[0]\n",
    "                topk_1 = topk[1]\n",
    "                gv = sort, argsort, (topk_0, topk_1)\n",
    "                R.output(gv)\n",
    "            return gv\n",
    "\n",
    "    m = Model()\n",
    "    mod, _ = m.export_tvm({\"foo\": {\"x\": spec.Tensor((\"seq_len\", 64), \"float16\")}})\n",
    "\n",
    "    tvm.ir.assert_structural_equal(mod, Expected)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ai",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
