{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "552b6d86",
   "metadata": {},
   "source": [
    "# RunCodegen"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "904609b6",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import tempfile\n",
    "\n",
    "import numpy as np\n",
    "import pytest\n",
    "\n",
    "import tvm\n",
    "import tvm.testing\n",
    "from tvm import relax, tir\n",
    "from tvm.relax.dpl import is_op, wildcard\n",
    "from tvm.relax.testing import transform\n",
    "from tvm.relax.transform.tuning_api import Trace\n",
    "from tvm.script import ir as I\n",
    "from tvm.script import relax as R\n",
    "from tvm.script import tir as T\n",
    "\n",
    "# Probe the TVM build for TensorRT support; each lookup returns None when the\n",
    "# corresponding capability was not compiled in (second arg allows missing).\n",
    "env_checker_codegen = tvm.get_global_func(\"relax.ext.tensorrt\", True)\n",
    "env_checker_runtime = tvm.get_global_func(\"relax.is_tensorrt_runtime_enabled\", True)\n",
    "\n",
    "# Skip markers used by the tests below; without these definitions the\n",
    "# `@requires_tensorrt_runtime` decorators would raise NameError at import.\n",
    "requires_tensorrt_codegen = pytest.mark.skipif(\n",
    "    not env_checker_codegen,\n",
    "    reason=\"TensorRT codegen not available\",\n",
    ")\n",
    "requires_tensorrt_runtime = pytest.mark.skipif(\n",
    "    not env_checker_runtime or not env_checker_runtime(),\n",
    "    reason=\"TensorRT runtime not available\",\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "4ee88a1f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compilation target: an NVIDIA T4 GPU (CUDA backend).\n",
    "target_str = \"nvidia/nvidia-t4\"\n",
    "target = tvm.target.Target(target_str)\n",
    "# Device handle used to allocate test inputs and run compiled modules.\n",
    "dev = tvm.cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "a1253458",
   "metadata": {},
   "outputs": [],
   "source": [
    "def check_executable(exec, dev, inputs, expected, entry_func_name):\n",
    "    \"\"\"Run `exec` on `dev` and compare the entry function's output to `expected`.\"\"\"\n",
    "    virtual_machine = relax.VirtualMachine(exec, dev)\n",
    "    result = virtual_machine[entry_func_name](*inputs)\n",
    "    tvm.testing.assert_allclose(result.numpy(), expected.numpy(), atol=1e-5, rtol=1e-5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "38095524",
   "metadata": {},
   "outputs": [],
   "source": [
    "def check_roundtrip(exec0, dev, inputs, expected, entry_func_name=\"main\"):\n",
    "    \"\"\"Export `exec0` to a shared library, reload it, and verify the reload.\n",
    "\n",
    "    The reloaded module must report identical stats/text; numerical\n",
    "    correctness is then checked by running the original executable.\n",
    "    \"\"\"\n",
    "    # Export into a temporary directory instead of the CWD so the library file\n",
    "    # never leaks and concurrent test runs cannot clobber each other's copy.\n",
    "    with tempfile.TemporaryDirectory() as tmp_dir:\n",
    "        lib_path = os.path.join(tmp_dir, \"exec.so\")\n",
    "        exec0.mod.export_library(lib_path)\n",
    "        exec1 = tvm.runtime.load_module(lib_path)\n",
    "    assert exec0.stats() == exec1[\"stats\"]()\n",
    "    assert exec0.as_text() == exec1[\"as_text\"]()\n",
    "\n",
    "    check_executable(exec0, dev, inputs, expected, entry_func_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "ed4a7429",
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_ground_truth(mod, target, dev, inputs):\n",
    "    \"\"\"Compile `mod` without BYOC offloading and run it for reference outputs.\"\"\"\n",
    "    # Legalize to TIR and apply the default GPU schedule; MetaSchedule has no\n",
    "    # default schedule for GPU yet, so this lowering step is required.\n",
    "    with target:\n",
    "        lowered_mod = tvm.transform.Sequential(\n",
    "            [relax.transform.LegalizeOps(), tir.transform.DefaultGPUSchedule()]\n",
    "        )(mod)\n",
    "    assert relax.analysis.well_formed(lowered_mod)\n",
    "    executable = tvm.compile(lowered_mod, target, params={})\n",
    "    return relax.VirtualMachine(executable, dev)[\"main\"](*inputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "04d01eed",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Element-wise workload in which every op (multiply/add) matches the TensorRT\n",
    "# patterns registered in the tests below, so the whole model can be offloaded.\n",
    "@tvm.script.ir_module\n",
    "class InputModule:\n",
    "    @R.function\n",
    "    def main(\n",
    "        x: R.Tensor((16, 16), \"float32\"), y: R.Tensor((16, 16), \"float32\")\n",
    "    ) -> R.Tensor((16, 16), \"float32\"):\n",
    "        with R.dataflow():\n",
    "            z1 = R.multiply(x, y)\n",
    "            z2 = R.add(z1, x)\n",
    "            z3 = R.add(z1, z2)\n",
    "            z4 = R.multiply(z3, z2)\n",
    "            z5 = R.add(z4, z1)\n",
    "            R.output(z5)\n",
    "        return z5\n",
    "\n",
    "\n",
    "def setup_test():\n",
    "    \"\"\"Return (module, device inputs, expected outputs) for the tests below.\"\"\"\n",
    "    # Prepare IRModule and its input\n",
    "    mod = InputModule\n",
    "    assert isinstance(mod, tvm.IRModule)\n",
    "\n",
    "    np0 = np.random.rand(16, 16).astype(np.float32)\n",
    "    np1 = np.random.rand(16, 16).astype(np.float32)\n",
    "    data0 = tvm.nd.array(np0, dev)\n",
    "    data1 = tvm.nd.array(np1, dev)\n",
    "    inputs = [data0, data1]\n",
    "\n",
    "    # Ground truth should be generated before annotation\n",
    "    # due to the conflict with MS task extraction\n",
    "    # TODO(@sunggg): Sort this out\n",
    "    expected = gen_ground_truth(mod, target, dev, inputs)\n",
    "    return mod, inputs, expected\n",
    "\n",
    "\n",
    "# Parametrize tests over the entry-function name to cover non-\"main\" entries.\n",
    "entry_func_name = tvm.testing.parameter(\"main\", \"func\")\n",
    "\n",
    "\n",
    "@tvm.testing.requires_gpu\n",
    "@requires_tensorrt_runtime\n",
    "def test_tensorrt_only(entry_func_name):\n",
    "    \"\"\"Offload the whole model to TensorRT and check correctness + roundtrip.\"\"\"\n",
    "    mod, inputs, expected = setup_test()\n",
    "\n",
    "    if entry_func_name != \"main\":\n",
    "        # Re-register the entry function under the requested name.  The\n",
    "        # previous code assigned the IRModule itself (`mod[...] = mod`),\n",
    "        # which is a type error; copy the \"main\" function instead.\n",
    "        mod[entry_func_name] = mod[\"main\"].with_attr(\"global_symbol\", entry_func_name)\n",
    "        del mod[\"main\"]\n",
    "\n",
    "    # Define patterns that we want to offload to byoc\n",
    "    # This test will offload entire model\n",
    "    # Thus, define patterns for both `multiply` and `add` ops\n",
    "    patterns = [\n",
    "        (\"tensorrt.multiply\", is_op(\"relax.multiply\")(wildcard(), wildcard())),\n",
    "        (\"tensorrt.add\", is_op(\"relax.add\")(wildcard(), wildcard())),\n",
    "    ]\n",
    "\n",
    "    new_mod = tvm.transform.Sequential(\n",
    "        [\n",
    "            relax.transform.FuseOpsByPattern(patterns),\n",
    "            relax.transform.MergeCompositeFunctions(),\n",
    "            relax.transform.RunCodegen(),\n",
    "        ]\n",
    "    )(mod)\n",
    "\n",
    "    ex0 = tvm.compile(new_mod, target, params={})\n",
    "    # Sanity check for the correctness and roundtrip\n",
    "    check_roundtrip(ex0, dev, inputs, expected, entry_func_name)\n",
    "\n",
    "\n",
    "@tvm.testing.requires_gpu\n",
    "@requires_tensorrt_runtime\n",
    "def test_mix_use_tensorrt_and_tvm():\n",
    "    \"\"\"Offload only `add` to TensorRT; tune the remaining ops with MetaSchedule.\"\"\"\n",
    "    mod, inputs, expected = setup_test()\n",
    "\n",
    "    # Define patterns that we want to offload to byoc\n",
    "    # This test will only offload `add` op to tensorrt\n",
    "    # and tune `multiply` op with MetaSchedule\n",
    "    patterns = [\n",
    "        (\"tensorrt.add\", is_op(\"relax.add\")(wildcard(), wildcard())),\n",
    "    ]\n",
    "\n",
    "    # Run Codegen pass\n",
    "    with tempfile.TemporaryDirectory() as work_dir:\n",
    "        with target, tvm.transform.PassContext(trace=Trace(mod), opt_level=0):\n",
    "            new_mod = tvm.transform.Sequential(\n",
    "                [\n",
    "                    relax.transform.FuseOpsByPattern(patterns),\n",
    "                    relax.transform.MergeCompositeFunctions(),\n",
    "                    relax.transform.RunCodegen(),\n",
    "                    relax.transform.LegalizeOps(),\n",
    "                    relax.transform.MetaScheduleTuneIRMod(\n",
    "                        params={}, work_dir=work_dir, max_trials_global=8\n",
    "                    ),\n",
    "                    relax.transform.MetaScheduleApplyDatabase(work_dir),\n",
    "                ]\n",
    "            )(mod)\n",
    "    assert relax.analysis.well_formed(new_mod)\n",
    "    # BUGFIX: `transform` is tvm.relax.testing.transform, which has no\n",
    "    # PassContext; the pass context lives in tvm.transform (as used above).\n",
    "    with tvm.transform.PassContext(opt_level=0):\n",
    "        ex0 = tvm.compile(new_mod, target, params={})\n",
    "\n",
    "    # Sanity check for the correctness and roundtrip\n",
    "    check_roundtrip(ex0, dev, inputs, expected)\n",
    "\n",
    "\n",
    "# Module with a pre-fused BYOC function: `fused_relax_nn_conv2d_tensorrt`\n",
    "# carries the {\"Codegen\": \"tensorrt\"} attribute and is called twice from\n",
    "# `main`, so RunCodegen should emit a single extern (see Conv2dx2_after).\n",
    "@tvm.script.ir_module\n",
    "class Conv2dx2:\n",
    "    @R.function\n",
    "    def main(\n",
    "        data: R.Tensor((16, 32, 32, 16), dtype=\"float16\"),\n",
    "        weight1: R.Tensor((16, 3, 3, 16), dtype=\"float16\"),\n",
    "        weight2: R.Tensor((16, 3, 3, 16), dtype=\"float16\"),\n",
    "    ) -> R.Tensor((16, 32, 32, 16), dtype=\"float16\"):\n",
    "        cls = Conv2dx2\n",
    "        with R.dataflow():\n",
    "            lv: R.Tensor((16, 32, 32, 16), dtype=\"float16\") = cls.fused_relax_nn_conv2d_tensorrt(\n",
    "                data, weight1\n",
    "            )\n",
    "            gv: R.Tensor((16, 32, 32, 16), dtype=\"float16\") = cls.fused_relax_nn_conv2d_tensorrt(\n",
    "                lv, weight2\n",
    "            )\n",
    "            R.output(gv)\n",
    "        return gv\n",
    "\n",
    "    @R.function\n",
    "    def fused_relax_nn_conv2d_tensorrt(\n",
    "        data: R.Tensor((16, 32, 32, 16), dtype=\"float16\"),\n",
    "        weight1: R.Tensor((16, 3, 3, 16), dtype=\"float16\"),\n",
    "    ) -> R.Tensor((16, 32, 32, 16), dtype=\"float16\"):\n",
    "        R.func_attr({\"Codegen\": \"tensorrt\", \"global_symbol\": \"fused_relax_nn_conv2d_tensorrt\"})\n",
    "\n",
    "        @R.function\n",
    "        def gv(\n",
    "            data_1: R.Tensor((16, 32, 32, 16), dtype=\"float16\"),\n",
    "            weight1_1: R.Tensor((16, 3, 3, 16), dtype=\"float16\"),\n",
    "        ) -> R.Tensor((16, 32, 32, 16), dtype=\"float16\"):\n",
    "            R.func_attr({\"Composite\": \"tensorrt.conv2d\", \"Primitive\": 1})\n",
    "            with R.dataflow():\n",
    "                gv_1: R.Tensor((16, 32, 32, 16), dtype=\"float16\") = R.nn.conv2d(\n",
    "                    data_1,\n",
    "                    weight1_1,\n",
    "                    padding=[1, 1, 1, 1],\n",
    "                    data_layout=\"NHWC\",\n",
    "                    kernel_layout=\"OHWI\",\n",
    "                    out_layout=\"NHWC\",\n",
    "                )\n",
    "                R.output(gv_1)\n",
    "            return gv_1\n",
    "\n",
    "        gv1: R.Tensor((16, 32, 32, 16), dtype=\"float16\") = gv(data, weight1)\n",
    "        return gv1\n",
    "\n",
    "\n",
    "# Expected `main` after RunCodegen: both conv2d calls become\n",
    "# R.call_dps_packed calls to the single extern \"fused_relax_nn_conv2d_tensorrt\".\n",
    "@tvm.script.ir_module\n",
    "class Conv2dx2_after:\n",
    "    @R.function\n",
    "    def main(\n",
    "        data: R.Tensor((16, 32, 32, 16), dtype=\"float16\"),\n",
    "        weight1: R.Tensor((16, 3, 3, 16), dtype=\"float16\"),\n",
    "        weight2: R.Tensor((16, 3, 3, 16), dtype=\"float16\"),\n",
    "    ) -> R.Tensor((16, 32, 32, 16), dtype=\"float16\"):\n",
    "        with R.dataflow():\n",
    "            lv = R.call_dps_packed(\n",
    "                \"fused_relax_nn_conv2d_tensorrt\",\n",
    "                (data, weight1),\n",
    "                out_sinfo=R.Tensor((16, 32, 32, 16), dtype=\"float16\"),\n",
    "            )\n",
    "            gv = R.call_dps_packed(\n",
    "                \"fused_relax_nn_conv2d_tensorrt\",\n",
    "                (lv, weight2),\n",
    "                out_sinfo=R.Tensor((16, 32, 32, 16), dtype=\"float16\"),\n",
    "            )\n",
    "            R.output(gv)\n",
    "        return gv\n",
    "\n",
    "\n",
    "def test_multiple_calls_same_extern():\n",
    "    \"\"\"Two call sites of one offloaded function share a single extern symbol.\"\"\"\n",
    "    after = relax.transform.RunCodegen()(Conv2dx2)\n",
    "    tvm.ir.assert_structural_equal(after[\"main\"], Conv2dx2_after[\"main\"])\n",
    "\n",
    "\n",
    "def test_default_entry_func():\n",
    "    \"\"\"The entry function is not necessarily named \"main\"\n",
    "\n",
    "    Like `test_multiple_calls_same_extern`, but the main function is\n",
    "    named \"func\".\n",
    "    \"\"\"\n",
    "\n",
    "    def rename_main(mod):\n",
    "        \"\"\"Clone `mod`, re-registering its \"main\" function as \"func\".\"\"\"\n",
    "        renamed = mod.clone()\n",
    "        renamed[\"func\"] = renamed[\"main\"].with_attr(\"global_symbol\", \"func\")\n",
    "        del renamed[\"main\"]\n",
    "        return renamed\n",
    "\n",
    "    after_with_main = relax.transform.RunCodegen()(Conv2dx2)\n",
    "\n",
    "    # Renaming before or after RunCodegen must commute.\n",
    "    before_with_func = rename_main(Conv2dx2)\n",
    "    expected_with_func = rename_main(after_with_main)\n",
    "    after_with_func = relax.transform.RunCodegen()(before_with_func)\n",
    "\n",
    "    tvm.ir.assert_structural_equal(expected_with_func[\"func\"], after_with_func[\"func\"])\n",
    "\n",
    "\n",
    "def test_dynamic_shape():\n",
    "    \"\"\"RunCodegen rewrites offloaded calls whose shapes use symbolic vars.\"\"\"\n",
    "    # NOTE(review): this import appears to register the cuBLAS backend needed\n",
    "    # for the {\"Codegen\": \"cublas\"} attribute below — confirm side effect.\n",
    "    import tvm.relax.backend.cuda.cublas\n",
    "\n",
    "    @I.ir_module\n",
    "    class Before:\n",
    "        @R.function\n",
    "        def main(\n",
    "            x: R.Tensor((1, 4096), dtype=\"float16\"),\n",
    "            w1: R.Tensor((4096, \"r1\"), dtype=\"float16\"),\n",
    "            w2: R.Tensor((4096, \"r2\"), dtype=\"float16\"),\n",
    "        ) -> R.Tuple(R.Tensor((1, \"r1\"), dtype=\"float16\"), R.Tensor((1, \"r2\"), dtype=\"float16\")):\n",
    "            r1 = T.int64()\n",
    "            r2 = T.int64()\n",
    "            cls = Before\n",
    "            with R.dataflow():\n",
    "                lv: R.Tensor((1, r1), dtype=\"float16\") = cls.fused_relax_matmul_cublas(x, w1)\n",
    "                lv1: R.Tensor((1, r2), dtype=\"float16\") = cls.fused_relax_matmul_cublas(x, w2)\n",
    "                gv: R.Tuple(\n",
    "                    R.Tensor((1, r1), dtype=\"float16\"), R.Tensor((1, r2), dtype=\"float16\")\n",
    "                ) = (lv, lv1)\n",
    "                R.output(gv)\n",
    "            return gv\n",
    "\n",
    "        @R.function\n",
    "        def fused_relax_matmul_cublas(\n",
    "            x: R.Tensor((1, 4096), dtype=\"float16\"), w1: R.Tensor((4096, \"r1\"), dtype=\"float16\")\n",
    "        ) -> R.Tensor((1, \"r1\"), dtype=\"float16\"):\n",
    "            r1 = T.int64()\n",
    "            R.func_attr({\"Codegen\": \"cublas\"})\n",
    "\n",
    "            @R.function\n",
    "            def gv(\n",
    "                x_1: R.Tensor((1, 4096), dtype=\"float16\"),\n",
    "                w1_1: R.Tensor((4096, r1), dtype=\"float16\"),\n",
    "            ) -> R.Tensor((1, r1), dtype=\"float16\"):\n",
    "                R.func_attr({\"Composite\": \"cublas.matmul\"})\n",
    "                with R.dataflow():\n",
    "                    gv_1: R.Tensor((1, r1), dtype=\"float16\") = R.matmul(x_1, w1_1, out_dtype=\"void\")\n",
    "                    R.output(gv_1)\n",
    "                return gv_1\n",
    "\n",
    "            gv1: R.Tensor((1, r1), dtype=\"float16\") = gv(x, w1)\n",
    "            return gv1\n",
    "\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @R.function\n",
    "        def main(\n",
    "            x: R.Tensor((1, 4096), dtype=\"float16\"),\n",
    "            w1: R.Tensor((4096, \"r1\"), dtype=\"float16\"),\n",
    "            w2: R.Tensor((4096, \"r2\"), dtype=\"float16\"),\n",
    "        ) -> R.Tuple(R.Tensor((1, \"r1\"), dtype=\"float16\"), R.Tensor((1, \"r2\"), dtype=\"float16\")):\n",
    "            r1 = T.int64()\n",
    "            r2 = T.int64()\n",
    "            with R.dataflow():\n",
    "                lv = R.call_dps_packed(\n",
    "                    \"fused_relax_matmul_cublas\",\n",
    "                    (x, w1),\n",
    "                    out_sinfo=R.Tensor((1, r1), dtype=\"float16\"),\n",
    "                )\n",
    "                lv1 = R.call_dps_packed(\n",
    "                    \"fused_relax_matmul_cublas\",\n",
    "                    (x, w2),\n",
    "                    out_sinfo=R.Tensor((1, r2), dtype=\"float16\"),\n",
    "                )\n",
    "                gv: R.Tuple(\n",
    "                    R.Tensor((1, r1), dtype=\"float16\"), R.Tensor((1, r2), dtype=\"float16\")\n",
    "                ) = (lv, lv1)\n",
    "                R.output(gv)\n",
    "            return gv\n",
    "\n",
    "    after = relax.transform.RunCodegen()(Before)\n",
    "    tvm.ir.assert_structural_equal(after[\"main\"], Expected[\"main\"])\n",
    "\n",
    "\n",
    "def test_no_op_for_call_to_tir():\n",
    "    \"\"\"Calls to PrimFunc are ignored\n",
    "\n",
    "    RunCodegen should only update calls to Relax functions annotated\n",
    "    with the `\"Codegen\"` attribute.  Calls to any other function type\n",
    "    should be ignored.\n",
    "\n",
    "    This is a regression test.  Previous implementations performed an\n",
    "    unconditional cast from `tvm::BaseFunc` to `tvm::relax::Function`,\n",
    "    which produced an error.\n",
    "    \"\"\"\n",
    "\n",
    "    @tvm.script.ir_module\n",
    "    class Before:\n",
    "        @R.function\n",
    "        def main(x: R.Tensor([4], \"int64\")):\n",
    "            R.func_attr({\"relax.force_pure\": True})\n",
    "            _ = Before.shape_func(x)\n",
    "            return x\n",
    "\n",
    "        @T.prim_func(private=True)\n",
    "        def shape_func(H: T.Buffer(T.int64(4), \"int64\")):\n",
    "            H[T.int64(0)] = H[T.int64(0)] + T.int64(1)\n",
    "\n",
    "    Expected = Before\n",
    "    After = relax.transform.RunCodegen()(Before)\n",
    "    # No function carries a \"Codegen\" attribute, so the pass must be a no-op.\n",
    "    tvm.ir.assert_structural_equal(Expected, After)\n",
    "\n",
    "\n",
    "# TODO(@sunggg):  test with more complex patterns (e.g., multiple annots, mixed codegens, different ops, const binding)\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # BUGFIX: `pytest` was referenced without being imported; use the TVM\n",
    "    # testing entry point, which is already in scope via `import tvm.testing`.\n",
    "    tvm.testing.main()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ai",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
