{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "7a7ed5f4",
   "metadata": {},
   "source": [
    "# VM 代码生成"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "2a06a7f1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 导入必要的包\n",
    "import numpy as np  # 用于生成随机数据和数组操作\n",
    "import pytest       # 测试框架\n",
    "import tvm          # TVM核心库\n",
    "import tvm.testing  # TVM测试工具\n",
    "from tvm import relax  # Relax框架\n",
    "from tvm.relax.testing.runtime_builtin import MakeShapeCode, MatchShapeCode  # 运行时内置函数支持\n",
    "from tvm.relax.testing.vm import check_saved_func  # VM测试辅助函数\n",
    "from tvm.script import ir as I  # IR脚本支持\n",
    "from tvm.script import relax as R  # Relax脚本支持\n",
    "from tvm.script import tir as T  # TIR脚本支持"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c234a330",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Helper for VM code generation\n",
    "def codegen(mod, target, exec_mode=\"bytecode\"):\n",
    "    \"\"\"\n",
    "    Lower a Relax IRModule into an executable VM module.\n",
    "    \n",
    "    Parameters:\n",
    "        mod: input IRModule\n",
    "        target: target platform to compile for\n",
    "        exec_mode: execution mode, either \"bytecode\" or \"compiled\"\n",
    "    \n",
    "    Returns:\n",
    "        the compiled executable module\n",
    "    \"\"\"\n",
    "    builder = relax.ExecBuilder()\n",
    "    # Emit VM code for the Relax functions; returns the remaining TIR module\n",
    "    tir_mod = relax.vm_build._vmcodegen(builder, mod, exec_mode=exec_mode)\n",
    "    # Link the TIR module with the emitted VM code into an executable\n",
    "    return relax.vm_build._vmlink(builder, target, tir_mod)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "4055a3ca",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Execution modes to parameterize over; each test below runs in both modes\n",
    "EXEC_MODE = [\"bytecode\", \"compiled\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d85cc0f7",
   "metadata": {},
   "source": [
    "## 测试 VM 内置的复制算子功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "0cdfb3a2",
   "metadata": {},
   "outputs": [],
   "source": [
    "@tvm.script.ir_module\n",
    "class TestVMMove:\n",
    "    @R.function(pure=False)\n",
    "    def foo(x: R.Tensor((3, 4), \"float32\")):\n",
    "        R.func_attr({\"global_symbol\": \"foo\"})  # set the function's global symbol name\n",
    "        # Call the VM builtin copy function\n",
    "        z = R.call_packed(\"vm.builtin.copy\", x, sinfo_args=(R.Tensor((3, 4), dtype=\"float32\")))\n",
    "        return z"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "dbff9c98",
   "metadata": {},
   "outputs": [],
   "source": [
    "for exec_mode in EXEC_MODE:\n",
    "    mod = TestVMMove\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")  # compile for the LLVM target\n",
    "    ex = codegen(mod, target, exec_mode)  # build the executable module\n",
    "    inp = tvm.nd.array(np.random.rand(3, 4).astype(np.float32))  # random test input\n",
    "    vm = relax.VirtualMachine(ex, tvm.cpu())  # create a VM instance on CPU\n",
    "    res = check_saved_func(vm, \"foo\", inp)  # run \"foo\" via the saved-function path\n",
    "    # The copy must reproduce the input exactly (up to float tolerance)\n",
    "    tvm.testing.assert_allclose(res.numpy(), inp.numpy(), rtol=1e-7, atol=1e-7)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c55042bd",
   "metadata": {},
   "source": [
    "## 测试 VM 设备间数据传输"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "33e66d6c",
   "metadata": {},
   "outputs": [],
   "source": [
    "@tvm.script.ir_module\n",
    "class TestVMToDevice:\n",
    "    @R.function(pure=False)\n",
    "    def foo(x: R.Tensor((3, 4), \"float32\")):\n",
    "        R.func_attr({\"global_symbol\": \"foo\"})\n",
    "        # Copy x to the first CPU: device_type=1 and device_id=0\n",
    "        z = R.call_packed(\n",
    "            \"vm.builtin.to_device\", x, 1, 0, sinfo_args=(R.Tensor((3, 4), dtype=\"float32\"))\n",
    "        )\n",
    "        return z\n",
    "        \n",
    "for exec_mode in EXEC_MODE:\n",
    "    mod = TestVMToDevice\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "    inp = tvm.nd.array(np.random.rand(3, 4).astype(np.float32))\n",
    "    vm = relax.VirtualMachine(ex, tvm.cpu())\n",
    "    res = check_saved_func(vm, \"foo\", inp)\n",
    "    tvm.testing.assert_allclose(res.numpy(), inp.numpy(), rtol=1e-7, atol=1e-7)\n",
    "    # Check that the result tensor lives on cpu:0\n",
    "    assert res.device == tvm.cpu(0)\n",
    "    assert res.device.device_type == 1\n",
    "    assert res.device.device_id == 0"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "23631f2f",
   "metadata": {},
   "source": [
    "## 测试常量条件的if语句"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "b4eb41d3",
   "metadata": {},
   "outputs": [],
   "source": [
    "@tvm.script.ir_module\n",
    "class TestVMIfCondConst:\n",
    "    @R.function\n",
    "    def main(x: R.Tensor(ndim=2, dtype=\"float32\")) -> R.Tensor(ndim=2, dtype=\"float32\"):\n",
    "        R.func_attr({\"global_symbol\": \"main\"})\n",
    "        # Use a constant boolean as the branch condition\n",
    "        if relax.const(True, dtype=\"bool\"):\n",
    "            ret = x\n",
    "        else:\n",
    "            ret = x\n",
    "        return ret\n",
    "for exec_mode in EXEC_MODE:\n",
    "    mod = TestVMIfCondConst\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "    vm = relax.VirtualMachine(ex, tvm.cpu())\n",
    "    inp = tvm.nd.array(np.random.rand(3, 4))\n",
    "    res = vm[\"main\"](inp)\n",
    "    tvm.testing.assert_allclose(res.numpy(), inp.numpy())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "efda4100",
   "metadata": {},
   "source": [
    "## 测试VM可执行文件的序列化和导出"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "047fcc14",
   "metadata": {},
   "outputs": [],
   "source": [
    "from tvm.contrib import utils  # temp-dir helper; hoisted out of the loop below\n",
    "\n",
    "# A distinct class name is used so this module does not shadow the earlier\n",
    "# TestVMMove definition (duplicate names silently shadow on re-run).\n",
    "@tvm.script.ir_module\n",
    "class TestVMMoveExport:\n",
    "    @R.function(pure=False)\n",
    "    def foo(x: R.Tensor((3, 4), \"float32\")):\n",
    "        R.func_attr({\"global_symbol\": \"foo\"})\n",
    "        z = R.call_packed(\"vm.builtin.copy\", x, sinfo_args=(R.Tensor((3, 4), dtype=\"float32\")))\n",
    "        return z\n",
    "\n",
    "for exec_mode in EXEC_MODE:\n",
    "    mod = TestVMMoveExport\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    # Fix: forward exec_mode; previously the default \"bytecode\" mode was always\n",
    "    # used, so the \"compiled\" iteration never exercised compiled-mode export.\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "\n",
    "    temp_dir = utils.tempdir()  # temporary directory for the exported artifact\n",
    "    path_exec = temp_dir.relpath(\"exec.so\")  # export path\n",
    "    ex.export_library(path_exec)  # serialize the executable to a shared library\n",
    "\n",
    "    loaded_exec = tvm.runtime.load_module(path_exec)  # load the exported library back\n",
    "    # The round-tripped executable must match the original\n",
    "    assert ex.as_text() == loaded_exec[\"as_text\"]()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cb6c30b7",
   "metadata": {},
   "source": [
    "## 测试条件分支执行"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "fda09a09",
   "metadata": {},
   "outputs": [],
   "source": [
    "@tvm.script.ir_module\n",
    "class TestVMCompileIf:\n",
    "    @R.function(pure=False)\n",
    "    def ife(cond: R.Tensor((), \"bool\"), x: R.Tensor((3, 4), \"float32\")) -> R.Tensor:\n",
    "        R.func_attr({\"global_symbol\": \"ife\"})\n",
    "        # Dispatch to a different packed op depending on the runtime condition\n",
    "        if cond:\n",
    "            w = R.call_packed(\"test.vm.add\", x, x, sinfo_args=(R.Tensor))\n",
    "        else:\n",
    "            w = R.call_packed(\"test.vm.mul\", x, x, sinfo_args=(R.Tensor))\n",
    "        return w\n",
    "for exec_mode in EXEC_MODE:\n",
    "    mod = TestVMCompileIf\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "    vm = relax.VirtualMachine(ex, tvm.cpu())\n",
    "    inp = tvm.nd.array(np.random.rand(3, 4))\n",
    "    # Condition given as integer 1 (truthy)\n",
    "    res = vm[\"ife\"](tvm.nd.array(1), inp)\n",
    "    tvm.testing.assert_allclose(res.numpy(), inp.numpy() + inp.numpy(), rtol=1e-7, atol=1e-7)\n",
    "    # Condition given as boolean True\n",
    "    res = vm[\"ife\"](tvm.nd.array(True), inp)\n",
    "    tvm.testing.assert_allclose(res.numpy(), inp.numpy() + inp.numpy(), rtol=1e-7, atol=1e-7)\n",
    "    # Condition given as integer 0 (falsy)\n",
    "    res = vm[\"ife\"](tvm.nd.array(0), inp)\n",
    "    tvm.testing.assert_allclose(res.numpy(), inp.numpy() * inp.numpy(), rtol=1e-7, atol=1e-7)\n",
    "    # Condition given as boolean False\n",
    "    res = vm[\"ife\"](tvm.nd.array(False), inp)\n",
    "    tvm.testing.assert_allclose(res.numpy(), inp.numpy() * inp.numpy(), rtol=1e-7, atol=1e-7)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "76239e55",
   "metadata": {},
   "source": [
    "## 测试VM返回常量元组"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "3eb519a8",
   "metadata": {},
   "outputs": [],
   "source": [
    "@tvm.script.ir_module\n",
    "class ReturnConstTuple:\n",
    "    @R.function\n",
    "    def main(x: R.Tensor(ndim=2, dtype=\"float32\")):\n",
    "        R.func_attr({\"global_symbol\": \"main\"})\n",
    "        y = R.const([1, 2])  # constant array bound to a variable\n",
    "        z = (y, R.const([3, 4]), x)  # tuple mixing constants and an input\n",
    "        return z\n",
    "for exec_mode in EXEC_MODE:\n",
    "    mod = ReturnConstTuple\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "    vm = relax.VirtualMachine(ex, tvm.cpu())\n",
    "    inp = tvm.nd.array(np.random.rand(2, 3))\n",
    "    res0, res1, res2 = vm[\"main\"](inp)  # unpack the returned tuple\n",
    "    # Each element must match its expected value\n",
    "    tvm.testing.assert_allclose(res0.numpy(), np.array([1, 2]))\n",
    "    tvm.testing.assert_allclose(res1.numpy(), np.array([3, 4]))\n",
    "    tvm.testing.assert_allclose(res2.numpy(), inp.numpy())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e836219d",
   "metadata": {},
   "source": [
    "## 测试常量作为函数调用参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "5da89e99",
   "metadata": {},
   "outputs": [],
   "source": [
    "@tvm.script.ir_module\n",
    "class TestVMConstAsCallArg:\n",
    "    @R.function(pure=False)\n",
    "    def main(x: R.Tensor(ndim=2, dtype=\"float32\")):\n",
    "        R.func_attr({\"global_symbol\": \"main\"})\n",
    "        # Pass constants directly as packed-call arguments\n",
    "        a = R.call_packed(\n",
    "            \"test.vm.add\",\n",
    "            relax.const([1, 2]),\n",
    "            relax.const([3, 4]),\n",
    "            sinfo_args=(R.Tensor(ndim=2, dtype=\"float32\")),\n",
    "        )\n",
    "        b = R.call_packed(\n",
    "            \"test.vm.add\",\n",
    "            a,\n",
    "            x,\n",
    "            sinfo_args=(R.Tensor(ndim=2, dtype=\"float32\")),\n",
    "        )\n",
    "        return b\n",
    "\n",
    "for exec_mode in EXEC_MODE:\n",
    "    mod = TestVMConstAsCallArg\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "    vm = relax.VirtualMachine(ex, tvm.cpu())\n",
    "    inp = tvm.nd.array(np.random.rand(1, 2))\n",
    "    res = vm[\"main\"](inp)\n",
    "    # Expected: constant add [1,2]+[3,4]=[4,6], then add the input tensor\n",
    "    tvm.testing.assert_allclose(res.numpy(), np.array([4, 6]) + inp.numpy())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f6237e44",
   "metadata": {},
   "source": [
    "## 测试形状检查内置函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "2644a914",
   "metadata": {},
   "outputs": [],
   "source": [
    "MS = MatchShapeCode  # opcodes for matching/recording shapes\n",
    "MK = MakeShapeCode   # opcodes for constructing shapes\n",
    "# Slot assignment of the shape variables within the shape heap\n",
    "# 0: n, 1: m\n",
    "sindex = {\"n\": 0, \"m\": 1}\n",
    "\n",
    "@tvm.script.ir_module\n",
    "class TestVMShapeCheck:\n",
    "    @R.function(pure=False)\n",
    "    def main(x: R.Tensor([\"n\", \"m\"], \"float32\")) -> R.Shape(ndim=3):\n",
    "        R.func_attr({\"global_symbol\": \"main\"})\n",
    "        n = T.int64()\n",
    "        k = T.int64()  # NOTE(review): `k` appears unused; the shape vars here are n and m -- confirm\n",
    "        # Allocate the shape heap (3 slots)\n",
    "        shape_heap = R.call_builtin_with_ctx(\n",
    "            \"vm.builtin.alloc_shape_heap\",\n",
    "            [R.prim_value(3)],\n",
    "            sinfo_args=[R.Tensor(ndim=1, dtype=\"int64\")],\n",
    "        )\n",
    "        # Validate ndim and dtype of the input tensor\n",
    "        _ = R.call_packed(\n",
    "            \"vm.builtin.check_tensor_info\", x, 2, R.dtype(\"float32\"), \"\", sinfo_args=[R.Tuple()]\n",
    "        )\n",
    "        # Match the tensor shape and store its dims into the shape heap\n",
    "        _ = R.call_packed(\n",
    "            \"vm.builtin.match_shape\",\n",
    "            x,\n",
    "            shape_heap,\n",
    "            2,\n",
    "            MS.STORE_TO_HEAP,\n",
    "            sindex[\"n\"],\n",
    "            MS.STORE_TO_HEAP,\n",
    "            sindex[\"m\"],\n",
    "            \"\",\n",
    "            sinfo_args=[R.Tuple()],\n",
    "        )\n",
    "        # Construct the returned shape from heap slots plus an immediate value\n",
    "        s = R.call_packed(\n",
    "            \"vm.builtin.make_shape\",\n",
    "            shape_heap,\n",
    "            3,\n",
    "            MK.LOAD_SHAPE,\n",
    "            sindex[\"m\"],\n",
    "            MK.LOAD_SHAPE,\n",
    "            sindex[\"n\"],\n",
    "            MK.USE_IMM,\n",
    "            2,\n",
    "            sinfo_args=[R.Shape(ndim=3)],\n",
    "        )\n",
    "        return s\n",
    "\n",
    "for exec_mode in EXEC_MODE:\n",
    "    mod = TestVMShapeCheck\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "    vm = relax.VirtualMachine(ex, tvm.cpu())\n",
    "    # Happy path: input tensor of shape (1, 2)\n",
    "    x = tvm.nd.array(np.zeros((1, 2)).astype(\"float32\"))\n",
    "    res = vm[\"main\"](x)\n",
    "    # Expect the shape (m, n, 2), i.e. (2, 1, 2)\n",
    "    assert res == tvm.runtime.container.ShapeTuple([2, 1, 2])\n",
    "\n",
    "    # Error path: wrong input type\n",
    "    with pytest.raises(TypeError):\n",
    "        vm[\"main\"]([])\n",
    "\n",
    "    # Error path: wrong number of dimensions\n",
    "    with pytest.raises(ValueError, match=r\".*ndim.*\"):\n",
    "        vm[\"main\"](tvm.nd.array(np.zeros(1).astype(\"float32\")))\n",
    "\n",
    "    # Error path: wrong dtype\n",
    "    with pytest.raises(ValueError, match=r\".*dtype.*\"):\n",
    "        vm[\"main\"](tvm.nd.array(np.zeros((1, 2)).astype(\"int32\")))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "07b4a09f",
   "metadata": {},
   "source": [
    "## 测试原语值处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "8cab2ce3",
   "metadata": {},
   "outputs": [],
   "source": [
    "@tvm.script.ir_module\n",
    "class TestVMPrimValue:\n",
    "    @R.function\n",
    "    def main():\n",
    "        R.func_attr({\"global_symbol\": \"main\"})\n",
    "        ret = R.prim_value(T.int64(1))  # create a primitive int64 value\n",
    "        return ret\n",
    "for exec_mode in EXEC_MODE:\n",
    "    mod = TestVMPrimValue\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "    vm = relax.VirtualMachine(ex, tvm.cpu())\n",
    "    res = vm[\"main\"]()\n",
    "    assert res == 1"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f4ea9b0c",
   "metadata": {},
   "source": [
    "## 测试字符串常量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "32e4e5f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "@tvm.script.ir_module\n",
    "class TestVMStringImm:\n",
    "    @R.function\n",
    "    def main():\n",
    "        R.func_attr({\"global_symbol\": \"main\"})\n",
    "        ret = R.str(\"hello\")  # create a string constant\n",
    "        return ret\n",
    "for exec_mode in EXEC_MODE:\n",
    "    mod = TestVMStringImm\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "    vm = relax.VirtualMachine(ex, tvm.cpu())\n",
    "    res = vm[\"main\"]()\n",
    "    assert res == \"hello\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d347bc0d",
   "metadata": {},
   "source": [
    "## 测试数据类型常量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "ace7cabe",
   "metadata": {},
   "outputs": [],
   "source": [
    "@tvm.script.ir_module\n",
    "class TestDataTypeImm:\n",
    "    @R.function\n",
    "    def main():\n",
    "        R.func_attr({\"global_symbol\": \"main\"})\n",
    "        ret = R.dtype(\"float32\")  # create a data-type constant\n",
    "        return ret\n",
    "for exec_mode in EXEC_MODE:\n",
    "    mod = TestDataTypeImm\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "    vm = relax.VirtualMachine(ex, tvm.cpu())\n",
    "    res = vm[\"main\"]()\n",
    "    assert res == \"float32\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e629fca1",
   "metadata": {},
   "source": [
    "## 测试VM内置的reshape操作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "f80cd0a3",
   "metadata": {},
   "outputs": [],
   "source": [
    "@tvm.script.ir_module\n",
    "class TestVMBuiltinReshape:\n",
    "    @R.function(pure=False)\n",
    "    def main(x: R.Tensor((3, 4), \"float32\")):\n",
    "        R.func_attr({\"global_symbol\": \"main\"})\n",
    "        # Call the VM builtin reshape, turning the (3, 4) tensor into (6, 2)\n",
    "        y = R.call_packed(\n",
    "            \"vm.builtin.reshape\", x, R.shape([6, 2]), sinfo_args=R.Tensor((6, 2), \"float32\")\n",
    "        )\n",
    "        return y\n",
    "for exec_mode in EXEC_MODE:\n",
    "    mod = TestVMBuiltinReshape\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "    dev = tvm.cpu()\n",
    "    vm = relax.VirtualMachine(ex, dev)\n",
    "\n",
    "    input_np = np.random.rand(3, 4).astype(\"float32\")\n",
    "    # Renamed from `input` to avoid shadowing the Python builtin of that name\n",
    "    input_nd = tvm.nd.array(input_np, dev)\n",
    "    res = vm[\"main\"](input_nd)\n",
    "    expected = input_np.reshape(6, 2)  # expected reshaped result\n",
    "    tvm.testing.assert_allclose(res.numpy(), expected, rtol=1e-7, atol=1e-7)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "60a484a3",
   "metadata": {},
   "source": [
    "## 测试VM对象销毁（内存释放）功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0951bf80",
   "metadata": {},
   "outputs": [],
   "source": [
    "@I.ir_module\n",
    "class TestKillObject:\n",
    "    # TIR primfunc: fill the buffer with 0\n",
    "    @T.prim_func\n",
    "    def full(T_full: T.Buffer((T.int64(4),), \"float32\")):\n",
    "        T.func_attr({\"global_symbol\": \"full\", \"tir.noalias\": True})\n",
    "        for ax0 in range(T.int64(4)):\n",
    "            with T.block(\"T_full\"):\n",
    "                v_ax0 = T.axis.spatial(T.int64(4), ax0)\n",
    "                T.reads()\n",
    "                T.writes(T_full[v_ax0])\n",
    "                T_full[v_ax0] = T.float32(0)\n",
    "\n",
    "    # TIR primfunc: fill the buffer with 1\n",
    "    @T.prim_func\n",
    "    def full1(T_full: T.Buffer((T.int64(4),), \"float32\")):\n",
    "        T.func_attr({\"global_symbol\": \"full1\", \"tir.noalias\": True})\n",
    "        for ax0 in range(T.int64(4)):\n",
    "            with T.block(\"T_full\"):\n",
    "                v_ax0 = T.axis.spatial(T.int64(4), ax0)\n",
    "                T.reads()\n",
    "                T.writes(T_full[v_ax0])\n",
    "                T_full[v_ax0] = T.float32(1)\n",
    "\n",
    "    # Directly-called PrimFuncs are treated as impure\n",
    "    @R.function(pure=False)\n",
    "    def main() -> R.Tensor((4,), dtype=\"float32\"):\n",
    "        R.func_attr({\"global_symbol\": \"main\"})\n",
    "        cls = TestKillObject\n",
    "        # Allocate backing storage\n",
    "        storage: R.Object = R.vm.alloc_storage(R.shape([16]), R.prim_value(0), R.dtype(\"uint8\"))\n",
    "        # Allocate a tensor view on that storage\n",
    "        alloc: R.Tensor((4,), dtype=\"float32\") = R.vm.alloc_tensor(\n",
    "            storage, R.prim_value(0), R.shape([4]), R.dtype(\"float32\")\n",
    "        )\n",
    "        _: R.Tuple = cls.full(alloc)  # fill the tensor with 0\n",
    "        __1: R.Tuple = R.vm.kill_object(alloc)  # kill the tensor object\n",
    "        x: R.Tensor((4,), dtype=\"float32\") = alloc  # binding to the killed object\n",
    "        # Allocate a new tensor reusing the same storage\n",
    "        alloc1: R.Tensor((4,), dtype=\"float32\") = R.vm.alloc_tensor(\n",
    "            storage, R.prim_value(0), R.shape([4]), R.dtype(\"float32\")\n",
    "        )\n",
    "        _1: R.Tuple = cls.full(alloc1)  # fill the new tensor with 0\n",
    "        _1_1: R.Tuple = R.vm.kill_object(alloc1)  # kill the new tensor\n",
    "        y: R.Tensor((4,), dtype=\"float32\") = alloc1  # binding to the killed object\n",
    "        # Allocate fresh storage\n",
    "        storage_1: R.Object = R.vm.alloc_storage(\n",
    "            R.shape([16]), R.prim_value(0), R.dtype(\"uint8\")\n",
    "        )\n",
    "        # Allocate a tensor on the fresh storage\n",
    "        alloc2: R.Tensor((4,), dtype=\"float32\") = R.vm.alloc_tensor(\n",
    "            storage_1, R.prim_value(0), R.shape([4]), R.dtype(\"float32\")\n",
    "        )\n",
    "        _2: R.Tuple = cls.full1(alloc2)  # fill the tensor with 1\n",
    "        z: R.Tensor((4,), dtype=\"float32\") = alloc2\n",
    "        _2_1: R.Tuple = R.vm.kill_object(storage)  # kill the first storage object\n",
    "        return z\n",
    "        \n",
    "for exec_mode in EXEC_MODE:\n",
    "    mod = TestKillObject\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "    dev = tvm.cpu()\n",
    "    vm = relax.VirtualMachine(ex, dev)\n",
    "\n",
    "    res = vm[\"main\"]()\n",
    "    # Result should be all ones, showing the kills did not corrupt live data\n",
    "    tvm.testing.assert_allclose(res.numpy(), np.ones((4,), \"float32\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "93b30a43",
   "metadata": {},
   "source": [
    "## 测试VM保留简单绑定"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "05bf9078",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/tmp/ipykernel_2018648/809391981.py:48: UserWarning: Returning type `vm.Storage` which is not registered via register_object, fallback to Object\n",
      "  result_list = vm[\"main\"]()\n"
     ]
    }
   ],
   "source": [
    "@I.ir_module\n",
    "class mod:\n",
    "    @R.function(pure=False)\n",
    "    def main():\n",
    "        # External callback that reports whether an object is still defined\n",
    "        callback = R.ExternFunc(\"test.vm.check_if_defined\")\n",
    "\n",
    "        # Allocate storage and a tensor on it\n",
    "        storage = R.vm.alloc_storage(R.shape([16]), R.prim_value(0), R.dtype(\"uint8\"))\n",
    "        alloc = R.vm.alloc_tensor(storage, R.prim_value(0), R.shape([4]), R.dtype(\"float32\"))\n",
    "        storage_alias = storage  # trivial binding aliasing the storage\n",
    "        alloc_alias = alloc      # trivial binding aliasing the tensor\n",
    "\n",
    "        # Probe each object's state before the kills\n",
    "        storage_before = callback(storage)\n",
    "        alloc_before = callback(alloc)\n",
    "        storage_alias_before = callback(storage_alias)\n",
    "        alloc_alias_before = callback(alloc_alias)\n",
    "\n",
    "        # Kill the original bindings\n",
    "        _ = R.vm.kill_object(storage)\n",
    "        _ = R.vm.kill_object(alloc)\n",
    "\n",
    "        # Probe each object's state after the kills\n",
    "        storage_after = callback(storage)\n",
    "        alloc_after = callback(alloc)\n",
    "        storage_alias_after = callback(storage_alias)\n",
    "        alloc_alias_after = callback(alloc_alias)\n",
    "\n",
    "        # Return all probe results\n",
    "        return (\n",
    "            storage_before,\n",
    "            alloc_before,\n",
    "            storage_alias_before,\n",
    "            alloc_alias_before,\n",
    "            storage_after,\n",
    "            alloc_after,\n",
    "            storage_alias_after,\n",
    "            alloc_alias_after,\n",
    "        )\n",
    "\n",
    "for exec_mode in EXEC_MODE:\n",
    "    target = tvm.target.Target(\"llvm\", host=\"llvm\")\n",
    "    ex = codegen(mod, target, exec_mode)\n",
    "    dev = tvm.cpu()\n",
    "    vm = relax.VirtualMachine(ex, dev)\n",
    "\n",
    "    result_list = vm[\"main\"]()\n",
    "\n",
    "    # Keep expectations in a dict so a test failure shows a readable diff\n",
    "    # instead of a bare positional-index mismatch\n",
    "    expected_results = {\n",
    "        \"storage_before\": True,       # before kill, original storage is defined\n",
    "        \"alloc_before\": True,         # before kill, original tensor is defined\n",
    "        \"storage_alias_before\": True, # before kill, storage alias is defined\n",
    "        \"alloc_alias_before\": True,   # before kill, tensor alias is defined\n",
    "        \"storage_after\": False,       # after kill, original storage is undefined\n",
    "        \"alloc_after\": False,         # after kill, original tensor is undefined\n",
    "        \"storage_alias_after\": True,  # after kill, storage alias is still defined\n",
    "        \"alloc_alias_after\": True,    # after kill, tensor alias is still defined\n",
    "    }\n",
    "\n",
    "    # Convert the observed values into a matching dict\n",
    "    observed_results = {\n",
    "        name: bool(tir_bool) for name, tir_bool in zip(expected_results.keys(), result_list)\n",
    "    }\n",
    "\n",
    "    # Compare observations against expectations\n",
    "    assert observed_results == expected_results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7be2313c",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py313",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
