{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "5bd5da88",
   "metadata": {},
   "source": [
    "# Relax VM 中带有内存作用域(scope)的朴素内存分配器"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "82e8657f",
   "metadata": {},
   "source": [
    "- TVM Relax 虚拟机(VM)中带有明确内存作用域指定的存储分配功能。\n",
    "- 内存作用域是 TVM 中用于标识不同内存区域的一种机制，可以用于区分全局内存、共享内存、纹理内存等不同类型的存储空间。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8feb8641",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "import tvm\n",
    "import tvm.testing\n",
    "from tvm import relax\n",
    "from tvm.script import ir as I\n",
    "from tvm.script import relax as R\n",
    "from tvm.script import tir as T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ad5e17b",
   "metadata": {},
   "outputs": [],
   "source": [
    "@I.ir_module\n",
    "class Module:\n",
    "    \"\"\"\n",
    "    IR module definition containing two functions:\n",
    "    1. A TIR primitive function ``add`` that performs element-wise matrix addition.\n",
    "    2. A Relax main function ``main`` that demonstrates explicit storage\n",
    "       allocation with a memory scope and a tensor operation.\n",
    "    \"\"\"\n",
    "    @T.prim_func\n",
    "    def add(\n",
    "        arg0: T.Buffer((2, 2), \"float32\"),\n",
    "        arg1: T.Buffer((2, 2), \"float32\"),\n",
    "        output: T.Buffer((2, 2), \"float32\"),\n",
    "    ):\n",
    "        \"\"\"\n",
    "        TIR primitive function implementing 2-D matrix addition.\n",
    "        \n",
    "        Parameters:\n",
    "            arg0: first input tensor (2x2, float32)\n",
    "            arg1: second input tensor (2x2, float32)\n",
    "            output: output tensor (2x2, float32)\n",
    "        \"\"\"\n",
    "        T.func_attr({\"operator_name\": \"relax.add\"})\n",
    "        # Nested loops visit every element of the matrix\n",
    "        for ax0 in range(2):\n",
    "            for ax1 in range(2):\n",
    "                with T.block(\"T_add\"):\n",
    "                    # Define the spatial axis indices\n",
    "                    v_ax0 = T.axis.spatial(2, ax0)\n",
    "                    v_ax1 = T.axis.spatial(2, ax1)\n",
    "                    # Declare the memory regions read and written by this block\n",
    "                    T.reads(arg0[v_ax0, v_ax1], arg1[v_ax0, v_ax1])\n",
    "                    T.writes(output[v_ax0, v_ax1])\n",
    "                    # Perform the element-wise addition\n",
    "                    output[v_ax0, v_ax1] = arg0[v_ax0, v_ax1] + arg1[v_ax0, v_ax1]\n",
    "\n",
    "    @R.function(pure=False)\n",
    "    def main(x: R.Tensor((2, 2), dtype=\"float32\")):\n",
    "        \"\"\"\n",
    "        Relax main function showing how to use the storage-allocation API\n",
    "        with an explicit memory scope.\n",
    "        \n",
    "        Parameters:\n",
    "            x: input tensor (2x2, float32)\n",
    "        \n",
    "        Returns:\n",
    "            The processed tensor (2x2, float32).\n",
    "        \"\"\"\n",
    "        cls = Module\n",
    "        # Allocate storage with an explicit size, device index, dtype and\n",
    "        # memory scope; storage_scope=\"global\" selects global memory\n",
    "        storage = R.vm.alloc_storage(\n",
    "            R.shape([2 * 2]), runtime_device_index=0, dtype=\"float32\", storage_scope=\"global\"\n",
    "        )\n",
    "        # Create a tensor view over the allocated storage, specifying\n",
    "        # the offset, shape and dtype\n",
    "        alloc = R.vm.alloc_tensor(storage, offset=0, shape=R.shape([2, 2]), dtype=\"float32\")\n",
    "        # Call the TIR function; the result is written into the allocated tensor\n",
    "        _: R.Tuple = cls.add(x, x, alloc)\n",
    "        # Bind the allocated tensor as the function output\n",
    "        out: R.Tensor((2, 2), dtype=\"float32\") = alloc\n",
    "        return out\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f596020b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_alloc_storage_with_scope_global():\n",
    "    \"\"\"\n",
    "    Test storage allocation with the 'global' memory scope.\n",
    "    \n",
    "    Verifies that the storage-allocation API with an explicit memory scope\n",
    "    works in the Relax VM and that the computed result is correct,\n",
    "    specifically when the VM uses the 'naive' memory configuration.\n",
    "    \"\"\"\n",
    "    # Generate random test data\n",
    "    arg0 = np.random.uniform(size=(2, 2)).astype(np.float32)\n",
    "    # Expected result: the input matrix added to itself\n",
    "    output_ref = arg0 + arg0\n",
    "    \n",
    "    # Use the module defined above\n",
    "    mod = Module\n",
    "    # Target LLVM (run on the CPU)\n",
    "    target = \"llvm\"\n",
    "    \n",
    "    # Build the module at optimization level 3\n",
    "    with tvm.transform.PassContext(opt_level=3):\n",
    "        lib = tvm.relax.build(mod, target=target, exec_mode=\"compiled\")\n",
    "    \n",
    "    # Get the CPU device\n",
    "    dev = tvm.cpu()\n",
    "    # Key point under test: create the VM runtime with the 'naive' memory config\n",
    "    vm_rt = relax.VirtualMachine(lib, dev, memory_cfg=\"naive\")\n",
    "    \n",
    "    # Convert the NumPy array to a TVM NDArray and set it as the input\n",
    "    x = tvm.nd.array(arg0, dev)\n",
    "    vm_rt.set_input(\"main\", x)\n",
    "    # Invoke the stateful function to run the computation\n",
    "    vm_rt.invoke_stateful(\"main\")\n",
    "    # Fetch the output and convert it to a NumPy array\n",
    "    output = vm_rt.get_outputs(\"main\").numpy()\n",
    "    \n",
    "    # Check the result against the expected value\n",
    "    tvm.testing.assert_allclose(output_ref, output)\n",
    "\n",
    "\n",
    "# Invoke the test so that \"Restart Kernel -> Run All\" actually exercises it;\n",
    "# previously the function was only defined and never executed.\n",
    "test_alloc_storage_with_scope_global()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py313",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
