{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "90a3c384",
   "metadata": {},
   "source": [
    "# `BasePyModule`"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "63275422",
   "metadata": {},
   "source": [
    "`BasePyModule` 是 TVM Relax 模块中的重要组件，它提供了在 Python 环境中集成和执行 TIR（Tensor Intermediate Representation）函数的能力。\n",
    "\n",
    "测试 BasePyModule 核心功能：\n",
    "1. BasePyModule 的实例化和基本方法\n",
    "2. TIR 函数的编译和执行\n",
    "3. Python 函数的集成\n",
    "4. PyTorch 和 TVM 之间的 DLPack 转换"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "c833add8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 导入测试框架和必要的库\n",
    "import torch\n",
    "import tvm\n",
    "from tvm import relax, tir\n",
    "from tvm.script import relax as R, tir as T\n",
    "from tvm.relax import BasePyModule\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "34f385da",
   "metadata": {},
   "source": [
    "## 测试 BasePyModule 的基本实例化功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "8081d9e4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Simple TIR primitive function: doubles every element of the 10-element input array.\n",
    "@T.prim_func\n",
    "def simple_func(A: T.Buffer((10,), \"float32\"), B: T.Buffer((10,), \"float32\")):\n",
    "    for i in T.grid(10):\n",
    "        B[i] = A[i] * 2.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "02b79914",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: Module has no function 'vm_load_executable'\n"
     ]
    }
   ],
   "source": [
    "# Build an IRModule containing the TIR function.\n",
    "ir_mod = tvm.IRModule({\"simple_func\": simple_func})\n",
    "# Execute on the CPU.\n",
    "device = tvm.cpu(0)\n",
    "# Instantiate BasePyModule.\n",
    "py_mod = BasePyModule(ir_mod, device)\n",
    "\n",
    "# Verify instantiation succeeded and the expected attributes/methods exist.\n",
    "assert isinstance(py_mod, BasePyModule)\n",
    "assert hasattr(py_mod, \"call_tir\")\n",
    "assert hasattr(py_mod, \"call_dps_packed\")\n",
    "assert hasattr(py_mod, \"compiled_tir_funcs\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "dd7fc461",
   "metadata": {},
   "source": [
    "## 测试在 GPU 上实例化 BasePyModule"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "8fba846f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile one or more TIR functions: Memory verification failed with the following errors:\n",
      "    Variable `B` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `A` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "  Did you forget to bind?\n",
      "# from tvm.script import tir as T\n",
      "\n",
      "@T.prim_func\n",
      "def simple_func(A: T.Buffer((10,), \"float32\"), B: T.Buffer((10,), \"float32\")):\n",
      "    T.func_attr({\"target\": T.target({\"arch\": \"sm_86\", \"host\": {\"keys\": [\"cpu\"], \"kind\": \"llvm\", \"mtriple\": \"x86_64-unknown-linux-gnu\", \"tag\": \"\"}, \"keys\": [\"cuda\", \"gpu\"], \"kind\": \"cuda\", \"max_num_threads\": 1024, \"max_shared_memory_per_block\": 49152, \"max_threads_per_block\": 1024, \"tag\": \"\", \"thread_warp_size\": 32})})\n",
      "    for i in range(10):\n",
      "        B_1 = T.Buffer((10,), data=B.data)\n",
      "        A_1 = T.Buffer((10,), data=A.data)\n",
      "        B_1[i] = A_1[i] * T.float32(2.0)\n",
      "Warning: Failed to compile Relax VM: Memory verification failed with the following errors:\n",
      "    Variable `B` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `A` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "  Did you forget to bind?\n",
      "# from tvm.script import tir as T\n",
      "\n",
      "@T.prim_func\n",
      "def simple_func(A: T.Buffer((10,), \"float32\"), B: T.Buffer((10,), \"float32\")):\n",
      "    T.func_attr({\"target\": T.target({\"arch\": \"sm_86\", \"host\": {\"keys\": [\"cpu\"], \"kind\": \"llvm\", \"mtriple\": \"x86_64-unknown-linux-gnu\", \"tag\": \"\"}, \"keys\": [\"cuda\", \"gpu\"], \"kind\": \"cuda\", \"max_num_threads\": 1024, \"max_shared_memory_per_block\": 49152, \"max_threads_per_block\": 1024, \"tag\": \"\", \"thread_warp_size\": 32})})\n",
      "    for i in range(10):\n",
      "        B_1 = T.Buffer((10,), data=B.data)\n",
      "        A_1 = T.Buffer((10,), data=A.data)\n",
      "        B_1[i] = A_1[i] * T.float32(2.0)\n"
     ]
    }
   ],
   "source": [
    "# Define a simple TIR primitive function (doubles each element).\n",
    "@T.prim_func\n",
    "def simple_func(A: T.Buffer((10,), \"float32\"), B: T.Buffer((10,), \"float32\")):\n",
    "    for i in T.grid(10):\n",
    "        B[i] = A[i] * 2.0\n",
    "\n",
    "# Build the IRModule.\n",
    "ir_mod = tvm.IRModule({\"simple_func\": simple_func})\n",
    "\n",
    "# Only run the GPU checks when CUDA is actually available.\n",
    "if tvm.cuda().exist:\n",
    "    # Instantiate BasePyModule on the GPU device.\n",
    "    device = tvm.cuda(0)\n",
    "    py_mod = BasePyModule(ir_mod, device)\n",
    "\n",
    "    # Verify the GPU instantiation succeeded.\n",
    "    assert isinstance(py_mod, BasePyModule)\n",
    "    assert hasattr(py_mod, \"call_tir\")\n",
    "    assert hasattr(py_mod, \"call_dps_packed\")\n",
    "    assert hasattr(py_mod, \"compiled_tir_funcs\")\n",
    "    # The compilation target should mention \"cuda\".\n",
    "    assert \"cuda\" in str(py_mod.target)\n",
    "else:\n",
    "    # NOTE: `pytest` is never imported in this notebook, so the original\n",
    "    # pytest.skip(...) raised NameError here; report and fall through instead.\n",
    "    print(\"CUDA not available, skipping GPU test\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b4144b5a",
   "metadata": {},
   "source": [
    "## 测试 TIR 函数的编译功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "e63656e2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: Module has no function 'vm_load_executable'\n"
     ]
    }
   ],
   "source": [
    "# TIR function that performs element-wise array addition.\n",
    "@T.prim_func\n",
    "def add_func(\n",
    "    A: T.Buffer((5,), \"float32\"), B: T.Buffer((5,), \"float32\"), C: T.Buffer((5,), \"float32\")\n",
    "):\n",
    "    for i in T.grid(5):\n",
    "        C[i] = A[i] + B[i]\n",
    "\n",
    "# Create the IRModule and the BasePyModule instance.\n",
    "ir_mod = tvm.IRModule({\"add_func\": add_func})\n",
    "device = tvm.cpu(0)\n",
    "py_mod = BasePyModule(ir_mod, device)\n",
    "\n",
    "# Verify the TIR function was compiled and stored correctly.\n",
    "assert \"add_func\" in py_mod.tir_func_names\n",
    "assert \"add_func\" in py_mod.compiled_tir_funcs"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a82ee914",
   "metadata": {},
   "source": [
    "## 测试使用 PyTorch 张量调用 TIR 函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "4e08a641",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: Module has no function 'vm_load_executable'\n"
     ]
    }
   ],
   "source": [
    "# TIR function that multiplies the input array by 2.5.\n",
    "@T.prim_func\n",
    "def scale_func(A: T.Buffer((4,), \"float32\"), B: T.Buffer((4,), \"float32\")):\n",
    "    for i in T.grid(4):\n",
    "        B[i] = A[i] * T.float32(2.5)\n",
    "\n",
    "# Create the IRModule and the BasePyModule instance.\n",
    "ir_mod = tvm.IRModule({\"scale_func\": scale_func})\n",
    "device = tvm.cpu(0)\n",
    "py_mod = BasePyModule(ir_mod, device)\n",
    "\n",
    "# Create a PyTorch input tensor.\n",
    "input_tensor = torch.tensor([1.0, 2.0, 3.0, 4.0], dtype=torch.float32)\n",
    "scale_value = 2.5  # must stay in sync with T.float32(2.5) inside scale_func\n",
    "\n",
    "# Call the TIR function and fetch the result.\n",
    "result = py_mod.call_tir(scale_func, [input_tensor], R.Tensor((4,), \"float32\"))\n",
    "\n",
    "# Verify the result is correct.\n",
    "assert isinstance(result, torch.Tensor)\n",
    "assert result.shape == (4,)\n",
    "expected = input_tensor * scale_value\n",
    "assert torch.allclose(result, expected, atol=1e-5)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d1341119",
   "metadata": {},
   "source": [
    "## 测试在 GPU 上使用 PyTorch 张量调用 TIR 函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "f73f2d0c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: Module has no function 'vm_load_executable'\n"
     ]
    }
   ],
   "source": [
    "# Only run the GPU checks when CUDA is actually available.\n",
    "if tvm.cuda().exist:\n",
    "    # IRModule without TIR functions (basic GPU functionality test only).\n",
    "    ir_mod = tvm.IRModule({})\n",
    "    device = tvm.cuda(0)\n",
    "    py_mod = BasePyModule(ir_mod, device)\n",
    "\n",
    "    # Verify the GPU instantiation succeeded.\n",
    "    assert isinstance(py_mod, BasePyModule)\n",
    "    assert hasattr(py_mod, \"call_tir\")\n",
    "    assert hasattr(py_mod, \"call_dps_packed\")\n",
    "    assert \"cuda\" in str(py_mod.target)\n",
    "\n",
    "    # Verify that GPU tensors can be created and used.\n",
    "    input_tensor = torch.tensor([1.0, 2.0, 3.0, 4.0], dtype=torch.float32, device=\"cuda\")\n",
    "    assert input_tensor.device.type == \"cuda\"\n",
    "    assert input_tensor.shape == (4,)\n",
    "else:\n",
    "    # NOTE: `pytest` is never imported in this notebook, so the original\n",
    "    # pytest.skip(...) raised NameError here; report and fall through instead.\n",
    "    print(\"CUDA not available, skipping GPU test\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "15d45b7e",
   "metadata": {},
   "source": [
    "## 测试 PyTorch 到 TVM 的 DLPack 转换功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "17550473",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: Module has no function 'vm_load_executable'\n"
     ]
    }
   ],
   "source": [
    "# Identity TIR function (output equals input).\n",
    "@T.prim_func\n",
    "def identity_func(A: T.Buffer((3,), \"float32\"), B: T.Buffer((3,), \"float32\")):\n",
    "    for i in T.grid(3):\n",
    "        B[i] = A[i]\n",
    "\n",
    "# Create the IRModule and the BasePyModule instance.\n",
    "ir_mod = tvm.IRModule({\"identity_func\": identity_func})\n",
    "device = tvm.cpu(0)\n",
    "py_mod = BasePyModule(ir_mod, device)\n",
    "\n",
    "# Create a PyTorch input tensor.\n",
    "input_tensor = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float32)\n",
    "\n",
    "# Call the TIR function (exercises the PyTorch -> TVM DLPack conversion).\n",
    "result = py_mod.call_tir(identity_func, [input_tensor], R.Tensor((3,), \"float32\"))\n",
    "\n",
    "# The result should equal the input (identity transform).\n",
    "assert isinstance(result, torch.Tensor)\n",
    "assert torch.allclose(result, input_tensor, atol=1e-5)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e48fd77b",
   "metadata": {},
   "source": [
    "## 测试 TVM 到 PyTorch 的 DLPack 转换功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "c39fa58e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: Module has no function 'vm_load_executable'\n"
     ]
    }
   ],
   "source": [
    "# TIR function that fills the output with a constant value.\n",
    "@T.prim_func\n",
    "def constant_func(B: T.Buffer((2,), \"float32\")):\n",
    "    for i in T.grid(2):\n",
    "        B[i] = T.float32(5.0)\n",
    "\n",
    "# Create the IRModule and the BasePyModule instance.\n",
    "ir_mod = tvm.IRModule({\"constant_func\": constant_func})\n",
    "device = tvm.cpu(0)\n",
    "py_mod = BasePyModule(ir_mod, device)\n",
    "\n",
    "# Call the TIR function (exercises the TVM -> PyTorch DLPack conversion).\n",
    "result = py_mod.call_tir(constant_func, [], R.Tensor((2,), \"float32\"))\n",
    "\n",
    "# Verify the result is correct.\n",
    "assert isinstance(result, torch.Tensor)\n",
    "assert result.shape == (2,)\n",
    "expected = torch.tensor([5.0, 5.0], dtype=torch.float32)\n",
    "assert torch.allclose(result, expected, atol=1e-5)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b6d51efa",
   "metadata": {},
   "source": [
    "## 测试向 BasePyModule 添加 Python 函数的功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "5733ace9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: Module has no function 'vm_load_executable'\n"
     ]
    }
   ],
   "source": [
    "# IRModule without any TIR functions.\n",
    "ir_mod = tvm.IRModule({})\n",
    "device = tvm.cpu(0)\n",
    "py_mod = BasePyModule(ir_mod, device)\n",
    "\n",
    "# A custom activation function (tanh).\n",
    "def custom_activation(x):\n",
    "    return torch.tanh(x)\n",
    "\n",
    "# Register the Python function on the BasePyModule.\n",
    "py_mod.add_python_function(\"custom_activation\", custom_activation)\n",
    "\n",
    "# Verify the function was registered.\n",
    "assert hasattr(py_mod, \"custom_activation\")\n",
    "assert \"custom_activation\" in py_mod.pyfuncs\n",
    "\n",
    "# Verify the registered function can be called and works.\n",
    "input_tensor = torch.tensor([1.0, -1.0, 0.0], dtype=torch.float32)\n",
    "result = py_mod.custom_activation(input_tensor)\n",
    "\n",
    "# Verify the result is correct.\n",
    "assert isinstance(result, torch.Tensor)\n",
    "expected = torch.tanh(input_tensor)\n",
    "assert torch.allclose(result, expected, atol=1e-5)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "baf5dd0f",
   "metadata": {},
   "source": [
    "## 测试通过 `call_dps_packed` 调用 Python 函数的功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "87408b97",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: Module has no function 'vm_load_executable'\n"
     ]
    }
   ],
   "source": [
    "# IRModule without any TIR functions.\n",
    "ir_mod = tvm.IRModule({})\n",
    "device = tvm.cpu(0)\n",
    "py_mod = BasePyModule(ir_mod, device)\n",
    "\n",
    "# A custom softmax function.\n",
    "def my_softmax(tensor, dim):\n",
    "    return torch.softmax(tensor, dim=dim)\n",
    "\n",
    "# Register the Python function on the BasePyModule.\n",
    "py_mod.add_python_function(\"my_softmax\", my_softmax)\n",
    "\n",
    "# Create a PyTorch input tensor.\n",
    "input_tensor = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float32)\n",
    "\n",
    "# Invoke the registered Python function through call_dps_packed.\n",
    "result = py_mod.call_dps_packed(\n",
    "    \"my_softmax\", [input_tensor, 1], R.Tensor((2, 2), \"float32\")\n",
    ")\n",
    "\n",
    "# Verify the result is correct.\n",
    "assert isinstance(result, torch.Tensor)\n",
    "expected = torch.softmax(input_tensor, dim=1)\n",
    "assert torch.allclose(result, expected, atol=1e-5)"
   ]
   }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py313",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
