{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "c2be6b5b",
   "metadata": {},
   "source": [
    "# TVMScript `@I.pyfunc` 装饰器"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c1bf75d2",
   "metadata": {},
   "source": [
    "本测试验证以下内容：\n",
    "1. `@I.pyfunc` 装饰器的正确工作方式\n",
    "2. Python 函数如何正确集成到 IRModule 中\n",
    "3. BasePyModule 继承关系是否正确处理\n",
    "4. 为 Python 函数创建 ExternFunc 节点的功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "7ad664c2",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Import the test framework and required libraries\n",
     "import pytest\n",
     "import torch\n",
     "import tvm\n",
     "from tvm import relax\n",
     "from tvm.script import ir as I, relax as R, tir as T\n",
     "from tvm.relax import BasePyModule\n",
     "import numpy as np\n",
     "\n",
     "\n",
     "@I.ir_module\n",
     "class TestPyFuncModule(BasePyModule):\n",
     "    \"\"\"Test module whose Python functions are registered via @I.pyfunc.\"\"\"\n",
     "\n",
     "    @I.pyfunc\n",
     "    def pytorch_processor(x: torch.Tensor) -> torch.Tensor:\n",
     "        \"\"\"Python function processing a PyTorch tensor: ReLU, then scale by 2.\"\"\"\n",
     "        return torch.nn.functional.relu(x) * 2.0\n",
     "\n",
     "    @I.pyfunc\n",
     "    def pytorch_adder(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n",
     "        \"\"\"Python function adding two PyTorch tensors element-wise.\"\"\"\n",
     "        return x + y\n",
     "\n",
     "    @I.pyfunc\n",
     "    def pytorch_complex_ops(x: torch.Tensor) -> torch.Tensor:\n",
     "        \"\"\"Composite PyTorch ops: softmax -> dropout (disabled) -> scale by 10.\"\"\"\n",
     "        result = torch.nn.functional.softmax(x, dim=0)\n",
     "        # training=False makes dropout a no-op, keeping this cell deterministic\n",
     "        result = torch.nn.functional.dropout(result, p=0.1, training=False)\n",
     "        return result * 10.0\n",
     "\n",
     "    @T.prim_func\n",
     "    def simple_tir_func(\n",
     "        var_A: T.handle,\n",
     "        var_B: T.handle,\n",
     "    ):\n",
     "        \"\"\"Simple TIR identity-copy kernel (B[i] = A[i]) used to test Python/TIR interop.\"\"\"\n",
     "        T.func_attr({\"tir.noalias\": True})\n",
     "        n = T.int32()\n",
     "        A = T.match_buffer(var_A, (n,), \"float32\")\n",
     "        B = T.match_buffer(var_B, (n,), \"float32\")\n",
     "\n",
     "        for i in T.grid(n):\n",
     "            with T.block(\"copy\"):\n",
     "                vi = T.axis.remap(\"S\", [i])\n",
     "                B[vi] = A[vi]\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a8cc302e",
   "metadata": {},
   "source": [
    "## 测试 `@I.pyfunc` 装饰器是否正确创建 `pyfuncs` 属性"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "cac5e1d8",
   "metadata": {},
   "outputs": [],
   "source": [
     "module = TestPyFuncModule\n",
     "\n",
     "# Verify the module exposes a pyfuncs attribute\n",
     "assert hasattr(module, \"pyfuncs\"), \"模块应该包含 pyfuncs 属性\"\n",
     "\n",
     "pyfuncs = module.pyfuncs\n",
     "assert isinstance(pyfuncs, dict), \"pyfuncs 应该是一个字典\"\n",
     "\n",
     "# Verify every expected function is present in pyfuncs\n",
     "expected_functions = [\"pytorch_processor\", \"pytorch_adder\", \"pytorch_complex_ops\"]\n",
     "for func_name in expected_functions:\n",
     "    assert func_name in pyfuncs, f\"函数 {func_name} 应该在 pyfuncs 中\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1c94b314",
   "metadata": {},
   "source": [
    "## 测试 `pyfuncs` 中的 Python 函数是否可调用"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "41e098fc",
   "metadata": {},
   "outputs": [],
   "source": [
     "module = TestPyFuncModule\n",
     "pyfuncs = module.pyfuncs\n",
     "\n",
     "# Check that pytorch_processor is callable\n",
     "processor_func = pyfuncs[\"pytorch_processor\"]\n",
     "assert callable(processor_func), \"pytorch_processor 应该是可调用的\"\n",
     "\n",
     "# Check that pytorch_adder is callable\n",
     "adder_func = pyfuncs[\"pytorch_adder\"]\n",
     "assert callable(adder_func), \"pytorch_adder 应该是可调用的\"\n",
     "\n",
     "# Check that pytorch_complex_ops is callable\n",
     "complex_func = pyfuncs[\"pytorch_complex_ops\"]\n",
     "assert callable(complex_func), \"pytorch_complex_ops 应该是可调用的\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1d92fe2e",
   "metadata": {},
   "source": [
    "## 测试 Python 函数是否正确执行"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "84ec4d6d",
   "metadata": {},
   "outputs": [],
   "source": [
     "module = TestPyFuncModule\n",
     "pyfuncs = module.pyfuncs\n",
     "\n",
     "# Build test inputs\n",
     "x = torch.tensor([1.0, -2.0, 3.0, -4.0, 5.0], dtype=torch.float32)\n",
     "y = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5], dtype=torch.float32)\n",
     "\n",
     "# pytorch_processor: ReLU then scale by 2 (see module definition)\n",
     "processor_func = pyfuncs[\"pytorch_processor\"]\n",
     "processor_result = processor_func(x)\n",
     "\n",
     "assert isinstance(processor_result, torch.Tensor)\n",
     "expected = torch.nn.functional.relu(x) * 2.0\n",
     "assert torch.allclose(processor_result, expected, atol=1e-5)\n",
     "\n",
     "# pytorch_adder: element-wise sum\n",
     "adder_func = pyfuncs[\"pytorch_adder\"]\n",
     "adder_result = adder_func(x, y)\n",
     "\n",
     "assert isinstance(adder_result, torch.Tensor)\n",
     "expected = x + y\n",
     "assert torch.allclose(adder_result, expected, atol=1e-5)\n",
     "\n",
     "# pytorch_complex_ops: softmax -> dropout -> scale\n",
     "complex_func = pyfuncs[\"pytorch_complex_ops\"]\n",
     "complex_result = complex_func(x)\n",
     "\n",
     "assert isinstance(complex_result, torch.Tensor)\n",
     "# The function calls dropout with training=False (a no-op), so checking\n",
     "# shape and dtype is sufficient here\n",
     "assert complex_result.shape == x.shape\n",
     "assert complex_result.dtype == x.dtype"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2594cdac",
   "metadata": {},
   "source": [
    "## 测试模块是否包含用于 IRModule 操作的 functions 属性"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "613f65cc",
   "metadata": {},
   "outputs": [],
   "source": [
     "module = TestPyFuncModule\n",
     "\n",
     "# Check that the functions attribute exists\n",
     "assert hasattr(module, \"functions\"), \"模块应该包含 functions 属性\"\n",
     "\n",
     "functions = module.functions\n",
     "# TVM IRModule.functions is not a plain dict, but it behaves like one\n",
     "assert hasattr(functions, \"__getitem__\"), \"functions 应该支持类似字典的访问\"\n",
     "assert hasattr(functions, \"__iter__\"), \"functions 应该是可迭代的\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b1cbd7a1",
   "metadata": {},
   "source": [
    "## 测试模块是否有用于 TVMScript 输出的 `script()` 方法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "313ea424",
   "metadata": {},
   "outputs": [],
   "source": [
     "module = TestPyFuncModule\n",
     "\n",
     "# Check that the script method exists\n",
     "assert hasattr(module, \"script\"), \"模块应该包含 script 方法\"\n",
     "\n",
     "# Check that script() runs and produces a non-empty string\n",
     "script_output = module.script()\n",
     "assert isinstance(script_output, str), \"script() 应该返回一个字符串\"\n",
     "assert len(script_output) > 0, \"script() 应该返回非空字符串\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b354a839",
   "metadata": {},
   "source": [
    "## 测试模块是否具有 BasePyModule 继承标志"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "5add2658",
   "metadata": {},
   "outputs": [],
   "source": [
     "module = TestPyFuncModule\n",
     "\n",
     "# Check the inheritance flag if present (not every implementation sets it)\n",
     "if hasattr(module, \"_base_py_module_inherited\"):\n",
     "    assert module._base_py_module_inherited, \"继承标志应该为 True\"\n",
     "else:\n",
     "    # Fallback: verify the module supports Python functions\n",
     "    assert hasattr(module, \"pyfuncs\"), \"模块应该支持 Python 函数\"\n",
     "\n",
     "# Check the original class is preserved if present (not every implementation sets it)\n",
     "if hasattr(module, \"_original_class\"):\n",
     "    assert module._original_class is not None, \"原始类应该被保留\"\n",
     "else:\n",
     "    # Fallback: verify the module is callable (ModuleFactory)\n",
     "    assert hasattr(module, \"__call__\"), \"模块应该是可调用的（ModuleFactory）\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "525a175a",
   "metadata": {},
   "source": [
    "## 测试模块的创建和执行功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "9ad51f86",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n"
     ]
    }
   ],
   "source": [
     "module = TestPyFuncModule\n",
     "\n",
     "assert hasattr(module, \"__call__\"), \"模块应该是可调用的\"\n",
     "\n",
     "# Instantiate the module on CPU\n",
     "device = tvm.cpu(0)\n",
     "instance = module(device)\n",
     "\n",
     "assert isinstance(instance, BasePyModule), \"实例应该是 BasePyModule 类型\"\n",
     "assert hasattr(instance, \"pyfuncs\"), \"实例应该包含 pyfuncs 属性\"\n",
     "\n",
     "# Python functions are invoked directly as instance attributes\n",
     "x = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float32)\n",
     "result = instance.pytorch_processor(x)\n",
     "\n",
     "assert isinstance(result, torch.Tensor)\n",
     "expected = torch.nn.functional.relu(x) * 2.0\n",
     "assert torch.allclose(result, expected, atol=1e-5)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b4239469",
   "metadata": {},
   "source": [
    "## 测试模块在 GPU 设备上的创建和执行功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "184ee02f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile one or more TIR functions: Memory verification failed with the following errors:\n",
      "    Variable `B` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `A` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "  Did you forget to bind?\n",
      "# from tvm.script import tir as T\n",
      "\n",
      "@T.prim_func\n",
      "def simple_tir_func(var_A: T.handle, var_B: T.handle):\n",
      "    T.func_attr({\"target\": T.target({\"arch\": \"sm_86\", \"host\": {\"keys\": [\"cpu\"], \"kind\": \"llvm\", \"mtriple\": \"x86_64-unknown-linux-gnu\", \"tag\": \"\"}, \"keys\": [\"cuda\", \"gpu\"], \"kind\": \"cuda\", \"max_num_threads\": 1024, \"max_shared_memory_per_block\": 49152, \"max_threads_per_block\": 1024, \"tag\": \"\", \"thread_warp_size\": 32}), \"tir.noalias\": True})\n",
      "    n = T.int32()\n",
      "    A = T.match_buffer(var_A, (n,))\n",
      "    B = T.match_buffer(var_B, (n,))\n",
      "    for i in range(n):\n",
      "        B_1 = T.Buffer((n,), data=B.data)\n",
      "        A_1 = T.Buffer((n,), data=A.data)\n",
      "        B_1[i] = A_1[i]\n",
      "Warning: Failed to compile Relax VM: Memory verification failed with the following errors:\n",
      "    Variable `B` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `A` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "  Did you forget to bind?\n",
      "# from tvm.script import tir as T\n",
      "\n",
      "@T.prim_func\n",
      "def simple_tir_func(var_A: T.handle, var_B: T.handle):\n",
      "    T.func_attr({\"target\": T.target({\"arch\": \"sm_86\", \"host\": {\"keys\": [\"cpu\"], \"kind\": \"llvm\", \"mtriple\": \"x86_64-unknown-linux-gnu\", \"tag\": \"\"}, \"keys\": [\"cuda\", \"gpu\"], \"kind\": \"cuda\", \"max_num_threads\": 1024, \"max_shared_memory_per_block\": 49152, \"max_threads_per_block\": 1024, \"tag\": \"\", \"thread_warp_size\": 32}), \"tir.noalias\": True})\n",
      "    n = T.int32()\n",
      "    A = T.match_buffer(var_A, (n,))\n",
      "    B = T.match_buffer(var_B, (n,))\n",
      "    for i in range(n):\n",
      "        B_1 = T.Buffer((n,), data=B.data)\n",
      "        A_1 = T.Buffer((n,), data=A.data)\n",
      "        B_1[i] = A_1[i]\n"
     ]
    }
   ],
   "source": [
     "module = TestPyFuncModule\n",
     "\n",
     "if tvm.cuda().exist:\n",
     "    device = tvm.cuda(0)\n",
     "    instance = module(device)\n",
     "\n",
     "    assert isinstance(instance, BasePyModule), \"实例应该是 BasePyModule 类型\"\n",
     "    assert hasattr(instance, \"pyfuncs\"), \"实例应该包含 pyfuncs 属性\"\n",
     "\n",
     "    # The input lives on the GPU; the pyfunc result is expected to stay there\n",
     "    x = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float32, device=\"cuda\")\n",
     "    result = instance.pytorch_processor(x)\n",
     "\n",
     "    assert isinstance(result, torch.Tensor)\n",
     "    assert result.device.type == \"cuda\"\n",
     "    expected = torch.nn.functional.relu(x) * 2.0\n",
     "    assert torch.allclose(result, expected, atol=1e-5)\n",
     "else:\n",
     "    # NOTE(review): pytest.skip raises an exception when executed outside a\n",
     "    # pytest run, which would break Restart & Run All on non-CUDA machines;\n",
     "    # consider replacing with a print + no-op in notebook form\n",
     "    pytest.skip(\"CUDA 不可用\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "db4f5df5",
   "metadata": {},
   "source": [
    "## 测试 Python 函数如何与 TIR 函数协作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "3d6fed4f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n"
     ]
    }
   ],
   "source": [
     "module = TestPyFuncModule\n",
     "\n",
     "# Create an instance on CPU\n",
     "device = tvm.cpu(0)\n",
     "instance = module(device)\n",
     "\n",
     "# Exercise the TIR identity-copy kernel\n",
     "n = 5\n",
     "input_tensor = torch.randn(n, dtype=torch.float32)\n",
     "\n",
     "# call_tir allocates the output buffer itself, so only the input tensor and\n",
     "# the output spec are passed; the TIR function expects TVM types, not\n",
     "# plain Python types\n",
     "result = instance.call_tir(\n",
     "    instance.simple_tir_func,\n",
     "    [input_tensor],  # input tensor only; call_tir handles the rest\n",
     "    R.Tensor((n,), \"float32\"),\n",
     ")\n",
     "\n",
     "# simple_tir_func copies A into B, so the output must equal the input\n",
     "assert isinstance(result, torch.Tensor)\n",
     "assert result.shape == (n,)\n",
     "assert torch.allclose(result, input_tensor, atol=1e-5)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "21ec9e1f",
   "metadata": {},
   "source": [
    "## 测试 `@I.pyfunc` 装饰器是否保留函数签名"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "d8984e01",
   "metadata": {},
   "outputs": [],
   "source": [
     "module = TestPyFuncModule\n",
     "pyfuncs = module.pyfuncs\n",
     "\n",
     "# Inspect the preserved function signatures\n",
     "import inspect\n",
     "\n",
     "# pytorch_processor signature: exactly one parameter named 'x'\n",
     "processor_func = pyfuncs[\"pytorch_processor\"]\n",
     "sig = inspect.signature(processor_func)\n",
     "params = list(sig.parameters.keys())\n",
     "assert len(params) == 1, \"pytorch_processor 应该有 1 个参数\"\n",
     "assert params[0] == \"x\", \"第一个参数应该是 'x'\"\n",
     "\n",
     "# pytorch_adder signature: two parameters named 'x' and 'y'\n",
     "adder_func = pyfuncs[\"pytorch_adder\"]\n",
     "sig = inspect.signature(adder_func)\n",
     "params = list(sig.parameters.keys())\n",
     "assert len(params) == 2, \"pytorch_adder 应该有 2 个参数\"\n",
     "assert params[0] == \"x\", \"第一个参数应该是 'x'\"\n",
     "assert params[1] == \"y\", \"第二个参数应该是 'y'\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "78992a35",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py313",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
