{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "803056c5",
   "metadata": {},
   "source": [
    "# PyTorch 与 TVM Relax 的集成"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a7277174",
   "metadata": {},
   "source": [
    "此测试验证以下内容：\n",
    "1. PyTorch 张量与 TVM 后端的无缝输入/输出\n",
    "2. Python、TIR 和 Relax 函数之间的跨函数调用\n",
    "3. 动态 Python 函数的添加和执行\n",
    "4. 端到端流水线测试\n",
    "5. 错误处理和边缘情况"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "da4a72f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import the test framework and required libraries\n",
    "import pytest\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "import tvm\n",
    "from tvm import relax, tir\n",
    "from tvm.script import ir as I, relax as R, tir as T\n",
    "from tvm.relax import BasePyModule\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "ca626c6f",
   "metadata": {},
   "outputs": [],
   "source": [
    "@I.ir_module\n",
    "class PyTorchIntegrationModule(BasePyModule):\n",
    "    \"\"\"Test module for exercising PyTorch integration with TVM Relax.\"\"\"\n",
    "\n",
    "    @I.pyfunc\n",
    "    def main(self, x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"Main entry point demonstrating cross-function calls and the data-processing pipeline.\"\"\"\n",
    "        n = x.shape[0]  # batch size of the input tensor\n",
    "\n",
    "        # Call the TIR function to perform the matrix multiplication\n",
    "        lv = self.call_tir(self.matmul, [x, w], out_sinfo=R.Tensor((n, 20), \"float32\"))\n",
    "\n",
    "        # Apply the ReLU activation function\n",
    "        lv1 = F.relu(lv)\n",
    "\n",
    "        # Call a dynamically added packed function (via the call_dps_packed interface)\n",
    "        lv2 = self.call_dps_packed(\"my_softmax\", [lv1, 1], out_sinfo=R.Tensor((n, 20), \"float32\"))\n",
    "\n",
    "        # Call a plain Python function\n",
    "        lv3 = self.my_identity_func(lv2)\n",
    "\n",
    "        return lv3\n",
    "\n",
    "    @T.prim_func\n",
    "    def matmul(\n",
    "        var_A: T.handle,\n",
    "        var_B: T.handle,\n",
    "        var_C: T.handle,\n",
    "    ):\n",
    "        \"\"\"TIR primitive function implementing matrix multiplication.\"\"\"\n",
    "        n = T.int32()  # symbolic variable representing the batch size\n",
    "        # Bind the raw handles to typed, shaped buffers\n",
    "        A = T.match_buffer(var_A, (n, 16), \"float32\")\n",
    "        B = T.match_buffer(var_B, (16, 20), \"float32\")\n",
    "        C = T.match_buffer(var_C, (n, 20), \"float32\")\n",
    "\n",
    "        # Triple loop implementing the matrix multiplication\n",
    "        for i, j, k in T.grid(n, 20, 16):\n",
    "            with T.block(\"block\"):\n",
    "                vi, vj, vk = T.axis.remap(\"SSR\", [i, j, k])\n",
    "                with T.init():\n",
    "                    C[vi, vj] = T.float32(0)\n",
    "                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]\n",
    "\n",
    "    @I.pyfunc\n",
    "    def my_identity_func(self, x: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"Identity function: returns the input tensor unchanged.\"\"\"\n",
    "        return x\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7af73b4e",
   "metadata": {},
   "source": [
    "## 测试模块的创建和实例化功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "e2b99831",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n"
     ]
    }
   ],
   "source": [
    "module = PyTorchIntegrationModule\n",
    "\n",
    "# Verify the module is callable (supports instantiation)\n",
    "assert hasattr(module, \"__call__\"), \"模块应该可调用\"\n",
    "\n",
    "# Instantiate the module on the CPU\n",
    "device = tvm.cpu(0)\n",
    "instance = module(device)\n",
    "\n",
    "# Verify the instance type and the required methods\n",
    "assert isinstance(instance, BasePyModule), \"实例应该是 BasePyModule 类型\"\n",
    "\n",
    "required_methods = [\"main\", \"call_tir\", \"call_dps_packed\"]\n",
    "for method in required_methods:\n",
    "    assert hasattr(instance, method), f\"实例应该具有方法: {method}\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "44b39b1b",
   "metadata": {},
   "source": [
    "## 测试在 GPU 上创建和实例化模块"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "c72a44d7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile one or more TIR functions: Memory verification failed with the following errors:\n",
      "    Variable `C` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `C` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `C` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `A` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `B` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "  Did you forget to bind?\n",
      "# from tvm.script import tir as T\n",
      "\n",
      "@T.prim_func\n",
      "def matmul(var_A: T.handle, B: T.Buffer((16, 20), \"float32\"), var_C: T.handle):\n",
      "    T.func_attr({\"target\": T.target({\"arch\": \"sm_86\", \"host\": {\"keys\": [\"cpu\"], \"kind\": \"llvm\", \"mtriple\": \"x86_64-unknown-linux-gnu\", \"tag\": \"\"}, \"keys\": [\"cuda\", \"gpu\"], \"kind\": \"cuda\", \"max_num_threads\": 1024, \"max_shared_memory_per_block\": 49152, \"max_threads_per_block\": 1024, \"tag\": \"\", \"thread_warp_size\": 32})})\n",
      "    n = T.int32()\n",
      "    A = T.match_buffer(var_A, (n, 16))\n",
      "    C = T.match_buffer(var_C, (n, 20))\n",
      "    for i, j, k in T.grid(n, 20, 16):\n",
      "        cse_v1: T.int32 = i * 20 + j\n",
      "        C_1 = T.Buffer((n * 20,), data=C.data)\n",
      "        if k == 0:\n",
      "            C_1[cse_v1] = T.float32(0.0)\n",
      "        A_1 = T.Buffer((n * 16,), data=A.data)\n",
      "        B_1 = T.Buffer((320,), data=B.data)\n",
      "        C_1[cse_v1] = C_1[cse_v1] + A_1[i * 16 + k] * B_1[k * 20 + j]\n",
      "Warning: Failed to compile Relax VM: Memory verification failed with the following errors:\n",
      "    Variable `C` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `C` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `C` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `A` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `B` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "  Did you forget to bind?\n",
      "# from tvm.script import tir as T\n",
      "\n",
      "@T.prim_func\n",
      "def matmul(var_A: T.handle, B: T.Buffer((16, 20), \"float32\"), var_C: T.handle):\n",
      "    T.func_attr({\"target\": T.target({\"arch\": \"sm_86\", \"host\": {\"keys\": [\"cpu\"], \"kind\": \"llvm\", \"mtriple\": \"x86_64-unknown-linux-gnu\", \"tag\": \"\"}, \"keys\": [\"cuda\", \"gpu\"], \"kind\": \"cuda\", \"max_num_threads\": 1024, \"max_shared_memory_per_block\": 49152, \"max_threads_per_block\": 1024, \"tag\": \"\", \"thread_warp_size\": 32})})\n",
      "    n = T.int32()\n",
      "    A = T.match_buffer(var_A, (n, 16))\n",
      "    C = T.match_buffer(var_C, (n, 20))\n",
      "    for i, j, k in T.grid(n, 20, 16):\n",
      "        cse_v1: T.int32 = i * 20 + j\n",
      "        C_1 = T.Buffer((n * 20,), data=C.data)\n",
      "        if k == 0:\n",
      "            C_1[cse_v1] = T.float32(0.0)\n",
      "        A_1 = T.Buffer((n * 16,), data=A.data)\n",
      "        B_1 = T.Buffer((320,), data=B.data)\n",
      "        C_1[cse_v1] = C_1[cse_v1] + A_1[i * 16 + k] * B_1[k * 20 + j]\n"
     ]
    }
   ],
   "source": [
    "module = PyTorchIntegrationModule\n",
    "\n",
    "if tvm.cuda().exist:\n",
    "    # Verify the module is callable\n",
    "    assert hasattr(module, \"__call__\"), \"模块应该可调用\"\n",
    "\n",
    "    # Instantiate the module on the GPU\n",
    "    device = tvm.cuda(0)\n",
    "    instance = module(device)\n",
    "\n",
    "    # Verify the instance type, required methods, and target device\n",
    "    assert isinstance(instance, BasePyModule), \"实例应该是 BasePyModule 类型\"\n",
    "    required_methods = [\"main\", \"call_tir\", \"call_dps_packed\"]\n",
    "    for method in required_methods:\n",
    "        assert hasattr(instance, method), f\"实例应该具有方法: {method}\"\n",
    "    assert \"cuda\" in str(instance.target)\n",
    "else:\n",
    "    # Skip this test if CUDA is unavailable\n",
    "    pytest.skip(\"CUDA not available\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d250aa8e",
   "metadata": {},
   "source": [
    "## 测试 Python 函数是否正确执行"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "0830339b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n"
     ]
    }
   ],
   "source": [
    "module = PyTorchIntegrationModule\n",
    "device = tvm.cpu(0)\n",
    "instance = module(device)\n",
    "\n",
    "# Exercise the identity function\n",
    "input_tensor = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float32)\n",
    "result = instance.my_identity_func(input_tensor)\n",
    "\n",
    "# Verify the result\n",
    "assert isinstance(result, torch.Tensor)\n",
    "assert torch.allclose(result, input_tensor, atol=1e-5)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "22ef207d",
   "metadata": {},
   "source": [
    "## 测试 TIR 函数是否正确执行"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "0ecc4dc9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n"
     ]
    }
   ],
   "source": [
    "module = PyTorchIntegrationModule\n",
    "device = tvm.cpu(0)\n",
    "instance = module(device)\n",
    "\n",
    "# Exercise the matrix-multiplication TIR function\n",
    "n = 3\n",
    "x = torch.randn(n, 16, dtype=torch.float32)\n",
    "w = torch.randn(16, 20, dtype=torch.float32)\n",
    "\n",
    "result = instance.call_tir(instance.matmul, [x, w], R.Tensor((n, 20), \"float32\"))\n",
    "\n",
    "# Verify the result type and shape\n",
    "assert isinstance(result, torch.Tensor)\n",
    "assert result.shape == (n, 20)\n",
    "\n",
    "# Verify correctness against PyTorch's own matrix multiplication\n",
    "expected = torch.matmul(x, w)\n",
    "assert torch.allclose(result, expected, atol=1e-3)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a37c67ad",
   "metadata": {},
   "source": [
    "## 测试动态添加 Python 函数的功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "734c8bbd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n"
     ]
    }
   ],
   "source": [
    "module = PyTorchIntegrationModule\n",
    "device = tvm.cpu(0)\n",
    "instance = module(device)\n",
    "\n",
    "# Define a custom activation function\n",
    "def custom_activation(x):\n",
    "    return torch.sigmoid(x)\n",
    "\n",
    "# Register the function on the module instance\n",
    "instance.add_python_function(\"custom_activation\", custom_activation)\n",
    "\n",
    "# Verify the function was added\n",
    "assert hasattr(instance, \"custom_activation\")\n",
    "assert \"custom_activation\" in instance.pyfuncs\n",
    "\n",
    "# Exercise the added function\n",
    "input_tensor = torch.tensor([1.0, -1.0, 0.0], dtype=torch.float32)\n",
    "result = instance.custom_activation(input_tensor)\n",
    "\n",
    "# Verify the result\n",
    "assert isinstance(result, torch.Tensor)\n",
    "expected = torch.sigmoid(input_tensor)\n",
    "assert torch.allclose(result, expected, atol=1e-5)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "305b38f4",
   "metadata": {},
   "source": [
    "## 测试通过 `call_dps_packed` 调用动态添加的函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e57a4912",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n"
     ]
    }
   ],
   "source": [
    "module = PyTorchIntegrationModule\n",
    "device = tvm.cpu(0)\n",
    "instance = module(device)\n",
    "\n",
    "# Define a softmax function\n",
    "def my_softmax(tensor, dim):\n",
    "    \"\"\"Custom softmax function used to exercise call_dps_packed.\"\"\"\n",
    "    # Convert a TVM NDArray to a PyTorch tensor when necessary\n",
    "    if hasattr(tensor, \"numpy\"):\n",
    "        tensor = torch.from_numpy(tensor.numpy())\n",
    "    return F.softmax(tensor, dim=dim)\n",
    "\n",
    "# Attach the function to the instance\n",
    "instance.my_softmax = my_softmax\n",
    "\n",
    "# Exercise the call_dps_packed invocation\n",
    "input_tensor = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float32)\n",
    "\n",
    "result = instance.call_dps_packed(\n",
    "    \"my_softmax\", [input_tensor, 1], R.Tensor((2, 2), \"float32\")\n",
    ")\n",
    "\n",
    "# Verify the result\n",
    "assert isinstance(result, torch.Tensor)\n",
    "expected = F.softmax(input_tensor, dim=1)\n",
    "assert torch.allclose(result, expected, atol=1e-5)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4aeeda89",
   "metadata": {},
   "source": [
    "## 测试端到端的执行流水线"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "50157e6a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n"
     ]
    }
   ],
   "source": [
    "module = PyTorchIntegrationModule\n",
    "device = tvm.cpu(0)\n",
    "instance = module(device)\n",
    "\n",
    "# Register the softmax function required by main\n",
    "def my_softmax(tensor, dim):\n",
    "    if hasattr(tensor, \"numpy\"):\n",
    "        tensor = torch.from_numpy(tensor.numpy())\n",
    "    return F.softmax(tensor, dim=dim)\n",
    "\n",
    "instance.my_softmax = my_softmax\n",
    "\n",
    "# Create test data\n",
    "n = 5\n",
    "x = torch.randn(n, 16, dtype=torch.float32)\n",
    "w = torch.randn(16, 20, dtype=torch.float32)\n",
    "\n",
    "# Run the full pipeline\n",
    "result = instance.main(x, w)\n",
    "\n",
    "# Verify the result\n",
    "assert isinstance(result, torch.Tensor)\n",
    "assert result.shape == (n, 20)\n",
    "assert result.dtype == torch.float32"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d20abd83",
   "metadata": {},
   "source": [
    "## 测试在 GPU 上的端到端执行流水线"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "fd3c79be",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile one or more TIR functions: Memory verification failed with the following errors:\n",
      "    Variable `C` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `C` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `C` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `A` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `B` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "  Did you forget to bind?\n",
      "# from tvm.script import tir as T\n",
      "\n",
      "@T.prim_func\n",
      "def matmul(var_A: T.handle, B: T.Buffer((16, 20), \"float32\"), var_C: T.handle):\n",
      "    T.func_attr({\"target\": T.target({\"arch\": \"sm_86\", \"host\": {\"keys\": [\"cpu\"], \"kind\": \"llvm\", \"mtriple\": \"x86_64-unknown-linux-gnu\", \"tag\": \"\"}, \"keys\": [\"cuda\", \"gpu\"], \"kind\": \"cuda\", \"max_num_threads\": 1024, \"max_shared_memory_per_block\": 49152, \"max_threads_per_block\": 1024, \"tag\": \"\", \"thread_warp_size\": 32})})\n",
      "    n = T.int32()\n",
      "    A = T.match_buffer(var_A, (n, 16))\n",
      "    C = T.match_buffer(var_C, (n, 20))\n",
      "    for i, j, k in T.grid(n, 20, 16):\n",
      "        cse_v1: T.int32 = i * 20 + j\n",
      "        C_1 = T.Buffer((n * 20,), data=C.data)\n",
      "        if k == 0:\n",
      "            C_1[cse_v1] = T.float32(0.0)\n",
      "        A_1 = T.Buffer((n * 16,), data=A.data)\n",
      "        B_1 = T.Buffer((320,), data=B.data)\n",
      "        C_1[cse_v1] = C_1[cse_v1] + A_1[i * 16 + k] * B_1[k * 20 + j]\n",
      "Warning: Failed to compile Relax VM: Memory verification failed with the following errors:\n",
      "    Variable `C` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `C` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `C` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `A` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "    Variable `B` is directly accessed by host memory (it is not contained in a thread environment or in the function arguments.\n",
      "  Did you forget to bind?\n",
      "# from tvm.script import tir as T\n",
      "\n",
      "@T.prim_func\n",
      "def matmul(var_A: T.handle, B: T.Buffer((16, 20), \"float32\"), var_C: T.handle):\n",
      "    T.func_attr({\"target\": T.target({\"arch\": \"sm_86\", \"host\": {\"keys\": [\"cpu\"], \"kind\": \"llvm\", \"mtriple\": \"x86_64-unknown-linux-gnu\", \"tag\": \"\"}, \"keys\": [\"cuda\", \"gpu\"], \"kind\": \"cuda\", \"max_num_threads\": 1024, \"max_shared_memory_per_block\": 49152, \"max_threads_per_block\": 1024, \"tag\": \"\", \"thread_warp_size\": 32})})\n",
      "    n = T.int32()\n",
      "    A = T.match_buffer(var_A, (n, 16))\n",
      "    C = T.match_buffer(var_C, (n, 20))\n",
      "    for i, j, k in T.grid(n, 20, 16):\n",
      "        cse_v1: T.int32 = i * 20 + j\n",
      "        C_1 = T.Buffer((n * 20,), data=C.data)\n",
      "        if k == 0:\n",
      "            C_1[cse_v1] = T.float32(0.0)\n",
      "        A_1 = T.Buffer((n * 16,), data=A.data)\n",
      "        B_1 = T.Buffer((320,), data=B.data)\n",
      "        C_1[cse_v1] = C_1[cse_v1] + A_1[i * 16 + k] * B_1[k * 20 + j]\n"
     ]
    }
   ],
   "source": [
    "module = PyTorchIntegrationModule\n",
    "\n",
    "if tvm.cuda().exist:\n",
    "    device = tvm.cuda(0)\n",
    "    instance = module(device)\n",
    "\n",
    "    # Exercise basic GPU functionality (without complex TIR operations)\n",
    "    assert isinstance(instance, BasePyModule)\n",
    "    assert \"cuda\" in str(instance.target)\n",
    "\n",
    "    # Create and use GPU tensors\n",
    "    n = 5\n",
    "    x = torch.randn(n, 16, dtype=torch.float32, device=\"cuda\")\n",
    "    w = torch.randn(16, 20, dtype=torch.float32, device=\"cuda\")\n",
    "\n",
    "    # Verify tensor devices and shapes\n",
    "    assert x.device.type == \"cuda\"\n",
    "    assert w.device.type == \"cuda\"\n",
    "    assert x.shape == (n, 16)\n",
    "    assert w.shape == (16, 20)\n",
    "\n",
    "    # Exercise a basic PyTorch operation on the GPU\n",
    "    result = torch.matmul(x, w)\n",
    "    assert isinstance(result, torch.Tensor)\n",
    "    assert result.shape == (n, 20)\n",
    "    assert result.dtype == torch.float32\n",
    "    assert result.device.type == \"cuda\"\n",
    "else:\n",
    "    # Skip this test if CUDA is unavailable\n",
    "    pytest.skip(\"CUDA not available\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b4decbc9",
   "metadata": {},
   "source": [
    "## 测试不同函数类型之间的数据流"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "cfe1ef3e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n"
     ]
    }
   ],
   "source": [
    "module = PyTorchIntegrationModule\n",
    "device = tvm.cpu(0)\n",
    "instance = module(device)\n",
    "\n",
    "# Register the required softmax function\n",
    "def my_softmax(tensor, dim):\n",
    "    if hasattr(tensor, \"numpy\"):\n",
    "        tensor = torch.from_numpy(tensor.numpy())\n",
    "    return F.softmax(tensor, dim=dim)\n",
    "\n",
    "instance.my_softmax = my_softmax\n",
    "\n",
    "# Create test data\n",
    "n = 4\n",
    "x = torch.randn(n, 16, dtype=torch.float32)\n",
    "w = torch.randn(16, 20, dtype=torch.float32)\n",
    "\n",
    "# Execute step by step to verify the data flow\n",
    "# Step 1: TIR matrix multiplication\n",
    "lv = instance.call_tir(instance.matmul, [x, w], R.Tensor((n, 20), \"float32\"))\n",
    "assert isinstance(lv, torch.Tensor)\n",
    "assert lv.shape == (n, 20)\n",
    "\n",
    "# Step 2: ReLU activation\n",
    "lv1 = F.relu(lv)\n",
    "assert isinstance(lv1, torch.Tensor)\n",
    "assert lv1.shape == (n, 20)\n",
    "\n",
    "# Step 3: apply softmax through call_dps_packed\n",
    "lv2 = instance.call_dps_packed(\"my_softmax\", [lv1, 1], R.Tensor((n, 20), \"float32\"))\n",
    "assert isinstance(lv2, torch.Tensor)\n",
    "assert lv2.shape == (n, 20)\n",
    "\n",
    "# Step 4: apply the identity function\n",
    "lv3 = instance.my_identity_func(lv2)\n",
    "assert isinstance(lv3, torch.Tensor)\n",
    "assert lv3.shape == (n, 20)\n",
    "\n",
    "# Verify the final result matches the expected computation\n",
    "expected = F.softmax(F.relu(torch.matmul(x, w)), dim=1)\n",
    "assert torch.allclose(lv3, expected, atol=1e-3)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0f8900fb",
   "metadata": {},
   "source": [
    "## 测试各种边缘情况下的错误处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "3256747d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n"
     ]
    }
   ],
   "source": [
    "module = PyTorchIntegrationModule\n",
    "device = tvm.cpu(0)\n",
    "instance = module(device)\n",
    "\n",
    "# Calling a non-existent function should raise\n",
    "with pytest.raises(Exception):\n",
    "    instance.call_dps_packed(\n",
    "        \"non_existent_function\", [torch.tensor([1.0])], R.Tensor((1,), \"float32\")\n",
    "    )\n",
    "\n",
    "# Passing a tensor with a mismatched shape should raise\n",
    "x = torch.randn(3, 16, dtype=torch.float32)\n",
    "w = torch.randn(15, 20, dtype=torch.float32)  # wrong shape\n",
    "\n",
    "with pytest.raises(Exception):\n",
    "    instance.call_tir(instance.matmul, [x, w], R.Tensor((3, 20), \"float32\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d719411f",
   "metadata": {},
   "source": [
    "## 测试张量类型在整个执行过程中的保留"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "467028f2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n"
     ]
    }
   ],
   "source": [
    "module = PyTorchIntegrationModule\n",
    "device = tvm.cpu(0)\n",
    "instance = module(device)\n",
    "\n",
    "# Register the required softmax function\n",
    "def my_softmax(tensor, dim):\n",
    "    if hasattr(tensor, \"numpy\"):\n",
    "        tensor = torch.from_numpy(tensor.numpy())\n",
    "    return F.softmax(tensor, dim=dim)\n",
    "\n",
    "instance.my_softmax = my_softmax\n",
    "\n",
    "# Test the float32 dtype (the TIR function is hard-coded to float32)\n",
    "test_dtype = torch.float32\n",
    "n = 3\n",
    "x = torch.randn(n, 16, dtype=test_dtype)\n",
    "w = torch.randn(16, 20, dtype=test_dtype)\n",
    "\n",
    "# Run the full pipeline\n",
    "result = instance.main(x, w)\n",
    "\n",
    "# Verify the dtype is preserved\n",
    "assert result.dtype == test_dtype\n",
    "assert isinstance(result, torch.Tensor)\n",
    "assert result.shape == (n, 20)\n",
    "assert result.dtype == torch.float32  # NOTE(review): redundant with the test_dtype check above"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "318f088a",
   "metadata": {},
   "source": [
    "## 测试批处理多个输入的功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "e770f5d2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Warning: Failed to compile Relax VM: 'NoneType' object has no attribute 'kind'\n"
     ]
    }
   ],
   "source": [
    "module = PyTorchIntegrationModule\n",
    "device = tvm.cpu(0)\n",
    "instance = module(device)\n",
    "\n",
    "# Register the required softmax function\n",
    "def my_softmax(tensor, dim):\n",
    "    if hasattr(tensor, \"numpy\"):\n",
    "        tensor = torch.from_numpy(tensor.numpy())\n",
    "    return F.softmax(tensor, dim=dim)\n",
    "\n",
    "instance.my_softmax = my_softmax\n",
    "\n",
    "# Process multiple inputs\n",
    "batch_size = 5\n",
    "results = []\n",
    "\n",
    "for i in range(batch_size):\n",
    "    n = 3 + i  # varying batch size\n",
    "    x = torch.randn(n, 16, dtype=torch.float32)\n",
    "    w = torch.randn(16, 20, dtype=torch.float32)\n",
    "\n",
    "    result = instance.main(x, w)\n",
    "    results.append(result)\n",
    "\n",
    "    assert isinstance(result, torch.Tensor)\n",
    "    assert result.shape == (n, 20)\n",
    "\n",
    "# Verify all results are valid\n",
    "assert len(results) == batch_size\n",
    "for result in results:\n",
    "    assert isinstance(result, torch.Tensor)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e8bc0c47",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py313",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
