{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "7453079d",
   "metadata": {},
   "source": [
    "# Relay BYODT 框架\n",
    "\n",
    "BYODT(Bring Your Own Datatype) 框架允许用户在 TVM 中注册和使用自定义数据类型。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a04c4e30",
   "metadata": {
    "tags": [
     "hide-output"
    ]
   },
   "outputs": [],
   "source": [
    "import set_env"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "9da964f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pytest\n",
    "\n",
    "import tvm\n",
    "import tvm.topi.testing\n",
    "import tvm.testing\n",
    "from tvm import relay\n",
    "from tvm.relay.testing.layers import batch_norm_infer\n",
    "from tvm.target.datatype import (\n",
    "    create_lower_func,  # 创建自定义数据类型的Lower函数\n",
    "    create_min_lower_func,  # 创建最小值Lower函数\n",
    "    lower_call_pure_extern,  # 处理外部函数调用\n",
    "    lower_ite,  # 处理条件分支\n",
    "    register,  # 注册自定义数据类型\n",
    "    register_min_func,  # 注册最小值函数\n",
    "    register_op,  # 注册操作\n",
    ")\n",
    "from tvm.tir.op import call_pure_extern\n",
    "from tvm.script import tir as T"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "61e17042",
   "metadata": {},
   "source": [
    "## 设置 `myfloat` 自定义数据类型\n",
    "\n",
    "注册名为 `myfloat` 的自定义数据类型（底层是 `float`），并注册相应的算子函数，使 TVM 能够正确处理这种自定义数据类型。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "08fcea32",
   "metadata": {},
   "source": [
    "要使用外部库中的数据类型算子，应首先加载包含该数据类型实现的库：\n",
    "```\n",
    "CDLL(\"libposit.so\", RTLD_GLOBAL)\n",
    "```\n",
    "本示例中，使用的数据类型库已内置在 TVM 中，因此无需显式加载任何库："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "b8b7879b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def setup_myfloat():\n",
    "    \"\"\"Register the custom 'myfloat' datatype and lower its operators.\n",
    "\n",
    "    'myfloat' is stored in 32 bits and every operation on it is lowered to\n",
    "    an extern C function that is already built into TVM, so no external\n",
    "    library has to be loaded here.\n",
    "    \"\"\"\n",
    "    # Any type code above 128 that no other custom datatype uses is fine.\n",
    "    register(\"myfloat\", 131)\n",
    "    # Casts between float32 and the 32-bit custom storage format.\n",
    "    register_op(\n",
    "        create_lower_func({(32, 32): \"FloatToCustom32\"}), \"Cast\", \"llvm\", \"float\", \"myfloat\"\n",
    "    )\n",
    "    register_op(\n",
    "        create_lower_func({(32, 32): \"Custom32ToFloat\"}), \"Cast\", \"llvm\", \"myfloat\", \"float\"\n",
    "    )\n",
    "    # Elementwise arithmetic on myfloat values, one extern per operator.\n",
    "    register_op(create_lower_func({32: \"Custom32Add\"}), \"Add\", \"llvm\", \"myfloat\")\n",
    "    register_op(create_lower_func({32: \"Custom32Sub\"}), \"Sub\", \"llvm\", \"myfloat\")\n",
    "    register_op(create_lower_func({32: \"Custom32Mul\"}), \"Mul\", \"llvm\", \"myfloat\")\n",
    "    register_op(create_lower_func({32: \"Custom32Div\"}), \"Div\", \"llvm\", \"myfloat\")\n",
    "    register_op(create_lower_func({32: \"Custom32Max\"}), \"Max\", \"llvm\", \"myfloat\")\n",
    "    # Float immediates are materialized by converting from float32.\n",
    "    register_op(create_lower_func({32: \"FloatToCustom32\"}), \"FloatImm\", \"llvm\", \"myfloat\")\n",
    "    # Math intrinsics, each lowered to its matching extern call.\n",
    "    for intrinsic_name, extern_name in [\n",
    "        (\"tir.sqrt\", \"Custom32Sqrt\"),\n",
    "        (\"tir.exp\", \"Custom32Exp\"),\n",
    "        (\"tir.log\", \"Custom32Log\"),\n",
    "        (\"tir.sigmoid\", \"Custom32Sigmoid\"),\n",
    "        (\"tir.tanh\", \"Custom32Tanh\"),\n",
    "    ]:\n",
    "        register_op(\n",
    "            create_lower_func({32: extern_name}),\n",
    "            \"Call\",\n",
    "            \"llvm\",\n",
    "            \"myfloat\",\n",
    "            intrinsic_name=intrinsic_name,\n",
    "        )\n",
    "    # Conditional select on myfloat values.\n",
    "    register_op(lower_ite, \"Call\", \"llvm\", \"myfloat\", intrinsic_name=\"tir.if_then_else\")\n",
    "    # Pure extern calls that produce/consume myfloat values.\n",
    "    register_op(\n",
    "        lower_call_pure_extern, \"Call\", \"llvm\", \"myfloat\", intrinsic_name=\"tir.call_pure_extern\"\n",
    "    )\n",
    "\n",
    "    # Minimum-representable-value helper for myfloat.\n",
    "    register_min_func(create_min_lower_func({32: \"MinCustom32\"}, \"myfloat\"), \"myfloat\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a56dd6ec",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "456767ac",
   "metadata": {
    "tags": [
     "hide-cell"
    ]
   },
   "outputs": [],
   "source": [
    "def convert_ndarray(dst_dtype, array):\n",
    "    \"\"\"Cast a NumPy array to ``dst_dtype`` via a one-op Relay program.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    dst_dtype : str\n",
    "        Target datatype string, e.g. \"custom[myfloat]32\".\n",
    "    array : numpy.ndarray\n",
    "        Data to convert.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "        ``array`` converted to ``dst_dtype``.\n",
    "    \"\"\"\n",
    "    src_var = relay.var(\"x\", shape=array.shape, dtype=str(array.dtype))\n",
    "    cast_fn = relay.Function([src_var], src_var.astype(dst_dtype))\n",
    "    # Custom datatypes may not support vectorized codegen yet, so disable it.\n",
    "    with tvm.transform.PassContext(config={\"tir.disable_vectorize\": True}):\n",
    "        run_cast = relay.create_executor(\"graph\").evaluate(cast_fn)\n",
    "        return run_cast(array)\n",
    "\n",
    "def change_dtype(src, dst, module, params):\n",
    "    \"\"\"Rewrite a module and its params from datatype ``src`` to ``dst``.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    src : str\n",
    "        Source datatype, e.g. \"float32\".\n",
    "    dst : str\n",
    "        Target datatype, e.g. \"custom[myfloat]32\".\n",
    "    module : tvm.IRModule\n",
    "        Relay module to rewrite.\n",
    "    params : dict\n",
    "        Parameter values keyed by name.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    tuple\n",
    "        ``(converted module, converted params dict)``.\n",
    "    \"\"\"\n",
    "    # Swap every tensor datatype, then re-run type inference so that all\n",
    "    # type annotations agree with the new datatype.\n",
    "    converted_module = relay.frontend.ChangeDatatype(src, dst)(module)\n",
    "    converted_module = relay.transform.InferType()(converted_module)\n",
    "    converted_params = {name: convert_ndarray(dst, value) for name, value in params.items()}\n",
    "    return converted_module, converted_params\n",
    "\n",
    "\n",
    "def compare(module, input, src_dtype, dst_dtype, rtol, atol, params=None, target=\"llvm\"):\n",
    "    \"\"\"Check that a module produces matching results under two datatypes.\n",
    "\n",
    "    The module is first evaluated with ``src_dtype`` to obtain a reference\n",
    "    result, then converted to ``dst_dtype`` and evaluated again; the second\n",
    "    result is cast back to ``src_dtype`` and compared against the reference.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    module : tvm.IRModule\n",
    "        Relay module under test.\n",
    "    input : sequence of numpy.ndarray\n",
    "        Positional inputs for the module.\n",
    "    src_dtype : str\n",
    "        Reference datatype, e.g. \"float32\".\n",
    "    dst_dtype : str\n",
    "        Datatype under test, e.g. \"custom[myfloat]32\".\n",
    "    rtol, atol : float\n",
    "        Relative/absolute tolerances for the comparison.\n",
    "    params : dict, optional\n",
    "        Named model parameters; defaults to no parameters. A ``None``\n",
    "        sentinel replaces the former mutable ``{}`` default, which is a\n",
    "        shared object across calls and a classic Python pitfall.\n",
    "    target : str\n",
    "        Target platform for the converted module, default \"llvm\".\n",
    "\n",
    "    Raises\n",
    "    ------\n",
    "    AssertionError\n",
    "        If the two results differ by more than the given tolerances.\n",
    "    \"\"\"\n",
    "    if params is None:\n",
    "        params = {}\n",
    "    module = relay.transform.InferType()(module)\n",
    "    module = relay.transform.SimplifyInference()(module)\n",
    "\n",
    "    # Reference run in the source datatype (treated as the \"correct\" result).\n",
    "    correct = relay.create_executor(\"graph\", mod=module).evaluate()(*input, **params)\n",
    "    # Convert the module, its params, and the inputs to the target datatype.\n",
    "    module, converted_params = change_dtype(src_dtype, dst_dtype, module, params)\n",
    "    x_converted = [convert_ndarray(dst_dtype, arr) for arr in input]\n",
    "\n",
    "    # Custom datatypes may not support vectorized codegen yet, so disable it.\n",
    "    with tvm.transform.PassContext(config={\"tir.disable_vectorize\": True}):\n",
    "        maybe_correct = relay.create_executor(\"graph\", mod=module, target=target).evaluate()(\n",
    "            *x_converted, **converted_params\n",
    "        )\n",
    "        # NOTE: only single-output modules are supported by this comparison.\n",
    "        # Cast the result back to the source datatype so it can be compared.\n",
    "        maybe_correct_converted = convert_ndarray(src_dtype, maybe_correct)\n",
    "    # Both runs must agree within the given tolerances.\n",
    "    np.testing.assert_allclose(\n",
    "        maybe_correct_converted.numpy(), correct.numpy(), rtol=rtol, atol=atol\n",
    "    )\n",
    "\n",
    "\n",
    "def run_ops(src_dtype, dst_dtype, rtol=1e-7, atol=1e-7):\n",
    "    \"\"\"Run the same operators under two datatypes and compare the results.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    src_dtype : str\n",
    "        Reference datatype, usually a standard one such as float32.\n",
    "    dst_dtype : str\n",
    "        Datatype under test, usually a custom one such as myfloat.\n",
    "    rtol, atol : float\n",
    "        Relative/absolute comparison tolerances.\n",
    "    \"\"\"\n",
    "    # Shape for unary ops and for the first operand of binary ops.\n",
    "    shape1 = (5, 10, 5)\n",
    "    # Shape for the second operand of binary ops.\n",
    "    shape2 = (5,)\n",
    "\n",
    "    def check_unary_op(op, src_dtype, dst_dtype, shape):\n",
    "        \"\"\"Check one unary op's results under both datatypes.\n",
    "\n",
    "        Parameters\n",
    "        ----------\n",
    "        op : callable\n",
    "            Relay operator to test.\n",
    "        src_dtype, dst_dtype : str\n",
    "            Reference datatype and datatype under test.\n",
    "        shape : tuple of int\n",
    "            Shape of the input tensor.\n",
    "        \"\"\"\n",
    "        t1 = relay.TensorType(shape, src_dtype)\n",
    "        x = relay.var(\"x\", t1)\n",
    "        z = op(x)  # apply the op to the input variable\n",
    "        x_data = np.random.rand(*shape).astype(t1.dtype)  # random test data\n",
    "\n",
    "        # Build an IRModule from the expression.\n",
    "        module = tvm.IRModule.from_expr(relay.Function([x], z))\n",
    "\n",
    "        # Compare results between the two datatypes.\n",
    "        compare(module, (x_data,), src_dtype, dst_dtype, rtol, atol)\n",
    "\n",
    "    # Exercise a range of unary ops.\n",
    "    for op in [\n",
    "        relay.nn.softmax,      # softmax activation\n",
    "        tvm.relay.log,         # natural logarithm\n",
    "        tvm.relay.exp,         # exponential\n",
    "        tvm.relay.sqrt,        # square root\n",
    "        tvm.relay.rsqrt,       # reciprocal square root\n",
    "        tvm.relay.sigmoid,     # sigmoid activation\n",
    "        tvm.relay.tanh,        # hyperbolic tangent\n",
    "        relay.nn.relu,         # ReLU activation\n",
    "        relay.nn.batch_flatten,  # flatten all but the batch dimension\n",
    "    ]:\n",
    "        check_unary_op(op, src_dtype, dst_dtype, shape1)\n",
    "\n",
    "    # Pooling ops, exercised on 4-D data.\n",
    "    for op in [relay.nn.max_pool2d, relay.nn.avg_pool2d, relay.nn.global_avg_pool2d]:\n",
    "        shape_2d = (3, 32, 32, 32)  # 4-D shape: (batch, channels, height, width)\n",
    "        check_unary_op(op, src_dtype, dst_dtype, shape_2d)\n",
    "\n",
    "    def check_binary_op(opfunc, src_dtype, dst_dtype):\n",
    "        \"\"\"Check one binary op's results under both datatypes.\n",
    "\n",
    "        Parameters\n",
    "        ----------\n",
    "        opfunc : callable\n",
    "            Binary Relay operator to test.\n",
    "        src_dtype, dst_dtype : str\n",
    "            Reference datatype and datatype under test.\n",
    "        \"\"\"\n",
    "        t1 = relay.TensorType(shape1, src_dtype)\n",
    "        t2 = relay.TensorType(shape2, src_dtype)\n",
    "        x = relay.var(\"x\", t1)\n",
    "        y = relay.var(\"y\", t2)\n",
    "        z = opfunc(x, y)  # apply the binary op to both input variables\n",
    "        x_data = np.random.rand(*shape1).astype(t1.dtype)  # random data for the first input\n",
    "        y_data = np.random.rand(*shape2).astype(t2.dtype)  # random data for the second input\n",
    "        module = tvm.IRModule.from_expr(relay.Function([x, y], z))\n",
    "\n",
    "        # Compare results between the two datatypes.\n",
    "        compare(module, (x_data, y_data), src_dtype, dst_dtype, rtol, atol)\n",
    "\n",
    "    # Exercise the elementwise binary ops.\n",
    "    for op in [\n",
    "        relay.add,       # addition\n",
    "        relay.subtract,  # subtraction\n",
    "        relay.divide,    # division\n",
    "        relay.multiply,  # multiplication\n",
    "    ]:\n",
    "        check_binary_op(op, src_dtype, dst_dtype)\n",
    "\n",
    "    # NOTE: we would have liked to unit-test tvm_if_then_else here, but a\n",
    "    # Relay IfNode is not converted to that intrinsic, so to stay consistent\n",
    "    # with Relay we skip it.\n",
    "    # tvm_if_then_else is still exercised by the mobile_net model test.\n",
    "\n",
    "\n",
    "def run_model(get_workload, input, src_dtype, dst_dtype, rtol=1e-4, atol=1e-4):\n",
    "    \"\"\"Evaluate a full model under both datatypes and compare the outputs.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    get_workload : callable\n",
    "        Zero-argument function returning ``(module, params)``.\n",
    "    input : sequence\n",
    "        Model input data.\n",
    "    src_dtype, dst_dtype : str\n",
    "        Reference datatype and datatype under test.\n",
    "    rtol, atol : float\n",
    "        Relative/absolute comparison tolerances.\n",
    "    \"\"\"\n",
    "    workload_module, workload_params = get_workload()\n",
    "\n",
    "    # Use the caller-provided input instead of generating random data here:\n",
    "    # random inputs would keep the output values within a similar range.\n",
    "    compare(workload_module, input, src_dtype, dst_dtype, rtol, atol, workload_params)\n",
    "\n",
    "\n",
    "def run_conv2d(src_dtype, dst_dtype, rtol=1e-7, atol=1e-4):\n",
    "    \"\"\"Test conv2d variants under two datatypes and compare the results.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    src_dtype, dst_dtype : str\n",
    "        Reference datatype and datatype under test.\n",
    "    rtol, atol : float\n",
    "        Relative/absolute comparison tolerances.\n",
    "    \"\"\"\n",
    "    def run_test_conv2d(\n",
    "        src_dtype,\n",
    "        dst_dtype,\n",
    "        scale,  # scale of the random data range\n",
    "        dshape, # input data shape\n",
    "        kshape, # kernel shape\n",
    "        padding=(1, 1),  # padding size\n",
    "        groups=1,        # number of groups for grouped convolution\n",
    "        dilation=(1, 1), # dilation rate\n",
    "        **attrs,         # extra conv2d attributes\n",
    "    ):\n",
    "        \"\"\"Run one conv2d configuration under both datatypes.\n",
    "\n",
    "        Parameters\n",
    "        ----------\n",
    "        scale : float\n",
    "            Scale of the random data range.\n",
    "        dshape, kshape : tuple of int\n",
    "            Input data shape and kernel shape.\n",
    "        padding, groups, dilation\n",
    "            Standard conv2d attributes.\n",
    "        **attrs\n",
    "            Extra conv2d attributes.\n",
    "        \"\"\"\n",
    "        x = relay.var(\"x\", shape=dshape, dtype=src_dtype)  # input variable\n",
    "        w = relay.var(\"w\", shape=kshape, dtype=src_dtype)  # kernel variable\n",
    "        # Build the conv2d op.\n",
    "        y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)\n",
    "        # Build an IRModule from the expression.\n",
    "        module = tvm.IRModule.from_expr(relay.Function([x, w], y))\n",
    "        # Random test data for the input and the kernel.\n",
    "        data = np.random.uniform(-scale, scale, size=dshape).astype(src_dtype)\n",
    "        kernel = np.random.uniform(-scale, scale, size=kshape).astype(src_dtype)\n",
    "\n",
    "        # Compare results between the two datatypes.\n",
    "        compare(module, (data, kernel), src_dtype, dst_dtype, rtol, atol)\n",
    "\n",
    "    # Depthwise convolution.\n",
    "    dshape = (1, 32, 18, 18)  # input shape: (batch, channels, height, width)\n",
    "    kshape = (32, 1, 3, 3)    # kernel shape: (out_channels, in_channels/groups, height, width)\n",
    "    run_test_conv2d(\n",
    "        src_dtype,\n",
    "        dst_dtype,\n",
    "        1,  # scale factor of 1\n",
    "        dshape,\n",
    "        kshape,\n",
    "        padding=(1, 1),\n",
    "        channels=32,     # output channels\n",
    "        groups=32,       # groups == channels, i.e. depthwise convolution\n",
    "        kernel_size=(3, 3),  # kernel size\n",
    "    )\n",
    "\n",
    "    # NOTE: CUDA is disabled for the 'direct' schedule:\n",
    "    # https://github.com/dmlc/tvm/pull/3070#issuecomment-486597553\n",
    "    # Grouped convolution.\n",
    "    dshape = (1, 32, 18, 18)\n",
    "    kshape = (32, 4, 3, 3)  # 32 = 8 * 4: 8 groups with 4 channels each\n",
    "    run_test_conv2d(\n",
    "        src_dtype,\n",
    "        dst_dtype,\n",
    "        1,\n",
    "        dshape,\n",
    "        kshape,\n",
    "        padding=(1, 1),\n",
    "        channels=32,     # output channels\n",
    "        groups=8,        # 8 groups\n",
    "        kernel_size=(3, 3),\n",
    "    )\n",
    "    # Another grouped-convolution test.\n",
    "    dshape = (1, 32, 18, 18)\n",
    "    kshape = (64, 1, 3, 3)  # 64 = 32 * 2: 32 groups\n",
    "    run_test_conv2d(\n",
    "        src_dtype,\n",
    "        dst_dtype,\n",
    "        1,\n",
    "        dshape,\n",
    "        kshape,\n",
    "        padding=(1, 1),\n",
    "        channels=64,     # output channels\n",
    "        groups=32,       # 32 groups\n",
    "        kernel_size=(3, 3),\n",
    "    )\n",
    "\n",
    "    # Plain convolution.\n",
    "    dshape = (1, 3, 224, 224)  # standard image input size\n",
    "    kshape = (10, 3, 3, 3)     # ten 3x3 kernels, three input channels each\n",
    "    run_test_conv2d(\n",
    "        src_dtype, dst_dtype, 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3, 3)\n",
    "    )\n",
    "\n",
    "    # Dilated (atrous) convolution.\n",
    "    dshape = (1, 3, 18, 18)\n",
    "    kshape = (10, 3, 3, 3)\n",
    "    run_test_conv2d(\n",
    "        src_dtype,\n",
    "        dst_dtype,\n",
    "        1,\n",
    "        dshape,\n",
    "        kshape,\n",
    "        padding=(1, 1),\n",
    "        channels=10,\n",
    "        kernel_size=(3, 3),\n",
    "        dilation=(3, 3),  # dilation rate of 3\n",
    "    )\n",
    "\n",
    "def run_batchnorm(src_dtype, dst_dtype, rtol=1e-6, atol=1e-6):\n",
    "    \"\"\"Compare batch-norm inference results between two datatypes.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    src_dtype : str\n",
    "        Reference datatype, usually a standard one such as \"float32\".\n",
    "    dst_dtype : str\n",
    "        Datatype under test, usually a custom one such as myfloat.\n",
    "    rtol, atol : float\n",
    "        Relative/absolute comparison tolerances (default 1e-6).\n",
    "    \"\"\"\n",
    "    data_shape = (3, 32, 32)\n",
    "    tensor_type = relay.TensorType(data_shape, src_dtype)\n",
    "    data_var = relay.var(\"x\", tensor_type)\n",
    "    # Batch-norm inference layer without a scale parameter.\n",
    "    bn_out = batch_norm_infer(data=data_var, epsilon=2e-5, scale=False, name=\"bn_x\")\n",
    "    # Capture every free variable so the batch-norm params become inputs.\n",
    "    func = relay.Function(relay.analysis.free_vars(bn_out), bn_out)\n",
    "    module = tvm.IRModule.from_expr(func)\n",
    "\n",
    "    data_np = np.random.rand(*data_shape).astype(tensor_type.dtype)\n",
    "    # One zero array reused for all four batch-norm parameter inputs\n",
    "    # (presumably offset / moving mean / moving variance etc. -- one per\n",
    "    # free variable besides x; verify against batch_norm_infer).\n",
    "    bn_param = np.zeros((32), \"float32\")\n",
    "    compare(\n",
    "        module,\n",
    "        (data_np, bn_param, bn_param, bn_param, bn_param),\n",
    "        src_dtype,\n",
    "        dst_dtype,\n",
    "        rtol,\n",
    "        atol,\n",
    "    )\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "8ce1f50c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Register the myfloat datatype once, then exercise it end to end.\n",
    "setup_myfloat()\n",
    "# \"custom[myfloat]32\" selects the registered custom datatype, 32-bit storage.\n",
    "run_ops(\"float32\", \"custom[myfloat]32\", rtol=1e-6, atol=1e-6)\n",
    "run_conv2d(\"float32\", \"custom[myfloat]32\", rtol=1e-6, atol=1e-6)\n",
    "run_batchnorm(\"float32\", \"custom[myfloat]32\", rtol=1e-6, atol=1e-6)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0ed074c0",
   "metadata": {},
   "source": [
    "## 测试 TVM 自定义数据类型（myfloat）的TIR（Tensor IR）降低（Lowering）过程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "8a1a09c9",
   "metadata": {},
   "outputs": [],
   "source": [
    "class TestMyfloatLowering(tvm.testing.CompareBeforeAfter):\n",
    "    \"\"\"Test the TIR lowering of the custom 'myfloat' datatype.\n",
    "\n",
    "    Inherits CompareBeforeAfter to check that the LowerCustomDatatypes pass\n",
    "    rewrites a myfloat addition into extern calls on the uint32 storage\n",
    "    type, by comparing the IR before and after the transform.\n",
    "    \"\"\"\n",
    "    # NOTE(review): myfloat must already be registered (setup_myfloat() was\n",
    "    # called in an earlier cell) when the transform below is constructed.\n",
    "    # setup_myfloat()\n",
    "\n",
    "    # The transform under test: lowering of custom datatypes.\n",
    "    transform = tvm.tir.transform.LowerCustomDatatypes()\n",
    "\n",
    "    def before(self):\n",
    "        \"\"\"Original TIR function using the myfloat custom datatype.\n",
    "\n",
    "        Returns:\n",
    "            The pre-lowering TIR function.\n",
    "        \"\"\"\n",
    "        # Custom datatype identifier.\n",
    "        dtype = \"custom[myfloat]32\"\n",
    "\n",
    "        @T.prim_func\n",
    "        def func(A_data: T.handle(dtype)):\n",
    "            # Target LLVM.\n",
    "            T.func_attr({\"target\": T.target(\"llvm\")})\n",
    "            # View the raw handle as a myfloat buffer.\n",
    "            A = T.Buffer(16, dtype=dtype, data=A_data)\n",
    "            # Allocate a myfloat output buffer.\n",
    "            B_data = T.allocate([16], dtype=dtype)\n",
    "            B = T.Buffer(16, dtype=dtype, data=B_data)\n",
    "            # Elementwise add of 1.0 to every element.\n",
    "            for i in range(16):\n",
    "                B[i] = A[i] + 1.0\n",
    "\n",
    "        return func\n",
    "\n",
    "    def expected(self):\n",
    "        \"\"\"Expected TIR function after lowering the custom datatype.\n",
    "\n",
    "        Returns:\n",
    "            The post-lowering TIR function.\n",
    "        \"\"\"\n",
    "        # Custom datatype identifier.\n",
    "        dtype = \"custom[myfloat]32\"\n",
    "\n",
    "        @T.prim_func\n",
    "        def func(A_data: T.handle(dtype)):\n",
    "            # Target LLVM.\n",
    "            T.func_attr({\"target\": T.target(\"llvm\")})\n",
    "            # The myfloat input buffer viewed as its uint32 storage type.\n",
    "            A_uint32 = T.Buffer(16, \"uint32\", data=A_data)\n",
    "            # The output buffer is likewise allocated as uint32.\n",
    "            B_data = T.allocate([16], dtype=\"uint32\")\n",
    "            B_uint32 = T.Buffer(16, \"uint32\", data=B_data)\n",
    "            # The add is lowered to extern calls: to-float, add, back-to-myfloat.\n",
    "            for i in range(16):\n",
    "                B_uint32[i] = T.call_pure_extern(\n",
    "                    \"uint32\",\n",
    "                    \"FloatToCustom32\",  # convert the float32 result back to myfloat\n",
    "                    T.call_pure_extern(\"float32\", \"Custom32ToFloat\", A_uint32[i]) + T.float32(1),  # myfloat -> float32, then add\n",
    "                )\n",
    "\n",
    "        return func\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5f8c1552",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "aix",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
