{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "4b810bf7",
   "metadata": {},
   "source": [
    "# `topi.qnn`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bc48a3bd",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\"\"\"Test code for QNN operators.\"\"\"\n",
    "import numpy as np\n",
    "import tvm\n",
    "from tvm import topi, relay, te\n",
    "from tvm.contrib import graph_executor\n",
    "import tvm.topi.testing\n",
    "\n",
    "\n",
    "def verify_simulated_quantize(data_shape, out_dtype, channels, axis):\n",
    "    \"\"\"\n",
    "    Verify the simulated quantize operation.\n",
    "\n",
    "    Compares the output of the simulated quantize op against the real QNN\n",
    "    quantize op to confirm the simulation is accurate.\n",
    "\n",
    "    Parameters:\n",
    "    data_shape (tuple): shape of the input data\n",
    "    out_dtype (str): output data type, e.g. 'int8', 'uint8', 'int32'\n",
    "    channels (list): channel count; sets the dimension of the scale and zero point\n",
    "    axis (int): quantization axis\n",
    "\n",
    "    Returns:\n",
    "    None\n",
    "    \"\"\"\n",
    "    # Create placeholder variables for all QNN inputs.\n",
    "    A = te.placeholder(data_shape, name=\"value\", dtype=\"float32\")\n",
    "    D = te.placeholder([], name=\"dtype\", dtype=\"int32\")\n",
    "    S = te.placeholder([te.size_var(\"scale_dim\")], name=\"scale\", dtype=\"float32\")\n",
    "    Z = te.placeholder([te.size_var(\"zp_dim\")], name=\"zp\", dtype=\"int32\")\n",
    "    # Create the simulated quantize op.\n",
    "    SIM_Q = topi.nn.simulated_quantize(A, D, output_scale=S, output_zero_point=Z, axis=axis)\n",
    "\n",
    "    # Create random numpy values for the inputs.\n",
    "    a_np = np.random.uniform(size=data_shape).astype(\"float32\")\n",
    "    d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[out_dtype])  # map the dtype name to its code\n",
    "    s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype(\"float32\")  # scale\n",
    "    z_np = np.random.uniform(low=-10, high=10, size=channels).astype(\"int32\")  # zero point\n",
    "    q_np = np.zeros(shape=data_shape, dtype=\"float32\")  # output placeholder\n",
    "\n",
    "    def check_target(target, dev):\n",
    "        \"\"\"\n",
    "        Check the correctness of simulated quantize on a specific target device.\n",
    "\n",
    "        Parameters:\n",
    "        target: TVM target\n",
    "        dev: TVM device context\n",
    "\n",
    "        Returns:\n",
    "        None\n",
    "        \"\"\"\n",
    "        # Wrap the numpy arrays as tvm.nd arrays.\n",
    "        a = tvm.nd.array(a_np, dev)\n",
    "        d = tvm.nd.array(d_np, dev)\n",
    "        s = tvm.nd.array(s_np, dev)\n",
    "        z = tvm.nd.array(z_np, dev)\n",
    "        q = tvm.nd.array(q_np, dev)\n",
    "\n",
    "        # Construct the equivalent relay graph.\n",
    "        per_channel = channels[0] != 1  # whether this is per-channel quantization\n",
    "        a_var = relay.var(\"a\", shape=data_shape, dtype=\"float32\")\n",
    "        if per_channel:\n",
    "            s_var = relay.const(s_np)\n",
    "            z_var = relay.const(z_np)\n",
    "        else:\n",
    "            s_var = relay.const(s_np[0])\n",
    "            z_var = relay.const(z_np[0])\n",
    "        # Create the real QNN quantize op.\n",
    "        real_q_op = relay.qnn.quantize(a_var, s_var, z_var, axis=axis, out_dtype=out_dtype)\n",
    "        with tvm.transform.PassContext(opt_level=3):\n",
    "            lib = relay.build(tvm.IRModule.from_expr(real_q_op), target=target)\n",
    "\n",
    "        # Get the real QNN quantize output.\n",
    "        m = graph_executor.GraphModule(lib[\"default\"](dev))\n",
    "        m.set_input(\"a\", a_np)\n",
    "\n",
    "        m.run()\n",
    "        real_q_out = m.get_output(0)\n",
    "\n",
    "        # Compile the simulated quantize function.\n",
    "        with tvm.target.Target(target):\n",
    "            sched = tvm.topi.testing.get_injective_schedule(target)(SIM_Q)\n",
    "        func = tvm.build(sched, [A, D, S, Z, SIM_Q], target, name=\"sim_quantize\")\n",
    "        func(a, d, s, z, q)\n",
    "\n",
    "        # Check correctness against the real QNN output.\n",
    "        mismatch = q.numpy() != real_q_out.numpy().astype(\"float32\")\n",
    "        # Allow some rounding errors from GPU floating-point arithmetic.\n",
    "        assert np.sum(mismatch) <= 3\n",
    "\n",
    "    # Run the check on all enabled targets.\n",
    "    for target, dev in tvm.testing.enabled_targets():\n",
    "        check_target(target, dev)\n",
    "\n",
    "\n",
    "def test_simulated_quantize():\n",
    "    \"\"\"\n",
    "    Test the simulated quantize operation.\n",
    "\n",
    "    Calls verify_simulated_quantize with a variety of input shapes, dtypes,\n",
    "    channel counts, and axis configurations.\n",
    "    \"\"\"\n",
    "    # Scalar int8 quantization.\n",
    "    verify_simulated_quantize([1], \"int8\", [1], -1)\n",
    "    # 2x5 shape, int8 quantization along axis 1.\n",
    "    verify_simulated_quantize([2, 5], \"int8\", [5], 1)\n",
    "    # 4D tensor, int8 quantization along axis -1.\n",
    "    verify_simulated_quantize([1, 32, 32, 32], \"int8\", [32], -1)\n",
    "    # 4D tensor, uint8 quantization along axis -2.\n",
    "    verify_simulated_quantize([1, 32, 32, 32], \"uint8\", [32], -2)\n",
    "    # 2x5 shape, int32 quantization along axis 1.\n",
    "    verify_simulated_quantize([2, 5], \"int32\", [5], 1)\n",
    "\n",
    "\n",
    "def verify_simulated_dequantize(data_shape, in_dtype, channels, axis):\n",
    "    \"\"\"\n",
    "    Verify the simulated dequantize operation.\n",
    "\n",
    "    Compares the output of the simulated dequantize op against the real QNN\n",
    "    dequantize op to confirm the simulation is accurate.\n",
    "\n",
    "    Parameters:\n",
    "    data_shape (tuple): shape of the input data\n",
    "    in_dtype (str): input data type, e.g. 'int8', 'uint8', 'int32'\n",
    "    channels (list): channel count; sets the dimension of the scale and zero point\n",
    "    axis (int): dequantization axis\n",
    "\n",
    "    Returns:\n",
    "    None\n",
    "    \"\"\"\n",
    "    # Create placeholder variables for all QNN inputs.\n",
    "    A = te.placeholder(data_shape, name=\"value\", dtype=\"float32\")\n",
    "    D = te.placeholder([], name=\"dtype\", dtype=\"int32\")\n",
    "    S = te.placeholder([te.size_var(\"scale_dim\")], name=\"scale\", dtype=\"float32\")\n",
    "    Z = te.placeholder([te.size_var(\"zp_dim\")], name=\"zp\", dtype=\"int32\")\n",
    "    # Create the simulated dequantize op.\n",
    "    SIM_DQ = topi.nn.simulated_dequantize(A, D, input_scale=S, input_zero_point=Z, axis=axis)\n",
    "\n",
    "    # Create random numpy values for the inputs.\n",
    "    a_np = np.random.uniform(low=-128, high=127, size=data_shape).astype(in_dtype)\n",
    "    a_np_f = a_np.astype(\"float32\")  # float32 copy used for the simulated dequantize\n",
    "    d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[in_dtype])  # map the dtype name to its code\n",
    "    s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype(\"float32\")  # scale\n",
    "    z_np = np.random.uniform(low=-10, high=10, size=channels).astype(\"int32\")  # zero point\n",
    "    dq_np = np.zeros(shape=data_shape, dtype=\"float32\")  # output placeholder\n",
    "\n",
    "    def check_target(target, dev):\n",
    "        \"\"\"\n",
    "        Check the correctness of simulated dequantize on a specific target device.\n",
    "\n",
    "        Parameters:\n",
    "        target: TVM target\n",
    "        dev: TVM device context\n",
    "\n",
    "        Returns:\n",
    "        None\n",
    "        \"\"\"\n",
    "        # Wrap the numpy arrays as tvm.nd arrays.\n",
    "        a = tvm.nd.array(a_np_f, dev)\n",
    "        d = tvm.nd.array(d_np, dev)\n",
    "        s = tvm.nd.array(s_np, dev)\n",
    "        z = tvm.nd.array(z_np, dev)\n",
    "        dq = tvm.nd.array(dq_np, dev)\n",
    "\n",
    "        # Construct the equivalent relay graph.\n",
    "        per_channel = channels[0] != 1  # whether this is per-channel dequantization\n",
    "        a_var = relay.var(\"a\", shape=data_shape, dtype=in_dtype)\n",
    "        if per_channel:\n",
    "            s_var = relay.const(s_np)\n",
    "            z_var = relay.const(z_np)\n",
    "        else:\n",
    "            s_var = relay.const(s_np[0])\n",
    "            z_var = relay.const(z_np[0])\n",
    "        # Create the real QNN dequantize op.\n",
    "        real_dq_op = relay.qnn.dequantize(a_var, s_var, z_var, axis=axis)\n",
    "        with tvm.transform.PassContext(opt_level=3):\n",
    "            lib = relay.build(tvm.IRModule.from_expr(real_dq_op), target=target)\n",
    "\n",
    "        # Get the real QNN dequantize output.\n",
    "        m = graph_executor.GraphModule(lib[\"default\"](dev))\n",
    "        m.set_input(\"a\", a_np)\n",
    "\n",
    "        m.run()\n",
    "        real_dq_out = m.get_output(0)\n",
    "\n",
    "        # Compile the simulated dequantize function.\n",
    "        with tvm.target.Target(target):\n",
    "            sched = tvm.topi.testing.get_injective_schedule(target)(SIM_DQ)\n",
    "        # Fix: name the built function \"sim_dequantize\" (was copy-pasted as \"sim_quantize\").\n",
    "        func = tvm.build(sched, [A, D, S, Z, SIM_DQ], target, name=\"sim_dequantize\")\n",
    "        func(a, d, s, z, dq)\n",
    "\n",
    "        # Check correctness against the real QNN output.\n",
    "        tvm.testing.assert_allclose(dq.numpy(), real_dq_out.numpy().astype(\"float32\"), rtol=1e-5)\n",
    "\n",
    "    # Run the check on all enabled targets.\n",
    "    for target, dev in tvm.testing.enabled_targets():\n",
    "        check_target(target, dev)\n",
    "\n",
    "\n",
    "def test_simulated_dequantize():\n",
    "    \"\"\"\n",
    "    Test the simulated dequantize operation.\n",
    "\n",
    "    Calls verify_simulated_dequantize with a variety of input shapes, dtypes,\n",
    "    channel counts, and axis configurations.\n",
    "    \"\"\"\n",
    "    # Scalar int8 dequantization.\n",
    "    verify_simulated_dequantize([1], \"int8\", [1], -1)\n",
    "    # 2x5 shape, int8 dequantization along axis 1.\n",
    "    verify_simulated_dequantize([2, 5], \"int8\", [5], 1)\n",
    "    # 2x5 shape, int8 dequantization along axis 0.\n",
    "    verify_simulated_dequantize([2, 5], \"int8\", [2], 0)\n",
    "    # 4D tensor, int8 dequantization along axis -1.\n",
    "    verify_simulated_dequantize([1, 32, 32, 32], \"int8\", [32], -1)\n",
    "    # 4D tensor, uint8 dequantization along axis -2.\n",
    "    verify_simulated_dequantize([1, 32, 32, 32], \"uint8\", [32], -2)\n",
    "    # 2x5 shape, int32 dequantization along axis 1.\n",
    "    verify_simulated_dequantize([2, 5], \"int32\", [5], 1)\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Run all tests.\n",
    "    test_simulated_quantize()  # simulated quantize\n",
    "    test_simulated_dequantize()  # simulated dequantize\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "aix",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
