{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "9636248e",
   "metadata": {},
   "source": [
    "# Relax 构建块核心"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e662498a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import tvm\n",
    "import tvm.contrib.cblas\n",
    "\n",
    "from tvm import te, tir, topi\n",
    "from tvm import relax as rx\n",
    "from tvm.ir.base import assert_structural_equal\n",
    "\n",
    "from tvm.script import ir as I, relax as R, tir as T\n",
    "from tvm.tir.function import PrimFunc\n",
    "import pytest"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b9cc557d",
   "metadata": {},
   "source": [
    "## 测试 BlockBuilder 在处理函数定义时的错误情况\n",
    "\n",
    "验证了当函数定义中没有显式声明参数，但可能在函数体中隐式引用了外部变量（如 `m`, `n`, `x`, `y`）时，系统是否会抛出适当的错误。这是一种编译时错误检查机制，确保函数定义的完整性和正确性。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b08bc30a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Register a global no-op PackedFunc so a later cell can emit a call to\n",
    "# the extern func \"test.blockbuilder.nop\" without needing a real kernel.\n",
    "@tvm.register_func(\"test.blockbuilder.nop\")\n",
    "def nop():\n",
    "    # Body intentionally empty; only the registration matters here.\n",
    "    ..."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5414fa57",
   "metadata": {},
   "outputs": [],
   "source": [
    "from tvm.relax import ExternFunc\n",
    "# Free symbolic dims and Relax vars that are NOT declared as parameters\n",
    "# of the function built below.\n",
    "m = tir.Var(\"m\", \"int64\")\n",
    "n = tir.Var(\"n\", \"int64\")\n",
    "x = rx.Var(\"x\", rx.TensorStructInfo([m, n], \"float16\"))\n",
    "y = rx.Var(\"y\", rx.TensorStructInfo([n], \"float16\"))\n",
    "bb = rx.BlockBuilder()\n",
    "\n",
    "# bb.function(\"func\") is given no parameter list; closing the function\n",
    "# scope is expected to raise RuntimeError (see pytest.raises below).\n",
    "with pytest.raises(RuntimeError):\n",
    "    with bb.function(\"func\"):\n",
    "        gv0 = bb.emit(rx.Call(ExternFunc(\"test.blockbuilder.nop\"), []))\n",
    "        bb.emit_func_output(gv0)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "45133b1a",
   "metadata": {},
   "source": [
    "## 简单测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "8a7ced42",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_block_builder():\n",
    "    \"\"\"Exercise the low-level block API of rx.BlockBuilder.\n",
    "\n",
    "    Opens an outer binding block, nests two dataflow blocks inside it,\n",
    "    and checks that ``_end_block`` returns ``DataflowBlock`` instances\n",
    "    for the inner blocks but not for the outer one.  Statement order is\n",
    "    significant: each ``_end_block`` closes the most recently begun block.\n",
    "    \"\"\"\n",
    "    m = tir.Var(\"m\", \"int64\")\n",
    "    n = tir.Var(\"n\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([m, n], \"float16\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n], \"float16\"))\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    # Outer (non-dataflow) binding block.\n",
    "    bb._begin_binding_block()\n",
    "    gv0 = bb.emit(rx.op.add(x, y))\n",
    "    # First nested dataflow block.\n",
    "    bb._begin_dataflow_block()\n",
    "    lv0 = bb.emit(rx.op.multiply(gv0, y))\n",
    "    gv1 = bb.emit_output(rx.op.multiply(lv0, lv0))\n",
    "    b0 = bb._end_block()\n",
    "    # Second nested dataflow block.\n",
    "    bb._begin_dataflow_block()\n",
    "    lv1 = bb.emit(rx.op.multiply(gv0, y))\n",
    "    gv2 = bb.emit_output(rx.op.multiply(lv1, lv1))\n",
    "    b1 = bb._end_block()\n",
    "    gv3 = bb.emit(rx.op.add(x, y))\n",
    "    b2 = bb._end_block()\n",
    "\n",
    "    assert isinstance(b0, rx.DataflowBlock)\n",
    "    assert isinstance(b1, rx.DataflowBlock)\n",
    "    assert not isinstance(b2, rx.DataflowBlock)\n",
    "\n",
    "test_block_builder()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "622fc0a3",
   "metadata": {},
   "source": [
    "1. 变量定义：\n",
    "   - `m` 和 `n` 是 TIR（Tensor Intermediate Representation）变量，表示 int64 类型的标量尺寸\n",
    "   - `x` 和 `y` 是 Relax 变量，分别表示形状为 `[m, n]` 和 `[n]` 的 `float16` 张量\n",
    "   - `bb` 是 `rx.BlockBuilder()` 实例，用于构建 Relax 构建块。\n",
    "\n",
    "2. 块构建过程：\n",
    "   - `bb._begin_binding_block()`：开始绑定块（Binding Block）\n",
    "   - `gv0 = bb.emit(rx.op.add(x, y))`：创建全局变量 `gv0`，表示 `x` 和 `y` 的加法运算\n",
    "   - `bb._begin_dataflow_block()`：开始数据流块（Dataflow Block）\n",
    "   - `lv0 = bb.emit(rx.op.multiply(gv0, y))`：创建局部变量 `lv0`，表示 `gv0` 和 `y` 的乘法\n",
    "   - `gv1 = bb.emit_output(...)`：创建输出变量 `gv1`，表示 `lv0` 的平方\n",
    "   - `b0 = bb._end_block()`：结束当前块并返回块对象\n",
    "   - 代码重复创建了另一个数据流块 `b1` 和普通绑定块 `b2`\n",
    "\n",
    "\n",
    "测试目的：\n",
    "- 验证BlockBuilder能够正确创建和区分不同类型的块\n",
    "- 测试变量作用域管理（全局变量gv0、gv1、gv2、gv3和局部变量lv0、lv1）\n",
    "- 验证数据流块和普通绑定块的创建机制\n",
    "- 测试基本算子（add、multiply）的使用\n",
    "\n",
    "关键概念：\n",
    "- **Binding Block**：普通绑定块，用于顺序执行的计算\n",
    "- **Dataflow Block**：数据流块，通常用于表示可以并行执行的计算，内部变量为局部变量\n",
    "- **BlockBuilder**：Relax中用于构建计算图的核心工具\n",
    "\n",
    "这段代码展示了Relax模块中构建计算图的基本模式，包括块的创建、变量的定义和操作的执行，同时验证了BlockBuilder的正确性。\n",
    "        "
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e440b1eb",
   "metadata": {},
   "source": [
    "## 为构建块中变量指定名称"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "6d3666fb",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_emit_with_name():\n",
    "    \"\"\"Explicit name hints passed to emit/emit_output override the\n",
    "    auto-generated lv/gv names on the resulting bindings.\"\"\"\n",
    "    m = tir.Var(\"m\", \"int64\")\n",
    "    n = tir.Var(\"n\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([m, n], \"float16\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n], \"float16\"))\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    bb._begin_dataflow_block()\n",
    "    lv0 = bb.emit(rx.op.add(x, y), \"add\")\n",
    "    gv0 = bb.emit_output(rx.op.multiply(lv0, y), \"multi\")\n",
    "    b0 = bb._end_block()\n",
    "\n",
    "    assert b0.bindings[0].var.name_hint == \"add\"\n",
    "    assert b0.bindings[1].var.name_hint == \"multi\"\n",
    "\n",
    "test_emit_with_name()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "21230f9e",
   "metadata": {},
   "source": [
    "## 测试 BlockBuilder 创建函数的功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "d5bf3b73",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_function_single_block():\n",
    "    \"\"\"Build a one-dataflow-block function and verify its structure.\n",
    "\n",
    "    Checks auto-generated name hints (lv/lv1/gv), the recorded params,\n",
    "    the function body, and the binding count of the single block.\n",
    "    \"\"\"\n",
    "    m = tir.Var(\"m\", \"int64\")\n",
    "    n = tir.Var(\"n\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([m, n], \"float16\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n], \"float16\"))\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with bb.function(\"func\", [x, y]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit(rx.op.add(x, y))\n",
    "            assert lv0.name_hint == \"lv\"\n",
    "            lv1 = bb.emit(rx.op.multiply(lv0, y))\n",
    "            assert lv1.name_hint == \"lv1\"\n",
    "            gv0 = bb.emit_output(lv1)\n",
    "        assert gv0.name_hint == \"gv\"\n",
    "        bb.emit_func_output(gv0)\n",
    "\n",
    "    func = bb.finalize()[\"func\"]\n",
    "    assert func.params[0] == x\n",
    "    assert func.params[1] == y\n",
    "    assert func.body.body == gv0\n",
    "    assert_structural_equal(gv0.struct_info, rx.TensorStructInfo([m, n], \"float16\"))\n",
    "    assert len(func.body.blocks) == 1\n",
    "    assert len(func.body.blocks[0].bindings) == 3\n",
    "\n",
    "test_function_single_block()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a2eba9f9",
   "metadata": {},
   "source": [
    "1. 函数创建与块构建：\n",
    "   - `with bb.function(\"func\", [x, y])`：创建名为\"func\"的函数，参数为x和y\n",
    "   - `with bb.dataflow()`：在函数内创建数据流块\n",
    "   - `lv0 = bb.emit(rx.op.add(x, y))`：在数据流块中创建局部变量lv0，表示x和y的加法\n",
    "   - `lv1 = bb.emit(rx.op.multiply(lv0, y))`：创建局部变量lv1，表示lv0和y的乘法\n",
    "   - `gv0 = bb.emit_output(lv1)`：创建全局变量gv0，表示数据流块的输出\n",
    "\n",
    "2. 验证逻辑：\n",
    "   - 断言lv0的名称提示(name_hint)为\"lv\"\n",
    "   - 断言lv1的名称提示为\"lv1\"\n",
    "   - 断言gv0的名称提示为\"gv\"\n",
    "   - `func = bb.finalize()[\"func\"]`：获取最终的函数\n",
    "   - 断言函数参数正确\n",
    "   - 断言函数体结构正确\n",
    "   - 断言gv0的结构信息正确\n",
    "   - 断言函数体包含1个块，且该块包含3个绑定\n",
    "\n",
    "测试目的：\n",
    "- 验证使用BlockBuilder创建函数的基本流程\n",
    "- 测试函数参数的设置和传递\n",
    "- 验证数据流块的创建和操作\n",
    "- 测试变量名称提示的自动生成\n",
    "- 验证函数输出的设置和获取\n",
    "- 确认函数结构和绑定数量的正确性\n",
    "\n",
    "关键概念：\n",
    "- **function**：Relax中的函数定义，包含参数和函数体\n",
    "- **dataflow**：数据流块，用于表示可以并行执行的计算\n",
    "- **emit**：向当前块中添加操作并返回生成的变量\n",
    "- **emit_output**：标记数据流块的输出变量\n",
    "- **emit_func_output**：设置函数的返回值\n",
    "- **finalize**：完成构建并返回创建的函数\n",
    "\n",
    "这段代码全面测试了使用BlockBuilder创建包含单个数据流块的函数的各个环节，确保了函数创建、参数传递、块操作和输出设置等功能的正确性。\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "4aa46c30",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_function_multi_blocks():\n",
    "    \"\"\"A function whose body alternates dataflow and binding blocks.\n",
    "\n",
    "    Expected layout: dataflow (2 bindings) -> binding (1) -> dataflow (2).\n",
    "    \"\"\"\n",
    "    m = tir.Var(\"m\", \"int64\")\n",
    "    n = tir.Var(\"n\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([m, n], \"float16\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n], \"float16\"))\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with bb.function(\"func\", [x, y]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit(rx.op.add(x, y))\n",
    "            assert lv0.name_hint == \"lv\"\n",
    "            gv0 = bb.emit_output(lv0)\n",
    "        assert gv0.name_hint == \"gv\"\n",
    "        # Emitting outside a dataflow scope goes into a plain binding block.\n",
    "        gv1 = bb.emit(rx.op.add(gv0, gv0))\n",
    "        assert gv1.name_hint == \"gv1\"\n",
    "        with bb.dataflow():\n",
    "            lv1 = bb.emit(rx.op.add(gv1, gv1))\n",
    "            assert lv1.name_hint == \"lv1\"\n",
    "            gv2 = bb.emit_output(gv1)\n",
    "        bb.emit_func_output(gv2)\n",
    "\n",
    "    func = bb.finalize()[\"func\"]\n",
    "\n",
    "    assert_structural_equal(gv2.struct_info, rx.TensorStructInfo([m, n], \"float16\"))\n",
    "    assert func.params[0] == x\n",
    "    assert func.params[1] == y\n",
    "    assert func.body.body == gv2\n",
    "    assert len(func.body.blocks) == 3\n",
    "    assert len(func.body.blocks[0].bindings) == 2\n",
    "    assert len(func.body.blocks[1].bindings) == 1\n",
    "    assert len(func.body.blocks[2].bindings) == 2\n",
    "\n",
    "test_function_multi_blocks()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "a1fe09aa",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_multi_functions():\n",
    "    \"\"\"Two functions built with one BlockBuilder.\n",
    "\n",
    "    The local-var name map is shared across functions, so the second\n",
    "    function's first local is named \"lv1\" (see TODO below).\n",
    "    \"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    m_1 = tir.Var(\"m\", \"int64\")\n",
    "    n_1 = tir.Var(\"n\", \"int64\")\n",
    "    x_1 = rx.Var(\"x\", rx.TensorStructInfo([m_1, n_1], \"float16\"))\n",
    "    y_1 = rx.Var(\"y\", rx.TensorStructInfo([n_1], \"float16\"))\n",
    "\n",
    "    with bb.function(\"func1\", [x_1, y_1]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit(rx.op.add(x_1, y_1))\n",
    "            assert lv0.name_hint == \"lv\"\n",
    "            gv0 = bb.emit_output(lv0)\n",
    "        bb.emit_func_output(gv0)\n",
    "\n",
    "    m_2 = tir.Var(\"m\", \"int64\")\n",
    "    n_2 = tir.Var(\"n\", \"int64\")\n",
    "    x_2 = rx.Var(\"x\", rx.TensorStructInfo([m_2, n_2], \"float16\"))\n",
    "    y_2 = rx.Var(\"y\", rx.TensorStructInfo([n_2], \"float16\"))\n",
    "\n",
    "    with bb.function(\"func2\", [x_2, y_2]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit(rx.op.add(y_2, x_2))\n",
    "            # TODO(@yuchen): enable block builder to reset local var unique name map\n",
    "            assert lv0.name_hint == \"lv1\"\n",
    "            gv0 = bb.emit_output(lv0)\n",
    "        bb.emit_func_output(gv0)\n",
    "\n",
    "    mod = bb.finalize()\n",
    "    func1 = mod[\"func1\"]\n",
    "    assert func1.params[0] == x_1\n",
    "    assert func1.params[1] == y_1\n",
    "    assert len(func1.body.blocks) == 1\n",
    "    func2 = mod[\"func2\"]\n",
    "    assert func2.params[0] == x_2\n",
    "    assert func2.params[1] == y_2\n",
    "    assert len(func2.body.blocks) == 1\n",
    "\n",
    "test_multi_functions()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "765dbe4a",
   "metadata": {},
   "source": [
    "## 验证 Relax BlockBuilder 对二元算子（如加法、乘法）的 形状和类型推导 功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "d383cedf",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_binary_shape_type_deduction():\n",
    "    \"\"\"Struct-info deduction for broadcasting binary ops.\n",
    "\n",
    "    Mixing symbolic dims (m, n, k) with static dims exercises both exact\n",
    "    shape deduction and fallback to ndim/dtype-only TensorStructInfo.\n",
    "    \"\"\"\n",
    "    m = tir.Var(\"m\", \"int64\")\n",
    "    n = tir.Var(\"n\", \"int64\")\n",
    "    k = tir.Var(\"k\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([m, 1], \"float16\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n], \"float16\"))\n",
    "    z = rx.Var(\"z\", rx.TensorStructInfo([5], \"float16\"))\n",
    "    w = rx.Var(\"w\", rx.TensorStructInfo([k], \"float16\"))\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with bb.function(\"func\", [x, y, z, w]):\n",
    "        with bb.dataflow():\n",
    "            lv0 = bb.emit(rx.op.add(x, y))\n",
    "            assert_structural_equal(lv0.struct_info, rx.TensorStructInfo([m, n], \"float16\"))\n",
    "\n",
    "            lv1 = bb.emit(rx.op.multiply(x, z))\n",
    "            assert_structural_equal(lv1.struct_info, rx.TensorStructInfo([m, 5], \"float16\"))\n",
    "\n",
    "            # Static-vs-symbolic dims: only ndim and dtype are deduced.\n",
    "            lv2 = bb.emit(rx.op.multiply(z, w))\n",
    "            assert isinstance(lv2.struct_info, rx.TensorStructInfo)\n",
    "            assert lv2.struct_info.ndim == 1\n",
    "            assert lv2.struct_info.dtype == \"float16\"\n",
    "\n",
    "            lv3 = bb.emit(rx.op.multiply(y, w))\n",
    "            assert isinstance(lv3.struct_info, rx.TensorStructInfo)\n",
    "            assert lv3.struct_info.ndim == 1\n",
    "            assert lv3.struct_info.dtype == \"float16\"\n",
    "\n",
    "            gv0 = bb.emit_output(lv3)\n",
    "        bb.emit_func_output(gv0)\n",
    "\n",
    "        assert isinstance(gv0.struct_info, rx.TensorStructInfo)\n",
    "        assert gv0.struct_info.ndim == 1\n",
    "        assert gv0.struct_info.dtype == \"float16\"\n",
    "\n",
    "test_binary_shape_type_deduction()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "55a0662a",
   "metadata": {},
   "source": [
    "## `match_cast` \n",
    "\n",
    "`match_cast` 是一种类型转换机制，用于将变量与特定的结构信息进行匹配"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "d1b1151f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_emit_match_cast():\n",
    "    \"\"\"match_cast binds a value to a more specific struct info.\n",
    "\n",
    "    Verifies the resulting DataflowVar, an explicit name hint, and the\n",
    "    MatchCast binding nodes recorded in the finalized function.\n",
    "    \"\"\"\n",
    "    m = tir.Var(\"m\", dtype=\"int64\")\n",
    "    n = tir.Var(\"n\", dtype=\"int64\")\n",
    "    x = rx.Var(\"tensor_value\", rx.TensorStructInfo(dtype=\"float32\", ndim=-1))\n",
    "    y = rx.Var(\"shape_value\", rx.ShapeStructInfo([16, 8]))\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with bb.function(\"func\", [x, y]):\n",
    "        with bb.dataflow():\n",
    "            # lv0: Tensor((m, n), \"float32\") =\n",
    "            #   match_cast(x: Tensor(_, \"float32\"), [m, n])\n",
    "            lv0 = bb.match_cast(x, rx.TensorStructInfo([m, n], \"float32\"))\n",
    "            assert isinstance(lv0, rx.DataflowVar)\n",
    "            assert_structural_equal(lv0.struct_info, rx.TensorStructInfo([m, n], \"float32\"))\n",
    "\n",
    "            # lv1: Shape = match_cast(shape, rx.ShapeStructInfo([m, n]))\n",
    "            lv1 = bb.match_cast(y, rx.ShapeStructInfo([m, n]), \"var_name\")\n",
    "            assert lv1.struct_info == rx.ShapeStructInfo([m, n])\n",
    "            gv0 = bb.emit_output(lv1)\n",
    "\n",
    "        bb.emit_func_output(gv0)\n",
    "    func = bb.finalize()[\"func\"]\n",
    "    block = func.body.blocks[0]\n",
    "    b0, b1 = block.bindings[:2]\n",
    "    assert isinstance(b0, rx.MatchCast)\n",
    "    assert isinstance(b1, rx.MatchCast)\n",
    "\n",
    "    assert b0.value == x\n",
    "    assert b0.struct_info == rx.TensorStructInfo([m, n], \"float32\")\n",
    "    assert b0.var == lv0\n",
    "\n",
    "    assert b1.value == y\n",
    "    assert b1.struct_info == rx.ShapeStructInfo([m, n])\n",
    "    assert b1.var == lv1\n",
    "    assert b1.var.name_hint == \"var_name\"\n",
    "\n",
    "test_emit_match_cast()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8578bde9",
   "metadata": {},
   "source": [
    "该测试函数全面验证了 match_cast 操作的功能，包括： \n",
    "- 将张量从任意维度转换为指定的符号维度形状  \n",
    "- 将固定形状转换为符号维度形状  \n",
    "- 为转换后的变量指定名称  \n",
    "- 验证转换操作的底层表示和属性   \n",
    "\n",
    "`match_cast` 操作在 Relax 中扮演着重要角色，它允许开发者在编写程序时明确指定变量的结构信息，同时为框架提供了进行静态类型检查和形状推断的能力。这些测试确保了 match_cast 操作能够正确处理不同类型的变量转换，并验证了转换结果的正确性。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "20946b75",
   "metadata": {},
   "source": [
    "## `emit_normalized`\n",
    "\n",
    "测试函数专注于验证 `emit_normalized` 方法在数据流块中处理 `MatchCast` 操作的能力。它确保：\n",
    "1. `MatchCast` 操作能够被正确地添加到数据流块中\n",
    "2. `MatchCast` 操作的属性（源值、结构信息、目标变量）被正确设置\n",
    "3. 数据流块能够正确管理和绑定 `MatchCast` 操作\n",
    "\n",
    "`emit_normalized` 方法是 BlockBuilder 中的一个重要方法，它允许直接添加规范化的操作，而不需要通过 Builder 的其他方法（如 `match_cast`）来创建。这个测试确保了该方法在数据流块中的正确行为，为开发者提供了更灵活的编程方式。\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "7d51a47b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_emit_match_cast_binding_in_dataflow_block():\n",
    "    \"\"\"emit_normalized accepts a pre-built MatchCast binding and records\n",
    "    it in the current dataflow block unchanged.\"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo(dtype=\"float32\", ndim=-1))\n",
    "    m = tir.Var(\"m\", dtype=\"int64\")\n",
    "    gv = rx.Var(\"gv\", rx.TensorStructInfo(dtype=\"float32\", ndim=-1))\n",
    "    match_cast = rx.MatchCast(gv, x, rx.TensorStructInfo((m,), \"float32\"))\n",
    "\n",
    "    with bb.function(\"main\", [x]):\n",
    "        with bb.dataflow():\n",
    "            bb.emit_normalized(match_cast)\n",
    "            bb.emit_output(gv)\n",
    "        bb.emit_func_output(x)\n",
    "\n",
    "    func = bb.finalize()[\"main\"]\n",
    "    block = func.body.blocks[0]\n",
    "    b0 = block.bindings[0]\n",
    "    assert isinstance(b0, rx.MatchCast)\n",
    "\n",
    "    assert b0.value == x\n",
    "    assert isinstance(b0.struct_info, rx.TensorStructInfo)\n",
    "    assert b0.struct_info.shape[0] == m\n",
    "    assert b0.var == gv\n",
    "\n",
    "test_emit_match_cast_binding_in_dataflow_block()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b1e04faf",
   "metadata": {},
   "source": [
    "## `normalize` \n",
    "\n",
    "测试函数全面验证了 `normalize` 方法对不同类型节点的规范化能力，包括：\n",
    "1. **Call 节点**：验证操作节点在规范化后具有正确的形状信息\n",
    "2. **Tuple 节点**：验证元组节点在规范化后具有正确的结构信息\n",
    "3. **嵌套 Tuple 节点**：验证嵌套元组在规范化后保持正确的层次结构信息\n",
    "\n",
    "`normalize` 方法是 BlockBuilder 中的一个核心方法，它确保各种类型的节点在创建后具有正确的结构信息，这对于后续的静态分析、优化和代码生成至关重要。通过规范化，框架可以明确每个节点的类型和形状，从而进行更精确的处理。\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "d407a63d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_normalize():\n",
    "    \"\"\"bb.normalize populates struct info on Call, Tuple, and nested\n",
    "    Tuple expressions created outside of emit().\"\"\"\n",
    "    m = tir.Var(\"m\", \"int64\")\n",
    "    n = tir.Var(\"n\", \"int64\")\n",
    "\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([m, n], \"float16\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n], \"float16\"))\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    # Call node\n",
    "    add_call = rx.op.multiply(x, y)\n",
    "\n",
    "    bb.normalize(add_call)\n",
    "    shape = rx.get_shape_of(add_call)\n",
    "\n",
    "    assert isinstance(shape, rx.ShapeExpr)\n",
    "    assert shape[0] == m\n",
    "    assert shape[1] == n\n",
    "\n",
    "    # Tuple node\n",
    "    tuple_1 = rx.Tuple([x, y])\n",
    "    bb.normalize(tuple_1)\n",
    "    assert isinstance(tuple_1.struct_info, rx.TupleStructInfo)\n",
    "    assert isinstance(tuple_1.struct_info.fields[0], rx.TensorStructInfo)\n",
    "    assert isinstance(tuple_1.struct_info.fields[1], rx.TensorStructInfo)\n",
    "\n",
    "    # Nested Tuple\n",
    "    tuple_2 = rx.Tuple([x, rx.Tuple([x, y])])\n",
    "    bb.normalize(tuple_2)\n",
    "\n",
    "    assert isinstance(tuple_2.struct_info, rx.TupleStructInfo)\n",
    "    assert isinstance(tuple_2.struct_info.fields[0], rx.TensorStructInfo)\n",
    "    assert isinstance(tuple_2.struct_info.fields[1], rx.TupleStructInfo)\n",
    "    assert isinstance(tuple_2.struct_info.fields[1].fields[0], rx.TensorStructInfo)\n",
    "    assert isinstance(tuple_2.struct_info.fields[1].fields[1], rx.TensorStructInfo)\n",
    "\n",
    "test_normalize()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "616a6975",
   "metadata": {},
   "source": [
    "## 处理元组类型的结构信息\n",
    "\n",
    "测试确保Relax框架能够正确处理元组类型的结构信息，包括：\n",
    "- 元组索引操作能正确继承元素的结构信息\n",
    "- 元组解包功能按预期工作\n",
    "- 对错误的解包操作提供明确的错误提示\n",
    "这些功能对于静态类型分析、形状推导和代码正确性验证至关重要。\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "6dec8140",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_tuple_indexing():\n",
    "    \"\"\"Indexing and unpacking a Var with TupleStructInfo.\n",
    "\n",
    "    Unpacking with the wrong arity must fail eagerly with ValueError.\n",
    "    \"\"\"\n",
    "    m = tir.Var(\"m\", \"int64\")\n",
    "    n = tir.Var(\"n\", \"int64\")\n",
    "\n",
    "    shape_x = rx.TensorStructInfo([m, n], \"float16\")\n",
    "    shape_y = rx.TensorStructInfo([n], \"float16\")\n",
    "    relax_tuple = rx.Var(\"relax_tuple\", rx.TupleStructInfo([shape_x, shape_y]))\n",
    "\n",
    "    assert isinstance(relax_tuple.struct_info, rx.TupleStructInfo)\n",
    "    assert isinstance(relax_tuple.struct_info.fields[0], rx.TensorStructInfo)\n",
    "    assert isinstance(relax_tuple.struct_info.fields[1], rx.TensorStructInfo)\n",
    "\n",
    "    # TupleGetItem will initialize struct info from the\n",
    "    # TupleStructInfo, if present.\n",
    "    x = relax_tuple[0]\n",
    "    tvm.ir.assert_structural_equal(x.struct_info, shape_x)\n",
    "\n",
    "    y = relax_tuple[1]\n",
    "    tvm.ir.assert_structural_equal(y.struct_info, shape_y)\n",
    "\n",
    "    # Tuple unpacking produces TupleGetItem structs\n",
    "    x_unpack, y_unpack = relax_tuple\n",
    "    tvm.ir.assert_structural_equal(x, x_unpack)\n",
    "    tvm.ir.assert_structural_equal(y, y_unpack)\n",
    "\n",
    "    # When TupleStructInfo is available, tuple unpacking fails immediately\n",
    "    # for incorrect number of arguments.\n",
    "    with pytest.raises(ValueError):\n",
    "        x_unpack, y_unpack, z_unpack = relax_tuple\n",
    "\n",
    "test_tuple_indexing()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "08870a89",
   "metadata": {},
   "source": [
    "## 调用TE函数\n",
    "\n",
    "测试确保Relax框架能够正确集成和调用TE函数，这是TVM中连接Relax前端与底层张量计算的重要机制。测试验证了：\n",
    "- 位置参数和关键字参数的正确传递\n",
    "- TE计算与Relax函数的正确嵌套\n",
    "- 函数结构和绑定关系的正确性\n",
    "这些功能对于将高级Relax程序转换为可执行的底层张量计算至关重要。\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "3d5607f8",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_call_te():\n",
    "    \"\"\"call_te lowers a TE compute into a PrimFunc call, forwarding\n",
    "    positional list, dict, and keyword arguments to the TE function.\"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "    n, m = tir.Var(\"n\", \"int64\"), tir.Var(\"m\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([n, m], \"float32\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n, m], \"float32\"))\n",
    "    z = rx.Var(\"z\", rx.TensorStructInfo([n, m], \"float32\"))\n",
    "\n",
    "    def te_func(args, args_dict, msg):\n",
    "        # Receives tensors both positionally and via a dict.\n",
    "        A, B = args\n",
    "        C = args_dict[\"C\"]\n",
    "        D = te.compute((128, 128), lambda i, j: A[i, j] + B[i, j])\n",
    "        E = te.compute((128, 128), lambda i, j: D[i, j] - C[i, j])\n",
    "        return E\n",
    "\n",
    "    with bb.function(\"rx_func\", [x, y, z]):\n",
    "        with bb.dataflow():\n",
    "            out = bb.emit_output(bb.call_te(te_func, [x, y], {\"C\": z}, msg=\"hello\"))\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    mod = bb.finalize()\n",
    "    rx_func = mod[\"rx_func\"]\n",
    "\n",
    "    assert rx_func.params[0] == x\n",
    "    assert rx_func.params[1] == y\n",
    "    assert rx_func.params[2] == z\n",
    "    assert rx_func.body.body == out\n",
    "    assert len(rx_func.body.blocks) == 1\n",
    "    assert len(rx_func.body.blocks[0].bindings) == 1\n",
    "\n",
    "test_call_te()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a7a5e603",
   "metadata": {},
   "source": [
    "## TE函数的名称唯一性\n",
    "\n",
    "测试确保Relax框架在调用TE函数时能够为不同的张量参数生成唯一的名称。名称唯一性对于以下方面至关重要：\n",
    "- 避免代码生成和优化过程中的命名冲突\n",
    "- 确保张量和缓冲区的正确识别和引用\n",
    "- 提高代码的可读性和可维护性\n",
    "- 保证底层计算图的正确性\n",
    "\n",
    "这个测试特别关注矩阵乘法操作，验证了从参数到缓冲区的完整命名链都能保持唯一性。\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "d13878b3",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_call_te_unique_tensor_name():\n",
    "    \"\"\"Lowered PrimFunc params/buffers for distinct inputs must carry\n",
    "    distinct names (here: the two operands of topi matmul).\"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "    x = rx.Var(\"x\", R.Tensor((2, 3), \"float32\"))\n",
    "    y = rx.Var(\"y\", R.Tensor((3, 4), \"float32\"))\n",
    "    with bb.function(\"main\", [x, y]):\n",
    "        gv = bb.emit_te(topi.nn.matmul, x, y)\n",
    "        bb.emit_func_output(gv)\n",
    "\n",
    "    f_matmul = bb.finalize()[\"matmul\"]\n",
    "    param_A = f_matmul.params[0]\n",
    "    param_B = f_matmul.params[1]\n",
    "    buffer_A = f_matmul.buffer_map[param_A]\n",
    "    buffer_B = f_matmul.buffer_map[param_B]\n",
    "    assert param_A.name != param_B.name\n",
    "    assert buffer_A.name != buffer_B.name\n",
    "    assert buffer_A.data.name != buffer_B.data.name\n",
    "\n",
    "test_call_te_unique_tensor_name()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cc52f262",
   "metadata": {},
   "source": [
    "## 调用TE函数时能够正确验证参数类型\n",
    "\n",
    "测试确保Relax框架在调用TE函数时能够正确验证参数类型，特别是形状参数。具体来说：\n",
    "- `topi.reshape`通常期望形状参数是具体的元组（如`(200,)`），而非`ShapeStructInfo`类型的变量\n",
    "- 测试验证了`call_te`方法能够检测到这种类型不匹配并抛出适当的异常\n",
    "- 确保框架在早期阶段就能捕获类型错误，避免后续执行过程中出现更严重的问题\n",
    "- 维护了类型安全和API使用的正确性\n",
    "\n",
    "这个测试是框架健壮性的重要保障，确保用户在使用API时能够得到明确的错误提示，而不是遇到隐藏的类型不兼容问题。\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "da40d27e",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_call_te_with_unsupported_shape_arg():\n",
    "    \"\"\"Passing a ShapeStructInfo var where topi.reshape expects a\n",
    "    concrete shape must raise AssertionError inside call_te.\"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo((200,), \"float32\"))\n",
    "    s = rx.Var(\"s\", rx.ShapeStructInfo((200,)))\n",
    "\n",
    "    with pytest.raises(AssertionError):\n",
    "        with bb.function(\"rx_func\", [x]):\n",
    "            out = bb.emit(bb.call_te(topi.reshape, x, s))\n",
    "            bb.emit_func_output(out)\n",
    "\n",
    "test_call_te_with_unsupported_shape_arg()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b736a7bb",
   "metadata": {},
   "source": [
    "## `emit_te` 测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "fe3b4299",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_emit_te():\n",
    "    \"\"\"emit_te generates a te_func PrimFunc plus a call_tir binding; the\n",
    "    generated TIR matches create_prim_func on the same TE computation.\"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "    n, m = tir.Var(\"n\", \"int64\"), tir.Var(\"m\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([n, m], \"float32\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n, m], \"float32\"))\n",
    "    z = rx.Var(\"z\", rx.TensorStructInfo([n, m], \"float32\"))\n",
    "\n",
    "    def te_func(args, args_dict, msg):\n",
    "        A, B = args\n",
    "        C = args_dict[\"C\"]\n",
    "        D = te.compute((128, 128), lambda i, j: A[i, j] + B[i, j])\n",
    "        E = te.compute((128, 128), lambda i, j: D[i, j] - C[i, j])\n",
    "        return E\n",
    "\n",
    "    with bb.function(\"rx_func\", [x, y, z]):\n",
    "        out = bb.emit_te(te_func, [x, y], {\"C\": z}, msg=\"hello\")\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    mod = bb.finalize()\n",
    "    rx_func = mod[\"rx_func\"]\n",
    "\n",
    "    def get_tir_func():\n",
    "        # Reference lowering of the same computation via create_prim_func.\n",
    "        A = te.placeholder((n, m), dtype=\"float32\", name=\"A\")\n",
    "        B = te.placeholder((n, m), dtype=\"float32\", name=\"B\")\n",
    "        C = te.placeholder((n, m), dtype=\"float32\", name=\"C\")\n",
    "        out = te_func((A, B), {\"C\": C}, \"\")\n",
    "        return tvm.te.create_prim_func([A, B, C, out], index_dtype_override=\"int64\")\n",
    "\n",
    "    # check TIR structure matches expected\n",
    "    assert_structural_equal(mod[\"te_func\"].body, get_tir_func().body)\n",
    "\n",
    "    # check Relax function calls TIR function with call_tir call\n",
    "    assert rx_func.params[0] == x\n",
    "    assert rx_func.params[1] == y\n",
    "    assert rx_func.params[2] == z\n",
    "    assert rx_func.body.body == out\n",
    "    assert len(rx_func.body.blocks) == 1\n",
    "    assert len(rx_func.body.blocks[0].bindings) == 1\n",
    "\n",
    "    call_node = rx_func.body.blocks[0].bindings[0].value\n",
    "    assert isinstance(call_node, rx.Call)\n",
    "    assert len(call_node.args) == 2\n",
    "    assert call_node.args[0].name_hint == \"te_func\"\n",
    "    assert call_node.args[1][0] == x\n",
    "    assert call_node.args[1][1] == y\n",
    "    assert call_node.args[1][2] == z\n",
    "\n",
    "test_emit_te()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "7ab5aa74",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_emit_te_multiple():\n",
    "    \"\"\"Structurally equal generated TE funcs are deduplicated; a call on\n",
    "    a different input shape produces a second PrimFunc named te_func1.\"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "    n, m = tir.Var(\"n\", \"int64\"), tir.Var(\"m\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([n, m], \"float32\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n, m], \"float32\"))\n",
    "    z = rx.Var(\"z\", rx.TensorStructInfo([128, m], \"float32\"))\n",
    "\n",
    "    def te_func(A):\n",
    "        B = te.compute((128, 128), lambda i, j: A[i, j] + 1)\n",
    "        return B\n",
    "\n",
    "    with bb.function(\"rx_func\", [x, y, z]):\n",
    "        x1 = bb.emit_te(te_func, x)\n",
    "        y1 = bb.emit_te(te_func, y)\n",
    "        z1 = bb.emit_te(te_func, z)\n",
    "        bb.emit_func_output(z1)\n",
    "\n",
    "    mod = bb.finalize()\n",
    "    rx_func = mod[\"rx_func\"]\n",
    "\n",
    "    prim_func = []\n",
    "    for gv in mod.get_global_vars():\n",
    "        if isinstance(mod[gv], PrimFunc):\n",
    "            prim_func.append(mod[gv])\n",
    "\n",
    "    # only two PrimFuncs were generated since two of them are equal so got deduped\n",
    "    assert len(prim_func) == 2\n",
    "    assert rx_func.body.blocks[0].bindings[0].value.args[0].name_hint == \"te_func\"\n",
    "    assert rx_func.body.blocks[0].bindings[1].value.args[0].name_hint == \"te_func\"\n",
    "    assert rx_func.body.blocks[0].bindings[2].value.args[0].name_hint == \"te_func1\"\n",
    "\n",
    "test_emit_te_multiple()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "8ee34089",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_emit_te_multiple_output():\n",
    "    \"\"\"A TE func returning a tuple yields a call_tir whose sinfo is a\n",
    "    TupleStructInfo with ShapeExpr shapes on each field.\"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "    n, m = tir.Var(\"n\", \"int64\"), tir.Var(\"m\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([n, m], \"float32\"))\n",
    "\n",
    "    def te_func(A):\n",
    "        B0, B1 = te.compute((n, m), lambda i, j: (A[i, j] + 1, A[i, j] * 2), name=\"B\")\n",
    "        return (B0, B1)\n",
    "\n",
    "    with bb.function(\"rx_func\", [x]):\n",
    "        y = bb.emit_te(te_func, x)\n",
    "        z = rx.TupleGetItem(y, 0)\n",
    "        bb.emit_func_output([y, z])\n",
    "\n",
    "    rx_func = bb.finalize()[\"rx_func\"]\n",
    "\n",
    "    # check call tir output shape is a Tuple of ShapeExpr\n",
    "    assert rx_func.params[0] == x\n",
    "    call_node = rx_func.body.blocks[0].bindings[0].value\n",
    "    assert call_node.args[0].name_hint == \"te_func\"\n",
    "    assert isinstance(call_node.sinfo_args[0], rx.TupleStructInfo)\n",
    "    assert len(call_node.sinfo_args[0].fields) == 2\n",
    "    assert isinstance(call_node.sinfo_args[0].fields[0].shape, rx.ShapeExpr)\n",
    "    assert isinstance(call_node.sinfo_args[0].fields[1].shape, rx.ShapeExpr)\n",
    "\n",
    "test_emit_te_multiple_output()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "74c79603",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_emit_te_extern():\n",
    "    \"\"\"emit_te with a contrib extern (cblas matmul) produces a call_tir\n",
    "    whose deduced result shape is (n, n) for (n, m) x (m, n) inputs.\"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "    n, m = tir.Var(\"n\", \"int64\"), tir.Var(\"m\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([n, m], \"float32\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([m, n], \"float32\"))\n",
    "\n",
    "    with bb.function(\"rx_cblas_matmul\", [x, y]):\n",
    "        out = bb.emit_te(tvm.contrib.cblas.matmul, x, y, transa=False, transb=False)\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    mod = bb.finalize()\n",
    "    rx_func = mod[\"rx_cblas_matmul\"]\n",
    "\n",
    "    # check Relax function calls TIR function with call_tir call\n",
    "    assert rx_func.params[0] == x\n",
    "    assert rx_func.params[1] == y\n",
    "    assert len(rx_func.body.blocks) == 1\n",
    "    call_node = rx_func.body.blocks[0].bindings[0].value\n",
    "    assert isinstance(call_node, rx.Call)\n",
    "    assert len(call_node.args) == 2\n",
    "    assert call_node.args[0].name_hint == \"matmul\"\n",
    "    assert call_node.args[1][0] == x\n",
    "    assert call_node.args[1][1] == y\n",
    "    assert call_node.sinfo_args[0].shape[0] == n\n",
    "    assert call_node.sinfo_args[0].shape[1] == n\n",
    "\n",
    "test_emit_te_extern()\n",
    "\n",
    "\n",
    "def test_emit_te_prim_value():\n",
    "    bb = rx.BlockBuilder()\n",
    "    n, m = tir.Var(\"n\", \"int64\"), tir.Var(\"m\", \"int64\")\n",
    "    x = rx.Var(\"x\", R.Tensor([n, m], \"float32\"))\n",
    "    a_min = rx.PrimValue(0)\n",
    "    a_max = rx.PrimValue(6)\n",
    "\n",
    "    with bb.function(\"rx_clip\", [x]):\n",
    "        out = bb.emit_te(topi.clip, x, a_min, a_max)\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    rx_func = bb.finalize()[\"rx_clip\"]\n",
    "\n",
    "    # check Relax function calls TIR function with call_tir call\n",
    "    assert rx_func.params[0] == x\n",
    "    assert len(rx_func.body.blocks) == 1\n",
    "    call_node = rx_func.body.blocks[0].bindings[0].value\n",
    "    assert isinstance(call_node, rx.Call)\n",
    "    assert len(call_node.args) == 2\n",
    "    assert call_node.args[1][0] == x\n",
    "\n",
    "\n",
    "def test_nested_function_fail():\n",
    "    m = tir.Var(\"m\", \"int64\")\n",
    "    n = tir.Var(\"n\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([m, n], \"float16\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n], \"float16\"))\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with pytest.raises(RuntimeError):\n",
    "        with bb.function(\"func\", [x, y]):\n",
    "            gv0 = bb.emit(rx.op.add(x, x))\n",
    "            with bb.function(\"func1\", [x, y]):\n",
    "                gv1 = bb.emit(rx.op.add(x, x))\n",
    "            bb.emit_func_output(gv0)\n",
    "\n",
    "\n",
    "def test_emit_func_output_twice_fail():\n",
    "    m = tir.Var(\"m\", \"int64\")\n",
    "    n = tir.Var(\"n\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([m, n], \"float16\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n], \"float16\"))\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with pytest.raises(RuntimeError):\n",
    "        with bb.function(\"func\", [x, y]):\n",
    "            gv0 = bb.emit(rx.op.add(x, y))\n",
    "            bb.emit_func_output(gv0)\n",
    "            bb.emit_func_output(gv0)\n",
    "\n",
    "\n",
    "def test_func_params_twice_fail():\n",
    "    m = tir.Var(\"m\", \"int64\")\n",
    "    n = tir.Var(\"n\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([m, n], \"float16\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n], \"float16\"))\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with pytest.raises(RuntimeError):\n",
    "        with bb.function(\"func\", [x, y]):\n",
    "            gv0 = bb.emit(rx.op.add(x, y))\n",
    "            bb.emit_func_output(gv0, [x])\n",
    "\n",
    "\n",
    "def test_no_func_params_fail():\n",
    "    m = tir.Var(\"m\", \"int64\")\n",
    "    n = tir.Var(\"n\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([m, n], \"float16\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([n], \"float16\"))\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with pytest.raises(RuntimeError):\n",
    "        with bb.function(\"func\"):\n",
    "            gv0 = bb.emit(rx.Call(ExternFunc(\"test.blockbuilder.nop\"), []))\n",
    "            bb.emit_func_output(gv0)\n",
    "\n",
    "\n",
    "def test_block_builder_scope_recovery():\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    n, m = tir.Var(\"n\", \"int64\"), tir.Var(\"m\", \"int64\")\n",
    "    x = rx.Var(\"x\", rx.TensorStructInfo([n, m], \"float32\"))\n",
    "    y = rx.Var(\"y\", rx.TensorStructInfo([m, n], \"float32\"))\n",
    "\n",
    "    with pytest.raises(RuntimeError):\n",
    "        # this line fails\n",
    "        with bb.function(\"func\", [x, y]):\n",
    "            gv0 = bb.emit(rx.op.add(x, y))\n",
    "\n",
    "    # current should be recovered\n",
    "    assert rx.BlockBuilder.current() is None\n",
    "\n",
    "    # second attempt to do it correctly.\n",
    "    with bb.function(\"func\", [x, y]):\n",
    "        gv0 = bb.emit(rx.op.add(x, y))\n",
    "        bb.emit_func_output(gv0)\n",
    "\n",
    "\n",
    "@pytest.mark.parametrize(\"emit_nested_tuple\", [True, False])\n",
    "def test_emit_nested_tuple(emit_nested_tuple):\n",
    "    \"\"\"Convert nested tuples when emitting relax\"\"\"\n",
    "\n",
    "    def make_function(emit_nested_tuple: bool):\n",
    "        bb = rx.BlockBuilder()\n",
    "\n",
    "        n_sym = tir.Var(\"n\", \"int64\")\n",
    "        m_sym = tir.Var(\"m\", \"int64\")\n",
    "        n = rx.Var(\"n\", rx.PrimStructInfo(value=n_sym))\n",
    "        m = rx.Var(\"m\", rx.PrimStructInfo(value=m_sym))\n",
    "        x = rx.Var(\"x\", rx.TensorStructInfo([n_sym, m_sym], \"float32\"))\n",
    "        y = rx.Var(\"y\", rx.TensorStructInfo([m_sym, n_sym], \"float32\"))\n",
    "\n",
    "        with bb.function(\"func\", [n, m, x, y]):\n",
    "            scalars = (n, m)\n",
    "            if not emit_nested_tuple:\n",
    "                scalars = bb.emit(scalars)\n",
    "            output = (scalars, x, y)\n",
    "            bb.emit_func_output(output)\n",
    "\n",
    "        return bb.finalize()[\"func\"]\n",
    "\n",
    "    def make_expected(emit_nested_tuple: bool):\n",
    "        if emit_nested_tuple:\n",
    "\n",
    "            @R.function\n",
    "            def func(\n",
    "                n_1: R.Prim(value=\"n\"),\n",
    "                m_1: R.Prim(value=\"m\"),\n",
    "                x: R.Tensor((\"n\", \"m\"), dtype=\"float32\"),\n",
    "                y: R.Tensor((\"m\", \"n\"), dtype=\"float32\"),\n",
    "            ):\n",
    "                return ((n_1, m_1), x, y)\n",
    "\n",
    "        else:\n",
    "\n",
    "            @R.function\n",
    "            def func(\n",
    "                n_1: R.Prim(value=\"n\"),\n",
    "                m_1: R.Prim(value=\"m\"),\n",
    "                x: R.Tensor((\"n\", \"m\"), dtype=\"float32\"),\n",
    "                y: R.Tensor((\"m\", \"n\"), dtype=\"float32\"),\n",
    "            ):\n",
    "                gv = n_1, m_1\n",
    "                return (gv, x, y)\n",
    "\n",
    "        return func\n",
    "\n",
    "    expected = make_expected(emit_nested_tuple)\n",
    "    actual = make_function(emit_nested_tuple)\n",
    "\n",
    "    tvm.ir.assert_structural_equal(expected, actual)\n",
    "\n",
    "\n",
    "@pytest.mark.skip_well_formed_check_before_transform\n",
    "def test_finalize_public_private_name_conflict():\n",
    "    # tir call\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    def te_zero():\n",
    "        return topi.full((), \"int64\", tir.IntImm(\"int64\", 0))\n",
    "\n",
    "    def te_one():\n",
    "        return topi.full((), \"int64\", tir.IntImm(\"int64\", 1))\n",
    "\n",
    "    with bb.function(\"func\", []):\n",
    "        gv0 = bb.emit_te(te_zero, primfunc_name_hint=\"func\")\n",
    "        gv1 = bb.emit_te(te_one, primfunc_name_hint=\"func\")\n",
    "        bb.emit_func_output((gv0, gv1))\n",
    "\n",
    "    mod = bb.get()\n",
    "    assert not rx.analysis.well_formed(mod)\n",
    "    mod_final = bb.finalize()\n",
    "    assert rx.analysis.well_formed(mod_final)\n",
    "\n",
    "    # relax function call\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with bb.function(\"func\", [], private=True):\n",
    "        gvar = bb.emit_func_output(rx.const(0, \"int64\"))\n",
    "\n",
    "    with bb.function(\"func\", [], private=True):\n",
    "        gv0 = bb.emit(rx.Call(gvar, []))\n",
    "        gvar1 = bb.emit_func_output(gv0)\n",
    "\n",
    "    with bb.function(\"func\", []):\n",
    "        gv0 = bb.emit(rx.Call(gvar1, []))\n",
    "        bb.emit_func_output(gv0)\n",
    "\n",
    "    mod = bb.get()\n",
    "    assert not rx.analysis.well_formed(mod)\n",
    "    mod_final = bb.finalize()\n",
    "    assert rx.analysis.well_formed(mod_final)\n",
    "\n",
    "\n",
    "def test_emit_nested_seqexpr_in_binding_block():\n",
    "    \"\"\"May emit a SeqExpr inside a BindingBlock\"\"\"\n",
    "\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with bb.function(\"func\", []):\n",
    "        lhs = bb.emit(rx.const(1, \"int64\"), \"a\")\n",
    "        rhs = bb.emit(rx.const(2, \"int64\"), \"b\")\n",
    "        out = bb.emit(rx.op.add(lhs, rhs), \"c\")\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    seq_expr = bb.finalize()[\"func\"].body\n",
    "\n",
    "    bb = rx.BlockBuilder()\n",
    "    with bb.function(\"func\", [], private=True):\n",
    "        lhs = bb.emit(rx.const(3, \"int64\"), \"d\")\n",
    "        rhs = bb.emit(seq_expr, \"e\")\n",
    "        out = bb.emit(rx.op.add(lhs, rhs), \"f\")\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    output = bb.finalize()[\"func\"]\n",
    "\n",
    "    @R.function(private=True)\n",
    "    def expected():\n",
    "        d = R.const(3, \"int64\")\n",
    "        a = R.const(1, \"int64\")\n",
    "        b = R.const(2, \"int64\")\n",
    "        c = R.add(a, b)\n",
    "        e = c\n",
    "        f = R.add(d, e)\n",
    "        return f\n",
    "\n",
    "    tvm.ir.assert_structural_equal(expected, output)\n",
    "\n",
    "\n",
    "def test_emit_nested_dataflow_seqexpr_in_dataflow_block():\n",
    "    \"\"\"May emit a SeqExpr with dataflow inside a DataflowBlock\"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with bb.function(\"func\", []):\n",
    "        with bb.dataflow():\n",
    "            lhs = bb.emit(rx.const(1, \"int64\"), \"a\")\n",
    "            rhs = bb.emit(rx.const(2, \"int64\"), \"b\")\n",
    "            out = bb.emit_output(rx.op.add(lhs, rhs), \"c\")\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    seq_expr = bb.finalize()[\"func\"].body\n",
    "\n",
    "    bb = rx.BlockBuilder()\n",
    "    with bb.function(\"func\", [], private=True):\n",
    "        with bb.dataflow():\n",
    "            lhs = bb.emit(rx.const(3, \"int64\"), \"d\")\n",
    "            rhs = bb.emit(seq_expr, \"e\")\n",
    "            out = bb.emit_output(rx.op.add(lhs, rhs), \"f\")\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    output = bb.finalize()[\"func\"]\n",
    "\n",
    "    @R.function(private=True)\n",
    "    def expected():\n",
    "        with R.dataflow():\n",
    "            d = R.const(3, \"int64\")\n",
    "            a = R.const(1, \"int64\")\n",
    "            b = R.const(2, \"int64\")\n",
    "            c = R.add(a, b)\n",
    "            e = c\n",
    "            f = R.add(d, e)\n",
    "            R.output(c, f)\n",
    "        return f\n",
    "\n",
    "    tvm.ir.assert_structural_equal(expected, output)\n",
    "\n",
    "\n",
    "def test_emit_ill_formed_nested_seqexpr_in_dataflow_block():\n",
    "    \"\"\"May emit a SeqExpr inside a DataflowBlock\n",
    "\n",
    "    This produces ill-formed code, but cannot be caught at the\n",
    "    normalizer.  See also\n",
    "    test_emit_well_formed_nested_seqexpr_in_dataflow_block.\n",
    "\n",
    "    \"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with bb.function(\"func\", []):\n",
    "        lhs = bb.emit(rx.const(1, \"int64\"), \"a\")\n",
    "        rhs = bb.emit(rx.const(2, \"int64\"), \"b\")\n",
    "        out = bb.emit(rx.op.add(lhs, rhs), \"c\")\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    seq_expr = bb.finalize()[\"func\"].body\n",
    "\n",
    "    bb = rx.BlockBuilder()\n",
    "    with bb.function(\"func\", [], private=True):\n",
    "        with bb.dataflow():\n",
    "            lhs = bb.emit(rx.const(3, \"int64\"), \"d\")\n",
    "            # This would be ill-formed, as it requires breaking up the\n",
    "            # DataflowBlock with a BindingBlock.\n",
    "            rhs = bb.emit(seq_expr, \"e\")\n",
    "\n",
    "            # We cannot throw an error at that point, because it is\n",
    "            # only the later usage of \"d\" that results in use of a\n",
    "            # DataflowVar outside of its home DataflowBlock.\n",
    "            out = bb.emit_output(rx.op.add(lhs, rhs), \"f\")\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    output = bb.finalize()[\"func\"]\n",
    "\n",
    "    assert not rx.analysis.well_formed(tvm.ir.IRModule.from_expr(output))\n",
    "\n",
    "\n",
    "def test_emit_well_formed_nested_seqexpr_in_dataflow_block():\n",
    "    \"\"\"May emit a SeqExpr inside a DataflowBlock\n",
    "\n",
    "    This produces well-formed code, and should not have any output\n",
    "    produced by the normalizer.  See also\n",
    "    test_emit_ill_formed_nested_seqexpr_in_dataflow_block.\n",
    "    \"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    with bb.function(\"func\", []):\n",
    "        lhs = bb.emit(rx.const(1, \"int64\"), \"a\")\n",
    "        rhs = bb.emit(rx.const(2, \"int64\"), \"b\")\n",
    "        out = bb.emit(rx.op.add(lhs, rhs), \"c\")\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    seq_expr = bb.finalize()[\"func\"].body\n",
    "\n",
    "    bb = rx.BlockBuilder()\n",
    "    with bb.function(\"func\", [], private=True):\n",
    "        with bb.dataflow():\n",
    "            lhs = bb.emit(rx.const(3, \"int64\"), \"d\")\n",
    "            # This similarly breaks up the DataflowBlock, with\n",
    "            # identical steps as the previous test up until this\n",
    "            # point.\n",
    "            rhs = bb.emit(seq_expr, \"e\")\n",
    "\n",
    "            # But the \"d\" variable isn't used, and so there aren't any\n",
    "            # usages of DataflowVar outside of their home\n",
    "            # DataflowBlock.\n",
    "            out = bb.emit_output(rhs, \"f\")\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    output = bb.finalize()[\"func\"]\n",
    "\n",
    "    assert rx.analysis.well_formed(tvm.ir.IRModule.from_expr(output))\n",
    "\n",
    "    @R.function(private=True)\n",
    "    def expected() -> R.Tensor((), dtype=\"int64\"):\n",
    "        with R.dataflow():\n",
    "            d = R.const(3, \"int64\")\n",
    "            R.output()\n",
    "        a = R.const(1, \"int64\")\n",
    "        b = R.const(2, \"int64\")\n",
    "        c = R.add(a, b)\n",
    "        with R.dataflow():\n",
    "            e = c\n",
    "            f = e\n",
    "            R.output(f)\n",
    "        return f\n",
    "\n",
    "    tvm.ir.assert_structural_equal(expected, output)\n",
    "\n",
    "\n",
    "def test_error_when_unwrapping_dataflowvar():\n",
    "    \"\"\"Checks for ill-formed use of DataflowVar at normalization\n",
    "\n",
    "    We can check for some illegal unwrapping of SeqExpr, though.  If\n",
    "    the inlined non-dataflow SeqExpr uses a DataflowVar, that should\n",
    "    trigger an error when the SeqExpr is being unwrapped.\n",
    "    \"\"\"\n",
    "    bb = rx.BlockBuilder()\n",
    "\n",
    "    lhs = rx.Var(\"a\", rx.TensorStructInfo(shape=[], dtype=\"int64\"))\n",
    "\n",
    "    with bb.function(\"func\", [lhs]):\n",
    "        rhs = rx.const(2, \"int64\")\n",
    "        out = bb.emit(rx.op.add(lhs, rhs))\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    func = bb.finalize()[\"func\"]\n",
    "\n",
    "    bb = rx.BlockBuilder()\n",
    "    with bb.function(\"func\", [], private=True):\n",
    "        with bb.dataflow():\n",
    "            local_lhs = bb.emit(rx.const(3, \"int64\"), \"local_a\")\n",
    "            rhs = bb.emit(func.bind_params({lhs: local_lhs}).body, \"f\")\n",
    "            out = bb.emit_output(rhs, \"f\")\n",
    "\n",
    "        with pytest.raises(tvm.TVMError, match=\"Malformed AST\"):\n",
    "            bb.emit_func_output(out)\n",
    "\n",
    "\n",
    "def test_deduplication_when_input_contains_duplicates():\n",
    "    \"\"\"De-duplication of IRModules\n",
    "\n",
    "    A well-formed IRModule may contain duplicate function definitions.\n",
    "    This is rare, as most functions can be disambiguated by the the\n",
    "    function attribute `tvm::attr::kGlobalSymbol`.  However, private\n",
    "    functions do not have this attribute, and a well-formed IRModule\n",
    "    may contain multiple copies of the same function.\n",
    "\n",
    "    This is a regression test.  Previous implementation de-duplicated\n",
    "    using a `Dict[Function, GlobalVar]`, which has the failure mode\n",
    "    shown below.  This was resolved by de-duplicating using a\n",
    "    `Dict[Function, Set[GlobalVar]]` instead.\n",
    "\n",
    "    \"\"\"\n",
    "\n",
    "    @I.ir_module\n",
    "    class Module:\n",
    "        @R.function\n",
    "        def main(A: R.Tensor):\n",
    "            B = Module.subroutine_a(A)\n",
    "            C = Module.subroutine_b(B)\n",
    "            return C\n",
    "\n",
    "        @R.function(private=True)\n",
    "        def subroutine_a(arg: R.Tensor) -> R.Tensor:\n",
    "            return R.add(arg, arg)\n",
    "\n",
    "        @R.function(private=True)\n",
    "        def subroutine_b(arg: R.Tensor) -> R.Tensor:\n",
    "            return R.add(arg, arg)\n",
    "\n",
    "        @R.function(private=True)\n",
    "        def subroutine_c(arg: R.Tensor) -> R.Tensor:\n",
    "            return R.multiply(arg, arg)\n",
    "\n",
    "    # This test case is only valid when the two subroutines are\n",
    "    # structurally equal, and therefore allowed to be de-duplicated by\n",
    "    # the BlockBuilder.\n",
    "    tvm.ir.assert_structural_equal(Module[\"subroutine_a\"], Module[\"subroutine_b\"])\n",
    "\n",
    "    gvar_a = Module.get_global_var(\"subroutine_a\")\n",
    "    gvar_b = Module.get_global_var(\"subroutine_b\")\n",
    "    subroutine_c = Module[\"subroutine_c\"]\n",
    "\n",
    "    bb = rx.BlockBuilder(Module)\n",
    "\n",
    "    # Add a function to the module.  What we add doesn't matter, as\n",
    "    # this is only to initialize the de-duplication map.\n",
    "    bb.add_func(subroutine_c, \"_unused\")\n",
    "    # The deduplication table now maps `subroutine_ab` to either\n",
    "    # `gvar_a` or `gvar_b`.\n",
    "\n",
    "    # Update gvar_a.\n",
    "    bb.update_func(gvar_a, subroutine_c)\n",
    "    # The deduplication map no longer has an entry for\n",
    "    # `subroutine_ab`.\n",
    "\n",
    "    # Update gvar_b.  The deduplication map is present (because we\n",
    "    # called `add_func`), but doesn't contain an entry for\n",
    "    # `subroutine_ab` (because it was just removed).  This throws an\n",
    "    # error.\n",
    "    bb.update_func(gvar_b, subroutine_c)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "578930ea",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The tests here depend on tvmscript\n",
    "import tvm\n",
    "from tvm import te, tir\n",
    "from tvm import relax as rx\n",
    "from tvm.ir.base import assert_structural_equal\n",
    "from tvm.script.parser import ir as I\n",
    "from tvm.script.parser import relax as R\n",
    "from tvm.script.parser import tir as T\n",
    "\n",
    "\n",
    "def test_emit_te_with_symbolic_arg():\n",
    "    bb = rx.BlockBuilder()\n",
    "    m = tir.Var(\"m\", \"int64\")\n",
    "    x = rx.Var(\"x\", R.Tensor([10], \"float32\"))\n",
    "    y = rx.Var(\"y\", R.Shape([m]))\n",
    "\n",
    "    def te_func(A, offset):\n",
    "        return te.compute(A.shape, lambda i: A[i + offset], name=\"B\")\n",
    "\n",
    "    with bb.function(\"main\", [x, y]):\n",
    "        out = bb.emit_te(te_func, x, m)\n",
    "        bb.emit_func_output(out)\n",
    "\n",
    "    after = bb.get()\n",
    "\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @T.prim_func(private=True)\n",
    "        def te_func(\n",
    "            A: T.Buffer((T.int64(10),), \"float32\"),\n",
    "            B: T.Buffer((T.int64(10),), \"float32\"),\n",
    "            m: T.int64,\n",
    "        ):\n",
    "            T.func_attr({\"tir.noalias\": True})\n",
    "            for i in range(T.int64(10)):\n",
    "                with T.block(\"B\"):\n",
    "                    v_i = T.axis.spatial(T.int64(10), i)\n",
    "                    T.writes(B[v_i])\n",
    "                    B[v_i] = A[v_i + m]\n",
    "\n",
    "        @R.function\n",
    "        def main(\n",
    "            x: R.Tensor((10,), dtype=\"float32\"), y: R.Shape([\"m\"])\n",
    "        ) -> R.Tensor((10,), dtype=\"float32\"):\n",
    "            m = T.int64()\n",
    "            cls = Expected\n",
    "            gv = R.call_tir(\n",
    "                cls.te_func,\n",
    "                (x,),\n",
    "                out_sinfo=R.Tensor((10,), dtype=\"float32\"),\n",
    "                tir_vars=R.shape([m]),\n",
    "            )\n",
    "            return gv\n",
    "\n",
    "    assert_structural_equal(after, Expected)\n",
    "\n",
    "\n",
    "def test_symbolic_shape_in_prim_value():\n",
    "    \"\"\"Symbolic vars may be provided to TE in R.Prim\"\"\"\n",
    "\n",
    "    def te_slice(tensor, i):\n",
    "        return tvm.te.compute([tensor.shape[1]], lambda j: tensor[i, j], name=\"slice\")\n",
    "\n",
    "    def from_builder():\n",
    "        bb = rx.BlockBuilder()\n",
    "        A = rx.Var(\"A\", R.Tensor([16, 16], \"float32\"))\n",
    "        tir_i = tvm.tir.Var(\"tir_i\", \"int64\")\n",
    "        relax_i = rx.Var(\"relax_i\", R.Prim(value=tir_i))\n",
    "\n",
    "        with bb.function(\"main\", params=[A, relax_i]):\n",
    "            A_sliced = bb.emit_te(te_slice, A, relax_i)\n",
    "            bb.emit_func_output(A_sliced)\n",
    "\n",
    "        return bb.get()\n",
    "\n",
    "    @I.ir_module\n",
    "    class Expected:\n",
    "        @T.prim_func(private=True)\n",
    "        def te_slice(\n",
    "            A: T.Buffer([T.int64(16), T.int64(16)], \"float32\"),\n",
    "            Output: T.Buffer(T.int64(16), \"float32\"),\n",
    "            row_index: T.int64,\n",
    "        ):\n",
    "            T.func_attr({\"tir.noalias\": True})\n",
    "\n",
    "            for i in range(A.shape[1]):\n",
    "                with T.block(\"slice\"):\n",
    "                    vi = T.axis.remap(\"S\", [i])\n",
    "                    Output[vi] = A[row_index, vi]\n",
    "\n",
    "        @R.function\n",
    "        def main(\n",
    "            A: R.Tensor([16, 16], \"float32\"),\n",
    "            arg_row_index: R.Prim(value=\"row_index\"),\n",
    "        ):\n",
    "            cls = Expected\n",
    "\n",
    "            row_index = T.int64()\n",
    "\n",
    "            gv = R.call_tir(\n",
    "                cls.te_slice,\n",
    "                A,\n",
    "                tir_vars=[row_index],\n",
    "                out_sinfo=R.Tensor([16], \"float32\"),\n",
    "            )\n",
    "            return gv\n",
    "\n",
    "    tvm.ir.assert_structural_equal(from_builder(), Expected)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "84d472ed",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py313",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
