{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# {class}`~tvm.tir.transform.StorageRewrite`\n",
    "\n",
    "参考：`tvm/tests/python/tir-transform/test_tir_transform_storage_rewrite.py`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "from pathlib import Path\n",
    "ROOT = Path(\".\").resolve().parents[2]\n",
    "sys.path.extend([f\"{ROOT}/tests\", f\"{ROOT}/src\"])\n",
    "# # from tools.tag_span import _create_span, _set_span, _verify_structural_equal_with_span\n",
    "from tools.torch_utils import verify_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tvm\n",
    "import tvm.testing\n",
    "from tvm import te\n",
    "from tvm.driver.build_module import schedule_to_module\n",
    "from tvm.script import tir as T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def test_storage_share():\n",
    "    \"\"\"Chained elementwise stages should be folded into one shared allocation.\"\"\"\n",
    "    m = te.var(\"m\")\n",
    "    l = te.var(\"l\")\n",
    "    A = te.placeholder((m, l), name=\"A\")\n",
    "    num_stage = 5\n",
    "    B = A\n",
    "    # Build a chain of 5 elementwise stages; each stage reads only its predecessor.\n",
    "    for t in range(num_stage):\n",
    "        B = te.compute((m, l), lambda i, j: B[i, j] + (t + 1), name=\"A%d\" % t)\n",
    "\n",
    "    s = te.create_schedule(B.op)\n",
    "    mod = schedule_to_module(s, [A, B])\n",
    "    mod = tvm.tir.transform.StorageFlatten(64)(mod)\n",
    "\n",
    "    mod = tvm.tir.transform.Simplify()(mod)\n",
    "    mod = tvm.tir.transform.StorageRewrite()(mod)\n",
    "    stmt = mod[\"main\"].body\n",
    "\n",
    "    # Verify there is only one allocation, i.e. in-place folding worked.\n",
    "    num_alloc = [0]\n",
    "\n",
    "    def verify(n):\n",
    "        if isinstance(n, tvm.tir.Allocate):\n",
    "            num_alloc[0] += 1\n",
    "\n",
    "    tvm.tir.stmt_functor.post_order_visit(stmt, verify)\n",
    "    assert num_alloc[0] == 1\n",
    "\n",
    "\n",
    "def register_mem(scope_tb, max_bits):\n",
    "    \"\"\"Register a ``MemoryInfo`` node for ``scope_tb`` under ``tvm.info.mem.<scope_tb>``.\"\"\"\n",
    "\n",
    "    def make_mem_info():\n",
    "        # 16-bit units, 32-bit SIMD width, capacity limited to `max_bits`.\n",
    "        return tvm.ir.make_node(\n",
    "            \"MemoryInfo\", unit_bits=16, max_simd_bits=32, max_num_bits=max_bits, head_address=None\n",
    "        )\n",
    "\n",
    "    tvm.register_func(\"tvm.info.mem.%s\" % scope_tb, make_mem_info)\n",
    "\n",
    "\n",
    "def test_alloc_seq():\n",
    "    \"\"\"Two equal-sized allocations in sibling loops share one 200-element buffer.\"\"\"\n",
    "    scope_tb = \"local.L0A\"\n",
    "    max_bits = 1024 * 1024 * 1024\n",
    "\n",
    "    register_mem(scope_tb, max_bits)\n",
    "\n",
    "    ib = tvm.tir.ir_builder.create()\n",
    "    n = te.var(\"n\")\n",
    "    with ib.for_range(0, n, name=\"i\") as i:\n",
    "        with ib.for_range(0, 10, name=\"j\") as j:\n",
    "            buf_a = ib.allocate(\"float32\", 200, name=\"A\", scope=scope_tb)\n",
    "            buf_a[j] = 1.2\n",
    "        with ib.for_range(0, 10, name=\"j\") as j:\n",
    "            buf_b = ib.allocate(\"float32\", 200, name=\"B\", scope=scope_tb)\n",
    "            buf_b[j] = 1.3\n",
    "\n",
    "    body = ib.get()\n",
    "\n",
    "    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))\n",
    "    body = tvm.tir.transform.StorageRewrite()(mod)[\"main\"].body\n",
    "\n",
    "    num_alloc = [0]\n",
    "\n",
    "    def count_alloc(node):\n",
    "        # Each surviving Allocate must keep the original 200-element extent.\n",
    "        if isinstance(node, tvm.tir.Allocate):\n",
    "            num_alloc[0] += 1\n",
    "            assert node.extents[0].value == 200\n",
    "\n",
    "    tvm.tir.stmt_functor.post_order_visit(body, count_alloc)\n",
    "    assert num_alloc[0] == 1\n",
    "\n",
    "\n",
    "def test_alloc_different_dtypes():\n",
    "    \"\"\"Buffers of mixed dtypes should merge into one allocation of the combined extent.\"\"\"\n",
    "    def stmt_generater(dtype_list, length):\n",
    "        \"\"\"Build loops writing buffers A..D with the given dtypes, plus an int8 reader E.\"\"\"\n",
    "        ib = tvm.tir.ir_builder.create()\n",
    "        base_dtype = dtype_list[0]\n",
    "        # NOTE(review): global_a is never used below — presumably leftover; confirm before removing.\n",
    "        global_a = te.placeholder((length,), name=\"global_a\", dtype=base_dtype)\n",
    "        assert len(dtype_list) == 4\n",
    "        with ib.for_range(0, length, name=\"j\") as j:\n",
    "            dtype = dtype_list[0]\n",
    "            A = ib.allocate(dtype, length, name=\"A\", scope=\"local.L0A\")\n",
    "            A[j] = tvm.tir.const(1, dtype=dtype)\n",
    "        with ib.for_range(0, length, name=\"j\") as j:\n",
    "            dtype = dtype_list[1]\n",
    "            B = ib.allocate(dtype, length, name=\"B\", scope=\"local.L0A\")\n",
    "            B[j] = tvm.tir.const(1, dtype=dtype)\n",
    "        with ib.for_range(0, length, name=\"j\") as j:\n",
    "            dtype = dtype_list[2]\n",
    "            C = ib.allocate(dtype, length, name=\"C\", scope=\"local.L0A\")\n",
    "            C[j] = tvm.tir.const(1, dtype=dtype)\n",
    "        with ib.for_range(0, length, name=\"j\") as j:\n",
    "            dtype = dtype_list[3]\n",
    "            D = ib.allocate(dtype, length, name=\"D\", scope=\"local.L0A\")\n",
    "            D[j] = tvm.tir.const(1, dtype=dtype)\n",
    "        # E reads all four buffers, so none of A..D can be dropped as dead stores.\n",
    "        with ib.for_range(0, length, name=\"j\") as j:\n",
    "            dtype = \"int8\"\n",
    "            E = ib.allocate(dtype, length, name=\"E\", scope=\"local.L0A\")\n",
    "            E[j] = A[j].astype(dtype) + B[j].astype(dtype) + C[j].astype(dtype) + D[j].astype(dtype)\n",
    "        return ib.get()\n",
    "\n",
    "    def dtype_bit_len(dtype):\n",
    "        \"\"\"Extract the bit width from a dtype string such as 'float32' -> 32.\"\"\"\n",
    "        index = 0\n",
    "        for i in dtype:\n",
    "            if i.isdigit():\n",
    "                break\n",
    "            index += 1\n",
    "        return int(dtype[index:])\n",
    "\n",
    "    def offset_generater(dtype_list, length):\n",
    "        \"\"\"Total extent of all four buffers, measured in elements of the first dtype.\"\"\"\n",
    "        dtype_len_list = [dtype_bit_len(i) for i in dtype_list]\n",
    "        base_len = dtype_len_list[0]\n",
    "        return sum([i * length / base_len for i in dtype_len_list])\n",
    "\n",
    "    def dtype_test(dtype_list, length):\n",
    "        \"\"\"Check the rewritten allocation has exactly the combined extent.\"\"\"\n",
    "        def verify(n):\n",
    "            if isinstance(n, tvm.tir.Allocate):\n",
    "                assert n.extents[0].value == offset\n",
    "\n",
    "        body = stmt_generater(dtype_list, length)\n",
    "        offset = offset_generater(dtype_list, length)\n",
    "\n",
    "        mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], body))\n",
    "        body = tvm.tir.transform.StorageRewrite()(mod)[\"main\"].body\n",
    "\n",
    "        tvm.tir.stmt_functor.post_order_visit(body, verify)\n",
    "\n",
    "    length = 1024\n",
    "    dtype_list = [\"float16\", \"int32\", \"uint16\", \"int8\"]\n",
    "    dtype_test(dtype_list, length)\n",
    "\n",
    "    dtype_list = [\"float32\", \"int32\", \"uint16\", \"int8\"]\n",
    "    dtype_test(dtype_list, length)\n",
    "\n",
    "    dtype_list = [\"float64\", \"int32\", \"uint16\", \"int8\"]\n",
    "    dtype_test(dtype_list, length)\n",
    "\n",
    "    dtype_list = [\"int8\", \"int32\", \"uint16\", \"uint8\"]\n",
    "    dtype_test(dtype_list, length)\n",
    "\n",
    "\n",
    "def test_inplace_rule():\n",
    "    \"\"\"In-place folding collapses the chain to two allocations.\n",
    "\n",
    "    AA reads both A1[i] and A1[0]; presumably the re-read of A1[0] blocks one\n",
    "    of the folds, leaving two allocations rather than one — TODO confirm.\n",
    "    \"\"\"\n",
    "    m = 10\n",
    "    A = te.placeholder((m,), name=\"A\")\n",
    "    A0 = te.compute((m,), lambda i: A[i], name=\"A0\")\n",
    "    A1 = te.compute((m,), lambda i: A[i] + 1, name=\"A1\")\n",
    "    AA = te.compute((m,), lambda i: A0[i] + A1[i] + A1[0], name=\"AA\")\n",
    "    B = te.compute((m,), lambda i: AA[i] + 1, name=\"B\")\n",
    "    s = te.create_schedule(B.op)\n",
    "    mod = schedule_to_module(s, [A, B])\n",
    "    mod = tvm.tir.transform.StorageFlatten(64)(mod)\n",
    "\n",
    "    mod = tvm.tir.transform.Simplify()(mod)\n",
    "    mod = tvm.tir.transform.StorageRewrite()(mod)\n",
    "    stmt = mod[\"main\"].body\n",
    "\n",
    "    # Verify that exactly two allocations survive in-place folding.\n",
    "    num_alloc = [0]\n",
    "\n",
    "    def verify(n):\n",
    "        if isinstance(n, tvm.tir.Allocate):\n",
    "            num_alloc[0] += 1\n",
    "\n",
    "    tvm.tir.stmt_functor.post_order_visit(stmt, verify)\n",
    "    assert num_alloc[0] == 2\n",
    "\n",
    "\n",
    "def test_storage_combine():\n",
    "    \"\"\"Stages tagged \\\"global:tag\\\" combine into a single 16-element allocation.\n",
    "\n",
    "    16 = 2 buffers x n (8) elements — presumably only two stages need to be\n",
    "    live at once, so the pool double-buffers; confirm against pass behavior.\n",
    "    \"\"\"\n",
    "    n = 8\n",
    "    A = te.placeholder((4,), name=\"A\")\n",
    "    num_stage = 5\n",
    "    B = A\n",
    "    stages = []\n",
    "    for t in range(num_stage):\n",
    "        B = te.compute((n,), lambda i: B[i] + B[0] + (t + 1), name=\"A%d\" % t)\n",
    "        stages.append(B)\n",
    "\n",
    "    s = te.create_schedule(B.op)\n",
    "    # Tag all intermediate stages so StorageRewrite may pool them together.\n",
    "    for S in stages[:-1]:\n",
    "        s[S].set_scope(\"global:tag\")\n",
    "\n",
    "    mod = schedule_to_module(s, [A, B])\n",
    "    mod = tvm.tir.transform.StorageFlatten(64)(mod)\n",
    "\n",
    "    mod = tvm.tir.transform.Simplify()(mod)\n",
    "    mod = tvm.tir.transform.StorageRewrite()(mod)\n",
    "    stmt = mod[\"main\"].body\n",
    "\n",
    "    num_alloc = [0]\n",
    "\n",
    "    def verify(n):\n",
    "        if isinstance(n, tvm.tir.Allocate):\n",
    "            num_alloc[0] += 1\n",
    "            assert n.extents[0].value == 16\n",
    "\n",
    "    tvm.tir.stmt_functor.post_order_visit(stmt, verify)\n",
    "    assert num_alloc[0] == 1\n",
    "\n",
    "\n",
    "def test_storage_combine_with_vectorization():\n",
    "    \"\"\"Combined buffers under vectorization must occupy non-overlapping ranges.\"\"\"\n",
    "    n = 1024\n",
    "    A = te.placeholder((n,), name=\"A\")\n",
    "    B = te.placeholder((n,), name=\"B\")\n",
    "    C = te.compute((n,), lambda i: A[i] + B[i], name=\"C\")\n",
    "    s = te.create_schedule(C.op)\n",
    "    AA = s.cache_read(A, \"global:tag\", readers=[C])\n",
    "    BB = s.cache_read(B, \"global:tag\", readers=[C])\n",
    "    CC = s.cache_write(C, \"global:tag\")\n",
    "    s[CC].vectorize(s[CC].op.axis[0])\n",
    "    mod = schedule_to_module(s, [A, B, C])\n",
    "    mod = tvm.tir.transform.StorageFlatten(64)(mod)\n",
    "    mod = tvm.tir.transform.VectorizeLoop()(mod)\n",
    "    mod = tvm.tir.transform.StorageRewrite()(mod)\n",
    "    mod = tvm.tir.transform.Simplify()(mod)\n",
    "    stmt = mod[\"main\"].body\n",
    "    num_alloc = [0]\n",
    "\n",
    "    def verify(v):\n",
    "        # find add op\n",
    "        if (\n",
    "            isinstance(v, tvm.tir.Add)\n",
    "            and isinstance(v.a, tvm.tir.BufferLoad)\n",
    "            and isinstance(v.b, tvm.tir.BufferLoad)\n",
    "        ):\n",
    "            lhs_ramp = v.a.indices[0]\n",
    "            rhs_ramp = v.b.indices[0]\n",
    "            # these two ramp loads should not overlap inside the merged buffer\n",
    "            assert lhs_ramp.lanes == n\n",
    "            assert rhs_ramp.lanes == n\n",
    "            assert lhs_ramp.base >= rhs_ramp.base + n or rhs_ramp.base >= lhs_ramp.base + n\n",
    "        elif isinstance(v, tvm.tir.Allocate):\n",
    "            num_alloc[0] += 1\n",
    "\n",
    "    tvm.tir.stmt_functor.post_order_visit(stmt, verify)\n",
    "    assert num_alloc[0] == 1\n",
    "\n",
    "\n",
    "def test_address_of():\n",
    "    \"\"\"Buffers whose addresses escape via address_of constrain reuse.\"\"\"\n",
    "    # In this test, the storage rewrite pass is allowed to\n",
    "    # combine buffers B and D, but not C\n",
    "    @T.prim_func\n",
    "    def before(A: T.Buffer(8, \"float32\"), E: T.Buffer(8, \"float32\")):\n",
    "        B_data = T.allocate([8], \"float32\")\n",
    "        B = T.Buffer(8, data=B_data, align=32)\n",
    "        for i in range(8):\n",
    "            B[i] = (\n",
    "                T.call_extern(\"deref\", T.address_of(A[i]), dtype=\"float32\")\n",
    "                + T.call_extern(\"deref\", T.address_of(A[0]), dtype=\"float32\")\n",
    "                + T.float32(1)\n",
    "            )\n",
    "        C_data = T.allocate([8], \"float32\")\n",
    "        C = T.Buffer(8, data=C_data, align=32)\n",
    "        for i in range(8):\n",
    "            C[i] = (\n",
    "                T.call_extern(\"deref\", T.address_of(B[i]), dtype=\"float32\")\n",
    "                + T.call_extern(\"deref\", T.address_of(B[0]), dtype=\"float32\")\n",
    "                + T.float32(2)\n",
    "            )\n",
    "        D_data = T.allocate([8], \"float32\")\n",
    "        D = T.Buffer(8, data=D_data, align=32)\n",
    "        for i in range(8):\n",
    "            D[i] = (\n",
    "                T.call_extern(\"deref\", T.address_of(C[i]), dtype=\"float32\")\n",
    "                + T.call_extern(\"deref\", T.address_of(C[0]), dtype=\"float32\")\n",
    "                + T.float32(2)\n",
    "            )\n",
    "        for i in range(8):\n",
    "            E[i] = (\n",
    "                T.call_extern(\"deref\", T.address_of(D[i]), dtype=\"float32\")\n",
    "                + T.call_extern(\"deref\", T.address_of(D[0]), dtype=\"float32\")\n",
    "                + T.float32(3)\n",
    "            )\n",
    "\n",
    "    def verify(n):\n",
    "        if isinstance(n, tvm.tir.Allocate):\n",
    "            total_alloc[0] += n.extents[0].value\n",
    "\n",
    "    total_alloc = [0]\n",
    "    mod = tvm.IRModule.from_expr(before.with_attr(\"global_symbol\", \"main\"))\n",
    "    mod.show()\n",
    "    tvm.tir.stmt_functor.post_order_visit(mod[\"main\"].body, verify)\n",
    "    # Before rewriting: three separate 8-element buffers (B, C, D) -> 24.\n",
    "    assert total_alloc[0] == 24\n",
    "\n",
    "    total_alloc[0] = 0\n",
    "    mod = tvm.tir.transform.StorageRewrite()(mod)\n",
    "    mod.show()\n",
    "    tvm.tir.stmt_functor.post_order_visit(mod[\"main\"].body, verify)\n",
    "    # After rewriting: B and D share one buffer, C keeps its own -> 8 + 8 = 16.\n",
    "    assert total_alloc[0] == 16\n",
    "\n",
    "\n",
    "def test_storage_share_gpu():\n",
    "    \"\"\"On GPU, global-scope stages fold to two allocations; each shared-scope stage keeps its own.\"\"\"\n",
    "    m = te.var(\"m\")\n",
    "    A = [te.placeholder((m), name=\"A\")]\n",
    "    num_stage = 5\n",
    "    # Each stage is a pair: a shared-memory producer followed by a global consumer.\n",
    "    for t in range(num_stage):\n",
    "        A.append(te.compute((m,), lambda i: A[-1][i] + (t + 1), name=\"A%d_s\" % t))\n",
    "        A.append(te.compute((m,), lambda i: A[-1][i], name=\"A%d\" % t))\n",
    "    s = te.create_schedule(A[-1].op)\n",
    "    for t in range(num_stage):\n",
    "        x = A[2 * t + 2].op.axis[0]\n",
    "        bx, tx = s[A[2 * t + 2]].split(x, factor=32)\n",
    "        s[A[2 * t + 2]].bind(bx, te.thread_axis(\"blockIdx.x\"))\n",
    "        s[A[2 * t + 2]].bind(tx, te.thread_axis(\"threadIdx.x\"))\n",
    "        s[A[2 * t + 1]].compute_at(s[A[2 * t + 2]], tx)\n",
    "        s[A[2 * t + 1]].set_scope(\"shared\")\n",
    "\n",
    "    mod = schedule_to_module(s, [A[0], A[-1]])\n",
    "    mod = tvm.tir.transform.StorageFlatten(64)(mod)\n",
    "    mod = tvm.tir.transform.Simplify()(mod)\n",
    "    mod = tvm.tir.transform.StorageRewrite()(mod)\n",
    "    stmt = mod[\"main\"].body\n",
    "\n",
    "    alloc_stats = {\"global\": 0, \"shared\": 0}\n",
    "\n",
    "    def verify(n):\n",
    "        if isinstance(n, tvm.tir.Allocate):\n",
    "            scope = n.buffer_var.type_annotation.storage_scope\n",
    "            alloc_stats[scope] += 1\n",
    "\n",
    "    tvm.tir.stmt_functor.post_order_visit(stmt, verify)\n",
    "    assert alloc_stats[\"global\"] == 2\n",
    "    assert alloc_stats[\"shared\"] == num_stage\n",
    "\n",
    "\n",
    "def test_parallel_alloc():\n",
    "    \"\"\"Allocations inside a parallel loop must stay inside the parallel scope.\"\"\"\n",
    "    ib = tvm.tir.ir_builder.create()\n",
    "    n = te.var(\"n\")\n",
    "    with ib.for_range(0, n, name=\"i\", kind=\"parallel\") as i:\n",
    "        with ib.for_range(0, 10, name=\"j\") as j:\n",
    "            A = ib.allocate(\"float32\", n, name=\"A\", scope=\"global\")\n",
    "            A[j] = A[j] + 2\n",
    "\n",
    "    body = ib.get()\n",
    "    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))\n",
    "    body = tvm.tir.transform.StorageRewrite()(mod)[\"main\"]\n",
    "\n",
    "    # The Allocate remains under the parallel for, not hoisted above it.\n",
    "    assert isinstance(body.body.body, tvm.tir.Allocate)\n",
    "\n",
    "    # Same check with an explicit parallel_launch_point pragma wrapping the loop.\n",
    "    ib = tvm.tir.ir_builder.create()\n",
    "    n = te.var(\"n\")\n",
    "    with ib.for_range(0, n, name=\"t\") as i:\n",
    "        ib.scope_attr(\n",
    "            tvm.tir.const(1, \"int32\"), \"pragma_scope\", tvm.tir.StringImm(\"parallel_launch_point\")\n",
    "        )\n",
    "        with ib.for_range(0, n, name=\"i\", kind=\"parallel\") as i:\n",
    "            with ib.for_range(0, 10, name=\"j\") as j:\n",
    "                A = ib.allocate(\"float32\", n, name=\"A\", scope=\"global\")\n",
    "                A[j] = A[j] + 2\n",
    "    body = ib.get()\n",
    "\n",
    "    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))\n",
    "    body = tvm.tir.transform.StorageRewrite()(mod)[\"main\"]\n",
    "\n",
    "    assert isinstance(body.body.body.body.body, tvm.tir.Allocate)\n",
    "\n",
    "\n",
    "def test_while_alloc():\n",
    "    \"\"\"Allocations inside a while loop are hoisted out of the while body:\n",
    "    to just inside a parallel for, or fully above a serial for.\n",
    "    \"\"\"\n",
    "    def get_mod(kind=\"serial\"):\n",
    "        # Build: for i { alloc j; while (j[0] < 10) { alloc A; ... } }\n",
    "        ib = tvm.tir.ir_builder.create()\n",
    "        n = te.var(\"n\")\n",
    "        with ib.for_range(0, n, name=\"i\", kind=kind) as i:\n",
    "            j = ib.allocate(\"int32\", 1, name=\"j\", scope=\"global\")\n",
    "            j[0] = 0\n",
    "            with ib.while_loop(j[0] < 10):\n",
    "                A = ib.allocate(\"float32\", n, name=\"A\", scope=\"global\")\n",
    "                A[j[0]] = A[j[0]] + 2\n",
    "                j[0] += j[0] + 1\n",
    "\n",
    "        body = ib.get()\n",
    "        return tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))\n",
    "\n",
    "    mod = get_mod(kind=\"parallel\")\n",
    "    # parallel (i, 0, n) {\n",
    "    #   allocate j[int32 * 1]\n",
    "    #   j[0] = 0\n",
    "    #   while((j[0] < 10)){\n",
    "    #     // attr [A] storage_scope = \"global\"\n",
    "    #     allocate A[float32 * n]\n",
    "    #     A[j[0]] = (A[j[0]] + 2f)\n",
    "    #     j[0] = (j[0] + (j[0] + 1))\n",
    "    #   }\n",
    "    # }\n",
    "    body = tvm.tir.transform.StorageRewrite()(mod)[\"main\"]\n",
    "    # parallel (i, 0, n) {\n",
    "    #   allocate j[int32 * 1]\n",
    "    #   allocate A[float32 * n]\n",
    "    #   j[0] = 0\n",
    "    #   while((j[0] < 10)){\n",
    "    #     A[j[0]] = (A[j[0]] + 2f)\n",
    "    #     j[0] = (j[0] + (j[0] + 1))\n",
    "    #   }\n",
    "    # }\n",
    "    assert isinstance(body.body.body, tvm.tir.Allocate)  # j\n",
    "    assert isinstance(body.body.body.body, tvm.tir.Allocate)  # A\n",
    "\n",
    "    mod = get_mod(kind=\"serial\")\n",
    "    # for (i, 0, n) {\n",
    "    #   allocate j[int32 * 1]\n",
    "    #   j[0] = 0\n",
    "    #   while((j[0] < 10)){\n",
    "    #     // attr [A] storage_scope = \"global\"\n",
    "    #     allocate A[float32 * n]\n",
    "    #     A[j[0]] = (A[j[0]] + 2f)\n",
    "    #     j[0] = (j[0] + (j[0] + 1))\n",
    "    #   }\n",
    "    # }\n",
    "    body = tvm.tir.transform.StorageRewrite()(mod)[\"main\"]\n",
    "    # allocate j[int32 * 1]\n",
    "    # allocate A[float32 * n]\n",
    "    # for (i, 0, n) {\n",
    "    #   j[0] = 0\n",
    "    #   while((j[0] < 10)){\n",
    "    #     A[j[0]] = (A[j[0]] + 2f)\n",
    "    #     j[0] = (j[0] + (j[0] + 1))\n",
    "    #   }\n",
    "    # }\n",
    "    assert isinstance(body.body, tvm.tir.Allocate)  # j\n",
    "    assert isinstance(body.body.body, tvm.tir.Allocate)  # A\n",
    "\n",
    "\n",
    "def test_inplace_rule2(scope_tb=\"local_TB2\", max_bits=1024 * 1024 * 1024):\n",
    "    \"\"\"In-place folding with cached reads leaves exactly two allocations.\n",
    "\n",
    "    Also reused by test_exceed_mem with a tiny ``max_bits`` to trigger the\n",
    "    out-of-memory diagnostic.\n",
    "    \"\"\"\n",
    "    # Test Buffer\n",
    "    register_mem(scope_tb, max_bits)\n",
    "    m = 10\n",
    "    A = te.placeholder((m,), name=\"A\")\n",
    "    C = te.placeholder((m,), name=\"C\")\n",
    "    D = te.placeholder((m,), name=\"D\")\n",
    "    A0 = te.compute((m,), lambda i: A[i] + C[i], name=\"A0\")\n",
    "    A1 = te.compute((m,), lambda i: D[i] * D[i], name=\"A1\")\n",
    "    A2 = te.compute((m,), lambda i: A0[i] + A1[i], name=\"A2\")\n",
    "    B = te.compute((m,), lambda i: A2[i], name=\"B\")\n",
    "    s = te.create_schedule(B.op)\n",
    "    # Return values unused on purpose: cache_read mutates the schedule in place.\n",
    "    A0L = s.cache_read(A0, scope_tb, [A2])\n",
    "    A1L = s.cache_read(A1, scope_tb, [A2])\n",
    "    A2L = s.cache_read(A2, scope_tb, [B])\n",
    "    mod = schedule_to_module(s, [A, B, C, D])\n",
    "    mod = tvm.tir.transform.StorageFlatten(64)(mod)\n",
    "\n",
    "    mod = tvm.tir.transform.Simplify()(mod)\n",
    "    mod = tvm.tir.transform.StorageRewrite()(mod)\n",
    "    stmt = mod[\"main\"].body\n",
    "\n",
    "    # Verify that exactly two allocations survive in-place folding.\n",
    "    num_alloc = [0]\n",
    "\n",
    "    def verify(n):\n",
    "        if isinstance(n, tvm.tir.Allocate):\n",
    "            num_alloc[0] += 1\n",
    "\n",
    "    tvm.tir.stmt_functor.post_order_visit(stmt, verify)\n",
    "    assert num_alloc[0] == 2\n",
    "\n",
    "\n",
    "def test_exceed_mem():\n",
    "    \"\"\"StorageRewrite must raise when an allocation exceeds the scope's max_num_bits.\"\"\"\n",
    "    max_bits = 639\n",
    "    # The critical max_num_bits is between 639 and 640\n",
    "    loc = -1\n",
    "    try:\n",
    "        test_inplace_rule2(\"local_TEM\", max_bits)\n",
    "    except Exception as e:\n",
    "        estr = str(e)\n",
    "        loc = estr.find(\"Allocation exceed bound of memory\")\n",
    "    # Assert OUTSIDE the except block: the assert previously lived inside it,\n",
    "    # so the test passed vacuously whenever no exception was raised at all.\n",
    "    assert loc != -1, \"expected 'Allocation exceed bound of memory' error was not raised\"\n",
    "\n",
    "\n",
    "def test_inplace_rule3():\n",
    "    \"\"\"After aggressive inlining, the remaining cached buffers pack into a 70-element pool.\"\"\"\n",
    "    # Test Buffer\n",
    "    scope_tb = \"local_TB3\"\n",
    "    max_bits = 1024 * 1024 * 1024\n",
    "\n",
    "    register_mem(scope_tb, max_bits)\n",
    "    m = 10\n",
    "    B0 = te.placeholder((m,), name=\"B0\")\n",
    "    B1 = te.placeholder((m,), name=\"B1\")\n",
    "    B2 = te.placeholder((m,), name=\"B2\")\n",
    "    B3 = te.placeholder((m,), name=\"B3\")\n",
    "    B4 = te.placeholder((m,), name=\"B4\")\n",
    "    B5 = te.placeholder((m,), name=\"B5\")\n",
    "\n",
    "    # Cross-product structure (a 3x3 determinant-like pattern): each input\n",
    "    # feeds two of the intermediate products.\n",
    "    B6 = te.compute((m,), lambda i: B1[i] * B5[i], name=\"B6\")\n",
    "    B7 = te.compute((m,), lambda i: B2[i] * B4[i], name=\"B7\")\n",
    "    B8 = te.compute((m,), lambda i: B6[i] - B7[i], name=\"B8\")\n",
    "\n",
    "    B9 = te.compute((m,), lambda i: B2[i] * B3[i], name=\"B9\")\n",
    "    B10 = te.compute((m,), lambda i: B0[i] * B5[i], name=\"B10\")\n",
    "    B11 = te.compute((m,), lambda i: B9[i] - B10[i], name=\"B11\")\n",
    "\n",
    "    B12 = te.compute((m,), lambda i: B0[i] * B4[i], name=\"B12\")\n",
    "    B13 = te.compute((m,), lambda i: B1[i] * B3[i], name=\"B13\")\n",
    "    B14 = te.compute((m,), lambda i: B12[i] - B13[i], name=\"B14\")\n",
    "\n",
    "    B = te.compute((m,), lambda i: B8[i] * B11[i] + B14[i], name=\"B\")\n",
    "    s = te.create_schedule(B.op)\n",
    "\n",
    "    # Cache every input in the registered scope (each read by two consumers).\n",
    "    B1L = s.cache_read(B1, scope_tb, [B6, B13])\n",
    "    B5L = s.cache_read(B5, scope_tb, [B6, B10])\n",
    "    B2L = s.cache_read(B2, scope_tb, [B7, B9])\n",
    "    B4L = s.cache_read(B4, scope_tb, [B7, B12])\n",
    "    B3L = s.cache_read(B3, scope_tb, [B9, B13])\n",
    "    B0L = s.cache_read(B0, scope_tb, [B10, B12])\n",
    "\n",
    "    B8L = s.cache_write(B8, scope_tb)\n",
    "    B11L = s.cache_write(B11, scope_tb)\n",
    "    B14L = s.cache_write(B14, scope_tb)\n",
    "    B6L = s.cache_write(B6, scope_tb)\n",
    "    B7L = s.cache_write(B7, scope_tb)\n",
    "    B9L = s.cache_write(B9, scope_tb)\n",
    "    B10L = s.cache_write(B10, scope_tb)\n",
    "    B12L = s.cache_write(B12, scope_tb)\n",
    "    B13L = s.cache_write(B13, scope_tb)\n",
    "\n",
    "    # Inline the original stages so only the cache stages materialize storage.\n",
    "    s[B12].compute_inline()\n",
    "    s[B13].compute_inline()\n",
    "    s[B8].compute_inline()\n",
    "    s[B11].compute_inline()\n",
    "    s[B14].compute_inline()\n",
    "    s[B6].compute_inline()\n",
    "    s[B7].compute_inline()\n",
    "    s[B9].compute_inline()\n",
    "    s[B10].compute_inline()\n",
    "\n",
    "    s = s.normalize()\n",
    "    mod = schedule_to_module(s, [B0, B1, B2, B3, B4, B5, B])\n",
    "    mod = tvm.tir.transform.StorageFlatten(64)(mod)\n",
    "\n",
    "    mod = tvm.tir.transform.Simplify()(mod)\n",
    "    mod = tvm.tir.transform.StorageRewrite()(mod)\n",
    "    stmt = mod[\"main\"].body\n",
    "\n",
    "    # Verify in-place folding: every surviving allocation has extent 70.\n",
    "    def verify(n):\n",
    "        if isinstance(n, tvm.tir.Allocate):\n",
    "            assert n.extents[0].value == 70\n",
    "\n",
    "    tvm.tir.stmt_functor.post_order_visit(stmt, verify)\n",
    "\n",
    "\n",
    "def test_alloc_seq_type():\n",
    "    \"\"\"Mixed float32/int16 buffers in one scope merge into a single allocation\n",
    "    (extent 500 — presumably measured in the merged pool's element type; confirm).\n",
    "    \"\"\"\n",
    "    ib = tvm.tir.ir_builder.create()\n",
    "    n = te.var(\"n\")\n",
    "    with ib.for_range(0, n, name=\"i\") as i:\n",
    "        with ib.for_range(0, 10, name=\"j\") as j:\n",
    "            A = ib.allocate(\"float32\", 200, name=\"A\", scope=\"local.L0A\")\n",
    "            A1 = ib.allocate(\"float32\", 200, name=\"A1\", scope=\"local.L0A\")\n",
    "            A[j] = 1.2\n",
    "            A1[j] = 1.3\n",
    "            B = ib.allocate(\"int16\", 200, name=\"B\", scope=\"local.L0A\")\n",
    "            B[j] = tvm.tir.const(1, \"int16\")\n",
    "            C = ib.allocate(\"int16\", 200, name=\"C\", scope=\"local.L0A\")\n",
    "            C[j] = tvm.tir.const(1, \"int16\")\n",
    "            D = ib.allocate(\"int16\", 200, name=\"D\", scope=\"local.L0A\")\n",
    "            D[j] = B[j] + C[j]\n",
    "            A2 = ib.allocate(\"float32\", 200, name=\"A2\", scope=\"local.L0A\")\n",
    "            A2[j] = A[j]\n",
    "\n",
    "    body = ib.get()\n",
    "\n",
    "    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))\n",
    "    body = tvm.tir.transform.StorageRewrite()(mod)[\"main\"].body\n",
    "\n",
    "    num_alloc = [0]\n",
    "\n",
    "    def verify(n):\n",
    "        if isinstance(n, tvm.tir.Allocate):\n",
    "            num_alloc[0] += 1\n",
    "            assert n.extents[0].value == 500\n",
    "\n",
    "    tvm.tir.stmt_functor.post_order_visit(body, verify)\n",
    "    assert num_alloc[0] == 1\n",
    "\n",
    "\n",
    "def test_alloc_seq_type2():\n",
    "    \"\"\"Non-overlapping buffers of equal byte size (200 x f32 vs 400 x i16)\n",
    "    fold into one 200-element allocation.\n",
    "    \"\"\"\n",
    "    scope_tb = \"local.L0A2\"\n",
    "    max_bits = 1024 * 1024 * 1024\n",
    "\n",
    "    register_mem(scope_tb, max_bits)\n",
    "\n",
    "    ib = tvm.tir.ir_builder.create()\n",
    "    n = te.var(\"n\")\n",
    "    with ib.for_range(0, n, name=\"i\") as i:\n",
    "        with ib.for_range(0, 10, name=\"j\") as j:\n",
    "            A = ib.allocate(\"float32\", 200, name=\"A\", scope=scope_tb)\n",
    "            A[j] = 1.2\n",
    "        with ib.for_range(0, 20, name=\"j\") as j:\n",
    "            B = ib.allocate(\"int16\", 400, name=\"B\", scope=scope_tb)\n",
    "            B[j] = tvm.tir.const(1, \"int16\")\n",
    "        with ib.for_range(0, 10, name=\"j\") as j:\n",
    "            C = ib.allocate(\"float32\", 200, name=\"C\", scope=scope_tb)\n",
    "            C[j] = 1.2\n",
    "\n",
    "    body = ib.get()\n",
    "\n",
    "    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))\n",
    "    body = tvm.tir.transform.StorageRewrite()(mod)[\"main\"].body\n",
    "\n",
    "    num_alloc = [0]\n",
    "\n",
    "    def verify(n):\n",
    "        if isinstance(n, tvm.tir.Allocate):\n",
    "            num_alloc[0] += 1\n",
    "            assert n.extents[0].value == 200\n",
    "\n",
    "    tvm.tir.stmt_functor.post_order_visit(body, verify)\n",
    "    assert num_alloc[0] == 1\n",
    "\n",
    "\n",
    "def test_reuse_small_buffer():\n",
    "    \"\"\"Smaller dead buffers are reused inside the merged pool: one allocation\n",
    "    of 800 int16 elements covers all six buffers.\n",
    "    \"\"\"\n",
    "    ib = tvm.tir.ir_builder.create()\n",
    "    n = te.var(\"n\")\n",
    "    with ib.for_range(0, n, name=\"i\") as i:\n",
    "        with ib.for_range(0, 10, name=\"j\") as j:\n",
    "            A = ib.allocate(\"int16\", 200, name=\"A\", scope=\"local.L0A\")\n",
    "            A[j] = tvm.tir.const(1, \"int16\")\n",
    "            B = ib.allocate(\"int16\", 200, name=\"B\", scope=\"local.L0A\")\n",
    "            B[j] = tvm.tir.const(1, \"int16\")\n",
    "            B1 = ib.allocate(\"int16\", 200, name=\"B1\", scope=\"local.L0A\")\n",
    "            B1[j] = A[j] + B[j]\n",
    "            C = ib.allocate(\"int16\", 400, name=\"C\", scope=\"local.L0A\")\n",
    "            C[j] = tvm.tir.const(1, \"int16\")\n",
    "            D = ib.allocate(\"int16\", 400, name=\"D\", scope=\"local.L0A\")\n",
    "            D[j] = tvm.tir.const(1, \"int16\")\n",
    "            E = ib.allocate(\"int16\", 400, name=\"E\", scope=\"local.L0A\")\n",
    "            E[j] = C[j]\n",
    "\n",
    "    body = ib.get()\n",
    "\n",
    "    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))\n",
    "    body = tvm.tir.transform.StorageRewrite()(mod)[\"main\"].body\n",
    "\n",
    "    num_alloc = [0]\n",
    "\n",
    "    def verify(n):\n",
    "        if isinstance(n, tvm.tir.Allocate):\n",
    "            num_alloc[0] += 1\n",
    "            assert n.extents[0].value == 800\n",
    "\n",
    "    tvm.tir.stmt_functor.post_order_visit(body, verify)\n",
    "    assert num_alloc[0] == 1\n",
    "\n",
    "\n",
    "def test_replace_dataflow():\n",
    "    \"\"\"cache_read with multiple downstream readers keeps bound inference working.\"\"\"\n",
    "    shape = (255,)\n",
    "    A = te.placeholder(shape, name=\"A\")\n",
    "    B = te.compute(shape, lambda i: A[i] + A[i], name=\"B\")\n",
    "    C = te.compute(shape, lambda i: A[i] + B[i], name=\"C\")\n",
    "    D = te.compute(shape, lambda i: A[i] + C[i], name=\"D\")\n",
    "    E = te.compute(shape, lambda i: A[i] + D[i], name=\"E\")\n",
    "\n",
    "    s = te.create_schedule(E.op)\n",
    "    # A is read by all four stages; the cache must be wired into each of them.\n",
    "    s.cache_read(A, \"local\", [B, C, D, E])\n",
    "    bounds = tvm.te.schedule.InferBound(s)\n",
    "    assert isinstance(bounds, tvm.container.Map)\n",
    "\n",
    "\n",
    "def test_large_input():\n",
    "    \"\"\"Large allocations survive lowering: extent 16384 * 16384 = 268435456 elements.\"\"\"\n",
    "    @te.hybrid.script\n",
    "    def compute(a, b):\n",
    "        n = 16384\n",
    "        c = output_tensor((n, n), \"int32\")\n",
    "        for i in range(n):\n",
    "            for j in range(n):\n",
    "                c[i, j] = a[i, j] - b[i, j]\n",
    "        return c\n",
    "\n",
    "    n = 16384\n",
    "    shape = (n, n)\n",
    "    a = te.placeholder(shape, name=\"a\", dtype=\"int32\")\n",
    "    b = te.placeholder(shape, name=\"b\", dtype=\"int32\")\n",
    "    c = te.compute(shape, lambda i, j: compute(a, b)[i, j])\n",
    "    c = te.compute(shape, lambda i, j: 1 + c[i, j])\n",
    "    s = te.create_schedule(c.op)\n",
    "    stmt = tvm.lower(s, [a, b, c])[\"main\"].body\n",
    "\n",
    "    def verify(n):\n",
    "        if isinstance(n, tvm.tir.Allocate):\n",
    "            # 16384 * 16384 = 268435456\n",
    "            assert n.extents[0].value == 268435456\n",
    "\n",
    "    tvm.tir.stmt_functor.post_order_visit(stmt, verify)\n",
    "\n",
    "\n",
    "def test_access_in_let_value():\n",
    "    \"\"\"A buffer accessed only inside a let binding's value is still tracked,\n",
    "    and its allocation is hoisted out of the loop.\n",
    "    \"\"\"\n",
    "    @T.prim_func\n",
    "    def func(A: T.Buffer((8,), \"float32\")):\n",
    "        for i in range(8):\n",
    "            B_data = T.allocate((1,), \"float32\", \"global\")\n",
    "            B = T.Buffer(shape=[1], dtype=\"float32\", data=B_data)\n",
    "            B[0] = 3.14\n",
    "            # B[0] is read only in the value of the let binding of x.\n",
    "            x: T.float32 = T.exp(B[0], dtype=\"float32\")\n",
    "            A[i] = (x + 1.0) / (x - 1.0)\n",
    "\n",
    "    # Expected result: the allocation of B is hoisted above the loop.\n",
    "    @T.prim_func\n",
    "    def func_rewritten(A: T.Buffer((8,), \"float32\")) -> None:\n",
    "        B_data = T.allocate((1,), \"float32\", \"global\")\n",
    "        B = T.Buffer(shape=[1], dtype=\"float32\", data=B_data)\n",
    "        for i in range(8):\n",
    "            B[0] = 3.14\n",
    "            x: T.float32 = T.exp(B[0], dtype=\"float32\")\n",
    "            A[i] = (x + 1.0) / (x - 1.0)\n",
    "\n",
    "    mod = tvm.tir.transform.StorageRewrite()(\n",
    "        tvm.IRModule.from_expr(func.with_attr(\"global_symbol\", \"main\"))\n",
    "    )\n",
    "    tvm.ir.assert_structural_equal(mod[\"main\"], func_rewritten.with_attr(\"global_symbol\", \"main\"))\n",
    "\n",
    "\n",
    "class BaseCompare(tvm.testing.CompareBeforeAfter):\n",
    "    \"\"\"Shared fixture: run StorageRewrite over each subclass's before/expected pair.\"\"\"\n",
    "    transform = tvm.tir.transform.StorageRewrite()\n",
    "\n",
    "\n",
    "class TestLetBufferRewrite(BaseCompare):\n",
    "    \"\"\"StorageRewrite replaces the bound var of backing allocations\n",
    "\n",
    "    If StorageRewrite replaces the backing variable of an array, such\n",
    "    as when vectorizing the storage type, the variable must be\n",
    "    replaced in the LetStmt that defines it.  Currently, StmtMutator\n",
    "    only visits usage of variables, and does not visit definitions of\n",
    "    variables, so the definition in a LetStmt must be explicitly\n",
    "    handled.\n",
    "    \"\"\"\n",
    "\n",
    "    def before() -> None:\n",
    "        # The backing var is let-bound (from call_extern), not an Allocate.\n",
    "        A_data: T.handle(\"int32\") = T.call_extern(\"dummy_func\", dtype=\"handle\")\n",
    "        A = T.Buffer([8], \"int32\", data=A_data)\n",
    "        A[0:8] = T.broadcast(42, 8)\n",
    "\n",
    "    def expected() -> None:\n",
    "        # The let-bound var is retyped to int32x8 and the store vectorized.\n",
    "        A_data: T.handle(\"int32x8\") = T.call_extern(\"dummy_func\", dtype=\"handle\")\n",
    "        A = T.Buffer([1], \"int32x8\", data=A_data)\n",
    "        A[0] = T.broadcast(42, 8)\n",
    "\n",
    "\n",
    "class TestRewriteInPlaceUseOfNonFlatBuffer(BaseCompare):\n",
    "    \"\"\"A non-flat buffer may be re-used for in-place operations\"\"\"\n",
    "\n",
    "    def before(A: T.Buffer((16, 16), \"float32\"), D: T.Buffer((16, 16), \"float32\")):\n",
    "        # B and C are non-flat buffers (axis_separators=[1]) with\n",
    "        # identical shapes, each with its own backing allocation.\n",
    "        B_data = T.allocate(\n",
    "            [16, 16],\n",
    "            dtype=\"float32\",\n",
    "            scope=\"global\",\n",
    "        )\n",
    "        B = T.Buffer(\n",
    "            [16, 16],\n",
    "            dtype=\"float32\",\n",
    "            axis_separators=[1],\n",
    "            data=B_data,\n",
    "        )\n",
    "        C_data = T.allocate(\n",
    "            [16, 16],\n",
    "            dtype=\"float32\",\n",
    "            scope=\"global\",\n",
    "        )\n",
    "        C = T.Buffer(\n",
    "            [16, 16],\n",
    "            dtype=\"float32\",\n",
    "            axis_separators=[1],\n",
    "            data=C_data,\n",
    "        )\n",
    "\n",
    "        for i, j in T.grid(16, 16):\n",
    "            B[i, j] = A[i, j]\n",
    "\n",
    "        for i, j in T.grid(16, 16):\n",
    "            C[i, j] = 2.0 * B[i, j]\n",
    "\n",
    "        for i, j in T.grid(16, 16):\n",
    "            D[i, j] = C[i, j]\n",
    "\n",
    "    def expected(A: T.Buffer((16, 16), \"float32\"), D: T.Buffer((16, 16), \"float32\")):\n",
    "        B_data = T.allocate(\n",
    "            [16, 16],\n",
    "            dtype=\"float32\",\n",
    "            scope=\"global\",\n",
    "        )\n",
    "        B = T.Buffer([16, 16], dtype=\"float32\", axis_separators=[1], data=B_data)\n",
    "        # C's separate allocation is gone: it now aliases B's backing\n",
    "        # storage (data=B.data), i.e. an in-place reuse.\n",
    "        C = T.Buffer(\n",
    "            [16, 16],\n",
    "            dtype=\"float32\",\n",
    "            axis_separators=[1],\n",
    "            data=B.data,\n",
    "        )\n",
    "\n",
    "        for i, j in T.grid(16, 16):\n",
    "            B[i, j] = A[i, j]\n",
    "\n",
    "        for i, j in T.grid(16, 16):\n",
    "            C[i, j] = 2.0 * B[i, j]\n",
    "\n",
    "        for i, j in T.grid(16, 16):\n",
    "            D[i, j] = C[i, j]\n",
    "\n",
    "\n",
    "class TestNoRewriteOfSharedNonFlatBuffer(BaseCompare):\n",
    "    \"\"\"In general, sharing of non-flat buffer isn't supported\n",
    "\n",
    "    The current packing algorithms in StorageRewrite assume a flat\n",
    "    memory space, and do not support packing of N-d buffers.  For\n",
    "    buffers with axis separators, normal buffer sharing should be\n",
    "    disabled.\n",
    "\n",
    "    Like TestRewriteInPlaceUseOfNonFlatBuffer, except that B and C do\n",
    "    not have matching shapes.\n",
    "    \"\"\"\n",
    "\n",
    "    def before(A: T.Buffer((16, 16), \"float32\"), D: T.Buffer((16, 16), \"float32\")):\n",
    "        B_data = T.allocate(\n",
    "            [16, 16],\n",
    "            dtype=\"float32\",\n",
    "            scope=\"global\",\n",
    "        )\n",
    "        B = T.Buffer(\n",
    "            [16, 16],\n",
    "            dtype=\"float32\",\n",
    "            axis_separators=[1],\n",
    "            data=B_data,\n",
    "        )\n",
    "        # C's shape (20x20) differs from B's (16x16).\n",
    "        C_data = T.allocate(\n",
    "            [20, 20],\n",
    "            dtype=\"float32\",\n",
    "            scope=\"global\",\n",
    "        )\n",
    "        C = T.Buffer(\n",
    "            [20, 20],\n",
    "            dtype=\"float32\",\n",
    "            axis_separators=[1],\n",
    "            data=C_data,\n",
    "        )\n",
    "\n",
    "        for i, j in T.grid(16, 16):\n",
    "            B[i, j] = A[i, j]\n",
    "\n",
    "        for i, j in T.grid(16, 16):\n",
    "            C[i, j] = 2.0 * B[i, j]\n",
    "\n",
    "        for i, j in T.grid(16, 16):\n",
    "            D[i, j] = C[i, j]\n",
    "\n",
    "    # Mismatched non-flat shapes must not be merged: the function is\n",
    "    # expected to pass through StorageRewrite unchanged.\n",
    "    expected = before\n",
    "\n",
    "\n",
    "class TestRewriteDeclBuffer(BaseCompare):\n",
    "    \"\"\"A DeclBuffer node may appear in StorageRewrite's input\"\"\"\n",
    "\n",
    "    def before(A: T.Buffer(16, \"float32\"), D: T.Buffer(16, \"float32\")):\n",
    "        # Buffers declared with T.decl_buffer (DeclBuffer nodes) rather\n",
    "        # than explicit T.allocate + T.Buffer pairs.\n",
    "        B = T.decl_buffer(16, dtype=\"float32\")\n",
    "        C = T.decl_buffer(16, dtype=\"float32\")\n",
    "\n",
    "        for i in range(16):\n",
    "            B[i] = A[i]\n",
    "\n",
    "        for i in range(16):\n",
    "            C[i] = 2.0 * B[i]\n",
    "\n",
    "        for i in range(16):\n",
    "            D[i] = C[i]\n",
    "\n",
    "    def expected(A: T.Buffer(16, \"float32\"), D: T.Buffer(16, \"float32\")):\n",
    "        B = T.decl_buffer(16, dtype=\"float32\")\n",
    "        # C now shares B's storage (data=B.data); the DeclBuffer form is\n",
    "        # preserved in the output.\n",
    "        C = T.decl_buffer(16, dtype=\"float32\", data=B.data)\n",
    "\n",
    "        for i in range(16):\n",
    "            B[i] = A[i]\n",
    "\n",
    "        for i in range(16):\n",
    "            C[i] = 2.0 * B[i]\n",
    "\n",
    "        for i in range(16):\n",
    "            D[i] = C[i]\n",
    "\n",
    "\n",
    "class TestNoOrphanedDeclBuffer(BaseCompare):\n",
    "    \"\"\"A DeclBuffer of an unused Allocate should be removed\n",
    "\n",
    "    StorageRewrite removes any allocations that are unused.  When it\n",
    "    does so, any DeclBuffer that refers to that allocation should also\n",
    "    be removed.\n",
    "    \"\"\"\n",
    "\n",
    "    def before(A: T.Buffer(16, \"float32\"), D: T.Buffer(16, \"float32\")):\n",
    "        B = T.decl_buffer(16, dtype=\"float32\")\n",
    "        C = T.decl_buffer(16, dtype=\"float32\")\n",
    "        # `Unused` is never read or written in the loops below.\n",
    "        Unused = T.decl_buffer(16, dtype=\"float32\")\n",
    "\n",
    "        for i in range(16):\n",
    "            B[i] = A[i]\n",
    "\n",
    "        for i in range(16):\n",
    "            C[i] = 2.0 * B[i]\n",
    "\n",
    "        for i in range(16):\n",
    "            D[i] = C[i]\n",
    "\n",
    "    def expected(A: T.Buffer(16, \"float32\"), D: T.Buffer(16, \"float32\")):\n",
    "        # `Unused` (and its backing allocation) is gone; C additionally\n",
    "        # shares B's storage, as in TestRewriteDeclBuffer.\n",
    "        B = T.decl_buffer(16, dtype=\"float32\")\n",
    "        C = T.decl_buffer(16, dtype=\"float32\", data=B.data)\n",
    "\n",
    "        for i in range(16):\n",
    "            B[i] = A[i]\n",
    "\n",
    "        for i in range(16):\n",
    "            C[i] = 2.0 * B[i]\n",
    "\n",
    "        for i in range(16):\n",
    "            D[i] = C[i]\n",
    "\n",
    "\n",
    "def test_vulkan_smem_reuse():\n",
    "    \"\"\"Shared-memory reuse across dtypes depends on the target.\n",
    "\n",
    "    Lowering `func` without a target lets StorageRewrite fold the\n",
    "    float16 B_shared allocation into the float32 A_shared allocation\n",
    "    (`normal_lowering`).  With the Vulkan target bound, the two shared\n",
    "    allocations are kept separate (`no_reuse_lowering`).\n",
    "    \"\"\"\n",
    "    target = tvm.target.Target(\n",
    "        {\n",
    "            \"keys\": [\"vulkan\", \"gpu\"],\n",
    "            \"kind\": \"vulkan\",\n",
    "            \"max_num_threads\": 256,\n",
    "            \"max_threads_per_block\": 256,\n",
    "            \"supports_float32\": T.bool(True),\n",
    "            \"supports_int32\": T.bool(True),\n",
    "            \"tag\": \"\",\n",
    "            \"thread_warp_size\": 1,\n",
    "        }\n",
    "    )\n",
    "\n",
    "    # Input: A -> A_shared -> A_local -> B_shared -> B, with separate\n",
    "    # shared allocations for the float32 input and float16 output.\n",
    "    @T.prim_func(private=True)\n",
    "    def func(A: T.Buffer((4,), \"float32\"), B: T.Buffer((4,), \"float16\")):\n",
    "        T.func_attr({\"tir.noalias\": T.bool(True)})\n",
    "        A_shared = T.allocate([4], \"float32\", \"shared\")\n",
    "        A_local = T.allocate([4], \"float32\", \"local\")\n",
    "        B_shared = T.allocate([4], \"float16\", \"shared\")\n",
    "        A_shared_1 = T.Buffer((4,), data=A_shared, scope=\"shared\")\n",
    "        with T.launch_thread(\"threadIdx.x\", 4) as threadIdx_x:\n",
    "            A_1 = T.Buffer((4,), data=A.data)\n",
    "            A_shared_1[threadIdx_x] = A_1[threadIdx_x]\n",
    "        A_local_1 = T.Buffer((4,), data=A_local, scope=\"local\")\n",
    "        with T.launch_thread(\"threadIdx.x\", 4) as threadIdx_x:\n",
    "            A_local_1[threadIdx_x] = A_shared_1[threadIdx_x]\n",
    "        B_shared_1 = T.Buffer((4,), \"float16\", data=B_shared, scope=\"shared\")\n",
    "        with T.launch_thread(\"threadIdx.x\", 4) as threadIdx_x:\n",
    "            B_shared_1[threadIdx_x] = T.Cast(\"float16\", A_local_1[threadIdx_x])\n",
    "        threadIdx_x = T.launch_thread(\"threadIdx.x\", 4)\n",
    "        B_1 = T.Buffer((4,), \"float16\", data=B.data)\n",
    "        B_1[threadIdx_x] = B_shared_1[threadIdx_x]\n",
    "\n",
    "    # Expected without a target: B_shared's allocation is gone; the\n",
    "    # float16 stage reuses A_shared's storage (A_shared_2).\n",
    "    @T.prim_func(private=True)\n",
    "    def normal_lowering(A: T.Buffer((4,), \"float32\"), B: T.Buffer((4,), \"float16\")):\n",
    "        T.func_attr({\"tir.noalias\": T.bool(True)})\n",
    "        A_shared = T.allocate([4], \"float32\", \"shared\")\n",
    "        A_local = T.allocate([4], \"float32\", \"local\")\n",
    "        A_shared_1 = T.Buffer((4,), data=A_shared, scope=\"shared\")\n",
    "        with T.launch_thread(\"threadIdx.x\", 4) as threadIdx_x:\n",
    "            A_1 = T.Buffer((4,), data=A.data)\n",
    "            A_shared_1[threadIdx_x] = A_1[threadIdx_x]\n",
    "        A_local_1 = T.Buffer((4,), data=A_local, scope=\"local\")\n",
    "        with T.launch_thread(\"threadIdx.x\", 4) as threadIdx_x:\n",
    "            A_local_1[threadIdx_x] = A_shared_1[threadIdx_x]\n",
    "        A_shared_2 = T.Buffer((4,), \"float16\", data=A_shared, scope=\"shared\")\n",
    "        with T.launch_thread(\"threadIdx.x\", 4) as threadIdx_x:\n",
    "            A_shared_2[threadIdx_x] = T.Cast(\"float16\", A_local_1[threadIdx_x])\n",
    "        threadIdx_x = T.launch_thread(\"threadIdx.x\", 4)\n",
    "        B_1 = T.Buffer((4,), \"float16\", data=B.data)\n",
    "        B_1[threadIdx_x] = A_shared_2[threadIdx_x]\n",
    "\n",
    "    # Expected with the Vulkan target: both shared allocations remain.\n",
    "    @T.prim_func(private=True)\n",
    "    def no_reuse_lowering(A: T.Buffer((4,), \"float32\"), B: T.Buffer((4,), \"float16\")):\n",
    "        T.func_attr({\"target\": target, \"tir.noalias\": T.bool(True)})\n",
    "        A_shared_1 = T.allocate([4], \"float32\", \"shared\")\n",
    "        A_local_1 = T.allocate([4], \"float32\", \"local\")\n",
    "        B_shared_1 = T.allocate([4], \"float16\", \"shared\")\n",
    "        A_shared_1_1 = T.Buffer((4,), data=A_shared_1, scope=\"shared\")\n",
    "        with T.launch_thread(\"threadIdx.x\", 4) as threadIdx_x:\n",
    "            A_1 = T.Buffer((4,), data=A.data)\n",
    "            A_shared_1_1[threadIdx_x] = A_1[threadIdx_x]\n",
    "        A_local_1_1 = T.Buffer((4,), data=A_local_1, scope=\"local\")\n",
    "        with T.launch_thread(\"threadIdx.x\", 4) as threadIdx_x:\n",
    "            A_local_1_1[threadIdx_x] = A_shared_1_1[threadIdx_x]\n",
    "        B_shared_1_1 = T.Buffer((4,), \"float16\", data=B_shared_1, scope=\"shared\")\n",
    "        with T.launch_thread(\"threadIdx.x\", 4) as threadIdx_x:\n",
    "            B_shared_1_1[threadIdx_x] = T.Cast(\"float16\", A_local_1_1[threadIdx_x])\n",
    "        threadIdx_x = T.launch_thread(\"threadIdx.x\", 4)\n",
    "        B_1 = T.Buffer((4,), \"float16\", data=B.data)\n",
    "        B_1[threadIdx_x] = B_shared_1_1[threadIdx_x]\n",
    "\n",
    "    # Reuse shared memory when lowering without target.\n",
    "    mod = tvm.IRModule({\"main\": func})\n",
    "    tvm.ir.assert_structural_equal(tvm.lower(mod)[\"main\"], normal_lowering)\n",
    "\n",
    "    # No shared memory reuse when lowering with target Vulkan.\n",
    "    mod = tvm.tir.transform.BindTarget(target)(mod)\n",
    "    tvm.ir.assert_structural_equal(tvm.lower(mod)[\"main\"], no_reuse_lowering)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py312x",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
