{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# `ToMixedPrecision` 解读\n",
    "\n",
    "参考：`tvm/tests/python/relay/test_to_mixed_precision.py`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "from pathlib import Path\n",
    "ROOT = Path(\".\").resolve().parents[2]\n",
    "sys.path.extend([f\"{ROOT}/tests\", f\"{ROOT}/src\"])\n",
    "# # from tools.tag_span import _create_span, _set_span, _verify_structural_equal_with_span\n",
    "from tools.torch_utils import verify_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Any, Dict, List\n",
    "\n",
    "import numpy as np\n",
    "import pytest\n",
    "import tvm\n",
    "from tvm import relay\n",
    "from tvm.relay.testing import lstm\n",
    "from tvm.relay.transform import InferType, ToMixedPrecision, mixed_precision\n",
    "\n",
     "# Pytest fixture: parametrizes every test below over the two reduced-precision dtypes.\n",
     "target_precision = tvm.testing.parameter(\n",
     "    pytest.param(\"float16\"),\n",
     "    pytest.param(\"bfloat16\"),\n",
     "    ids=[\"float16\", \"bfloat16\"],\n",
     ")\n",
    "\n",
    "\n",
    "def run_module(mod: tvm.runtime.Module, mod_params: Dict[str, Any]) -> List:\n",
    "    dev = tvm.device(\"llvm\", 0)\n",
    "    result = relay.create_executor(\"debug\", mod, device=dev, target=\"llvm\").evaluate()(**mod_params)\n",
    "    if isinstance(result, tvm.runtime.container.ADT):\n",
    "        result = [r.numpy() for r in result]\n",
    "        return result\n",
    "    else:\n",
    "        return [result.numpy()]\n",
    "\n",
    "\n",
     "def verify_mixed_precision_output_close(\n",
     "    mod: tvm.runtime.Module,\n",
     "    mod_params: Dict[str, Any],\n",
     "    mixed_precision_dtype=\"float16\",\n",
     "    rtol: float = 1e-3,\n",
     "    atol: float = 0,\n",
     "    keep_orig_output_dtype=False,\n",
     ") -> tvm.runtime.Module:\n",
     "    \"\"\"Run `mod` in fp32 and after ToMixedPrecision; check the outputs stay close.\n",
     "\n",
     "    Returns the converted (AMP) module so callers can inspect its structure.\n",
     "    \"\"\"\n",
     "    mod = InferType()(mod)\n",
     "    result_fp32 = run_module(mod, mod_params)\n",
     "\n",
     "    # keep_orig_output_dtype is a PassContext config option, not a pass argument.\n",
     "    if not keep_orig_output_dtype:\n",
     "        amp_mod = ToMixedPrecision(mixed_precision_dtype)(mod)\n",
     "        result_amp = run_module(amp_mod, mod_params)\n",
     "    else:\n",
     "        with tvm.transform.PassContext(\n",
     "            config={\"relay.ToMixedPrecision.keep_orig_output_dtype\": True}\n",
     "        ):\n",
     "            amp_mod = ToMixedPrecision(mixed_precision_dtype)(mod)\n",
     "            result_amp = run_module(amp_mod, mod_params)\n",
     "\n",
     "    # Ensure the results are close\n",
     "    # (bfloat16 results are skipped here -- presumably because numpy has no\n",
     "    # native bfloat16 support; TODO confirm).\n",
     "    if mixed_precision_dtype != \"bfloat16\":\n",
     "        for fp32, amp in zip(result_fp32, result_amp):\n",
     "            np.testing.assert_allclose(fp32, amp, rtol=rtol, atol=atol)\n",
     "\n",
     "    if keep_orig_output_dtype:\n",
     "        assert (\n",
     "            np.array(result_amp).dtype == np.array(result_fp32).dtype\n",
     "        ), \"output type and original type mismatch\"\n",
     "\n",
     "    return amp_mod\n",
    "\n",
    "\n",
     "def test_lstm(target_precision):\n",
     "    \"\"\"A small stress test on a single unrolled lstm unit.\n",
     "\n",
     "    Has internal functions and let statements the pass must work on.\n",
     "    \"\"\"\n",
     "    # TODO(AndrewZhaoLuo): investigate why non-even units cause failure in codegen for CUDA\n",
     "    # See discussion here: https://github.com/apache/tvm/issues/8294#issuecomment-866190408\n",
     "    units = 4\n",
     "    iterations = 5\n",
     "    mod, mod_params = lstm.get_workload(iterations=iterations, num_hidden=units)\n",
     "\n",
     "    # This is an unrolled lstm so each data should be the previous results but\n",
     "    # we don't care, we just want to stress test things.\n",
     "    # The first input is named \"data\"; later iterations use \"data1\", \"data2\", ...\n",
     "    for i in range(iterations):\n",
     "        mod_params[\"data\" if i == 0 else f\"data{i}\"] = np.random.uniform(\n",
     "            -10, 10, (1, units)\n",
     "        ).astype(\"float32\")\n",
     "\n",
     "    verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=target_precision, rtol=0.01, atol=0.01\n",
     "    )\n",
    "\n",
    "\n",
     "def test_lstm_float64():\n",
     "    \"\"\"Tests if can handle other mixed precision types.\n",
     "\n",
     "    As a toy example show can convert graph to float64 and have it run.\n",
     "\n",
     "    It doesn't really make sense to do it, this just shows we can change\n",
     "    the target mixed_precision_dtype.\n",
     "    \"\"\"\n",
     "    units = 3\n",
     "    iterations = 5\n",
     "    mod, mod_params = lstm.get_workload(iterations=iterations, num_hidden=units)\n",
     "\n",
     "    # This is an unrolled lstm so each data should be the previous results but\n",
     "    # we don't care, we just want to stress test things.\n",
     "    # The first input is named \"data\"; later iterations use \"data1\", \"data2\", ...\n",
     "    for i in range(iterations):\n",
     "        mod_params[\"data\" if i == 0 else f\"data{i}\"] = np.random.uniform(\n",
     "            -10, 10, (1, units)\n",
     "        ).astype(\"float32\")\n",
     "\n",
     "    verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=\"float64\", rtol=0.01, atol=0.01\n",
     "    )\n",
    "\n",
    "\n",
     "def test_convert_single_conv(target_precision):\n",
     "    \"\"\"Conv is a green listed operation meaning it will always use fp16 workload.\n",
     "\n",
     "    By default it accumulates to fp32 and outputs fp16.\n",
     "    \"\"\"\n",
     "    data_shape = (1, 3, 32, 32)\n",
     "    weight_shape = (5, 3, 3, 3)\n",
     "    data = relay.var(\"data\", shape=data_shape, dtype=\"float32\")\n",
     "    weight = relay.var(\"weight\", shape=weight_shape, dtype=\"float32\")\n",
     "    conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype=\"float32\")\n",
     "    mod = tvm.IRModule.from_expr(conv)\n",
     "    mod = tvm.relay.transform.InferType()(mod)\n",
     "\n",
     "    mod_params = {\n",
     "        \"data\": np.random.uniform(-1, 1, size=data_shape).astype(\"float32\"),\n",
     "        \"weight\": np.random.uniform(-1, 1, size=weight_shape).astype(\"float32\"),\n",
     "    }\n",
     "    amp_mod = verify_mixed_precision_output_close(\n",
     "        mod,\n",
     "        mod_params,\n",
     "        mixed_precision_dtype=target_precision,\n",
     "        atol=0.01,\n",
     "        rtol=1e-3,\n",
     "        keep_orig_output_dtype=True,\n",
     "    )\n",
     "\n",
     "    # With keep_orig_output_dtype=True the converted graph casts back to float32.\n",
     "    expected_mod = tvm.IRModule.from_expr(\n",
     "        relay.cast(\n",
     "            relay.nn.conv2d(\n",
     "                relay.cast(data, target_precision),\n",
     "                relay.cast(weight, target_precision),\n",
     "                strides=(1, 1),\n",
     "                padding=(1, 1),\n",
     "                out_dtype=target_precision,\n",
     "            ),\n",
     "            \"float32\",\n",
     "        )\n",
     "    )\n",
     "    expected_mod = tvm.relay.transform.InferType()(expected_mod)\n",
     "\n",
     "    assert not tvm.ir.structural_equal(amp_mod, mod)\n",
     "    assert tvm.ir.structural_equal(amp_mod, expected_mod)\n",
    "\n",
    "\n",
     "def test_convert_single_conv_fp64():\n",
     "    \"\"\"As above but checks choosing a mixed_precision_type other than FP16 works\"\"\"\n",
     "    data_shape = (1, 3, 32, 32)\n",
     "    weight_shape = (5, 3, 3, 3)\n",
     "    data = relay.var(\"data\", shape=data_shape, dtype=\"float32\")\n",
     "    weight = relay.var(\"weight\", shape=weight_shape, dtype=\"float32\")\n",
     "    conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype=\"float32\")\n",
     "    mod = tvm.IRModule.from_expr(conv)\n",
     "    mod = tvm.relay.transform.InferType()(mod)\n",
     "\n",
     "    mod_params = {\n",
     "        \"data\": np.random.uniform(-1, 1, size=data_shape).astype(\"float32\"),\n",
     "        \"weight\": np.random.uniform(-1, 1, size=weight_shape).astype(\"float32\"),\n",
     "    }\n",
     "    amp_mod = verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=\"float64\", atol=0.01, rtol=1e-3\n",
     "    )\n",
     "\n",
     "    # Note we still accumulate to FP32 by default, a user would need to overwrite default\n",
     "    # behavior to make this make more sense.\n",
     "    # Without keep_orig_output_dtype the module's output dtype stays float64.\n",
     "    expected_mod = tvm.IRModule.from_expr(\n",
     "        relay.nn.conv2d(\n",
     "            relay.cast(data, \"float64\"),\n",
     "            relay.cast(weight, \"float64\"),\n",
     "            strides=(1, 1),\n",
     "            padding=(1, 1),\n",
     "            out_dtype=\"float64\",\n",
     "        ),\n",
     "    )\n",
     "    expected_mod = tvm.relay.transform.InferType()(expected_mod)\n",
     "\n",
     "    assert not tvm.ir.structural_equal(amp_mod, mod)\n",
     "    assert tvm.ir.structural_equal(amp_mod, expected_mod)\n",
    "\n",
    "\n",
     "def test_convert_conv_bn(target_precision):\n",
     "    \"\"\"Conv is green and batch norm is gray. As Conv should output fp16 batch_norm should be green.\"\"\"\n",
     "    data_shape = (1, 3, 32, 32)\n",
     "    weight_shape = (5, 3, 3, 3)\n",
     "    data = relay.var(\"data\", shape=data_shape, dtype=\"float32\")\n",
     "    weight = relay.var(\"weight\", shape=weight_shape, dtype=\"float32\")\n",
     "    conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype=\"float32\")\n",
     "\n",
     "    bn_shape = [5]\n",
     "    gamma = relay.var(\"gamma\", shape=bn_shape)\n",
     "    beta = relay.var(\"beta\", shape=bn_shape)\n",
     "    moving_mean = relay.var(\"moving_mean\", shape=bn_shape)\n",
     "    moving_var = relay.var(\"moving_var\", shape=bn_shape)\n",
     "    bn = relay.nn.batch_norm(conv, gamma, beta, moving_mean, moving_var)\n",
     "    mod = tvm.IRModule.from_expr(bn[0])\n",
     "    mod = tvm.relay.transform.InferType()(mod)\n",
     "\n",
     "    mod_params = {\n",
     "        \"data\": np.random.uniform(-1, 1, size=data_shape).astype(\"float32\"),\n",
     "        \"weight\": np.random.uniform(-1, 1, size=weight_shape).astype(\"float32\"),\n",
     "        \"gamma\": np.random.uniform(-1, 1, size=bn_shape).astype(\"float32\"),\n",
     "        \"beta\": np.random.uniform(-1, 1, size=bn_shape).astype(\"float32\"),\n",
     "        \"moving_mean\": np.random.uniform(-1, 1, size=bn_shape).astype(\"float32\"),\n",
     "        \"moving_var\": np.random.uniform(-1, 1, size=bn_shape).astype(\"float32\"),\n",
     "    }\n",
     "    amp_mod = verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.025, rtol=0.01\n",
     "    )\n",
     "\n",
     "    # Creating expected module\n",
     "    # All batch_norm inputs (gamma/beta/moving stats) are cast to the target precision.\n",
     "    data = relay.cast(relay.var(\"data\", shape=data_shape), target_precision)\n",
     "    weight = relay.cast(relay.var(\"weight\", shape=weight_shape), target_precision)\n",
     "    conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype=target_precision)\n",
     "\n",
     "    bn_shape = [5]\n",
     "    gamma = relay.cast(relay.var(\"gamma\", shape=bn_shape), target_precision)\n",
     "    beta = relay.cast(relay.var(\"beta\", shape=bn_shape), target_precision)\n",
     "    moving_mean = relay.cast(relay.var(\"moving_mean\", shape=bn_shape), target_precision)\n",
     "    moving_var = relay.cast(relay.var(\"moving_var\", shape=bn_shape), target_precision)\n",
     "    bn = relay.nn.batch_norm(conv, gamma, beta, moving_mean, moving_var)\n",
     "\n",
     "    expected_mod = tvm.IRModule.from_expr(bn[0])\n",
     "    expected_mod = tvm.relay.transform.InferType()(expected_mod)\n",
     "    assert not tvm.ir.structural_equal(amp_mod, mod)\n",
     "    assert tvm.ir.structural_equal(amp_mod, expected_mod)\n",
    "\n",
    "\n",
    "def test_do_not_convert_softmax(target_precision):\n",
    "    \"\"\"Softmax is a red listed operation and therefore should never be fp16.\"\"\"\n",
    "    shape = [1, 2, 3]\n",
    "    a = relay.var(\"a\", shape=shape)\n",
    "    b = relay.nn.softmax(a)\n",
    "    mod = tvm.IRModule.from_expr(b)\n",
    "    mod = tvm.relay.transform.InferType()(mod)\n",
    "    out_mod = ToMixedPrecision(target_precision)(mod)\n",
    "    orig_mod = tvm.relay.transform.InferType()(mod)\n",
    "    assert tvm.ir.structural_equal(orig_mod, out_mod)\n",
    "\n",
    "\n",
    "def test_do_not_convert_arange(target_precision):\n",
    "    \"\"\"Arange is a red listed operation and therefore should never be fp16.\"\"\"\n",
    "    dtype = \"float32\"\n",
    "    arange = relay.arange(relay.const(1, dtype), relay.const(128, dtype))\n",
    "    mod = tvm.IRModule.from_expr(arange)\n",
    "    out_mod = ToMixedPrecision(target_precision)(mod)\n",
    "    orig_mod = tvm.relay.transform.InferType()(mod)\n",
    "    assert tvm.ir.structural_equal(orig_mod, out_mod)\n",
    "\n",
    "\n",
    "def test_do_not_convert_summation(target_precision):\n",
    "    \"\"\"Ops that could involve a large summation are not allowed in fp16.\"\"\"\n",
    "    shape = [1, 3, 16, 16]\n",
    "    a = relay.var(\"a\", shape=shape)\n",
    "    ops = [\n",
    "        relay.sum,\n",
    "        relay.mean,\n",
    "        relay.nn.global_avg_pool2d,\n",
    "        lambda inp: relay.nn.adaptive_avg_pool2d(inp, (1, 1)),\n",
    "    ]\n",
    "    for op in ops:\n",
    "        mod = tvm.IRModule.from_expr(op(a))\n",
    "        out_mod = ToMixedPrecision(target_precision)(mod)\n",
    "        orig_mod = tvm.relay.transform.InferType()(mod)\n",
    "        assert tvm.ir.structural_equal(orig_mod, out_mod)\n",
    "\n",
    "\n",
     "def test_green_gray_propagates_simple(target_precision):\n",
     "    \"\"\"Conv is a green listed operation, while addition is gray.\n",
     "\n",
     "    As Conv outputs fp16 the add should be done in fp16.\n",
     "    \"\"\"\n",
     "    data_shape = (1, 3, 32, 32)\n",
     "    weight_shape = (5, 3, 3, 3)\n",
     "    data = relay.var(\"data\", shape=data_shape, dtype=\"float32\")\n",
     "    weight = relay.var(\"weight\", shape=weight_shape, dtype=\"float32\")\n",
     "    conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype=\"float32\")\n",
     "    conv = conv + conv\n",
     "    mod = tvm.IRModule.from_expr(conv)\n",
     "    mod = tvm.relay.transform.InferType()(mod)\n",
     "\n",
     "    mod_params = {\n",
     "        \"data\": np.random.uniform(-1, 1, size=data_shape).astype(\"float32\"),\n",
     "        \"weight\": np.random.uniform(-1, 1, size=weight_shape).astype(\"float32\"),\n",
     "    }\n",
     "    amp_mod = verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01\n",
     "    )\n",
     "\n",
     "    # Expected: both add operands are the same converted conv node, so the add\n",
     "    # runs in the target precision with no intermediate casts.\n",
     "    conv_expr = relay.nn.conv2d(\n",
     "        relay.cast(data, target_precision),\n",
     "        relay.cast(weight, target_precision),\n",
     "        strides=(1, 1),\n",
     "        padding=(1, 1),\n",
     "        out_dtype=target_precision,\n",
     "    )\n",
     "    expected_mod = tvm.IRModule.from_expr(conv_expr + conv_expr)\n",
     "    expected_mod = tvm.relay.transform.InferType()(expected_mod)\n",
     "\n",
     "    assert not tvm.ir.structural_equal(amp_mod, mod)\n",
     "    assert tvm.ir.structural_equal(amp_mod, expected_mod)\n",
    "\n",
    "\n",
     "def test_green_red_not_use_extraneous_cast(target_precision):\n",
     "    \"\"\"Conv. is a green listed operation, while softmax is red.\n",
     "\n",
     "    Conv. also by default accumulates to fp32 but outputs fp16.\n",
     "\n",
     "    We want to avoid a situation where we have extraneous casts.\n",
     "    E.g. because softmax wants to operate on FP32 we might have\n",
     "\n",
     "    conv (FP32) -> cast (FP16) -> cast (FP32) -> softmax (FP32)\n",
     "\n",
     "    To get around this internally when we cast in the pass we cache\n",
     "    the output nodes and the reverse of the cast back to the original\n",
     "    node. For example casting the `conv (FP32)` to FP16 would produce:\n",
     "\n",
     "    `conv (FP32) -> cast (FP16)`\n",
     "\n",
     "    As the outputs. Now anytime we try to cast the `conv (FP32)` node\n",
     "    to FP16 it would return the cached result instead of a new cast node:\n",
     "\n",
     "    `conv (FP32) -> cast (FP16)`\n",
     "\n",
     "    Furthermore, if we try to cast the `cast (FP16)` node back to FP32 it\n",
     "    would just return\n",
     "\n",
     "    `conv (FP32)`.\n",
     "\n",
     "    This test makes sure this behavior occurs.\n",
     "    \"\"\"\n",
     "    data_shape = (1, 3, 32, 32)\n",
     "    weight_shape = (5, 3, 3, 3)\n",
     "    data = relay.var(\"data\", shape=data_shape, dtype=\"float32\")\n",
     "    weight = relay.var(\"weight\", shape=weight_shape, dtype=\"float32\")\n",
     "    conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype=\"float32\")\n",
     "    result = relay.nn.softmax(conv)\n",
     "    mod = tvm.IRModule.from_expr(result)\n",
     "\n",
     "    mod_params = {\n",
     "        \"data\": np.random.uniform(-1, 1, size=data_shape).astype(\"float32\"),\n",
     "        \"weight\": np.random.uniform(-1, 1, size=weight_shape).astype(\"float32\"),\n",
     "    }\n",
     "    amp_mod = verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=1e-3\n",
     "    )\n",
     "\n",
     "    # Construct expected structure\n",
     "    # Exactly one cast back to fp32 should sit between the conv and the softmax.\n",
     "    conv = relay.cast(\n",
     "        relay.nn.conv2d(\n",
     "            relay.cast(data, target_precision),\n",
     "            relay.cast(weight, target_precision),\n",
     "            strides=(1, 1),\n",
     "            padding=(1, 1),\n",
     "            out_dtype=target_precision,\n",
     "        ),\n",
     "        \"float32\",\n",
     "    )\n",
     "    result = relay.nn.softmax(conv)\n",
     "    expected_mod = tvm.IRModule.from_expr(result)\n",
     "    expected_mod = InferType()(expected_mod)\n",
     "\n",
     "    assert tvm.ir.structural_equal(expected_mod, amp_mod)\n",
    "\n",
    "\n",
    "def test_red_gray_propagates_simple(target_precision):\n",
    "    \"\"\"Everything after a softmax should be in FP32 (exception green colored ops)\"\"\"\n",
    "    shape = [1, 2, 3]\n",
    "    a = relay.var(\"a\", shape=shape)\n",
    "    b = relay.nn.softmax(a)\n",
    "    c = b + b\n",
    "    mod = tvm.IRModule.from_expr(c)\n",
    "    mod = tvm.relay.transform.InferType()(mod)\n",
    "\n",
    "    mod_params = {\n",
    "        \"a\": np.random.uniform(-1, 1, size=shape).astype(\"float32\"),\n",
    "    }\n",
    "    output_mod = verify_mixed_precision_output_close(\n",
    "        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.0, rtol=0.0\n",
    "    )\n",
    "\n",
    "    assert tvm.ir.structural_equal(mod, output_mod)\n",
    "\n",
    "\n",
     "def test_let_statement_simple(target_precision):\n",
     "    \"\"\"A 'simple' let statement example.\n",
     "\n",
     "    Noticeable is the mutation of the bound variable types.\n",
     "    \"\"\"\n",
     "    var1 = relay.var(\"var1\", shape=[1, 20])\n",
     "    var2 = relay.var(\"var2\", shape=[1, 20])\n",
     "\n",
     "    data = relay.var(\"data\", shape=[1, 20])\n",
     "    weight = relay.var(\"weight\", shape=[20, 20])\n",
     "\n",
     "    r1 = var1 + var1\n",
     "\n",
     "    r2 = var2 + var2\n",
     "    let2 = relay.Let(var2, relay.nn.dense(r1, weight, units=20), r2)\n",
     "    let1 = relay.Let(var1, relay.nn.dense(data, weight, units=20), let2)\n",
     "\n",
     "    mod = tvm.IRModule.from_expr(let1)\n",
     "    mod_params = {\n",
     "        \"data\": np.random.uniform(-1, 1, size=[1, 20]).astype(\"float32\"),\n",
     "        \"weight\": np.random.uniform(-1, 1, size=[20, 20]).astype(\"float32\"),\n",
     "    }\n",
     "    output_mod = verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.05, rtol=0.15\n",
     "    )\n",
     "\n",
     "    # Construct expected structure\n",
     "    # The let-bound variables themselves are re-typed to the target precision.\n",
     "    var1 = relay.var(\"var1\", shape=[1, 20], dtype=target_precision)\n",
     "    var2 = relay.var(\"var2\", shape=[1, 20], dtype=target_precision)\n",
     "    data = relay.cast(relay.var(\"data\", shape=[1, 20]), target_precision)\n",
     "    weight = relay.cast(relay.var(\"weight\", shape=[20, 20]), target_precision)\n",
     "    r1 = var1 + var1\n",
     "    r2 = var2 + var2\n",
     "    let2 = relay.Let(\n",
     "        var2,\n",
     "        relay.nn.dense(r1, weight, units=20, out_dtype=target_precision),\n",
     "        r2,\n",
     "    )\n",
     "    let1 = relay.Let(\n",
     "        var1,\n",
     "        relay.nn.dense(data, weight, units=20, out_dtype=target_precision),\n",
     "        let2,\n",
     "    )\n",
     "    expected_mod = tvm.IRModule.from_expr(let1)\n",
     "    expected_mod = InferType()(expected_mod)\n",
     "\n",
     "    assert tvm.ir.structural_equal(expected_mod, output_mod)\n",
    "\n",
    "\n",
     "def test_where_simple(target_precision):\n",
     "    \"\"\"where should follow its converted inputs into the mixed precision dtype.\"\"\"\n",
     "    data = relay.var(\"data\", shape=[1, 20])\n",
     "    weight = relay.var(\"weight\", shape=[20, 20])\n",
     "    a = relay.nn.dense(data, weight, units=20)\n",
     "    b = relay.where(data, a, a)\n",
     "    mod = tvm.IRModule.from_expr(b)\n",
     "    mod_params = {\n",
     "        \"data\": np.random.uniform(-1, 1, size=[1, 20]).astype(\"float32\"),\n",
     "        \"weight\": np.random.uniform(-1, 1, size=[20, 20]).astype(\"float32\"),\n",
     "    }\n",
     "\n",
     "    output_mod = verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01\n",
     "    )\n",
     "\n",
     "    # Create expected module\n",
     "    data = relay.cast(relay.var(\"data\", shape=[1, 20]), target_precision)\n",
     "    weight = relay.cast(relay.var(\"weight\", shape=[20, 20]), target_precision)\n",
     "    a = relay.nn.dense(data, weight, units=20, out_dtype=target_precision)\n",
     "    b = relay.where(data, a, a)\n",
     "    expected_mod = tvm.IRModule.from_expr(b)\n",
     "    expected_mod = InferType()(expected_mod)\n",
     "\n",
     "    assert tvm.ir.structural_equal(expected_mod, output_mod)\n",
    "\n",
    "\n",
     "def test_batch_matmul_simple(target_precision):\n",
     "    \"\"\"Batch matmul is a special case where we try to accumulate to fp16.\n",
     "\n",
     "    This is due to the fact heterogenous accumulation dtypes does not work\n",
     "    on all platforms at the moment.\n",
     "    \"\"\"\n",
     "    data = relay.var(\"data\", shape=[1, 1, 20])\n",
     "    weight = relay.var(\"weight\", shape=[1, 20, 20])\n",
     "    a = relay.nn.batch_matmul(data, weight)\n",
     "    mod = tvm.IRModule.from_expr(a)\n",
     "    mod_params = {\n",
     "        \"data\": np.random.uniform(-1, 1, size=[1, 1, 20]).astype(\"float32\"),\n",
     "        \"weight\": np.random.uniform(-1, 1, size=[1, 20, 20]).astype(\"float32\"),\n",
     "    }\n",
     "    output_mod = verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01\n",
     "    )\n",
     "    # Create expected module\n",
     "    # Unlike conv2d above, batch_matmul accumulates in the target precision (out_dtype below).\n",
     "    data = relay.cast(relay.var(\"data\", shape=[1, 1, 20]), target_precision)\n",
     "    weight = relay.cast(relay.var(\"weight\", shape=[1, 20, 20]), target_precision)\n",
     "    a = relay.nn.batch_matmul(data, weight, out_dtype=target_precision)\n",
     "    expected_mod = tvm.IRModule.from_expr(a)\n",
     "    expected_mod = InferType()(expected_mod)\n",
     "    assert tvm.ir.structural_equal(expected_mod, output_mod)\n",
    "\n",
    "\n",
     "def test_convert_follow_node_with_integer_arguments(target_precision):\n",
     "    \"\"\"Tests the conversion of a follow op with integer arguments + constant float args.\n",
     "\n",
     "    The follow op should convert the floating point argument into fp16 as constants/vars\n",
     "    will always be converted if safe to do so.\n",
     "    \"\"\"\n",
     "\n",
     "    data = relay.var(\"data\", shape=[1, 10], dtype=\"float32\")\n",
     "\n",
     "    # We use an addition to make sure the input indices are not a var\n",
     "    # (which are always casted if safe)\n",
     "    indices = relay.var(\"indices\", shape=[1, 1], dtype=\"int32\") + relay.const(0, dtype=\"int32\")\n",
     "    take = relay.take(data, indices, axis=0)\n",
     "    mod = tvm.IRModule.from_expr(take)\n",
     "\n",
     "    mod_params = {\n",
     "        \"data\": np.random.uniform(-1, 1, size=[1, 10]).astype(\"float32\"),\n",
     "        \"indices\": np.array([[0]]).astype(\"int32\"),\n",
     "    }\n",
     "    output_mod = verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01\n",
     "    )\n",
     "\n",
     "    # Create expected module\n",
     "    # `indices` (integer-valued, built above) is reused as-is; only the float input is cast.\n",
     "    data = relay.cast(relay.var(\"data\", shape=[1, 10]), target_precision)\n",
     "    take = relay.take(data, indices, axis=0)\n",
     "    expected_mod = tvm.IRModule.from_expr(take)\n",
     "    expected_mod = InferType()(expected_mod)\n",
     "    assert tvm.ir.structural_equal(expected_mod, output_mod)\n",
    "\n",
    "\n",
     "def test_clip(target_precision):\n",
     "    \"\"\"clip with bounds outside the fp16 representable range.\n",
     "\n",
     "    For float16 the op must stay in fp32; bfloat16 (wider range) is converted.\n",
     "    \"\"\"\n",
     "    data = relay.var(\"data\", shape=[1, 10], dtype=\"float32\")\n",
     "    res = relay.clip(data, a_min=-128000, a_max=128000)\n",
     "\n",
     "    mod = tvm.IRModule.from_expr(res)\n",
     "\n",
     "    mod_params = {\n",
     "        \"data\": np.random.uniform(-1, 1, size=[1, 10]).astype(\"float32\"),\n",
     "    }\n",
     "    output_mod = verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01\n",
     "    )\n",
     "\n",
     "    # Create expected module\n",
     "    # Only bfloat16 can represent +/-128000, so only then is the input cast.\n",
     "    if target_precision == \"bfloat16\":\n",
     "        data = relay.cast(relay.var(\"data\", shape=[1, 10]), target_precision)\n",
     "    res = relay.clip(data, a_min=-128000, a_max=128000)\n",
     "    expected_mod = tvm.IRModule.from_expr(res)\n",
     "    expected_mod = InferType()(expected_mod)\n",
     "    assert tvm.ir.structural_equal(expected_mod, output_mod)\n",
    "\n",
    "\n",
     "def test_clip_with_pre_op(target_precision):\n",
     "    \"\"\"As test_clip, but the clip input is produced by a preceding divide.\"\"\"\n",
     "    data = relay.var(\"data\", shape=[1, 10], dtype=\"float32\")\n",
     "    const = relay.const(5, \"float32\")\n",
     "    res = relay.divide(data, const)\n",
     "    res = relay.clip(res, a_min=-128000, a_max=128000)\n",
     "\n",
     "    mod = tvm.IRModule.from_expr(res)\n",
     "\n",
     "    mod_params = {\n",
     "        \"data\": np.random.uniform(-1, 1, size=[1, 10]).astype(\"float32\"),\n",
     "    }\n",
     "    output_mod = verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01\n",
     "    )\n",
     "\n",
     "    # Create expected module\n",
     "    data = relay.cast(relay.var(\"data\", shape=[1, 10]), target_precision)\n",
     "    const = relay.cast(relay.const(5, \"float32\"), target_precision)\n",
     "    res = relay.divide(data, const)\n",
     "    # For float16 the clip must run in fp32, so the divide result is cast back first.\n",
     "    if target_precision == \"float16\":\n",
     "        res = relay.cast(res, \"float32\")\n",
     "    res = relay.clip(res, a_min=-128000, a_max=128000)\n",
     "    expected_mod = tvm.IRModule.from_expr(res)\n",
     "    expected_mod = InferType()(expected_mod)\n",
     "    assert tvm.ir.structural_equal(expected_mod, output_mod)\n",
    "\n",
    "\n",
     "def test_loop(target_precision):\n",
     "    \"\"\"A while_loop over int32 values only: the pass should leave the module unchanged.\"\"\"\n",
     "    i = relay.var(\"i\", shape=(), dtype=\"int32\")\n",
     "    st = relay.var(\"st\", shape=(relay.Any(), 1), dtype=\"int32\")\n",
     "\n",
     "    def int32(val):\n",
     "        # Helper to build an int32 scalar constant.\n",
     "        return relay.const(val, \"int32\")\n",
     "\n",
     "    def _cond(i, st):\n",
     "        # Loop condition: continue while i < 10.\n",
     "        return relay.op.min(relay.op.less(i, int32(10)))\n",
     "\n",
     "    def _body(i, st):\n",
     "        # Loop body: append the current counter to the accumulated state.\n",
     "        i_vec = relay.op.reshape(i, (1, 1))\n",
     "        ret = relay.op.concatenate([st, i_vec], axis=0)\n",
     "        return i + int32(1), ret\n",
     "\n",
     "    loop = relay.loops.while_loop(_cond, [i, st], _body)\n",
     "    start = relay.var(\"start\", shape=(), dtype=\"int32\")\n",
     "    body = loop(start, relay.op.reshape(relay.const(0), newshape=(1, 1)))\n",
     "    func = relay.Function([start], relay.TupleGetItem(body, 1))\n",
     "    mod = tvm.IRModule()\n",
     "    mod[\"main\"] = func\n",
     "\n",
     "    mod_params = {\n",
     "        \"start\": np.random.uniform(-1, 1, size=()).astype(\"int32\"),\n",
     "    }\n",
     "    output_mod = verify_mixed_precision_output_close(\n",
     "        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01\n",
     "    )\n",
     "\n",
     "    # Create expected module\n",
     "    expected_mod = InferType()(mod)\n",
     "    assert tvm.ir.structural_equal(expected_mod, output_mod)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py312x",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
