{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Relay build-module tests"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tvm.testing  # was `import testing`, which is not an importable module"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Licensed to the Apache Software Foundation (ASF) under one\n",
    "# or more contributor license agreements.  See the NOTICE file\n",
    "# distributed with this work for additional information\n",
    "# regarding copyright ownership.  The ASF licenses this file\n",
    "# to you under the Apache License, Version 2.0 (the\n",
    "# \"License\"); you may not use this file except in compliance\n",
    "# with the License.  You may obtain a copy of the License at\n",
    "#\n",
    "#   http://www.apache.org/licenses/LICENSE-2.0\n",
    "#\n",
    "# Unless required by applicable law or agreed to in writing,\n",
    "# software distributed under the License is distributed on an\n",
    "# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n",
    "# KIND, either express or implied.  See the License for the\n",
    "# specific language governing permissions and limitations\n",
    "# under the License.\n",
    "import numpy as np\n",
    "\n",
    "import tvm\n",
    "from tvm import te\n",
    "from tvm import relay, runtime\n",
    "from tvm.contrib.nvcc import have_fp16\n",
    "import tvm.testing\n",
    "\n",
    "\n",
    "def test_basic_build():\n",
    "    \"\"\"Build a small dense -> relu -> add Relay module on CPU and check numerics.\"\"\"\n",
    "    target_name = \"llvm\"\n",
    "    device = tvm.cpu()\n",
    "    # Construct the Relay function: relu(dense(a, b)) + c.\n",
    "    var_a = relay.var(\"a\", dtype=\"float32\", shape=(16, 8))\n",
    "    var_b = relay.var(\"b\", dtype=\"float32\", shape=(8, 8))\n",
    "    var_c = relay.var(\"c\", dtype=\"float32\", shape=(16, 8))\n",
    "    dense_out = relay.nn.dense(var_a, var_b)\n",
    "    relu_out = relay.nn.relu(dense_out)\n",
    "    func = relay.Function([var_a, var_b, var_c], relu_out + var_c)\n",
    "    # Random inputs resident on the target device; b and c are bound as params.\n",
    "    a_data = tvm.nd.array(np.random.uniform(-1, 1, (16, 8)).astype(\"float32\"), device=device)\n",
    "    b_data = tvm.nd.array(np.random.uniform(-1, 1, (8, 8)).astype(\"float32\"), device=device)\n",
    "    c_data = tvm.nd.array(np.random.uniform(-1, 1, (16, 8)).astype(\"float32\"), device=device)\n",
    "    params = {\"b\": b_data, \"c\": c_data}\n",
    "    # Build with an explicit device_type -> target mapping.\n",
    "    targets = {tvm.tir.IntImm(\"int32\", device.device_type): target_name}\n",
    "    mod = tvm.IRModule.from_expr(func)\n",
    "    func_in_mod = mod[\"main\"]\n",
    "    assert mod[\"main\"] == func_in_mod, \"cannot compare function to itself\"\n",
    "\n",
    "    lib = relay.build(mod, targets, \"llvm\", params=params)\n",
    "    assert mod[\"main\"] == func_in_mod, \"relay.build changed module in-place\"\n",
    "\n",
    "    # Execute the compiled module and compare against a NumPy reference.\n",
    "    rt = tvm.contrib.graph_executor.GraphModule(lib[\"default\"](device))\n",
    "    rt.set_input(\"a\", a_data)\n",
    "    rt.run()\n",
    "    out = rt.get_output(0)\n",
    "\n",
    "    expected = np.maximum(np.dot(a_data.numpy(), b_data.numpy().T), 0) + c_data.numpy()\n",
    "    np.testing.assert_allclose(out.numpy(), expected, atol=1e-5, rtol=1e-5)\n",
    "\n",
    "\n",
    "@tvm.testing.requires_cuda\n",
    "def test_fp16_build():\n",
    "    \"\"\"Compile and run a simple fp16 elementwise add on a CUDA device.\"\"\"\n",
    "    dtype = \"float16\"\n",
    "\n",
    "    dev = tvm.cuda(0)\n",
    "    # Older GPUs lack fp16 support; skip there rather than fail.\n",
    "    if dtype == \"float16\" and not have_fp16(dev.compute_version):\n",
    "        print(\"skip because gpu does not support fp16\")\n",
    "        return\n",
    "\n",
    "    lhs = relay.var(\"x\", dtype=dtype, shape=(4, 4))\n",
    "    rhs = relay.var(\"y\", dtype=dtype, shape=(4, 4))\n",
    "    func = relay.Function([lhs, rhs], lhs + rhs)\n",
    "    x_data = tvm.nd.array(np.random.uniform(-1, 1, (4, 4)).astype(dtype), device=dev)\n",
    "    y_data = tvm.nd.array(np.random.uniform(-1, 1, (4, 4)).astype(dtype), device=dev)\n",
    "    params = {\"x\": x_data, \"y\": y_data}\n",
    "\n",
    "    # Build for CUDA; the result unpacks into graph JSON, module, and params.\n",
    "    g_json, mmod, params = relay.build(func, \"cuda\", params=params)\n",
    "\n",
    "    # Execute and compare with the NumPy sum of the two inputs.\n",
    "    rt = tvm.contrib.graph_executor.create(g_json, mmod, dev)\n",
    "    rt.load_params(runtime.save_param_dict(params))\n",
    "    rt.run()\n",
    "    out = rt.get_output(0)\n",
    "\n",
    "    np.testing.assert_allclose(out.numpy(), x_data.numpy() + y_data.numpy(), atol=1e-5, rtol=1e-5)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "@tvm.testing.requires_llvm\n",
    "def test_bf16_build():\n",
    "    \"\"\"Smoke test: a conv + batch_norm + pooling + softmax net builds in bfloat16.\"\"\"\n",
    "    data = relay.var(\"data\", shape=(1, 3, 224, 224), dtype=\"float32\")\n",
    "    weight = relay.var(\"weight\", shape=(64, 3, 7, 7), dtype=\"float32\")\n",
    "    bn_gamma = relay.var(\"gamma\", shape=(64,), dtype=\"float32\")\n",
    "    bn_beta = relay.var(\"beta\", shape=(64,), dtype=\"float32\")\n",
    "    bn_mean = relay.var(\"mean\", shape=(64,), dtype=\"float32\")\n",
    "    bn_var = relay.var(\"var\", shape=(64,), dtype=\"float32\")\n",
    "    # Random fp32 parameter tensors, keyed by the Relay variable names.\n",
    "    param_shapes = [\n",
    "        (\"weight\", (64, 3, 7, 7)),\n",
    "        (\"gamma\", (64,)),\n",
    "        (\"beta\", (64,)),\n",
    "        (\"mean\", (64,)),\n",
    "        (\"var\", (64,)),\n",
    "    ]\n",
    "    params = {\n",
    "        name: np.random.uniform(-1, 1, size=shape).astype(\"float32\")\n",
    "        for name, shape in param_shapes\n",
    "    }\n",
    "    # All compute runs in bfloat16: inputs are cast before each op.\n",
    "    conv = relay.nn.conv2d(\n",
    "        relay.cast(data, \"bfloat16\"),\n",
    "        relay.cast(weight, \"bfloat16\"),\n",
    "        strides=(2, 2),\n",
    "        padding=(3, 3, 3, 3),\n",
    "        channels=64,\n",
    "        kernel_size=(7, 7),\n",
    "        out_dtype=\"bfloat16\",\n",
    "    )\n",
    "    bn = relay.nn.batch_norm(\n",
    "        conv,\n",
    "        relay.cast(bn_gamma, \"bfloat16\"),\n",
    "        relay.cast(bn_beta, \"bfloat16\"),\n",
    "        relay.cast(bn_mean, \"bfloat16\"),\n",
    "        relay.cast(bn_var, \"bfloat16\"),\n",
    "    )\n",
    "    max_pooled = relay.nn.max_pool2d(relay.nn.relu(bn[0]), pool_size=(2, 2), strides=(2, 2))\n",
    "    avg_pooled = relay.nn.avg_pool2d(max_pooled, pool_size=(2, 2), strides=(2, 2))\n",
    "    net = relay.nn.softmax(relay.nn.batch_flatten(avg_pooled))\n",
    "    mod = tvm.IRModule.from_expr(net)\n",
    "    # Building without raising is the whole assertion here.\n",
    "    with tvm.transform.PassContext(opt_level=3):\n",
    "        relay.build(mod, target=\"llvm\", params=params)\n",
    "\n",
    "\n",
    "@tvm.testing.parametrize_targets(\"llvm\", \"cuda\")\n",
    "def test_fp16_conversion(target, dev):\n",
    "    \"\"\"Check that float32 <-> float16 casts compile and match NumPy's astype.\"\"\"\n",
    "    if target == \"cuda\" and not have_fp16(dev.compute_version):\n",
    "        print(\"skip because gpu does not support fp16\")\n",
    "        return\n",
    "\n",
    "    n = 10\n",
    "\n",
    "    # Exercise both cast directions.\n",
    "    for src, dst in [(\"float32\", \"float16\"), (\"float16\", \"float32\")]:\n",
    "        inp = relay.var(\"x\", relay.TensorType((n,), src))\n",
    "        func = relay.Function([inp], inp.astype(dst))\n",
    "\n",
    "        # Random input values spread around zero.\n",
    "        x_data = tvm.nd.array(n * np.random.randn(n).astype(src) - n / 2)\n",
    "\n",
    "        # Build the single-cast module.\n",
    "        with tvm.transform.PassContext(opt_level=1):\n",
    "            g_json, mmod, params = relay.build(tvm.IRModule.from_expr(func), target)\n",
    "\n",
    "        # Run and compare against NumPy's own cast.\n",
    "        rt = tvm.contrib.graph_executor.create(g_json, mmod, dev)\n",
    "        rt.set_input(\"x\", x_data)\n",
    "        rt.run()\n",
    "        out = rt.get_output(0)\n",
    "\n",
    "        np.testing.assert_allclose(out.numpy(), x_data.numpy().astype(dst), atol=1e-5, rtol=1e-5)\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    test_basic_build()\n",
    "    test_fp16_build()\n",
    "    # test_fp16_conversion is parametrized over (target, dev); calling it with no\n",
    "    # arguments raises TypeError, so supply explicit target/device pairs here.\n",
    "    test_fp16_conversion(\"llvm\", tvm.cpu(0))\n",
    "    if tvm.cuda(0).exist:\n",
    "        test_fp16_conversion(\"cuda\", tvm.cuda(0))\n",
    "    test_bf16_build()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py312x",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
