{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "86b403bf-fadd-4d14-81a1-d805eed113ae",
   "metadata": {},
   "source": [
    "# Fast and Efficient Code Generation with BitBLAS Roller Component\n",
    "\n",
     "Reimplemented and improved from **OSDI '22 Roller**: https://www.usenix.org/system/files/osdi22-zhu.pdf\n",
    "\n",
    "Core Code: https://github.com/microsoft/BitBLAS/blob/main/bitblas/base/roller\n",
    "\n",
     "It takes only seconds to generate high-performance kernels via hardware-aware, white-box search-space recommendation.\n",
    "\n",
    "<div style=\"text-align: center;\">\n",
    "    <img src=\"./img/roller.png\" alt=\"BitBLAS Roller\" style=\"width: 30%;\">\n",
    "</div>\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "8a8a16cc-64c2-4568-bb1f-9ef391a6ebd7",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Initial imports: BitBLAS and its bundled TVM\n",
    "import bitblas\n",
    "from bitblas import tvm as tvm"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8ebb1b8b-9348-4ac5-8a5b-32658e414012",
   "metadata": {},
   "source": [
     "## 1. Getting started with an element-wise add\n",
     "Tensor expression: `B = A + 1.0`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "e61b868a-0e69-4e46-a517-7b91a043a63f",
   "metadata": {},
   "outputs": [],
   "source": [
    "from tvm import te\n",
    "\n",
    "A = te.placeholder((1024, 1024), name=\"A\", dtype=\"float16\")\n",
    "\n",
    "def fcompute(i, j):\n",
    "    return A[i, j] + 1.0\n",
    "\n",
    "B = te.compute((1024, 1024), fcompute, name=\"B\")\n",
    "\n",
    "args = [A, B]\n",
    "func = te.create_prim_func(args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "289e13e2-6eca-4b11-a788-a9ac1a7a8acf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "# from tvm.script import tir as T\n",
      "\n",
      "@T.prim_func\n",
      "def main(A: T.Buffer((1024, 1024), \"float16\"), B: T.Buffer((1024, 1024), \"float32\")):\n",
      "    T.func_attr({\"tir.noalias\": T.bool(True)})\n",
      "    # with T.block(\"root\"):\n",
      "    for i, j in T.grid(1024, 1024):\n",
      "        with T.block(\"B\"):\n",
      "            v_i, v_j = T.axis.remap(\"SS\", [i, j])\n",
      "            T.reads(A[v_i, v_j])\n",
      "            T.writes(B[v_i, v_j])\n",
      "            B[v_i, v_j] = T.Cast(\"float32\", A[v_i, v_j]) + T.float32(1)\n"
     ]
    }
   ],
   "source": [
    "print(func) # TIR Script Function"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "ca3448f5-a796-4089-981b-eb4ff9f17a48",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "target='nvidia/geforce-rtx-4090'\n"
     ]
    }
   ],
   "source": [
     "# Import fast-tuning related toolkits\n",
    "from bitblas.base.roller.policy import DefaultPolicy\n",
    "from bitblas.base.arch import CUDA\n",
    "from bitblas.base.utils import apply_and_build\n",
    "\n",
    "target = bitblas.auto_detect_nvidia_target()\n",
    "print(f\"{target=}\")\n",
    "arch = CUDA(target)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "17230883-f5cb-40d4-8ed2-f069a5c07d2a",
   "metadata": {},
   "source": [
    "### 2. Example Arch: CUDA\n",
    "Codebase: https://github.com/microsoft/BitBLAS/blob/main/bitblas/base/arch/cuda.py\n",
    "\n",
    "```python\n",
    "class CUDA(TileDevice):\n",
    "\n",
    "    def __init__(self, target: Union[Target, str]):\n",
    "        if isinstance(target, str):\n",
    "            target = tvm.target.Target(target)\n",
    "        self.target = target\n",
    "        self.sm_version = check_sm_version(self.target.arch)\n",
    "        device = tvm.runtime.cuda(0)\n",
    "        if not device.exist:\n",
    "            raise RuntimeError(\"Cannot find cuda device 0.\")\n",
    "        self.device: tvm.runtime.Device = device\n",
    "        self.platform: str = \"CUDA\"\n",
    "        self.smem_cap = device.max_shared_memory_per_block\n",
    "        self.compute_max_core = device.multi_processor_count\n",
    "        self.warp_size = device.warp_size\n",
    "        self.compute_capability = device.compute_version.replace(\".\", \"\")\n",
    "        self.reg_cap: int = 65536\n",
    "        self.max_smem_usage: int = 2 * self.smem_cap\n",
    "        self.sm_partition: int = 4\n",
    "        self.l2_cache_size_bytes: int = target.l2_cache_size_bytes\n",
    "        # the number of transaction size in bytes\n",
    "        self.transaction_size: List[int] = [32, 128]  # in bytes\n",
    "        # bandwidth in MB/s, will be used for recommend basic tile size\n",
    "        # TODO(lei): find some way to get the real bandwidth\n",
    "        # However, the ratio of bandwidth between different devices can\n",
    "        # be similar. The bandwidth can work for another devices as well.\n",
    "        self.bandwidth: List[int] = [750, 12080]\n",
    "        # get the available tensor instructions during runtime to avoid\n",
    "        # the dependency of the tensor intrinsics registration\n",
    "        self.available_tensor_instructions: List[TensorInstruction] = None\n",
    "\n",
    "    def get_avaliable_tensorintrin_shapes(self):\n",
    "        from tvm.tir.tensor_intrin.cuda import get_wmma_intrin_group, get_mma_intrin_group\n",
    "\n",
    "        self.available_tensor_instructions = (\n",
    "            TensorInstruction(\"mma\", get_mma_intrin_group, [16, 16]),\n",
    "            TensorInstruction(\"wmma\", get_wmma_intrin_group, [16, 16]),\n",
    "        )\n",
    "        return [t.shape for t in self.available_tensor_instructions]\n",
    "\n",
    "    def __repr__(self):\n",
    "        return f\"CUDA({self.target})\"\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "513fa032-0d29-482f-ba37-96823c826391",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'block': [128, 128], 'thread': [16, 8], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [64, 128], 'thread': [8, 16], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [128, 64], 'thread': [16, 8], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [128, 256], 'thread': [8, 16], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [256, 128], 'thread': [16, 8], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [64, 64], 'thread': [16, 8], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [32, 128], 'thread': [8, 16], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [128, 32], 'thread': [16, 8], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [64, 256], 'thread': [8, 16], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [256, 64], 'thread': [16, 8], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [32, 64], 'thread': [8, 16], 'rstep': [128], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [64, 32], 'thread': [16, 8], 'rstep': [128], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [32, 32], 'thread': [16, 8], 'rstep': [128], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [16, 128], 'thread': [4, 32], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [128, 16], 'thread': [32, 4], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [32, 256], 'thread': [4, 32], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [256, 32], 'thread': [32, 4], 'rstep': [64], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [16, 64], 'thread': [8, 16], 'rstep': [128], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [64, 16], 'thread': [16, 8], 'rstep': [128], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n",
      "{'block': [16, 32], 'thread': [8, 16], 'rstep': [256], 'step': [1, 2], 'vectorize': {'A': 8, 'B': 8}}\n"
     ]
    }
   ],
   "source": [
    "policy = DefaultPolicy(func=func, arch=arch)\n",
    "configs = policy.emit_config(topk=20)\n",
    "\n",
    "for config in configs:\n",
    "    print(config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "9ccebcf1-bdca-4734-a8e6-0c6b9ef5332f",
   "metadata": {},
   "outputs": [],
   "source": [
    "bitblas.set_log_level(\"Debug\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "d72a8c4f-4947-473d-a537-53b0aafd605e",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [2, 1024], 'thread': [2, 64], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [4, 512], 'thread': [4, 32], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [8, 256], 'thread': [8, 16], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [16, 128], 'thread': [16, 8], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [32, 64], 'thread': [16, 8], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [1, 1024], 'thread': [1, 128], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [2, 512], 'thread': [2, 64], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [4, 256], 'thread': [4, 32], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [8, 128], 'thread': [8, 16], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [16, 64], 'thread': [16, 8], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [1, 512], 'thread': [1, 128], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [2, 256], 'thread': [2, 64], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [4, 128], 'thread': [4, 32], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [8, 64], 'thread': [8, 16], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [1, 256], 'thread': [1, 128], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [2, 128], 'thread': [2, 64], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [4, 64], 'thread': [4, 32], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [1, 128], 'thread': [1, 128], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [2, 64], 'thread': [2, 64], 'rstep': []}\n",
      "2024-10-24 12:57:34 [BitBLAS:DEBUG]: Apply config {'block': [1, 64], 'thread': [1, 64], 'rstep': []}\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Evaluation with config {'block': [2, 1024], 'thread': [2, 64], 'rstep': []}\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Evaluation with config {'block': [4, 512], 'thread': [4, 32], 'rstep': []}\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Evaluation with config {'block': [8, 256], 'thread': [8, 16], 'rstep': []}\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Evaluation with config {'block': [16, 128], 'thread': [16, 8], 'rstep': []}\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Evaluation with config {'block': [32, 64], 'thread': [16, 8], 'rstep': []}\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Evaluation with config {'block': [1, 1024], 'thread': [1, 128], 'rstep': []}\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Evaluation with config {'block': [2, 512], 'thread': [2, 64], 'rstep': []}\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Time cost of this config: 0.006 ms\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Evaluation with config {'block': [4, 256], 'thread': [4, 32], 'rstep': []}\n",
      "2024-10-24 12:58:01 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Evaluation with config {'block': [8, 128], 'thread': [8, 16], 'rstep': []}\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Evaluation with config {'block': [16, 64], 'thread': [16, 8], 'rstep': []}\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Evaluation with config {'block': [1, 512], 'thread': [1, 128], 'rstep': []}\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Evaluation with config {'block': [2, 256], 'thread': [2, 64], 'rstep': []}\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Evaluation with config {'block': [4, 128], 'thread': [4, 32], 'rstep': []}\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Evaluation with config {'block': [8, 64], 'thread': [8, 16], 'rstep': []}\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Evaluation with config {'block': [1, 256], 'thread': [1, 128], 'rstep': []}\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Evaluation with config {'block': [2, 128], 'thread': [2, 64], 'rstep': []}\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Time cost of this config: 0.008 ms\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Evaluation with config {'block': [4, 64], 'thread': [4, 32], 'rstep': []}\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Time cost of this config: 0.011 ms\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Evaluation with config {'block': [1, 128], 'thread': [1, 128], 'rstep': []}\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Time cost of this config: 0.007 ms\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Evaluation with config {'block': [2, 64], 'thread': [2, 64], 'rstep': []}\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Evaluation with config {'block': [1, 64], 'thread': [1, 64], 'rstep': []}\n",
      "2024-10-24 12:58:02 [BitBLAS:INFO]: Time cost of this config: 0.005 ms\n"
     ]
    }
   ],
   "source": [
    "cpresults, best = apply_and_build(func, configs, arch, parallel_build=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "1ad2c473-d3c3-4ee1-9928-2ed1fc44b03b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "# from tvm.script import ir as I\n",
      "# from tvm.script import tir as T\n",
      "\n",
      "@I.ir_module\n",
      "class Module:\n",
      "    @T.prim_func\n",
      "    def main(A: T.Buffer((1024, 1024), \"float16\"), B: T.Buffer((1024, 1024), \"float32\")):\n",
      "        T.func_attr({\"tir.noalias\": T.bool(True)})\n",
      "        # with T.block(\"root\"):\n",
      "        for ax0_0_ax1_0_fused in T.thread_binding(2048, thread=\"blockIdx.x\"):\n",
      "            for ax1_1_0 in T.thread_binding(4, thread=\"vthread.x\"):\n",
      "                for ax0_1_0 in T.thread_binding(1, thread=\"vthread.y\"):\n",
      "                    for ax0_1_1_0_ax1_1_1_0_fused in T.thread_binding(128, thread=\"threadIdx.x\"):\n",
      "                        for ax0_1_1_1, ax1_1_1_1 in T.grid(1, 1):\n",
      "                            with T.block(\"B\"):\n",
      "                                v0 = T.axis.spatial(1024, ax0_0_ax1_0_fused // 2 + ax0_1_0 + ax0_1_1_1)\n",
      "                                v1 = T.axis.spatial(1024, ax0_0_ax1_0_fused % 2 * 512 + ax1_1_0 * 128 + ax0_1_1_0_ax1_1_1_0_fused + ax1_1_1_1)\n",
      "                                T.reads(A[v0, v1])\n",
      "                                T.writes(B[v0, v1])\n",
      "                                B[v0, v1] = T.Cast(\"float32\", A[v0, v1]) + T.float32(1)\n"
     ]
    }
   ],
   "source": [
     "# Get the scheduled TensorIR module of the best candidate\n",
    "print(best.sch.mod)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "0b596f7d-29ae-4db9-a207-287638a97053",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 530)\n",
      "\n",
      "typedef unsigned short uint16_t;\n",
      "typedef unsigned char uint8_t;\n",
      "typedef signed char int8_t;\n",
      "typedef int int32_t;\n",
      "typedef unsigned long long uint64_t;\n",
      "typedef unsigned int uint32_t;\n",
      "\n",
      "#define TVM_FORCE_INLINE inline __attribute__((always_inline))\n",
      "#define TVM_XINLINE TVM_FORCE_INLINE __device__ __host__\n",
      "#define TVM_ALIGNED(x) __attribute__ ((aligned(x)))\n",
      "#define TVM_HALF_OPERATOR(RTYPE, OP)                              \\\n",
      "  TVM_XINLINE RTYPE operator OP (half a, half b) {                \\\n",
      "    return RTYPE(float(a) OP float(b));                           \\\n",
      "  }                                                               \\\n",
      "  template<typename T>                                            \\\n",
      "  TVM_XINLINE RTYPE operator OP (half a, T b) {                   \\\n",
      "    return RTYPE(float(a) OP float(b));                           \\\n",
      "  }                                                               \\\n",
      "  template<typename T>                                            \\\n",
      "  TVM_XINLINE RTYPE operator OP (T a, half b) {                   \\\n",
      "    return RTYPE(float(a) OP float(b));                           \\\n",
      "  }\n",
      "\n",
      "#define TVM_HALF_ASSIGNOP(AOP, OP)                                \\\n",
      "  template<typename T>                                            \\\n",
      "  TVM_XINLINE half operator AOP (const T& a) {                    \\\n",
      "    return *this = half(float(*this) OP float(a));                \\\n",
      "  }                                                               \\\n",
      "  template<typename T>                                            \\\n",
      "  TVM_XINLINE half operator AOP (const volatile T& a) volatile {  \\\n",
      "    return *this = half(float(*this) OP float(a));                \\\n",
      "  }\n",
      "\n",
      "class TVM_ALIGNED(2) half {\n",
      " public:\n",
      "  uint16_t half_;\n",
      "\n",
      "  static TVM_XINLINE half Binary(uint16_t value) {\n",
      "    half res;\n",
      "    res.half_ = value;\n",
      "    return res;\n",
      "  }\n",
      "\n",
      "  TVM_XINLINE half() {}\n",
      "\n",
      "  TVM_XINLINE half(const float& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const double& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const int8_t& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const uint8_t& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const int32_t& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const uint32_t& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const long long& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const uint64_t& value) { constructor(value); }\n",
      "\n",
      "  TVM_XINLINE operator float() const {                          \\\n",
      "    return float(half2float(half_));                            \\\n",
      "  }                                                             \\\n",
      "  TVM_XINLINE operator float() const volatile {                 \\\n",
      "    return float(half2float(half_));                            \\\n",
      "  }\n",
      "\n",
      "\n",
      "  TVM_HALF_ASSIGNOP(+=, +)\n",
      "  TVM_HALF_ASSIGNOP(-=, -)\n",
      "  TVM_HALF_ASSIGNOP(*=, *)\n",
      "  TVM_HALF_ASSIGNOP(/=, /)\n",
      "\n",
      "  TVM_XINLINE half operator+() {\n",
      "    return *this;\n",
      "  }\n",
      "\n",
      "  TVM_XINLINE half operator-() {\n",
      "    return half(-float(*this));\n",
      "  }\n",
      "\n",
      "  TVM_XINLINE half operator=(const half& a) {\n",
      "    half_ = a.half_;\n",
      "    return a;\n",
      "  }\n",
      "\n",
      "  template<typename T>\n",
      "  TVM_XINLINE half operator=(const T& a) {\n",
      "    return *this = half(a);\n",
      "  }\n",
      "\n",
      "  TVM_XINLINE half operator=(const half& a) volatile {\n",
      "    half_ = a.half_;\n",
      "    return a;\n",
      "  }\n",
      "\n",
      "  template<typename T>\n",
      "  TVM_XINLINE half operator=(const T& a) volatile {\n",
      "    return *this = half(a);\n",
      "  }\n",
      "\n",
      " private:\n",
      "  union Bits {\n",
      "    float f;\n",
      "    int32_t si;\n",
      "    uint32_t ui;\n",
      "  };\n",
      "\n",
      "  static int const fp16FractionBits = 10;\n",
      "  static int const fp32FractionBits = 23;\n",
      "  static int32_t const fp32FractionMask = ~(~0u << fp32FractionBits);   // == 0x7fffff\n",
      "  static int32_t const fp32HiddenBit = 1 << fp32FractionBits;   // == 0x800000\n",
      "  static int const shift = fp32FractionBits - fp16FractionBits;   // == 13\n",
      "  static int const shiftSign = 16;\n",
      "  static int32_t const expAdjust = 127 - 15;   // exp32-127 = exp16-15, so exp16 = exp32 - (127-15)\n",
      "\n",
      "  static int32_t const infN = 0x7F800000;   // flt32 infinity\n",
      "  static int32_t const maxN = 0x477FFFFF;   // max flt32 that's a flt16 normal after >> by shift\n",
      "  static int32_t const minN = 0x38800000;   // min flt16 normal as a flt32\n",
      "  static int32_t const maxZ = 0x33000000;   // max fp32 number that's still rounded to zero in fp16\n",
      "  static int32_t const signN = 0x80000000;  // flt32 sign bit\n",
      "\n",
      "  static int32_t const infC = infN >> shift;\n",
      "  static int32_t const nanN = (infC + 1) << shift;   // minimum flt16 nan as a flt32\n",
      "  static int32_t const maxC = maxN >> shift;\n",
      "  static int32_t const minC = minN >> shift;\n",
      "  static int32_t const signC = signN >> shiftSign;  // flt16 sign bit\n",
      "\n",
      "  static int32_t const mulN = 0x52000000;  // (1 << 23) / minN\n",
      "  static int32_t const mulC = 0x33800000;  // minN / (1 << (23 - shift))\n",
      "\n",
      "  static int32_t const subC = 0x003FF;  // max flt32 subnormal down shifted\n",
      "  static int32_t const norC = 0x00400;  // min flt32 normal down shifted\n",
      "\n",
      "  static int32_t const maxD = infC - maxC - 1;\n",
      "  static int32_t const minD = minC - subC - 1;\n",
      "\n",
      "  TVM_XINLINE uint16_t float2half(const float& value) const {\n",
      "    Bits v;\n",
      "    v.f = value;\n",
      "    uint32_t sign = v.si & signN;    // grab sign bit\n",
      "    v.si ^= sign;                    // clear sign bit from v\n",
      "    sign >>= shiftSign;              // logical shift sign to fp16 position\n",
      "\n",
      "    if (v.si <= maxZ) {\n",
      "      // Handle eventual zeros here to ensure\n",
      "      // vshift will not exceed 32 below.\n",
      "      v.ui = 0;\n",
      "    } else if (v.si < minN) {\n",
      "      // Handle denorms\n",
      "      uint32_t exp32 = v.ui >> fp32FractionBits;\n",
      "      int32_t exp16 = exp32 - expAdjust;\n",
      "      // If exp16 == 0 (just into the denorm range), then significant should be shifted right 1.\n",
      "      // Smaller (so negative) exp16 values should result in greater right shifts.\n",
      "      uint32_t vshift = 1 - exp16;\n",
      "      uint32_t significand = fp32HiddenBit | (v.ui & fp32FractionMask);\n",
      "      v.ui = significand >> vshift;\n",
      "      v.ui += (v.ui & 0x3fff) != 0x1000 || (significand & 0x7ff) ? 0x1000 : 0;\n",
      "    } else if (v.si <= maxN) {\n",
      "      // Handle norms\n",
      "      v.ui += (v.ui & 0x3fff) != 0x1000 ? 0x1000 : 0;\n",
      "      v.ui -= expAdjust << fp32FractionBits;\n",
      "    } else if (v.si <= infN) {\n",
      "      v.si = infN;\n",
      "    } else if (v.si < nanN) {\n",
      "      v.si = nanN;\n",
      "    }\n",
      "\n",
      "    v.ui >>= shift;\n",
      "    return sign | (v.ui & 0x7fff);\n",
      "  }\n",
      "\n",
      "  // Same as above routine, except for addition of volatile keyword\n",
      "  TVM_XINLINE uint16_t float2half(\n",
      "    const volatile float& value) const volatile {\n",
      "    Bits v;\n",
      "    v.f = value;\n",
      "    uint32_t sign = v.si & signN;    // grab sign bit\n",
      "    v.si ^= sign;                    // clear sign bit from v\n",
      "    sign >>= shiftSign;              // logical shift sign to fp16 position\n",
      "\n",
      "    if (v.si <= maxZ) {\n",
      "      // Handle eventual zeros here to ensure\n",
      "      // vshift will not exceed 32 below.\n",
      "      v.ui = 0;\n",
      "    } else if (v.si < minN) {\n",
      "      // Handle denorms\n",
      "      uint32_t exp32 = v.ui >> fp32FractionBits;\n",
      "      int32_t exp16 = exp32 - expAdjust;\n",
      "      // If exp16 == 0 (just into the denorm range), then significant should be shifted right 1.\n",
      "      // Smaller (so negative) exp16 values should result in greater right shifts.\n",
      "      uint32_t vshift = 1 - exp16;\n",
      "      uint32_t significand = fp32HiddenBit | (v.ui & fp32FractionMask);\n",
      "      v.ui = significand >> vshift;\n",
      "      v.ui += (v.ui & 0x3fff) != 0x1000 || (significand & 0x7ff) ? 0x1000 : 0;\n",
      "    } else if (v.si <= maxN) {\n",
      "      // Handle norms\n",
      "      v.ui += (v.ui & 0x3fff) != 0x1000 ? 0x1000 : 0;\n",
      "      v.ui -= expAdjust << fp32FractionBits;\n",
      "    } else if (v.si <= infN) {\n",
      "      v.si = infN;\n",
      "    } else if (v.si < nanN) {\n",
      "      v.si = nanN;\n",
      "    }\n",
      "\n",
      "    v.ui >>= shift;\n",
      "    return sign | (v.ui & 0x7fff);\n",
      "  }\n",
      "\n",
      "  TVM_XINLINE float half2float(const uint16_t& value) const {\n",
      "    Bits v;\n",
      "    v.ui = value;\n",
      "    int32_t sign = v.si & signC;\n",
      "    v.si ^= sign;\n",
      "    sign <<= shiftSign;\n",
      "    v.si ^= ((v.si + minD) ^ v.si) & -(v.si > subC);\n",
      "    v.si ^= ((v.si + maxD) ^ v.si) & -(v.si > maxC);\n",
      "    Bits s;\n",
      "    s.si = mulC;\n",
      "    s.f *= v.si;\n",
      "    int32_t mask = -(norC > v.si);\n",
      "    v.si <<= shift;\n",
      "    v.si ^= (s.si ^ v.si) & mask;\n",
      "    v.si |= sign;\n",
      "    return v.f;\n",
      "  }\n",
      "\n",
      "  TVM_XINLINE float half2float(\n",
      "    const volatile uint16_t& value) const volatile {\n",
      "    Bits v;\n",
      "    v.ui = value;\n",
      "    int32_t sign = v.si & signC;\n",
      "    v.si ^= sign;\n",
      "    sign <<= shiftSign;\n",
      "    v.si ^= ((v.si + minD) ^ v.si) & -(v.si > subC);\n",
      "    v.si ^= ((v.si + maxD) ^ v.si) & -(v.si > maxC);\n",
      "    Bits s;\n",
      "    s.si = mulC;\n",
      "    s.f *= v.si;\n",
      "    int32_t mask = -(norC > v.si);\n",
      "    v.si <<= shift;\n",
      "    v.si ^= (s.si ^ v.si) & mask;\n",
      "    v.si |= sign;\n",
      "    return v.f;\n",
      "  }\n",
      "\n",
      "  template<typename T>\n",
      "  TVM_XINLINE void constructor(const T& value) {\n",
      "    half_ = float2half(float(value));\n",
      "  }\n",
      "};\n",
      "\n",
      "TVM_HALF_OPERATOR(half, +)\n",
      "TVM_HALF_OPERATOR(half, -)\n",
      "TVM_HALF_OPERATOR(half, *)\n",
      "TVM_HALF_OPERATOR(half, /)\n",
      "TVM_HALF_OPERATOR(bool, >)\n",
      "TVM_HALF_OPERATOR(bool, <)\n",
      "TVM_HALF_OPERATOR(bool, >=)\n",
      "TVM_HALF_OPERATOR(bool, <=)\n",
      "\n",
      "TVM_XINLINE half __float2half_rn(const float a) {\n",
      "  return half(a);\n",
      "}\n",
      "#else\n",
      "#include <cuda_fp16.h>\n",
      "__device__ half max(half a, half b)\n",
      "{\n",
      "  return __hgt(__half(a), __half(b)) ? a : b;\n",
      "}\n",
      "__device__ half min(half a, half b)\n",
      "{\n",
      "  return __hlt(__half(a), __half(b)) ? a : b;\n",
      "}\n",
      "#endif\n",
      "\n",
      "\n",
      "// Pack two half values.\n",
      "static inline __device__ __host__ unsigned\n",
      "__pack_half2(const half x, const half y) {\n",
      "  unsigned v0 = *((unsigned short *)&x);\n",
      "  unsigned v1 = *((unsigned short *)&y);\n",
      "  return (v1 << 16) | v0;\n",
      "}\n",
      "\n",
      "#define CUDA_UNSUPPORTED_HALF_MATH_BINARY(HALF_MATH_NAME, FP32_MATH_NAME) \\\n",
      "static inline __device__ __host__ half HALF_MATH_NAME(half x, half y) {   \\\n",
      "  float tmp_x = __half2float(x);                                          \\\n",
      "  float tmp_y = __half2float(y);                                          \\\n",
      "  float result = FP32_MATH_NAME(tmp_x, tmp_y);                            \\\n",
      "  return __float2half(result);                                            \\\n",
      "}\n",
      "\n",
      "#define CUDA_UNSUPPORTED_HALF_MATH_UNARY(HALF_MATH_NAME, FP32_MATH_NAME) \\\n",
      "static inline __device__ __host__ half HALF_MATH_NAME(half x) {          \\\n",
      "  float tmp_x = __half2float(x);                                         \\\n",
      "  float result = FP32_MATH_NAME(tmp_x);                                  \\\n",
      "  return __float2half(result);                                           \\\n",
      "}\n",
      "\n",
      "// Some fp16 math functions are not supported in cuda_fp16.h,\n",
      "// so we define them here to make sure the generated CUDA code\n",
      "// is valid.\n",
      "#if defined(__CUDA_ARCH__)\n",
      "#if (__CUDA_ARCH__ >= 530)\n",
      "CUDA_UNSUPPORTED_HALF_MATH_BINARY(hpow, powf)\n",
      "CUDA_UNSUPPORTED_HALF_MATH_UNARY(htanh, tanhf)\n",
      "CUDA_UNSUPPORTED_HALF_MATH_UNARY(htan, tanf)\n",
      "CUDA_UNSUPPORTED_HALF_MATH_UNARY(hatan, atanf)\n",
      "CUDA_UNSUPPORTED_HALF_MATH_UNARY(herf, erf)\n",
      "#else\n",
      "CUDA_UNSUPPORTED_HALF_MATH_UNARY(hexp, exp)\n",
      "#endif\n",
      "#endif\n",
      "\n",
      "#undef CUDA_UNSUPPORTED_HALF_MATH_BINARY\n",
      "#undef CUDA_UNSUPPORTED_HALF_MATH_UNARY\n",
      "\n",
      "struct __align__(8) half4 {\n",
      "  __half x, y, z, w;\n",
      "  __host__ __device__ half4() : x(__half(0)), y(__half(0)), z(__half(0)), w(__half(0)) {}\n",
      "  __host__ __device__ half4(__half x, __half y, __half z, __half w) : x(x), y(y), z(z), w(w) {}\n",
      "\n",
      "};\n",
      "__host__ __device__ half4 make_half4(__half x, __half y, __half z, __half w) {\n",
      "    return half4(x, y, z, w);\n",
      "}\n",
      "\n",
      "#if (((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 4)) || \\\n",
      "     (__CUDACC_VER_MAJOR__ > 11))\n",
      "#define TVM_ENABLE_L2_PREFETCH 1\n",
      "#else\n",
      "#define TVM_ENABLE_L2_PREFETCH 0\n",
      "#endif\n",
      "\n",
      "#ifdef _WIN32\n",
      "  using uint = unsigned int;\n",
      "  using uchar = unsigned char;\n",
      "  using ushort = unsigned short;\n",
      "  using int64_t = long long;\n",
      "  using uint64_t = unsigned long long;\n",
      "#else\n",
      "  #define uint unsigned int\n",
      "  #define uchar unsigned char\n",
      "  #define ushort unsigned short\n",
      "  #define int64_t long long\n",
      "  #define uint64_t unsigned long long\n",
      "#endif\n",
      "\n",
      "#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 800) \n",
      "#define TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST 1\n",
      "#else\n",
      "#define TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST 0\n",
      "#endif\n",
      "extern \"C\" __global__ void __launch_bounds__(128) main_kernel(half* __restrict__ A, float* __restrict__ B);\n",
      "extern \"C\" __global__ void __launch_bounds__(128) main_kernel(half* __restrict__ A, float* __restrict__ B) {\n",
      "  B[((((int)blockIdx.x) * 512) + ((int)threadIdx.x))] = (((float)A[((((int)blockIdx.x) * 512) + ((int)threadIdx.x))]) + 1.000000e+00f);\n",
      "  B[(((((int)blockIdx.x) * 512) + ((int)threadIdx.x)) + 128)] = (((float)A[(((((int)blockIdx.x) * 512) + ((int)threadIdx.x)) + 128)]) + 1.000000e+00f);\n",
      "  B[(((((int)blockIdx.x) * 512) + ((int)threadIdx.x)) + 256)] = (((float)A[(((((int)blockIdx.x) * 512) + ((int)threadIdx.x)) + 256)]) + 1.000000e+00f);\n",
      "  B[(((((int)blockIdx.x) * 512) + ((int)threadIdx.x)) + 384)] = (((float)A[(((((int)blockIdx.x) * 512) + ((int)threadIdx.x)) + 384)]) + 1.000000e+00f);\n",
      "}\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# get generated cuda source\n",
    "print(best.code)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e59a8f78-9a32-4aa6-9d04-e04ee08f61f6",
   "metadata": {},
   "source": [
    "## 2. GEMM tuning with Tensor Core\n",
    "Tensor Expression: $C[m, n] = \\sum_{k} A[m, k] \\times B[n, k]$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "0a0411df-bafa-4dd5-b61d-85cf79911586",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "# from tvm.script import tir as T\n",
      "\n",
      "@T.prim_func\n",
      "def main(A: T.Buffer((16384, 16384), \"float16\"), B: T.Buffer((16384, 16384), \"float16\"), C: T.Buffer((16384, 16384), \"float16\")):\n",
      "    T.func_attr({\"tir.noalias\": T.bool(True)})\n",
      "    # with T.block(\"root\"):\n",
      "    for i, j, k in T.grid(16384, 16384, 16384):\n",
      "        with T.block(\"C\"):\n",
      "            v_i, v_j, v_k = T.axis.remap(\"SSR\", [i, j, k])\n",
      "            T.reads(A[v_i, v_k], B[v_j, v_k])\n",
      "            T.writes(C[v_i, v_j])\n",
      "            with T.init():\n",
      "                C[v_i, v_j] = T.float16(0)\n",
      "            C[v_i, v_j] = C[v_i, v_j] + A[v_i, v_k] * B[v_j, v_k]\n"
     ]
    }
   ],
   "source": [
    "M = N = K = 16384\n",
    "# Describe the matrix multiplication in TE\n",
    "A = te.placeholder((M, K), name=\"A\", dtype=\"float16\")\n",
    "B = te.placeholder((N, K), name=\"B\", dtype=\"float16\")\n",
    "\n",
    "k = te.reduce_axis((0, K), name=\"k\")\n",
    "C = te.compute(\n",
    "    (M, N),\n",
    "    lambda i, j: te.sum(A[i, k].astype(\"float16\") * B[j, k].astype(\"float16\"), axis=k),\n",
    "    name=\"C\",\n",
    ")\n",
    "args = [A, B, C]\n",
    "func = te.create_prim_func(args)\n",
    "print(func)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "a1910bda-7a58-47ac-a76b-3902366fdd62",
   "metadata": {},
   "outputs": [],
   "source": [
    "from bitblas.base.roller.policy import TensorCorePolicy\n",
    "from bitblas.gpu.matmul_analysis import get_tensorized_func_and_tags"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "7aa61ed1-22a4-424b-ac19-a1f67e0b2bb5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'block': [128, 128], 'warp': [64, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [64, 256], 'warp': [32, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [256, 64], 'warp': [128, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [64, 128], 'warp': [32, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [128, 64], 'warp': [64, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [128, 256], 'warp': [64, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [256, 128], 'warp': [128, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [64, 64], 'warp': [32, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [256, 256], 'warp': [128, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [32, 256], 'warp': [16, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [256, 32], 'warp': [128, 16], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [64, 512], 'warp': [32, 256], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [512, 64], 'warp': [256, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [32, 128], 'warp': [16, 64], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [128, 32], 'warp': [64, 16], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [128, 512], 'warp': [64, 256], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [512, 128], 'warp': [256, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [32, 64], 'warp': [16, 32], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [64, 32], 'warp': [32, 16], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "{'block': [32, 32], 'warp': [16, 16], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n"
     ]
    }
   ],
   "source": [
    "tensorized_func, tags = get_tensorized_func_and_tags(func, arch.target)\n",
    "policy = TensorCorePolicy(func=tensorized_func, arch=arch, tags=tags)\n",
    "configs = policy.emit_config(topk=20)\n",
    "\n",
    "for config in configs:\n",
    "    print(config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "4b26647f-6d65-496b-a442-8463fdacf24c",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [128, 128], 'warp': [64, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [64, 256], 'warp': [32, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [256, 64], 'warp': [128, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [64, 128], 'warp': [32, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [128, 64], 'warp': [64, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [128, 256], 'warp': [64, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [256, 128], 'warp': [128, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [64, 64], 'warp': [32, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [256, 256], 'warp': [128, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [32, 256], 'warp': [16, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [256, 32], 'warp': [128, 16], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [64, 512], 'warp': [32, 256], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [512, 64], 'warp': [256, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [32, 128], 'warp': [16, 64], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [128, 32], 'warp': [64, 16], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [128, 512], 'warp': [64, 256], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [512, 128], 'warp': [256, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [32, 64], 'warp': [16, 32], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [64, 32], 'warp': [32, 16], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:31:32 [BitBLAS:DEBUG]: Apply config {'block': [32, 32], 'warp': [16, 16], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:32:29 [BitBLAS:INFO]: Evaluation with config {'block': [128, 128], 'warp': [64, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:32:29 [BitBLAS:INFO]: Time cost of this config: 32.161 ms\n",
      "2024-10-24 13:32:39 [BitBLAS:INFO]: Evaluation with config {'block': [64, 256], 'warp': [32, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:32:39 [BitBLAS:INFO]: Time cost of this config: 43.725 ms\n",
      "2024-10-24 13:32:48 [BitBLAS:INFO]: Evaluation with config {'block': [256, 64], 'warp': [128, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:32:48 [BitBLAS:INFO]: Time cost of this config: 43.864 ms\n",
      "2024-10-24 13:32:57 [BitBLAS:INFO]: Evaluation with config {'block': [64, 128], 'warp': [32, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:32:57 [BitBLAS:INFO]: Time cost of this config: 46.601 ms\n",
      "2024-10-24 13:33:06 [BitBLAS:INFO]: Evaluation with config {'block': [128, 64], 'warp': [64, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:33:06 [BitBLAS:INFO]: Time cost of this config: 60.877 ms\n",
      "2024-10-24 13:33:16 [BitBLAS:INFO]: Evaluation with config {'block': [128, 256], 'warp': [64, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:33:16 [BitBLAS:INFO]: Time cost of this config: 30.349 ms\n",
      "2024-10-24 13:33:25 [BitBLAS:INFO]: Evaluation with config {'block': [256, 128], 'warp': [128, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:33:25 [BitBLAS:INFO]: Time cost of this config: 80.994 ms\n",
      "2024-10-24 13:33:34 [BitBLAS:INFO]: Evaluation with config {'block': [64, 64], 'warp': [32, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:33:34 [BitBLAS:INFO]: Time cost of this config: 44.942 ms\n",
      "2024-10-24 13:33:44 [BitBLAS:INFO]: Evaluation with config {'block': [256, 256], 'warp': [128, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:33:44 [BitBLAS:INFO]: Time cost of this config: 69.066 ms\n",
      "2024-10-24 13:33:53 [BitBLAS:INFO]: Evaluation with config {'block': [32, 256], 'warp': [16, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:33:53 [BitBLAS:INFO]: Time cost of this config: 30.514 ms\n",
      "2024-10-24 13:34:03 [BitBLAS:INFO]: Evaluation with config {'block': [256, 32], 'warp': [128, 16], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:34:03 [BitBLAS:INFO]: Time cost of this config: 70.178 ms\n",
      "2024-10-24 13:34:12 [BitBLAS:INFO]: Evaluation with config {'block': [64, 512], 'warp': [32, 256], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:34:12 [BitBLAS:INFO]: Time cost of this config: 48.920 ms\n",
      "2024-10-24 13:34:21 [BitBLAS:INFO]: Evaluation with config {'block': [512, 64], 'warp': [256, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:34:21 [BitBLAS:INFO]: Time cost of this config: 37.340 ms\n",
      "2024-10-24 13:34:31 [BitBLAS:INFO]: Evaluation with config {'block': [32, 128], 'warp': [16, 64], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:34:31 [BitBLAS:INFO]: Time cost of this config: 73.770 ms\n",
      "2024-10-24 13:34:40 [BitBLAS:INFO]: Evaluation with config {'block': [128, 32], 'warp': [64, 16], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:34:40 [BitBLAS:INFO]: Time cost of this config: 74.823 ms\n",
      "2024-10-24 13:34:50 [BitBLAS:INFO]: Evaluation with config {'block': [128, 512], 'warp': [64, 256], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:34:50 [BitBLAS:INFO]: Time cost of this config: 78.612 ms\n",
      "2024-10-24 13:34:59 [BitBLAS:INFO]: Evaluation with config {'block': [512, 128], 'warp': [256, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:34:59 [BitBLAS:INFO]: Time cost of this config: 71.001 ms\n",
      "2024-10-24 13:35:08 [BitBLAS:INFO]: Evaluation with config {'block': [32, 64], 'warp': [16, 32], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:35:08 [BitBLAS:INFO]: Time cost of this config: 108.881 ms\n",
      "2024-10-24 13:35:18 [BitBLAS:INFO]: Evaluation with config {'block': [64, 32], 'warp': [32, 16], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:35:18 [BitBLAS:INFO]: Time cost of this config: 84.179 ms\n",
      "2024-10-24 13:35:27 [BitBLAS:INFO]: Evaluation with config {'block': [32, 32], 'warp': [16, 16], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}\n",
      "2024-10-24 13:35:27 [BitBLAS:INFO]: Time cost of this config: 83.798 ms\n"
     ]
    }
   ],
   "source": [
    "cpresults, best = apply_and_build(func, configs, arch, parallel_build=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "b6a83a8f-359f-4777-8505-67639a91f687",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "__device__ __inline__ dim3 rasterization2DColumn(const int panel_width) {\n",
      "    const auto baseBlockIdx = blockIdx.x + gridDim.x *blockIdx.y;\n",
      "    const auto totalPanel = (gridDim.x * gridDim.y +panel_width * gridDim.x - 1) / (panel_width * gridDim.x);\n",
      "    const auto totalBlock = gridDim.x * gridDim.y;\n",
      "    const auto panelIdx = baseBlockIdx / (panel_width *gridDim.x);\n",
      "    const auto strideLd = panelIdx + 1 < totalPanel ?panel_width : (totalBlock - panelIdx * (panel_width *gridDim.x)) / gridDim.x;\n",
      "    const auto bx = (panelIdx & 1) ? gridDim.x -(baseBlockIdx - panelIdx * panel_width * gridDim.x) /strideLd - 1 : (baseBlockIdx - panelIdx * panel_width *gridDim.x) / strideLd;\n",
      "    const auto by = (baseBlockIdx - panelIdx * panel_width *gridDim.x) % strideLd + panelIdx * panel_width;\n",
      "    const auto bz = blockIdx.z;\n",
      "    \n",
      "    dim3 blockIdx(bx, by, bz);\n",
      "    return blockIdx;\n",
      "}\n",
      "    #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 530)\n",
      "\n",
      "typedef unsigned short uint16_t;\n",
      "typedef unsigned char uint8_t;\n",
      "typedef signed char int8_t;\n",
      "typedef int int32_t;\n",
      "typedef unsigned long long uint64_t;\n",
      "typedef unsigned int uint32_t;\n",
      "\n",
      "#define TVM_FORCE_INLINE inline __attribute__((always_inline))\n",
      "#define TVM_XINLINE TVM_FORCE_INLINE __device__ __host__\n",
      "#define TVM_ALIGNED(x) __attribute__ ((aligned(x)))\n",
      "#define TVM_HALF_OPERATOR(RTYPE, OP)                              \\\n",
      "  TVM_XINLINE RTYPE operator OP (half a, half b) {                \\\n",
      "    return RTYPE(float(a) OP float(b));                           \\\n",
      "  }                                                               \\\n",
      "  template<typename T>                                            \\\n",
      "  TVM_XINLINE RTYPE operator OP (half a, T b) {                   \\\n",
      "    return RTYPE(float(a) OP float(b));                           \\\n",
      "  }                                                               \\\n",
      "  template<typename T>                                            \\\n",
      "  TVM_XINLINE RTYPE operator OP (T a, half b) {                   \\\n",
      "    return RTYPE(float(a) OP float(b));                           \\\n",
      "  }\n",
      "\n",
      "#define TVM_HALF_ASSIGNOP(AOP, OP)                                \\\n",
      "  template<typename T>                                            \\\n",
      "  TVM_XINLINE half operator AOP (const T& a) {                    \\\n",
      "    return *this = half(float(*this) OP float(a));                \\\n",
      "  }                                                               \\\n",
      "  template<typename T>                                            \\\n",
      "  TVM_XINLINE half operator AOP (const volatile T& a) volatile {  \\\n",
      "    return *this = half(float(*this) OP float(a));                \\\n",
      "  }\n",
      "\n",
      "class TVM_ALIGNED(2) half {\n",
      " public:\n",
      "  uint16_t half_;\n",
      "\n",
      "  static TVM_XINLINE half Binary(uint16_t value) {\n",
      "    half res;\n",
      "    res.half_ = value;\n",
      "    return res;\n",
      "  }\n",
      "\n",
      "  TVM_XINLINE half() {}\n",
      "\n",
      "  TVM_XINLINE half(const float& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const double& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const int8_t& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const uint8_t& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const int32_t& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const uint32_t& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const long long& value) { constructor(value); }\n",
      "  TVM_XINLINE explicit half(const uint64_t& value) { constructor(value); }\n",
      "\n",
      "  TVM_XINLINE operator float() const {                          \\\n",
      "    return float(half2float(half_));                            \\\n",
      "  }                                                             \\\n",
      "  TVM_XINLINE operator float() const volatile {                 \\\n",
      "    return float(half2float(half_));                            \\\n",
      "  }\n",
      "\n",
      "\n",
      "  TVM_HALF_ASSIGNOP(+=, +)\n",
      "  TVM_HALF_ASSIGNOP(-=, -)\n",
      "  TVM_HALF_ASSIGNOP(*=, *)\n",
      "  TVM_HALF_ASSIGNOP(/=, /)\n",
      "\n",
      "  TVM_XINLINE half operator+() {\n",
      "    return *this;\n",
      "  }\n",
      "\n",
      "  TVM_XINLINE half operator-() {\n",
      "    return half(-float(*this));\n",
      "  }\n",
      "\n",
      "  TVM_XINLINE half operator=(const half& a) {\n",
      "    half_ = a.half_;\n",
      "    return a;\n",
      "  }\n",
      "\n",
      "  template<typename T>\n",
      "  TVM_XINLINE half operator=(const T& a) {\n",
      "    return *this = half(a);\n",
      "  }\n",
      "\n",
      "  TVM_XINLINE half operator=(const half& a) volatile {\n",
      "    half_ = a.half_;\n",
      "    return a;\n",
      "  }\n",
      "\n",
      "  template<typename T>\n",
      "  TVM_XINLINE half operator=(const T& a) volatile {\n",
      "    return *this = half(a);\n",
      "  }\n",
      "\n",
      " private:\n",
      "  union Bits {\n",
      "    float f;\n",
      "    int32_t si;\n",
      "    uint32_t ui;\n",
      "  };\n",
      "\n",
      "  static int const fp16FractionBits = 10;\n",
      "  static int const fp32FractionBits = 23;\n",
      "  static int32_t const fp32FractionMask = ~(~0u << fp32FractionBits);   // == 0x7fffff\n",
      "  static int32_t const fp32HiddenBit = 1 << fp32FractionBits;   // == 0x800000\n",
      "  static int const shift = fp32FractionBits - fp16FractionBits;   // == 13\n",
      "  static int const shiftSign = 16;\n",
      "  static int32_t const expAdjust = 127 - 15;   // exp32-127 = exp16-15, so exp16 = exp32 - (127-15)\n",
      "\n",
      "  static int32_t const infN = 0x7F800000;   // flt32 infinity\n",
      "  static int32_t const maxN = 0x477FFFFF;   // max flt32 that's a flt16 normal after >> by shift\n",
      "  static int32_t const minN = 0x38800000;   // min flt16 normal as a flt32\n",
      "  static int32_t const maxZ = 0x33000000;   // max fp32 number that's still rounded to zero in fp16\n",
      "  static int32_t const signN = 0x80000000;  // flt32 sign bit\n",
      "\n",
      "  static int32_t const infC = infN >> shift;\n",
      "  static int32_t const nanN = (infC + 1) << shift;   // minimum flt16 nan as a flt32\n",
      "  static int32_t const maxC = maxN >> shift;\n",
      "  static int32_t const minC = minN >> shift;\n",
      "  static int32_t const signC = signN >> shiftSign;  // flt16 sign bit\n",
      "\n",
      "  static int32_t const mulN = 0x52000000;  // (1 << 23) / minN\n",
      "  static int32_t const mulC = 0x33800000;  // minN / (1 << (23 - shift))\n",
      "\n",
      "  static int32_t const subC = 0x003FF;  // max flt32 subnormal down shifted\n",
      "  static int32_t const norC = 0x00400;  // min flt32 normal down shifted\n",
      "\n",
      "  static int32_t const maxD = infC - maxC - 1;\n",
      "  static int32_t const minD = minC - subC - 1;\n",
      "\n",
      "  TVM_XINLINE uint16_t float2half(const float& value) const {\n",
      "    Bits v;\n",
      "    v.f = value;\n",
      "    uint32_t sign = v.si & signN;    // grab sign bit\n",
      "    v.si ^= sign;                    // clear sign bit from v\n",
      "    sign >>= shiftSign;              // logical shift sign to fp16 position\n",
      "\n",
      "    if (v.si <= maxZ) {\n",
      "      // Handle eventual zeros here to ensure\n",
      "      // vshift will not exceed 32 below.\n",
      "      v.ui = 0;\n",
      "    } else if (v.si < minN) {\n",
      "      // Handle denorms\n",
      "      uint32_t exp32 = v.ui >> fp32FractionBits;\n",
      "      int32_t exp16 = exp32 - expAdjust;\n",
      "      // If exp16 == 0 (just into the denorm range), then significant should be shifted right 1.\n",
      "      // Smaller (so negative) exp16 values should result in greater right shifts.\n",
      "      uint32_t vshift = 1 - exp16;\n",
      "      uint32_t significand = fp32HiddenBit | (v.ui & fp32FractionMask);\n",
      "      v.ui = significand >> vshift;\n",
      "      v.ui += (v.ui & 0x3fff) != 0x1000 || (significand & 0x7ff) ? 0x1000 : 0;\n",
      "    } else if (v.si <= maxN) {\n",
      "      // Handle norms\n",
      "      v.ui += (v.ui & 0x3fff) != 0x1000 ? 0x1000 : 0;\n",
      "      v.ui -= expAdjust << fp32FractionBits;\n",
      "    } else if (v.si <= infN) {\n",
      "      v.si = infN;\n",
      "    } else if (v.si < nanN) {\n",
      "      v.si = nanN;\n",
      "    }\n",
      "\n",
      "    v.ui >>= shift;\n",
      "    return sign | (v.ui & 0x7fff);\n",
      "  }\n",
      "\n",
      "  // Same as above routine, except for addition of volatile keyword\n",
      "  TVM_XINLINE uint16_t float2half(\n",
      "    const volatile float& value) const volatile {\n",
      "    Bits v;\n",
      "    v.f = value;\n",
      "    uint32_t sign = v.si & signN;    // grab sign bit\n",
      "    v.si ^= sign;                    // clear sign bit from v\n",
      "    sign >>= shiftSign;              // logical shift sign to fp16 position\n",
      "\n",
      "    if (v.si <= maxZ) {\n",
      "      // Handle eventual zeros here to ensure\n",
      "      // vshift will not exceed 32 below.\n",
      "      v.ui = 0;\n",
      "    } else if (v.si < minN) {\n",
      "      // Handle denorms\n",
      "      uint32_t exp32 = v.ui >> fp32FractionBits;\n",
      "      int32_t exp16 = exp32 - expAdjust;\n",
      "      // If exp16 == 0 (just into the denorm range), then significant should be shifted right 1.\n",
      "      // Smaller (so negative) exp16 values should result in greater right shifts.\n",
      "      uint32_t vshift = 1 - exp16;\n",
      "      uint32_t significand = fp32HiddenBit | (v.ui & fp32FractionMask);\n",
      "      v.ui = significand >> vshift;\n",
      "      v.ui += (v.ui & 0x3fff) != 0x1000 || (significand & 0x7ff) ? 0x1000 : 0;\n",
      "    } else if (v.si <= maxN) {\n",
      "      // Handle norms\n",
      "      v.ui += (v.ui & 0x3fff) != 0x1000 ? 0x1000 : 0;\n",
      "      v.ui -= expAdjust << fp32FractionBits;\n",
      "    } else if (v.si <= infN) {\n",
      "      v.si = infN;\n",
      "    } else if (v.si < nanN) {\n",
      "      v.si = nanN;\n",
      "    }\n",
      "\n",
      "    v.ui >>= shift;\n",
      "    return sign | (v.ui & 0x7fff);\n",
      "  }\n",
      "\n",
      "  TVM_XINLINE float half2float(const uint16_t& value) const {\n",
      "    Bits v;\n",
      "    v.ui = value;\n",
      "    int32_t sign = v.si & signC;\n",
      "    v.si ^= sign;\n",
      "    sign <<= shiftSign;\n",
      "    v.si ^= ((v.si + minD) ^ v.si) & -(v.si > subC);\n",
      "    v.si ^= ((v.si + maxD) ^ v.si) & -(v.si > maxC);\n",
      "    Bits s;\n",
      "    s.si = mulC;\n",
      "    s.f *= v.si;\n",
      "    int32_t mask = -(norC > v.si);\n",
      "    v.si <<= shift;\n",
      "    v.si ^= (s.si ^ v.si) & mask;\n",
      "    v.si |= sign;\n",
      "    return v.f;\n",
      "  }\n",
      "\n",
      "  TVM_XINLINE float half2float(\n",
      "    const volatile uint16_t& value) const volatile {\n",
      "    Bits v;\n",
      "    v.ui = value;\n",
      "    int32_t sign = v.si & signC;\n",
      "    v.si ^= sign;\n",
      "    sign <<= shiftSign;\n",
      "    v.si ^= ((v.si + minD) ^ v.si) & -(v.si > subC);\n",
      "    v.si ^= ((v.si + maxD) ^ v.si) & -(v.si > maxC);\n",
      "    Bits s;\n",
      "    s.si = mulC;\n",
      "    s.f *= v.si;\n",
      "    int32_t mask = -(norC > v.si);\n",
      "    v.si <<= shift;\n",
      "    v.si ^= (s.si ^ v.si) & mask;\n",
      "    v.si |= sign;\n",
      "    return v.f;\n",
      "  }\n",
      "\n",
      "  template<typename T>\n",
      "  TVM_XINLINE void constructor(const T& value) {\n",
      "    half_ = float2half(float(value));\n",
      "  }\n",
      "};\n",
      "\n",
      "TVM_HALF_OPERATOR(half, +)\n",
      "TVM_HALF_OPERATOR(half, -)\n",
      "TVM_HALF_OPERATOR(half, *)\n",
      "TVM_HALF_OPERATOR(half, /)\n",
      "TVM_HALF_OPERATOR(bool, >)\n",
      "TVM_HALF_OPERATOR(bool, <)\n",
      "TVM_HALF_OPERATOR(bool, >=)\n",
      "TVM_HALF_OPERATOR(bool, <=)\n",
      "\n",
      "TVM_XINLINE half __float2half_rn(const float a) {\n",
      "  return half(a);\n",
      "}\n",
      "#else\n",
      "#include <cuda_fp16.h>\n",
      "__device__ half max(half a, half b)\n",
      "{\n",
      "  return __hgt(__half(a), __half(b)) ? a : b;\n",
      "}\n",
      "__device__ half min(half a, half b)\n",
      "{\n",
      "  return __hlt(__half(a), __half(b)) ? a : b;\n",
      "}\n",
      "#endif\n",
      "\n",
      "\n",
      "// Pack two half values.\n",
      "static inline __device__ __host__ unsigned\n",
      "__pack_half2(const half x, const half y) {\n",
      "  unsigned v0 = *((unsigned short *)&x);\n",
      "  unsigned v1 = *((unsigned short *)&y);\n",
      "  return (v1 << 16) | v0;\n",
      "}\n",
      "\n",
      "#define CUDA_UNSUPPORTED_HALF_MATH_BINARY(HALF_MATH_NAME, FP32_MATH_NAME) \\\n",
      "static inline __device__ __host__ half HALF_MATH_NAME(half x, half y) {   \\\n",
      "  float tmp_x = __half2float(x);                                          \\\n",
      "  float tmp_y = __half2float(y);                                          \\\n",
      "  float result = FP32_MATH_NAME(tmp_x, tmp_y);                            \\\n",
      "  return __float2half(result);                                            \\\n",
      "}\n",
      "\n",
      "#define CUDA_UNSUPPORTED_HALF_MATH_UNARY(HALF_MATH_NAME, FP32_MATH_NAME) \\\n",
      "static inline __device__ __host__ half HALF_MATH_NAME(half x) {          \\\n",
      "  float tmp_x = __half2float(x);                                         \\\n",
      "  float result = FP32_MATH_NAME(tmp_x);                                  \\\n",
      "  return __float2half(result);                                           \\\n",
      "}\n",
      "\n",
      "// Some fp16 math functions are not supported in cuda_fp16.h,\n",
      "// so we define them here to make sure the generated CUDA code\n",
      "// is valid.\n",
      "#if defined(__CUDA_ARCH__)\n",
      "#if (__CUDA_ARCH__ >= 530)\n",
      "CUDA_UNSUPPORTED_HALF_MATH_BINARY(hpow, powf)\n",
      "CUDA_UNSUPPORTED_HALF_MATH_UNARY(htanh, tanhf)\n",
      "CUDA_UNSUPPORTED_HALF_MATH_UNARY(htan, tanf)\n",
      "CUDA_UNSUPPORTED_HALF_MATH_UNARY(hatan, atanf)\n",
      "CUDA_UNSUPPORTED_HALF_MATH_UNARY(herf, erf)\n",
      "#else\n",
      "CUDA_UNSUPPORTED_HALF_MATH_UNARY(hexp, exp)\n",
      "#endif\n",
      "#endif\n",
      "\n",
      "#undef CUDA_UNSUPPORTED_HALF_MATH_BINARY\n",
      "#undef CUDA_UNSUPPORTED_HALF_MATH_UNARY\n",
      "\n",
      "struct __align__(8) half4 {\n",
      "  __half x, y, z, w;\n",
      "  __host__ __device__ half4() : x(__half(0)), y(__half(0)), z(__half(0)), w(__half(0)) {}\n",
      "  __host__ __device__ half4(__half x, __half y, __half z, __half w) : x(x), y(y), z(z), w(w) {}\n",
      "\n",
      "};\n",
      "__host__ __device__ half4 make_half4(__half x, __half y, __half z, __half w) {\n",
      "    return half4(x, y, z, w);\n",
      "}\n",
      "__forceinline__ __device__ unsigned int\n",
      "cast_smem_ptr_to_int(const void* const smem_ptr)\n",
      "{\n",
      "  unsigned int smem_int;\n",
      "  asm volatile (\"{ .reg .u64 smem_int; cvta.to.shared.u64 smem_int, %1; cvt.u32.u64 %0, smem_int; }\"\n",
      "    : \"=r\"(smem_int) : \"l\"(smem_ptr));\n",
      "  return smem_int;\n",
      "}\n",
      "\n",
      "#if (((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 4)) || \\\n",
      "     (__CUDACC_VER_MAJOR__ > 11))\n",
      "#define TVM_ENABLE_L2_PREFETCH 1\n",
      "#else\n",
      "#define TVM_ENABLE_L2_PREFETCH 0\n",
      "#endif\n",
      "\n",
      "#ifdef _WIN32\n",
      "  using uint = unsigned int;\n",
      "  using uchar = unsigned char;\n",
      "  using ushort = unsigned short;\n",
      "  using int64_t = long long;\n",
      "  using uint64_t = unsigned long long;\n",
      "#else\n",
      "  #define uint unsigned int\n",
      "  #define uchar unsigned char\n",
      "  #define ushort unsigned short\n",
      "  #define int64_t long long\n",
      "  #define uint64_t unsigned long long\n",
      "#endif\n",
      "\n",
      "#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 800) \n",
      "#define TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST 1\n",
      "#else\n",
      "#define TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST 0\n",
      "#endif\n",
      "extern \"C\" __global__ void __launch_bounds__(128) main_kernel(half* __restrict__ A, half* __restrict__ B, half* __restrict__ C);\n",
      "extern \"C\" __global__ void __launch_bounds__(128) main_kernel(half* __restrict__ A, half* __restrict__ B, half* __restrict__ C) {\n",
      "  extern __shared__ uchar buf_dyn_shmem[];\n",
      "  half C_reindex_shared_dyn_warp[256];\n",
      "  half A_reindex_shared_dyn_warp[64];\n",
      "  half B_reindex_shared_dyn_warp[32];\n",
      "  for (int var = 0; var < 1; ++var) {\n",
      "\n",
      "    const dim3 blockIdx = rasterization2DColumn(11);\n",
      "    for (int ax1_0_3_init = 0; ax1_0_3_init < 8; ++ax1_0_3_init) {\n",
      "      for (int ax2_0_3_init = 0; ax2_0_3_init < 4; ++ax2_0_3_init) {\n",
      "        for (int i = 0; i < 8; ++i) {\n",
      "C_reindex_shared_dyn_warp[((ax1_0_3_init * 32) + (ax2_0_3_init * 8)) + i] = 0.0;}\n",
      ";\n",
      "      }\n",
      "    }\n",
      "    for (int ax3_0_0 = 0; ax3_0_0 < 512; ++ax3_0_0) {\n",
      "      __syncthreads();\n",
      "      #pragma unroll\n",
      "      for (int ax0_ax1_ax2_fused_2 = 0; ax0_ax1_ax2_fused_2 < 8; ++ax0_ax1_ax2_fused_2) {\n",
      "        *(uint4*)(((half*)buf_dyn_shmem) + (((((((int)threadIdx.y) * 4096) + (((int)threadIdx.z) * 2048)) + (ax0_ax1_ax2_fused_2 * 256)) + ((((int)threadIdx.x) >> 2) * 32)) + (((((int)threadIdx.x) & 3) ^ (((int)threadIdx.x) >> 3)) * 8))) = *(uint4*)(A + (((((((((int)blockIdx.y) * 4194304) + (((int)threadIdx.y) * 2097152)) + (((int)threadIdx.z) * 1048576)) + (ax0_ax1_ax2_fused_2 * 131072)) + ((((int)threadIdx.x) >> 2) * 16384)) + (ax3_0_0 * 32)) + ((((int)threadIdx.x) & 3) * 8)));\n",
      "      }\n",
      "      #pragma unroll\n",
      "      for (int ax0_ax1_ax2_fused_2_1 = 0; ax0_ax1_ax2_fused_2_1 < 4; ++ax0_ax1_ax2_fused_2_1) {\n",
      "        *(uint4*)(((half*)buf_dyn_shmem) + ((((((((int)threadIdx.y) * 2048) + (((int)threadIdx.z) * 1024)) + (ax0_ax1_ax2_fused_2_1 * 256)) + ((((int)threadIdx.x) >> 2) * 32)) + (((((int)threadIdx.x) & 3) ^ (((int)threadIdx.x) >> 3)) * 8)) + 8192)) = *(uint4*)(B + (((((((((int)blockIdx.x) * 2097152) + (((int)threadIdx.y) * 1048576)) + (((int)threadIdx.z) * 524288)) + (ax0_ax1_ax2_fused_2_1 * 131072)) + ((((int)threadIdx.x) >> 2) * 16384)) + (ax3_0_0 * 32)) + ((((int)threadIdx.x) & 3) * 8)));\n",
      "      }\n",
      "      __syncthreads();\n",
      "      for (int ax3_0_1 = 0; ax3_0_1 < 2; ++ax3_0_1) {\n",
      "        for (int ax0_0 = 0; ax0_0 < 8; ++ax0_0) {\n",
      "\n",
      "  {\n",
      "    unsigned int addr;\n",
      "#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST\n",
      "    addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)((&(((half*)buf_dyn_shmem)[((((((int)threadIdx.y) * 4096) + (ax0_0 * 512)) + ((((int)threadIdx.x) & 15) * 32)) + ((((ax3_0_1 * 2) + (((int)threadIdx.x) >> 4)) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))])) + 0)));\n",
      "#else\n",
      "    __asm__ __volatile__(\n",
      "      \"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\\n\"\n",
      "      : \"=r\"(addr)\n",
      "      : \"l\"((void *)((&(((half*)buf_dyn_shmem)[((((((int)threadIdx.y) * 4096) + (ax0_0 * 512)) + ((((int)threadIdx.x) & 15) * 32)) + ((((ax3_0_1 * 2) + (((int)threadIdx.x) >> 4)) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))])) + 0))\n",
      "    );\n",
      "#endif\n",
      "    __asm__ __volatile__(\n",
      "      \"ldmatrix.sync.aligned.m8n8.x4.shared.b16\"\n",
      "      \"{%0, %1, %2, %3}, [%4];\\n\"\n",
      "      : \"=r\"(((unsigned *)(A_reindex_shared_dyn_warp + (ax0_0 * 8)))[0]), \"=r\"(((unsigned *)(A_reindex_shared_dyn_warp + (ax0_0 * 8)))[1]), \"=r\"(((unsigned *)(A_reindex_shared_dyn_warp + (ax0_0 * 8)))[2]), \"=r\"(((unsigned *)(A_reindex_shared_dyn_warp + (ax0_0 * 8)))[3])\n",
      "      : \"r\"(addr)\n",
      "    );\n",
      "  }\n",
      "        }\n",
      "        for (int ax0_0_1 = 0; ax0_0_1 < 4; ++ax0_0_1) {\n",
      "\n",
      "  {\n",
      "    unsigned int addr;\n",
      "#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST\n",
      "    addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)((&(((half*)buf_dyn_shmem)[((((((((int)threadIdx.z) * 2048) + (ax0_0_1 * 512)) + ((((int)threadIdx.x) >> 4) * 256)) + ((((int)threadIdx.x) & 7) * 32)) + ((((ax3_0_1 * 2) + ((((int)threadIdx.x) & 15) >> 3)) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8)) + 8192)])) + 0)));\n",
      "#else\n",
      "    __asm__ __volatile__(\n",
      "      \"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\\n\"\n",
      "      : \"=r\"(addr)\n",
      "      : \"l\"((void *)((&(((half*)buf_dyn_shmem)[((((((((int)threadIdx.z) * 2048) + (ax0_0_1 * 512)) + ((((int)threadIdx.x) >> 4) * 256)) + ((((int)threadIdx.x) & 7) * 32)) + ((((ax3_0_1 * 2) + ((((int)threadIdx.x) & 15) >> 3)) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8)) + 8192)])) + 0))\n",
      "    );\n",
      "#endif\n",
      "    __asm__ __volatile__(\n",
      "      \"ldmatrix.sync.aligned.m8n8.x4.shared.b16\"\n",
      "      \"{%0, %1, %2, %3}, [%4];\\n\"\n",
      "      : \"=r\"(((unsigned *)(B_reindex_shared_dyn_warp + (ax0_0_1 * 8)))[0]), \"=r\"(((unsigned *)(B_reindex_shared_dyn_warp + (ax0_0_1 * 8)))[1]), \"=r\"(((unsigned *)(B_reindex_shared_dyn_warp + (ax0_0_1 * 8)))[2]), \"=r\"(((unsigned *)(B_reindex_shared_dyn_warp + (ax0_0_1 * 8)))[3])\n",
      "      : \"r\"(addr)\n",
      "    );\n",
      "  }\n",
      "        }\n",
      "        for (int ax1_0_3 = 0; ax1_0_3 < 8; ++ax1_0_3) {\n",
      "          for (int ax2_0_3 = 0; ax2_0_3 < 4; ++ax2_0_3) {\n",
      "\n",
      "  {\n",
      "    __asm__ __volatile__(\n",
      "      \"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16\"\n",
      "      \"{%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%8, %9};\\n\"\n",
      "      :  \"=r\"(((unsigned *)(C_reindex_shared_dyn_warp + ((ax1_0_3 * 32) + (ax2_0_3 * 8))))[0]), \"=r\"(((unsigned *)(C_reindex_shared_dyn_warp + ((ax1_0_3 * 32) + (ax2_0_3 * 8))))[1])\n",
      "      : \"r\"(((unsigned *)((half*)A_reindex_shared_dyn_warp + (ax1_0_3 * 8)))[0]), \"r\"(((unsigned *)((half*)A_reindex_shared_dyn_warp + (ax1_0_3 * 8)))[1]), \"r\"(((unsigned *)((half*)A_reindex_shared_dyn_warp + (ax1_0_3 * 8)))[2]), \"r\"(((unsigned *)((half*)A_reindex_shared_dyn_warp + (ax1_0_3 * 8)))[3]), \"r\"(((unsigned *)((half*)B_reindex_shared_dyn_warp + (ax2_0_3 * 8)))[0]), \"r\"(((unsigned *)((half*)B_reindex_shared_dyn_warp + (ax2_0_3 * 8)))[1]), \"r\"(((unsigned *)(C_reindex_shared_dyn_warp + ((ax1_0_3 * 32) + (ax2_0_3 * 8))))[0]), \"r\"(((unsigned *)(C_reindex_shared_dyn_warp + ((ax1_0_3 * 32) + (ax2_0_3 * 8))))[1]));\n",
      "  }\n",
      "\n",
      "  {\n",
      "    __asm__ __volatile__(\n",
      "      \"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16\"\n",
      "      \"{%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%8, %9};\\n\"\n",
      "      :  \"=r\"(((unsigned *)(C_reindex_shared_dyn_warp + (((ax1_0_3 * 32) + (ax2_0_3 * 8)) + 4)))[0]), \"=r\"(((unsigned *)(C_reindex_shared_dyn_warp + (((ax1_0_3 * 32) + (ax2_0_3 * 8)) + 4)))[1])\n",
      "      : \"r\"(((unsigned *)((half*)A_reindex_shared_dyn_warp + (ax1_0_3 * 8)))[0]), \"r\"(((unsigned *)((half*)A_reindex_shared_dyn_warp + (ax1_0_3 * 8)))[1]), \"r\"(((unsigned *)((half*)A_reindex_shared_dyn_warp + (ax1_0_3 * 8)))[2]), \"r\"(((unsigned *)((half*)A_reindex_shared_dyn_warp + (ax1_0_3 * 8)))[3]), \"r\"(((unsigned *)((half*)B_reindex_shared_dyn_warp + ((ax2_0_3 * 8) + 4)))[0]), \"r\"(((unsigned *)((half*)B_reindex_shared_dyn_warp + ((ax2_0_3 * 8) + 4)))[1]), \"r\"(((unsigned *)(C_reindex_shared_dyn_warp + (((ax1_0_3 * 32) + (ax2_0_3 * 8)) + 4)))[0]), \"r\"(((unsigned *)(C_reindex_shared_dyn_warp + (((ax1_0_3 * 32) + (ax2_0_3 * 8)) + 4)))[1]));\n",
      "  }\n",
      "          }\n",
      "        }\n",
      "      }\n",
      "    }\n",
      "    for (int ax0 = 0; ax0 < 8; ++ax0) {\n",
      "      __syncthreads();\n",
      "      for (int ax1 = 0; ax1 < 4; ++ax1) {\n",
      "        for (int local_id = 0; local_id < 8; local_id+=2) {\n",
      "*((uint *)&(&(((half*)buf_dyn_shmem)[(((((int)threadIdx.y) * 16384) + (((int)threadIdx.z) * 1024)) + (ax1 * 256))]))[((((((local_id % 4) / 2) * 8) + (threadIdx.x / 4)) * 16) + ((((local_id / 4) * 8) + ((threadIdx.x % 4) * 2)) + (local_id % 2)))]) = *((uint *)&C_reindex_shared_dyn_warp[((ax0 * 32) + (ax1 * 8)) + local_id]);\n",
      "}\n",
      ";\n",
      "      }\n",
      "      __syncthreads();\n",
      "      #pragma unroll\n",
      "      for (int ax0_ax1_ax2_ax3_ax4_fused_0 = 0; ax0_ax1_ax2_ax3_ax4_fused_0 < 4; ++ax0_ax1_ax2_ax3_ax4_fused_0) {\n",
      "        *(uint4*)(C + ((((((((((int)blockIdx.y) * 4194304) + (((int)threadIdx.y) * 2097152)) + (ax0 * 262144)) + ((((int)threadIdx.x) >> 1) * 16384)) + (((int)blockIdx.x) * 128)) + (((int)threadIdx.z) * 64)) + (ax0_ax1_ax2_ax3_ax4_fused_0 * 16)) + ((((int)threadIdx.x) & 1) * 8))) = *(uint4*)(((half*)buf_dyn_shmem) + ((((((int)threadIdx.y) * 16384) + (((int)threadIdx.z) * 1024)) + (ax0_ax1_ax2_ax3_ax4_fused_0 * 256)) + (((int)threadIdx.x) * 8)));\n",
      "      }\n",
      "    }\n",
      "  }\n",
      "}\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Inspect the CUDA source generated for the best-performing candidate\n",
    "print(best.code)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
