{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "ff8f3122",
   "metadata": {},
   "source": [
    "# Group Conv2D"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "56d4f1ae",
   "metadata": {
    "tags": [
     "remove-cell"
    ]
   },
   "outputs": [],
   "source": [
    "import set_env"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "5d09ffd7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "GroupConv2DWorkload(batch=1, height=112, width=112, in_filter=32, out_filter=32, groups=2, hkernel=3, wkernel=3, hpad=1, wpad=1, hstride=1, wstride=1)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-07-27 21:31:35.460 INFO load_module /tmp/tmparv7xupm/conv2d.o\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU GROUP CONV2D TEST PASSED: Time cost = 0.0219721 sec/op, 5.26148 GOPS\n",
      "GroupConv2DWorkload(batch=1, height=112, width=112, in_filter=64, out_filter=64, groups=4, hkernel=3, wkernel=3, hpad=1, wpad=1, hstride=2, wstride=2)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-07-27 21:31:36.475 INFO load_module /tmp/tmparv7xupm/conv2d.o\n"
     ]
    },
    {
     "ename": "TVMError",
     "evalue": "Traceback (most recent call last):\n  4: tvm::runtime::RPCWrappedFunc::operator()(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const\n  3: tvm::runtime::LocalSession::CallFunc(void*, TVMValue const*, int const*, int, std::function<void (tvm::runtime::TVMArgs)> const&)\n  2: tvm::runtime::profiling::WrapTimeEvaluator(tvm::runtime::PackedFunc, DLDevice, int, int, int, int, int, int, int, tvm::runtime::PackedFunc)::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#1}::operator()(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) [clone .isra.0]\n  1: tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::runtime::WrapPackedFunc(int (*)(TVMValue*, int*, int, TVMValue*, int*, void*), tvm::runtime::ObjectPtr<tvm::runtime::Object> const&)::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#1}> >::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)\n  0: TVMThrowLastError.cold\nTVMError: Assert fail: T.Cast(\"int32\", conv2d_data_shape[1]) == 32, Argument conv2d.data.shape[1] has an unsatisfied constraint: 32 == T.Cast(\"int32\", conv2d_data_shape[1])",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mTVMError\u001b[0m                                  Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[2], line 295\u001b[0m\n\u001b[1;32m    291\u001b[0m     vta\u001b[38;5;241m.\u001b[39mtesting\u001b[38;5;241m.\u001b[39mrun(_run)\n\u001b[1;32m    294\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;18m__name__\u001b[39m \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m__main__\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[0;32m--> 295\u001b[0m     test_conv2d(device\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124marm_cpu\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m    296\u001b[0m     test_conv2d(device\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mvta\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
      "Cell \u001b[0;32mIn[2], line 291\u001b[0m, in \u001b[0;36mtest_conv2d\u001b[0;34m(device)\u001b[0m\n\u001b[1;32m    288\u001b[0m             \u001b[38;5;28mprint\u001b[39m(wl)\n\u001b[1;32m    289\u001b[0m             run_group_conv2d(env, remote, wl, target)\n\u001b[0;32m--> 291\u001b[0m vta\u001b[38;5;241m.\u001b[39mtesting\u001b[38;5;241m.\u001b[39mrun(_run)\n",
      "File \u001b[0;32m/media/pc/data/board/arria10/lxw/tasks/tvm-new/vta/python/vta/testing/utils.py:53\u001b[0m, in \u001b[0;36mrun\u001b[0;34m(run_func)\u001b[0m\n\u001b[1;32m     51\u001b[0m         \u001b[38;5;28;01mif\u001b[39;00m env\u001b[38;5;241m.\u001b[39mTARGET \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msim\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m     52\u001b[0m             \u001b[38;5;28;01massert\u001b[39;00m simulator\u001b[38;5;241m.\u001b[39menabled()\n\u001b[0;32m---> 53\u001b[0m         run_func(env, rpc\u001b[38;5;241m.\u001b[39mLocalSession())\n\u001b[1;32m     55\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m env\u001b[38;5;241m.\u001b[39mTARGET \u001b[38;5;129;01min\u001b[39;00m [\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpynq\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124multra96\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mde10nano\u001b[39m\u001b[38;5;124m\"\u001b[39m]:\n\u001b[1;32m     56\u001b[0m     \u001b[38;5;66;03m# The environment variables below should be set if we are using\u001b[39;00m\n\u001b[1;32m     57\u001b[0m     \u001b[38;5;66;03m# a tracker to obtain a remote for a test device\u001b[39;00m\n\u001b[1;32m     58\u001b[0m     tracker_host \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39menviron\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTVM_TRACKER_HOST\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m)\n",
      "Cell \u001b[0;32mIn[2], line 289\u001b[0m, in \u001b[0;36mtest_conv2d.<locals>._run\u001b[0;34m(env, remote)\u001b[0m\n\u001b[1;32m    287\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m _, wl \u001b[38;5;129;01min\u001b[39;00m mobilenet_wkls:\n\u001b[1;32m    288\u001b[0m     \u001b[38;5;28mprint\u001b[39m(wl)\n\u001b[0;32m--> 289\u001b[0m     run_group_conv2d(env, remote, wl, target)\n",
      "Cell \u001b[0;32mIn[2], line 241\u001b[0m, in \u001b[0;36mrun_group_conv2d\u001b[0;34m(env, remote, wl, target, check_correctness, print_ir, samples)\u001b[0m\n\u001b[1;32m    239\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m    240\u001b[0m         simulator\u001b[38;5;241m.\u001b[39mclear_stats()\n\u001b[0;32m--> 241\u001b[0m         cost \u001b[38;5;241m=\u001b[39m time_f(data_arr, kernel_arr, bias_arr, res_arr)\n\u001b[1;32m    242\u001b[0m         stats \u001b[38;5;241m=\u001b[39m simulator\u001b[38;5;241m.\u001b[39mstats()\n\u001b[1;32m    243\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n",
      "File \u001b[0;32m/media/pc/data/board/arria10/lxw/tasks/tvm-new/python/tvm/runtime/module.py:405\u001b[0m, in \u001b[0;36mModule.time_evaluator.<locals>.evaluator\u001b[0;34m(*args)\u001b[0m\n\u001b[1;32m    403\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Internal wrapped evaluator.\"\"\"\u001b[39;00m\n\u001b[1;32m    404\u001b[0m \u001b[38;5;66;03m# Wrap feval so we can add more stats in future.\u001b[39;00m\n\u001b[0;32m--> 405\u001b[0m blob \u001b[38;5;241m=\u001b[39m feval(\u001b[38;5;241m*\u001b[39margs)\n\u001b[1;32m    406\u001b[0m fmt \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m@\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m (\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124md\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m*\u001b[39m repeat)\n\u001b[1;32m    407\u001b[0m results \u001b[38;5;241m=\u001b[39m struct\u001b[38;5;241m.\u001b[39munpack(fmt, blob)\n",
      "File \u001b[0;32m/media/pc/data/board/arria10/lxw/tasks/tvm-new/python/tvm/_ffi/_ctypes/packed_func.py:245\u001b[0m, in \u001b[0;36mPackedFuncBase.__call__\u001b[0;34m(self, *args)\u001b[0m\n\u001b[1;32m    233\u001b[0m ret_tcode \u001b[38;5;241m=\u001b[39m ctypes\u001b[38;5;241m.\u001b[39mc_int()\n\u001b[1;32m    234\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\n\u001b[1;32m    235\u001b[0m     _LIB\u001b[38;5;241m.\u001b[39mTVMFuncCall(\n\u001b[1;32m    236\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle,\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    243\u001b[0m     \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[1;32m    244\u001b[0m ):\n\u001b[0;32m--> 245\u001b[0m     raise_last_ffi_error()\n\u001b[1;32m    246\u001b[0m _ \u001b[38;5;241m=\u001b[39m temp_args\n\u001b[1;32m    247\u001b[0m _ \u001b[38;5;241m=\u001b[39m args\n",
      "File \u001b[0;32m/media/pc/data/board/arria10/lxw/tasks/tvm-new/python/tvm/_ffi/base.py:481\u001b[0m, in \u001b[0;36mraise_last_ffi_error\u001b[0;34m()\u001b[0m\n\u001b[1;32m    475\u001b[0m \u001b[38;5;66;03m# The exception PyObject may contain a large amount of state,\u001b[39;00m\n\u001b[1;32m    476\u001b[0m \u001b[38;5;66;03m# including all stack frames that may be inspected in a later\u001b[39;00m\n\u001b[1;32m    477\u001b[0m \u001b[38;5;66;03m# PDB post-mortem.  Therefore, we must make sure to remove the\u001b[39;00m\n\u001b[1;32m    478\u001b[0m \u001b[38;5;66;03m# underlying PyObject* from the C++ side after we retrieve it.\u001b[39;00m\n\u001b[1;32m    479\u001b[0m _LIB\u001b[38;5;241m.\u001b[39mTVMDropLastPythonError()\n\u001b[0;32m--> 481\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m py_err\n",
      "\u001b[0;31mTVMError\u001b[0m: Traceback (most recent call last):\n  4: tvm::runtime::RPCWrappedFunc::operator()(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const\n  3: tvm::runtime::LocalSession::CallFunc(void*, TVMValue const*, int const*, int, std::function<void (tvm::runtime::TVMArgs)> const&)\n  2: tvm::runtime::profiling::WrapTimeEvaluator(tvm::runtime::PackedFunc, DLDevice, int, int, int, int, int, int, int, tvm::runtime::PackedFunc)::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#1}::operator()(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) [clone .isra.0]\n  1: tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::runtime::WrapPackedFunc(int (*)(TVMValue*, int*, int, TVMValue*, int*, void*), tvm::runtime::ObjectPtr<tvm::runtime::Object> const&)::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#1}> >::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)\n  0: TVMThrowLastError.cold\nTVMError: Assert fail: T.Cast(\"int32\", conv2d_data_shape[1]) == 32, Argument conv2d.data.shape[1] has an unsatisfied constraint: 32 == T.Cast(\"int32\", conv2d_data_shape[1])"
     ]
    }
   ],
   "source": [
    "import json\n",
    "import os\n",
    "\n",
    "import pytest\n",
    "import numpy as np\n",
    "from collections import namedtuple\n",
    "\n",
    "import tvm\n",
    "from tvm import te\n",
    "from tvm import relay\n",
    "from tvm import autotvm\n",
    "from tvm.contrib import utils\n",
    "from tvm import topi\n",
    "import tvm.topi.testing\n",
    "import vta\n",
    "from vta import program_fpga, reconfig_runtime\n",
    "import vta.testing\n",
    "from vta.testing import simulator\n",
    "\n",
    "\n",
    "# Descriptor for one grouped conv2d layer in NCHW layout: input spatial size,\n",
    "# channel counts, group count, kernel size, padding and strides.\n",
    "Workload = namedtuple(\n",
    "    \"GroupConv2DWorkload\",\n",
    "    [\n",
    "        \"batch\",\n",
    "        \"height\",\n",
    "        \"width\",\n",
    "        \"in_filter\",\n",
    "        \"out_filter\",\n",
    "        \"groups\",\n",
    "        \"hkernel\",\n",
    "        \"wkernel\",\n",
    "        \"hpad\",\n",
    "        \"wpad\",\n",
    "        \"hstride\",\n",
    "        \"wstride\",\n",
    "    ],\n",
    ")\n",
    "\n",
    "# Get batch info from env\n",
    "env = vta.get_env()\n",
    "\n",
    "# Mobilenet (grouped variant) workloads\n",
    "# Field order: (batch, H, W, C_in, C_out, groups, kH, kW, padH, padW, strideH, strideW)\n",
    "mobilenet_wkls = [\n",
    "    (\"mobilenet.D1\", Workload(env.BATCH, 112, 112, 32, 32, 2, 3, 3, 1, 1, 1, 1)),\n",
    "    (\"mobilenet.D2\", Workload(env.BATCH, 112, 112, 64, 64, 4, 3, 3, 1, 1, 2, 2)),\n",
    "    (\"mobilenet.D3\", Workload(env.BATCH, 56, 56, 128, 128, 8, 3, 3, 1, 1, 1, 1)),\n",
    "    (\"mobilenet.D4\", Workload(env.BATCH, 56, 56, 128, 128, 8, 3, 3, 1, 1, 2, 2)),\n",
    "    (\"mobilenet.D5\", Workload(env.BATCH, 28, 28, 256, 256, 16, 3, 3, 1, 1, 1, 1)),\n",
    "    (\"mobilenet.D6\", Workload(env.BATCH, 28, 28, 256, 256, 16, 3, 3, 1, 1, 2, 2)),\n",
    "    (\"mobilenet.D7\", Workload(env.BATCH, 14, 14, 512, 512, 32, 3, 3, 1, 1, 1, 1)),\n",
    "    (\"mobilenet.D8\", Workload(env.BATCH, 14, 14, 512, 512, 32, 3, 3, 1, 1, 2, 2)),\n",
    "    (\"mobilenet.D9\", Workload(env.BATCH, 7, 7, 1024, 1024, 64, 3, 3, 1, 1, 1, 1)),\n",
    "]\n",
    "\n",
    "# FIXME: we need a custom clip operator to circumvent a pattern detection limitation\n",
    "@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)\n",
    "def my_clip(x, a_min, a_max):\n",
    "    \"\"\"Clip ``x`` to [a_min, a_max] as two separate elementwise stages.\n",
    "\n",
    "    Unlike topi's current clip, the upper and lower bounds are applied in\n",
    "    two distinct compute stages ('clipA' then 'clipB').\n",
    "    \"\"\"\n",
    "    upper = tvm.tir.const(a_max, x.dtype)\n",
    "    lower = tvm.tir.const(a_min, x.dtype)\n",
    "    # Stage 1 clamps from above, stage 2 clamps from below.\n",
    "    clipped_above = te.compute(x.shape, lambda *idx: tvm.te.min(x(*idx), upper), name=\"clipA\")\n",
    "    return te.compute(clipped_above.shape, lambda *idx: tvm.te.max(clipped_above(*idx), lower), name=\"clipB\")\n",
    "\n",
    "\n",
    "def run_group_conv2d(env, remote, wl, target, check_correctness=True, print_ir=False, samples=4):\n",
    "    \"\"\"Build, run, and optionally verify one grouped conv2d workload.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    env : vta.Environment\n",
    "        VTA environment (BATCH/BLOCK sizes, data types, TARGET).\n",
    "    remote : tvm.rpc.RPCSession\n",
    "        Session used to upload, load, and time the compiled module.\n",
    "    wl : Workload\n",
    "        GroupConv2DWorkload namedtuple describing the layer.\n",
    "    target : tvm.target.Target\n",
    "        Either the arm_cpu baseline target or the vta accelerator target.\n",
    "    check_correctness : bool\n",
    "        Compare the device output against a numpy reference when True.\n",
    "    print_ir : bool\n",
    "        Print the lowered TIR of the schedule when True.\n",
    "    samples : int\n",
    "        Number of runs used by the time evaluator.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    correct : bool\n",
    "        True when the device result matches the reference (always False\n",
    "        when check_correctness is disabled).\n",
    "    cost : tvm.runtime.BenchmarkResult\n",
    "        Timing statistics from the time evaluator.\n",
    "    stats : dict\n",
    "        Simulator statistics (empty outside the sim/tsim targets).\n",
    "    \"\"\"\n",
    "    # Workload assertions\n",
    "    assert wl.hpad == wl.wpad\n",
    "\n",
    "    # Perform packing only if we are targeting the accelerator\n",
    "    if \"arm_cpu\" in target.keys:\n",
    "        data_pack = False\n",
    "        layout = \"NCHW\"\n",
    "        fcompute = topi.nn.group_conv2d_nchw\n",
    "        fschedule = topi.generic.schedule_group_conv2d_nchw\n",
    "    elif \"vta\" in target.keys:\n",
    "        data_pack = True\n",
    "        layout = \"NCHW%dn%dc\" % (env.BATCH, env.BLOCK_IN)\n",
    "        fcompute = vta.top.group_conv2d_packed\n",
    "        fschedule = vta.top.schedule_group_conv2d_packed\n",
    "\n",
    "    # Derive shapes depending upon packing\n",
    "    CI_G = wl.in_filter // wl.groups\n",
    "    a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)\n",
    "    w_shape = (wl.out_filter, CI_G, wl.hkernel, wl.wkernel)\n",
    "    b_shape = (wl.batch, wl.out_filter, 1, 1)\n",
    "    if data_pack:\n",
    "        data_shape = (\n",
    "            wl.batch // env.BATCH,\n",
    "            wl.in_filter // env.BLOCK_IN,\n",
    "            wl.height,\n",
    "            wl.width,\n",
    "            env.BATCH,\n",
    "            env.BLOCK_IN,\n",
    "        )\n",
    "        kernel_shape = (\n",
    "            wl.out_filter // env.BLOCK_OUT,\n",
    "            CI_G // env.BLOCK_IN,\n",
    "            wl.hkernel,\n",
    "            wl.wkernel,\n",
    "            env.BLOCK_OUT,\n",
    "            env.BLOCK_IN,\n",
    "        )\n",
    "        bias_shape = (\n",
    "            wl.batch // env.BATCH,\n",
    "            wl.out_filter // env.BLOCK_OUT,\n",
    "            1,\n",
    "            1,\n",
    "            env.BATCH,\n",
    "            env.BLOCK_OUT,\n",
    "        )\n",
    "    else:\n",
    "        data_shape = a_shape\n",
    "        kernel_shape = w_shape\n",
    "        bias_shape = b_shape\n",
    "    data = te.placeholder(data_shape, name=\"data\", dtype=env.inp_dtype)\n",
    "    kernel = te.placeholder(kernel_shape, name=\"kernel\", dtype=env.wgt_dtype)\n",
    "    bias = te.placeholder(bias_shape, name=\"bias\", dtype=env.acc_dtype)\n",
    "    padding = relay.nn.get_pad_tuple2d((wl.hpad, wl.wpad))\n",
    "\n",
    "    # Define base computation schedule\n",
    "    with target:\n",
    "        res = fcompute(\n",
    "            data, kernel, (wl.hstride, wl.wstride), padding, (1, 1), wl.groups, env.acc_dtype\n",
    "        )\n",
    "        res = topi.right_shift(res, 8)\n",
    "        res = topi.add(res, bias)\n",
    "        res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)\n",
    "        res = topi.cast(res, env.out_dtype)\n",
    "        # Derive base schedule\n",
    "        s = fschedule([res])\n",
    "        if print_ir:\n",
    "            print(vta.lower(s, [data, kernel, bias, res], simple_mode=True))\n",
    "\n",
    "    # Derive number of ops\n",
    "    fout_height = (wl.height + 2 * wl.hpad - wl.hkernel) // wl.hstride + 1\n",
    "    fout_width = (wl.width + 2 * wl.wpad - wl.wkernel) // wl.wstride + 1\n",
    "    num_ops = (\n",
    "        2\n",
    "        * wl.batch\n",
    "        * fout_height\n",
    "        * fout_width\n",
    "        * wl.hkernel\n",
    "        * wl.wkernel\n",
    "        * wl.out_filter\n",
    "        * wl.in_filter\n",
    "        // wl.groups\n",
    "    )\n",
    "\n",
    "    def get_ref_data():\n",
    "        # derive min max for act, wgt, and bias types (max non inclusive)\n",
    "        a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))\n",
    "        w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))\n",
    "        b_min, b_max = 0 - 1 << (env.INP_WIDTH + env.WGT_WIDTH - 2), 1 << (\n",
    "            env.INP_WIDTH + env.WGT_WIDTH - 2\n",
    "        )\n",
    "        a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)\n",
    "        w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)\n",
    "        b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype)\n",
    "        r_np = tvm.topi.testing.conv2d_nchw_python(\n",
    "            a_np.astype(env.acc_dtype),\n",
    "            w_np.astype(env.acc_dtype),\n",
    "            (wl.hstride, wl.wstride),\n",
    "            wl.hpad,\n",
    "            wl.groups,\n",
    "        ).astype(env.acc_dtype)\n",
    "        return a_np, w_np, b_np, r_np\n",
    "\n",
    "    # Data in original format\n",
    "    data_np, kernel_np, bias_np, res_ref = get_ref_data()\n",
    "    if data_pack:\n",
    "        data_np = data_np.reshape(\n",
    "            wl.batch // env.BATCH,\n",
    "            env.BATCH,\n",
    "            wl.in_filter // env.BLOCK_IN,\n",
    "            env.BLOCK_IN,\n",
    "            wl.height,\n",
    "            wl.width,\n",
    "        ).transpose((0, 2, 4, 5, 1, 3))\n",
    "        kernel_np = kernel_np.reshape(\n",
    "            wl.out_filter // env.BLOCK_OUT,\n",
    "            env.BLOCK_OUT,\n",
    "            CI_G // env.BLOCK_IN,\n",
    "            env.BLOCK_IN,\n",
    "            wl.hkernel,\n",
    "            wl.wkernel,\n",
    "        ).transpose((0, 2, 4, 5, 1, 3))\n",
    "        bias_np = bias_np.reshape(\n",
    "            wl.batch // env.BATCH, wl.out_filter // env.BLOCK_OUT, 1, 1, env.BATCH, env.BLOCK_OUT\n",
    "        )\n",
    "\n",
    "    # Build\n",
    "    if \"vta\" in target.keys:\n",
    "        with vta.build_config(disabled_pass={\"tir.CommonSubexprElimTIR\"}):\n",
    "            mod = vta.build(\n",
    "                s,\n",
    "                [data, kernel, bias, res],\n",
    "                target=tvm.target.Target(target, host=env.target_host),\n",
    "                name=\"conv2d\",\n",
    "            )\n",
    "    else:\n",
    "        mod = tvm.build(\n",
    "            s,\n",
    "            [data, kernel, bias, res],\n",
    "            target=tvm.target.Target(target, host=env.target_host),\n",
    "            name=\"conv2d\",\n",
    "        )\n",
    "    temp = utils.tempdir()\n",
    "    # Use a per-workload module file name: dlopen caches shared objects by\n",
    "    # path, so re-uploading under a fixed name ('conv2d.o') can silently run\n",
    "    # the stale kernel built for the previous workload's shapes, which is the\n",
    "    # shape-constraint assert observed on the second workload.\n",
    "    mod_name = \"conv2d_h%d_c%d_o%d_g%d_s%d.o\" % (\n",
    "        wl.height, wl.in_filter, wl.out_filter, wl.groups, wl.hstride\n",
    "    )\n",
    "    mod.save(temp.relpath(mod_name))\n",
    "    remote.upload(temp.relpath(mod_name))\n",
    "    f = remote.load_module(mod_name)\n",
    "    dev = remote.device(str(target))\n",
    "\n",
    "    res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)\n",
    "    data_arr = tvm.nd.array(data_np, dev)\n",
    "    kernel_arr = tvm.nd.array(kernel_np, dev)\n",
    "    bias_arr = tvm.nd.array(bias_np, dev)\n",
    "    res_arr = tvm.nd.array(res_np, dev)\n",
    "    time_f = f.time_evaluator(\"conv2d\", dev, number=samples)\n",
    "\n",
    "    # In vta sim mode, collect simulator runtime statistics\n",
    "    stats = {}\n",
    "    cost = None\n",
    "    if env.TARGET in [\"sim\", \"tsim\"]:\n",
    "        # Check if we're in local RPC mode (allows us to rebuild the\n",
    "        # runtime on the fly when varying the VTA designs)\n",
    "        local_rpc = int(os.environ.get(\"VTA_LOCAL_SIM_RPC\", \"0\"))\n",
    "        if local_rpc:\n",
    "            if env.TARGET == \"sim\":\n",
    "                remote.get_function(\"vta.simulator.profiler_clear\")()\n",
    "            else:\n",
    "                remote.get_function(\"vta.tsim.profiler_clear\")()\n",
    "            cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)\n",
    "            if env.TARGET == \"sim\":\n",
    "                stats = json.loads(remote.get_function(\"vta.simulator.profiler_status\")())\n",
    "            else:\n",
    "                stats = json.loads(remote.get_function(\"vta.tsim.profiler_status\")())\n",
    "        else:\n",
    "            simulator.clear_stats()\n",
    "            cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)\n",
    "            stats = simulator.stats()\n",
    "    else:\n",
    "        cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)\n",
    "\n",
    "    # Check correctness\n",
    "    correct = False\n",
    "    if check_correctness:\n",
    "        res_orig = res_arr.numpy()\n",
    "        if data_pack:\n",
    "            res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape(\n",
    "                wl.batch, wl.out_filter, fout_height, fout_width\n",
    "            )\n",
    "            bias_np = bias_np.transpose((0, 4, 1, 5, 2, 3)).reshape(wl.batch, wl.out_filter, 1, 1)\n",
    "        # Mirror the device-side post-ops on the reference: shift, bias add, clip, cast.\n",
    "        res_ref = res_ref >> env.WGT_WIDTH\n",
    "        res_ref += bias_np\n",
    "        res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)\n",
    "        res_ref = res_ref.astype(env.out_dtype)\n",
    "        correct = np.allclose(res_orig, res_ref)\n",
    "\n",
    "    gops = (num_ops / cost.mean) / float(10**9)\n",
    "    status = \"PASSED\" if correct else \"FAILED\"\n",
    "    if \"arm_cpu\" in target.keys:\n",
    "        device = \"CPU\"\n",
    "    elif \"vta\" in target.keys:\n",
    "        device = \"VTA\"\n",
    "    print(\n",
    "        \"%s GROUP CONV2D TEST %s: Time cost = %g sec/op, %g GOPS\"\n",
    "        % (device, status, cost.mean, gops)\n",
    "    )\n",
    "\n",
    "    return correct, cost, stats\n",
    "\n",
    "\n",
    "@pytest.mark.parametrize(\"device\", [\"vta\", \"arm_cpu\"])\n",
    "def test_conv2d(device):\n",
    "    \"\"\"Run every grouped conv2d workload on the requested device.\"\"\"\n",
    "\n",
    "    def _run(env, remote):\n",
    "        # Select the compilation target; real FPGA boards are programmed first.\n",
    "        if device == \"arm_cpu\":\n",
    "            target = env.target_vta_cpu\n",
    "        elif device == \"vta\":\n",
    "            target = env.target\n",
    "            if env.TARGET not in [\"sim\", \"tsim\"]:\n",
    "                assert tvm.runtime.enabled(\"rpc\")\n",
    "                program_fpga(remote, bitstream=None)\n",
    "                reconfig_runtime(remote)\n",
    "        with autotvm.tophub.context(target):  # load pre-tuned schedule parameters\n",
    "            for _, workload in mobilenet_wkls:\n",
    "                print(workload)\n",
    "                run_group_conv2d(env, remote, workload, target)\n",
    "\n",
    "    vta.testing.run(_run)\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Script entry: benchmark the CPU baseline first, then the VTA accelerator.\n",
    "    test_conv2d(device=\"arm_cpu\")\n",
    "    test_conv2d(device=\"vta\")\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
