{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# graph executor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this cell read 'import testing', which matches no module this\n",
    "# notebook uses; the rest of the file relies on tvm.testing, imported here.\n",
    "import tvm.testing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "import tvm\n",
    "import json\n",
    "from tvm import relay\n",
    "from tvm.contrib import graph_executor\n",
    "from tvm.relay.op import add\n",
    "import tvm.testing\n",
    "from tvm.relay.testing import mlp\n",
    "from tvm import rpc\n",
    "from tvm.contrib import utils\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @tq, @jr should we put this in testing ns?\n",
    "def check_rts(expr, args, expected_result, mod=None):\n",
    "    \"\"\"\n",
    "    Check that evaluating `expr` applied to the arguments produces\n",
    "    `expected_result` on both the debug evaluator and the graph executor.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    expr:\n",
    "        The expression to evaluate\n",
    "\n",
    "    args: list of Expr\n",
    "        The arguments to supply the expr.\n",
    "\n",
    "    expected_result:\n",
    "        The expected result of running the expression.\n",
    "\n",
    "    mod: tvm.IRModule, optional\n",
    "        Module in which to evaluate the expression.\n",
    "    \"\"\"\n",
    "    # Run the same expression through both executors; they must agree with\n",
    "    # each other and with the caller-supplied expected value.\n",
    "    eval_result = relay.create_executor(\"debug\", mod=mod).evaluate(expr)(*args)\n",
    "    rts_result = relay.create_executor(\"graph\", mod=mod).evaluate(expr)(*args)\n",
    "    tvm.testing.assert_allclose(eval_result.numpy(), rts_result.numpy())\n",
    "    tvm.testing.assert_allclose(eval_result.numpy(), expected_result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from unittest.mock import patch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_add_op_scalar():\n",
    "    \"\"\"\n",
    "    test_add_op_scalar:\n",
    "        fn (x, y) {\n",
    "            return x + y;\n",
    "        }\n",
    "    \"\"\"\n",
    "    # Rank-0 (scalar) inputs; dtype defaults to float32.\n",
    "    lhs = relay.var(\"x\", shape=())\n",
    "    rhs = relay.var(\"y\", shape=())\n",
    "    func = relay.Function([lhs, rhs], add(lhs, rhs))\n",
    "    # Cover ndarray, numpy-scalar, and plain-Python argument styles.\n",
    "    cases = [\n",
    "        (np.array(10.0, dtype=\"float32\"), np.array(1.0, dtype=\"float32\")),\n",
    "        (np.float32(10.0), np.float32(1.0)),\n",
    "        (10.0, 1.0),\n",
    "    ]\n",
    "    for a, b in cases:\n",
    "        check_rts(func, [a, b], a + b)\n",
    "\n",
    "\n",
    "def test_add_op_scalar_int():\n",
    "    \"\"\"\n",
    "    test_add_op_scalar_int:\n",
    "        fn (x, y) {\n",
    "            return x + y;\n",
    "        }\n",
    "    \"\"\"\n",
    "    lhs = relay.var(\"x\", shape=(), dtype=\"int32\")\n",
    "    rhs = relay.var(\"y\", shape=(), dtype=\"int32\")\n",
    "    func = relay.Function([lhs, rhs], add(lhs, rhs))\n",
    "    # Cover ndarray, numpy-scalar, and plain-Python argument styles.\n",
    "    cases = [\n",
    "        (np.array(10.0, dtype=\"int32\"), np.array(1.0, dtype=\"int32\")),\n",
    "        (np.int32(10), np.int32(1)),\n",
    "        (10, 1),\n",
    "    ]\n",
    "    for a, b in cases:\n",
    "        check_rts(func, [a, b], a + b)\n",
    "\n",
    "\n",
    "def test_add_op_tensor():\n",
    "    \"\"\"\n",
    "    Program:\n",
    "        fn (x, y) {\n",
    "            return x + y;\n",
    "        }\n",
    "    \"\"\"\n",
    "    shape = (10, 5)\n",
    "    lhs = relay.var(\"x\", shape=shape)\n",
    "    rhs = relay.var(\"y\", shape=shape)\n",
    "    func = relay.Function([lhs, rhs], add(lhs, rhs))\n",
    "    lhs_data = np.random.rand(*shape).astype(\"float32\")\n",
    "    rhs_data = np.random.rand(*shape).astype(\"float32\")\n",
    "    check_rts(func, [lhs_data, rhs_data], lhs_data + rhs_data)\n",
    "\n",
    "\n",
    "def test_add_op_broadcast():\n",
    "    \"\"\"\n",
    "    Program:\n",
    "        fn (x, y) {\n",
    "            return x + y;\n",
    "        }\n",
    "    \"\"\"\n",
    "    # The (1, 5) operand broadcasts along the first axis of the (10, 5) one.\n",
    "    lhs = relay.var(\"x\", shape=(10, 5))\n",
    "    rhs = relay.var(\"y\", shape=(1, 5))\n",
    "    func = relay.Function([lhs, rhs], add(lhs, rhs))\n",
    "    lhs_data = np.random.rand(10, 5).astype(\"float32\")\n",
    "    rhs_data = np.random.rand(1, 5).astype(\"float32\")\n",
    "    check_rts(func, [lhs_data, rhs_data], lhs_data + rhs_data)\n",
    "\n",
    "\n",
    "def test_with_params():\n",
    "    \"\"\"Bind one input as a build-time parameter and check the executed result.\"\"\"\n",
    "    x = relay.var(\"x\", shape=(10, 5))\n",
    "    y = relay.var(\"y\", shape=(1, 5))\n",
    "    z = relay.add(x, y)\n",
    "    z = relay.exp(z)\n",
    "    func = relay.Function([x, y], z)\n",
    "    x_data = np.random.rand(10, 5).astype(\"float32\")\n",
    "    y_data = np.random.rand(1, 5).astype(\"float32\")\n",
    "    # y is supplied as a parameter at build time; only x is fed at run time.\n",
    "    params = {\"y\": y_data}\n",
    "    graph, lib, params = relay.build(tvm.IRModule.from_expr(func), \"llvm\", params=params)\n",
    "    mod = graph_executor.create(graph, lib, device=tvm.cpu(0))\n",
    "    mod.set_input(**params)\n",
    "    mod.set_input(x=x_data)\n",
    "    mod.run()\n",
    "    res = mod.get_output(0).numpy()\n",
    "    ref_res = np.exp(y_data + x_data)\n",
    "    tvm.testing.assert_allclose(res, ref_res, atol=1e-5, rtol=1e-5)\n",
    "\n",
    "\n",
    "def test_plan_memory():\n",
    "    \"\"\"Check GraphPlanMemory storage reuse over a chain of elementwise ops.\"\"\"\n",
    "    # it is sufficient to cycle through two memories.\n",
    "\n",
    "    x = relay.var(\"x\", shape=(10,))\n",
    "    # NOTE(review): the name hint \"x\" is duplicated here; Relay name hints need\n",
    "    # not be unique, so this is presumably intentional - confirm against upstream.\n",
    "    y = relay.var(\"x\", shape=(1,))\n",
    "    y2 = relay.exp(y)\n",
    "    z = relay.add(x, y2)\n",
    "    z = relay.exp(z)\n",
    "    z = relay.exp(z)\n",
    "    z = relay.exp(z)\n",
    "    z = relay.exp(z)\n",
    "    z = relay.exp(z)\n",
    "    func = relay.Function([x, y], z)\n",
    "    mod = tvm.IRModule.from_expr(func)\n",
    "    mod = relay.transform.InferType()(mod)\n",
    "    # FuseOps at level 0 - presumably so each op stays a separate call and every\n",
    "    # intermediate is visible to the memory planner.\n",
    "    mod = relay.transform.FuseOps(0)(mod)\n",
    "    func = mod[\"main\"]\n",
    "    mod = relay.transform.InferType()(mod)\n",
    "    memory_plan = relay.backend._backend.GraphPlanMemory(func)\n",
    "    storage_ids = set()\n",
    "    device_types = set()\n",
    "    storage_sizes = {}\n",
    "\n",
    "    # Collect the distinct storage ids, device types, and per-id sizes.\n",
    "    for k, v in memory_plan.expr_to_storage_info.items():\n",
    "        for x in v.storage_ids:\n",
    "            storage_ids.add(x)\n",
    "            storage_sizes[x] = v.storage_sizes\n",
    "        for x in v.device_types:\n",
    "            device_types.add(x)\n",
    "\n",
    "    # Current rule requires vars have unique storage id\n",
    "    # because we don't do inplace, we will need another\n",
    "    # two alternating temporary space.\n",
    "    assert len(storage_ids) == 4, f\"found storage_ids: {storage_ids}\"\n",
    "    assert len(device_types) == 1\n",
    "    assert len(storage_sizes) == 4\n",
    "\n",
    "    # Check the specific size of each sid\n",
    "    assert (\n",
    "        storage_sizes[0][0] == 40\n",
    "        and storage_sizes[1][0] == 4\n",
    "        and storage_sizes[2][0] == 4\n",
    "        and storage_sizes[3][0] == 40\n",
    "    )\n",
    "\n",
    "\n",
    "def test_plan_2d_memory():\n",
    "    \"\"\"Verify that GraphPlanMemory manages 2d memory, referred to as\n",
    "    global.texture* memory scopes, in the graph json file.\"\"\"\n",
    "    global_virtual_device = tvm.target.VirtualDevice(memory_scope=\"global\")\n",
    "    texture_virtual_device = tvm.target.VirtualDevice(memory_scope=\"global.texture\")\n",
    "    metatable = {\n",
    "        \"VirtualDevice\": [\n",
    "            global_virtual_device,\n",
    "            texture_virtual_device,\n",
    "        ]\n",
    "    }\n",
    "\n",
    "    mod = tvm.relay.parse(\n",
    "        \"\"\"\n",
    "        #[version = \"0.0.5\"]\n",
    "        def @main(%data1: Tensor[(1, 32, 40, 40), float32],\n",
    "                  %data2: Tensor[(1, 32, 40, 40), float32]) {\n",
    "          %0 = fn (%a, Primitive=1) {\n",
    "            layout_transform(%a, src_layout=\"NCHW\", dst_layout=\"NCHW4c\")\n",
    "          };\n",
    "          %1 = %0(%data1);\n",
    "          %3 = %0(%data2);\n",
    "          %5 = fn (%a {virtual_device=meta[VirtualDevice][0]},  // global\n",
    "                   %b {virtual_device=meta[VirtualDevice][0]},  // global\n",
    "                   virtual_device=meta[VirtualDevice][1],       // texture\n",
    "                   Primitive=1) {\n",
    "            add(%a, %b)\n",
    "          };\n",
    "          %6 = %5(%1, %3);\n",
    "          %7 = fn (%a {virtual_device=meta[VirtualDevice][1]},  // texture\n",
    "                   %b {virtual_device=meta[VirtualDevice][0]},  // global\n",
    "                   virtual_device=meta[VirtualDevice][1],       // texture\n",
    "                   Primitive=1) {\n",
    "            add(%a, %b)\n",
    "          };\n",
    "          %8 = %7(%6, %3);\n",
    "          %9 = fn (%a {virtual_device=meta[VirtualDevice][1]},  // texture\n",
    "                   %b {virtual_device=meta[VirtualDevice][1]},  // texture\n",
    "                   virtual_device=meta[VirtualDevice][1],       // texture\n",
    "                   Primitive=1) {\n",
    "            add(%a, %b)\n",
    "          };\n",
    "          %10 = %9(%8, %6);\n",
    "          %11 = fn (%a,\n",
    "                    virtual_device=meta[VirtualDevice][0],      // global\n",
    "                    Primitive=1) {\n",
    "            layout_transform(%a, src_layout=\"NCHW4c\", dst_layout=\"NCHW\")\n",
    "          };\n",
    "          %11(%10)\n",
    "        }\n",
    "        \"\"\",\n",
    "        \"from_string\",\n",
    "        None,\n",
    "        metatable,\n",
    "    )\n",
    "\n",
    "    GPU_DEVICE = tvm.device(\"cuda\")\n",
    "    HOST_TARGET = tvm.target.Target(\"llvm\")\n",
    "    GPU_TARGET = tvm.target.Target(\"cuda\").with_host(HOST_TARGET)\n",
    "    GPU = tvm.target.VirtualDevice(GPU_DEVICE, GPU_TARGET)  # device_type=2\n",
    "    CTXT = tvm.transform.PassContext(config={\"relay.fallback_device_type\": GPU.device_type_int})\n",
    "    config = tvm.target.make_compilation_config(CTXT, GPU_TARGET)\n",
    "    mod = relay.transform.InferType()(mod)\n",
    "    # PlanDevices should succeed.\n",
    "    mod = relay.transform.PlanDevices(config)(mod)\n",
    "\n",
    "    func = mod[\"main\"]\n",
    "    memory_plan = relay.backend._backend.GraphPlanMemory(func)\n",
    "    virtual_devices = {}\n",
    "\n",
    "    # We do not have execution ordered information, the only order that we can stick\n",
    "    # in this place - storage_id\n",
    "    # for above graph we know that\n",
    "    # We have\n",
    "    #  - 8 manageable storages for above graph\n",
    "    #  - 5 of them are buffers\n",
    "    #  - 3 of them are textures (2d storages)\n",
    "    #  - 1 of buffer will be reused, since we have storage id mapped data, we will have 4th\n",
    "    #      storage id reused and hidden in virtual_devices map\n",
    "    #  - no textures are reused so far\n",
    "    for k, v in memory_plan.expr_to_storage_info.items():\n",
    "        virtual_devices[v.storage_ids[0]] = v.virtual_devices[0].memory_scope\n",
    "\n",
    "    # Check the scopes according to the above expectations\n",
    "    assert (\n",
    "        virtual_devices[0] == \"global\"\n",
    "        and virtual_devices[1] == \"global\"\n",
    "        and virtual_devices[2] == \"global\"\n",
    "        and virtual_devices[3] == \"global\"\n",
    "        and virtual_devices[4] == \"global.texture\"\n",
    "        and virtual_devices[5] == \"global.texture\"\n",
    "        and virtual_devices[6] == \"global.texture\"\n",
    "    )\n",
    "\n",
    "\n",
    "def test_reshape_nop():\n",
    "    \"\"\"Reshapes must compile to __nop nodes whose outputs share input storage.\"\"\"\n",
    "    # test that reshape can be turned into nop\n",
    "    x = relay.var(\"x\", shape=(10, 4))\n",
    "    xx = relay.abs(x)\n",
    "    y = relay.expand_dims(xx, axis=1)\n",
    "    t0 = relay.reshape(y, (1, 40))\n",
    "    t1 = relay.abs(y)\n",
    "\n",
    "    z0 = relay.reshape(t0, (2, 20))\n",
    "    z1 = relay.sqrt(t1)\n",
    "    z2 = relay.reshape(t1, (1, 40))\n",
    "\n",
    "    func = relay.Function([x], relay.Tuple([z0, z1, z2]))\n",
    "    x_data = np.random.rand(10, 4).astype(\"float32\")\n",
    "    graph = relay.build(tvm.IRModule.from_expr(func), \"llvm\")\n",
    "    graph_json_str = graph.get_graph_json()\n",
    "\n",
    "    graph_json = json.loads(graph_json_str)\n",
    "\n",
    "    # reshape must force sharing memory\n",
    "    storage_ids = graph_json[\"attrs\"][\"storage_id\"][1]\n",
    "    assert tuple(storage_ids) == (0, 1, 1, 2, 3, 2)\n",
    "    assert graph_json[\"nodes\"][2][\"attrs\"][\"func_name\"] == \"__nop\"\n",
    "    assert graph_json[\"nodes\"][5][\"attrs\"][\"func_name\"] == \"__nop\"\n",
    "\n",
    "    gmod = graph_executor.GraphModule(graph[\"default\"](tvm.cpu(0)))\n",
    "\n",
    "    gmod.set_input(x=x_data)\n",
    "    gmod.run()\n",
    "    # Numpy references for the three tuple outputs (x_data is non-negative,\n",
    "    # so abs() is dropped from the z0 reference).\n",
    "    z0_np = x_data.reshape(2, 20)\n",
    "    z1_np = np.sqrt(\n",
    "        np.abs(\n",
    "            x_data.reshape(\n",
    "                10,\n",
    "                1,\n",
    "                4,\n",
    "            )\n",
    "        )\n",
    "    )\n",
    "    z2_np = np.abs(x_data).reshape(1, 40)\n",
    "    tvm.testing.assert_allclose(gmod.get_output(0).numpy(), z0_np)\n",
    "    tvm.testing.assert_allclose(gmod.get_output(1).numpy(), z1_np)\n",
    "    tvm.testing.assert_allclose(gmod.get_output(2).numpy(), z2_np)\n",
    "\n",
    "\n",
    "@tvm.testing.uses_gpu\n",
    "def test_gru_like():\n",
    "    \"\"\"GRU-like cell (dense + 3-way split + elementwise mix) vs numpy reference.\"\"\"\n",
    "    def unit(rnn_dim):\n",
    "        # Relay implementation of the cell.\n",
    "        X = relay.var(\"X\", shape=(1, rnn_dim))\n",
    "        W = relay.var(\"y\", shape=(3 * rnn_dim, rnn_dim))\n",
    "        matmul = relay.nn.dense(X, W)\n",
    "        splitted = relay.split(matmul, indices_or_sections=3, axis=1)\n",
    "        out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])\n",
    "        return relay.Function([X, W], out)\n",
    "\n",
    "    def sigmoid(x):\n",
    "        return 1 / (1 + np.exp(-x))\n",
    "\n",
    "    def unit_numpy(X, W):\n",
    "        # Numpy reference for unit(); mirrors dense via X @ W.T.\n",
    "        prod = np.dot(X, W.transpose())\n",
    "        splits = np.split(prod, indices_or_sections=3, axis=1)\n",
    "        return sigmoid(splits[0]) + np.tanh(splits[1]) * np.exp(splits[2])\n",
    "\n",
    "    dtype = \"float32\"\n",
    "    rnn_dim = 1000\n",
    "    x = np.random.rand(1, rnn_dim).astype(dtype)\n",
    "    # Weights scaled into [-0.005, 0.005) - presumably to keep exp() well-behaved.\n",
    "    y = np.random.rand(3 * rnn_dim, rnn_dim).astype(dtype) * 0.01 - 0.005\n",
    "    out_shape = (1, rnn_dim)\n",
    "    z = unit(rnn_dim)\n",
    "\n",
    "    for target, dev in tvm.testing.enabled_targets():\n",
    "        with tvm.transform.PassContext(opt_level=2):\n",
    "            graph, lib, params = relay.build(tvm.IRModule.from_expr(z), target)\n",
    "            m = graph_executor.create(graph, lib, dev)\n",
    "            m.set_input(\"X\", tvm.nd.array(x.astype(dtype)))\n",
    "            m.set_input(\"y\", tvm.nd.array(y.astype(dtype)))\n",
    "            m.set_input(**params)\n",
    "            m.run()\n",
    "            out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).numpy()\n",
    "            ref = unit_numpy(x, y)\n",
    "            tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)\n",
    "\n",
    "\n",
    "def test_compile_nested_tuples():\n",
    "    \"\"\"Nested output tuples are flattened into separate graph-executor outputs.\"\"\"\n",
    "    x = relay.var(\"x\", shape=(10,))\n",
    "    x1 = x + relay.const(1.0)\n",
    "    x2 = x1 + relay.const(1.0)\n",
    "    x3 = x2 + relay.const(1.0)\n",
    "    x4 = x3 + relay.const(1.0)\n",
    "    out = relay.Tuple([x1, relay.Tuple([relay.Tuple([x2, x3]), x4])])\n",
    "    func = relay.Function([x], out)\n",
    "\n",
    "    graph, lib, _ = relay.build(tvm.IRModule.from_expr(func), \"llvm\")\n",
    "    mod = graph_executor.create(graph, lib, device=tvm.cpu(0))\n",
    "\n",
    "    x_data = np.random.uniform(size=(10,)).astype(np.float32)\n",
    "    mod.set_input(x=x_data)\n",
    "    mod.run()\n",
    "\n",
    "    # The nested tuple (x1, ((x2, x3), x4)) flattens to four outputs.\n",
    "    assert mod.get_num_outputs() == 4\n",
    "\n",
    "    # Output i is x_data + (i + 1), matching construction order.\n",
    "    ref = x_data + 1\n",
    "    for i in range(mod.get_num_outputs()):\n",
    "        out = mod.get_output(i).numpy()\n",
    "        tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)\n",
    "        ref = ref + 1\n",
    "\n",
    "\n",
    "def test_compile_return_empty_tuple():\n",
    "    \"\"\"Building and running a function returning an empty tuple must not crash.\"\"\"\n",
    "    inp = relay.var(\"x\", shape=[16], dtype=\"float32\")\n",
    "    ir_mod = tvm.IRModule.from_expr(relay.Function([inp], relay.Tuple([])))\n",
    "    graph, lib, _ = relay.build(ir_mod, \"llvm\")\n",
    "    gmod = graph_executor.create(graph, lib, device=tvm.cpu(0))\n",
    "    gmod.run()\n",
    "\n",
    "\n",
    "@tvm.testing.uses_gpu\n",
    "def test_compile_fused_identity_cast():\n",
    "    \"\"\"Compile fused functions that reduce to an identity or pass a param through.\"\"\"\n",
    "    # a fused function that would optimized to identity\n",
    "    x = relay.var(\"x\", shape=[16], dtype=\"float32\")\n",
    "    y = relay.cast(x, \"float32\")\n",
    "    func1 = relay.Function([x], y).with_attr(\"Primitive\", 1)\n",
    "\n",
    "    # a fused function with param pass-through\n",
    "    x = relay.var(\"x\", shape=[16], dtype=\"float32\")\n",
    "    y = relay.add(x, relay.const(3.14, \"float32\"))\n",
    "    func2 = relay.Function([x], relay.Tuple([x, y])).with_attr(\"Primitive\", 1)\n",
    "\n",
    "    x_global = relay.var(\"xx\", shape=[16], dtype=\"float32\")\n",
    "    tup = func2(x_global)\n",
    "    y_global = func1(relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1))\n",
    "\n",
    "    mod = tvm.IRModule.from_expr(relay.Function([x_global], y_global))\n",
    "    for target, device in tvm.testing.enabled_targets():\n",
    "        with tvm.transform.PassContext(opt_level=2):\n",
    "            graph, lib, _ = relay.build(mod, target=target)\n",
    "            executor = graph_executor.create(graph, lib, device=device)\n",
    "            # Building and running without error is the assertion here.\n",
    "            executor.run()\n",
    "\n",
    "\n",
    "def test_graph_executor_nested_tuples():\n",
    "    \"\"\"create_executor(...).evaluate() preserves nested tuple structure.\"\"\"\n",
    "    x, y, z, w = [relay.var(c, shape=(2, 3), dtype=\"float32\") for c in \"xyzw\"]\n",
    "    out = relay.Tuple([x, relay.Tuple([y, relay.Tuple([z, w])])])\n",
    "    func = relay.Function([x, y, z, w], out)\n",
    "\n",
    "    f = relay.create_executor(\n",
    "        kind=\"graph\", mod=tvm.IRModule.from_expr(func), device=tvm.cpu(0), target=\"llvm\"\n",
    "    ).evaluate()\n",
    "\n",
    "    data = [np.random.uniform(size=(2, 3)).astype(\"float32\") for _ in \"xyzw\"]\n",
    "    out = f(*data)\n",
    "    # The returned value mirrors the (x, (y, (z, w))) nesting of the IR tuple.\n",
    "    assert len(out) == 2\n",
    "    tvm.testing.assert_allclose(out[0].numpy(), data[0])\n",
    "    assert len(out[1]) == 2\n",
    "    tvm.testing.assert_allclose(out[1][0].numpy(), data[1])\n",
    "    assert len(out[1][1]) == 2\n",
    "    tvm.testing.assert_allclose(out[1][1][0].numpy(), data[2])\n",
    "    tvm.testing.assert_allclose(out[1][1][1].numpy(), data[3])\n",
    "\n",
    "\n",
    "def test_graph_executor_api():\n",
    "    \"\"\"Exercise get_input_index and get_input_info metadata of GraphModule.\"\"\"\n",
    "    dname_0, dname_1 = \"data_0\", \"data_1\"\n",
    "    data_0, data_1 = [relay.var(c, shape=(1, 1), dtype=\"float32\") for c in [dname_0, dname_1]]\n",
    "    net = relay.add(data_0, data_1)\n",
    "    func = relay.Function((data_0, data_1), net)\n",
    "\n",
    "    lib = relay.build(tvm.IRModule.from_expr(func), \"llvm\")\n",
    "    mod = graph_executor.GraphModule(lib[\"default\"](tvm.cpu(0)))\n",
    "\n",
    "    # Unknown input names report index -1 rather than raising.\n",
    "    assert mod.get_input_index(dname_1) == 1\n",
    "    assert mod.get_input_index(dname_0) == 0\n",
    "    assert mod.get_input_index(\"Invalid\") == -1\n",
    "\n",
    "    shape_dict, dtype_dict = mod.get_input_info()\n",
    "    assert isinstance(shape_dict, tvm.container.Map)\n",
    "    assert isinstance(dtype_dict, tvm.container.Map)\n",
    "    for data in [data_0, data_1]:\n",
    "        name = data.name_hint\n",
    "        ty = data.type_annotation\n",
    "        # verify shape\n",
    "        assert name in shape_dict\n",
    "        assert isinstance(shape_dict[name], tvm.runtime.container.ShapeTuple)\n",
    "        assert shape_dict[name] == tvm.runtime.container.ShapeTuple([i.value for i in ty.shape])\n",
    "        # verify dtype\n",
    "        assert name in dtype_dict\n",
    "        assert isinstance(dtype_dict[name], tvm.runtime.container.String)\n",
    "        assert dtype_dict[name] == ty.dtype\n",
    "\n",
    "\n",
    "@tvm.testing.requires_llvm\n",
    "def test_benchmark():\n",
    "    \"\"\"benchmark() yields sane statistics; verified exactly with a mocked timer.\"\"\"\n",
    "    mod, params = mlp.get_workload(1)\n",
    "    lib = relay.build(mod, target=\"llvm\", params=params)\n",
    "    exe = graph_executor.create(lib.get_graph_json(), lib.lib, tvm.cpu())\n",
    "    data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype(\"float32\"))\n",
    "    result = exe.benchmark(tvm.cpu(), data=data, func_name=\"run\", repeat=2, number=1)\n",
    "    # With exactly two samples the median equals the mean by definition.\n",
    "    assert result.mean == result.median\n",
    "    assert result.mean > 0\n",
    "    assert len(result.results) == 2\n",
    "\n",
    "    # Mock out time_evaluator so the statistics come from fixed samples [1, 2, 2, 5].\n",
    "    with patch.object(\n",
    "        tvm.runtime.module.Module,\n",
    "        \"time_evaluator\",\n",
    "        return_value=lambda: tvm.runtime.module.BenchmarkResult([1, 2, 2, 5]),\n",
    "    ) as method:\n",
    "        result = exe.benchmark(tvm.cpu(), data=data, func_name=\"run\", repeat=2, number=1)\n",
    "        assert result.mean == 2.5\n",
    "        assert result.median == 2.0\n",
    "        assert result.max == 5\n",
    "        assert result.min == 1\n",
    "        assert result.std == 1.5\n",
    "\n",
    "\n",
    "@tvm.testing.parametrize_targets(\"cuda\", \"llvm\")\n",
    "def test_benchmark_end_to_end(dev, target):\n",
    "    \"\"\"benchmark(end_to_end=True) reports two positive samples per target.\"\"\"\n",
    "    ir_mod, params = mlp.get_workload(1)\n",
    "    lib = relay.build(ir_mod, target=target, params=params)\n",
    "    module = graph_executor.create(lib.get_graph_json(), lib.lib, dev)\n",
    "    data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype(\"float32\"))\n",
    "    result = module.benchmark(dev, data=data, func_name=\"run\", repeat=2, number=1, end_to_end=True)\n",
    "    assert result.mean > 0\n",
    "    assert len(result.results) == 2\n",
    "\n",
    "\n",
    "@tvm.testing.requires_cuda\n",
    "def test_benchmark_end_to_end_rpc():\n",
    "    \"\"\"End-to-end benchmark through a local RPC server on a remote cuda device.\"\"\"\n",
    "    server = rpc.Server(\"127.0.0.1\")\n",
    "    remote = rpc.connect(server.host, server.port)\n",
    "\n",
    "    mod, params = mlp.get_workload(1)\n",
    "    lib = relay.build(mod, target=\"cuda\", params=params)\n",
    "\n",
    "    # Export the compiled library, upload it to the remote, and load it there.\n",
    "    temp = utils.tempdir()\n",
    "    path = temp.relpath(\"library.so\")\n",
    "    lib.export_library(path)\n",
    "    remote.upload(path)\n",
    "    rlib = remote.load_module(\"library.so\")\n",
    "\n",
    "    dev = remote.device(\"cuda\")\n",
    "    exe = graph_executor.create(lib.get_graph_json(), rlib, dev)\n",
    "\n",
    "    data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype(\"float32\"), device=dev)\n",
    "    result = exe.benchmark(dev, data=data, func_name=\"run\", repeat=2, number=1, end_to_end=True)\n",
    "    assert result.mean > 0\n",
    "    assert len(result.results) == 2\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py312x",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
