{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# memory-passes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import testing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tvm\n",
    "from tvm import te\n",
    "import numpy as np\n",
    "from tvm import relay"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def check_memory_plan(func, check_fn):\n",
    "    # Build Module\n",
    "    mod = tvm.IRModule().from_expr(func)\n",
    "\n",
    "    # Convert arguments.\n",
    "    args = []\n",
    "    for param in func.params:\n",
    "        param = param.type_annotation\n",
    "        sh = [int(sh) for sh in param.shape]\n",
    "        data = np.random.rand(*sh).astype(param.dtype)\n",
    "        args.append(tvm.nd.array(data))\n",
    "\n",
    "    # TODO(mbs): Why does the executor need to be shared? Seems wrong.\n",
    "    ex = relay.create_executor(\"vm\", mod)\n",
    "\n",
    "    # Compute without memory planning.\n",
    "    no_plan_result = ex.evaluate()(*args)\n",
    "\n",
    "    # Compute with memory planning.\n",
    "    with tvm.transform.PassContext(opt_level=1, disabled_pass=[\"MemoryPlan\"]):\n",
    "        plan_result = ex.evaluate()(*args)\n",
    "\n",
    "    # Compute Python result.\n",
    "    py_res = check_fn(*[arg.numpy() for arg in args])\n",
    "\n",
    "    # First check that the two VM results agree.\n",
    "    np.testing.assert_allclose(no_plan_result.numpy(), plan_result.numpy())\n",
    "\n",
    "    # Finally check that the results match the Python result.\n",
    "    np.testing.assert_allclose(plan_result.numpy(), py_res)\n",
    "\n",
    "\n",
    "def storage_type(mod):\n",
    "    return relay.TypeCall(mod.get_global_type_var(\"Storage\"), [])\n",
    "\n",
    "\n",
     "def test_tyck_alloc_storage():\n",
     "    \"\"\"Smoke test: importing the core prelude should succeed.\"\"\"\n",
     "    mod = tvm.IRModule()\n",
     "    mod.import_from_std(\"core.rly\")\n",
    "\n",
    "\n",
    "def test_tyck_alloc_tensor():\n",
    "    mod = tvm.IRModule()\n",
    "    mod.import_from_std(\"core.rly\")\n",
    "    sto = relay.Var(\"x\", storage_type(mod))\n",
    "    sh = relay.const(np.array([1, 2]), dtype=\"int64\")\n",
    "    at = relay.op.memory.alloc_tensor(sto, relay.const(0, dtype=\"int64\"), sh)\n",
    "    mod[\"main\"] = relay.Function([sto], at)\n",
    "    relay.transform.InferType()(mod)\n",
    "\n",
    "\n",
    "def check_add(x):\n",
    "    return x + x\n",
    "\n",
    "\n",
    "def test_add():\n",
    "    x = relay.var(\"x\", shape=(2,))\n",
    "    z = x + x\n",
    "    func = relay.Function(\n",
    "        [\n",
    "            x,\n",
    "        ],\n",
    "        z,\n",
    "    )\n",
    "    check_memory_plan(func, check_add)\n",
    "\n",
    "\n",
    "def check_add_sub(x, y):\n",
    "    z = x + x\n",
    "    return z - y\n",
    "\n",
    "\n",
    "def test_add_sub():\n",
    "    x = relay.var(\"x\", shape=(10,))\n",
    "    y = relay.var(\"y\", shape=(10,))\n",
    "    z = x + x\n",
    "    z = z - y\n",
    "    func = relay.Function([x, y], z)\n",
    "    check_memory_plan(func, check_add_sub)\n",
    "\n",
    "\n",
    "def check_no_fuse(x, y, w):\n",
    "    z = x + y\n",
    "    return np.matmul(z, np.transpose(w))\n",
    "\n",
    "\n",
    "def test_no_fuse():\n",
    "    x = relay.var(\"x\", shape=(5, 1))\n",
    "    y = relay.var(\"y\", shape=(5, 1))\n",
    "    w = relay.var(\"w\", shape=(5, 1))\n",
    "    z = x + y\n",
    "    out = relay.op.nn.dense(z, w)\n",
    "    func = relay.Function([x, y, w], out)\n",
    "    check_memory_plan(func, check_no_fuse)\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    test_tyck_alloc_tensor()\n",
    "    test_add()\n",
    "    test_add_sub()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py312x",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
