{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Relay 后端解释器\n",
    "\n",
    "参考：`tvm/tests/python/relay/test_backend_interpreter.py`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/media/pc/data/lxw/ai/tvm-book/doc/read/relay\n"
     ]
    }
   ],
   "source": [
    "%cd ..\n",
    "import testing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tvm\n",
    "from tvm.testing import device_enabled\n",
    "from tvm import nd\n",
    "from tvm import relay\n",
    "from tvm.runtime import container\n",
    "from tvm.relay.backend.interpreter import RefValue, ConstructorValue\n",
    "from tvm.relay.scope_builder import ScopeBuilder"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def check_eval(expr, args, expected_result, mod=None, rtol=1e-07):\n",
    "    # TODO(tqchen) add more types once the schedule register is fixed.\n",
    "    for target in [\"llvm\"]:\n",
    "        dev = tvm.device(target, 0)\n",
    "        if not device_enabled(target):\n",
    "            return\n",
    "        func = relay.create_executor(mod=mod, device=dev, target=target).evaluate(expr)\n",
    "        result = func if args is None else func(*args)\n",
    "        # use testing which also set atol\n",
    "        np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def test_tuple_value():\n",
    "    tv = container.tuple_object([relay.const(1), relay.const(2), relay.const(3)])\n",
    "    np.testing.assert_allclose(tv[0].data.numpy(), 1)\n",
    "    np.testing.assert_allclose(tv[1].data.numpy(), 2)\n",
    "    np.testing.assert_allclose(tv[2].data.numpy(), 3)\n",
    "\n",
    "\n",
    "def test_tuple_getitem():\n",
    "    two = relay.add(relay.const(1), relay.const(1))\n",
    "    func = relay.Function([], relay.TupleGetItem(relay.Tuple([relay.const(1), relay.const(2)]), 0))\n",
    "    check_eval(func, [], 1)\n",
    "\n",
    "\n",
    "def test_id():\n",
    "    x = relay.var(\"x\", \"float32\")\n",
    "    ident = relay.Function([x], x)\n",
    "    one = np.array(1.0, \"float32\")\n",
    "    check_eval(ident, [one], one)\n",
    "\n",
    "\n",
    "def test_add_const():\n",
    "    two = relay.add(relay.const(1), relay.const(1))\n",
    "    func = relay.Function([], two)\n",
    "    check_eval(func, [], 2)\n",
    "\n",
    "\n",
    "def test_mul_param():\n",
    "    x = relay.var(\"x\", shape=(10, 10))\n",
    "    y = relay.var(\"y\", shape=(1, 10))\n",
    "    func = relay.Function([x, y], relay.multiply(x, y))\n",
    "    x_data = np.random.rand(10, 10).astype(\"float32\")\n",
    "    y_data = np.random.rand(1, 10).astype(\"float32\")\n",
    "    check_eval(func, [x_data, y_data], x_data * y_data)\n",
    "\n",
    "\n",
    "def test_equal():\n",
    "    i = relay.var(\"i\", shape=[], dtype=\"int32\")\n",
    "    j = relay.var(\"i\", shape=[], dtype=\"int32\")\n",
    "    z = relay.equal(i, j)\n",
    "    func = relay.Function([i, j], z, ret_type=relay.TensorType([], \"bool\"))\n",
    "    i_data = relay.const(0, \"int32\")\n",
    "    j_data = relay.const(0, \"int32\")\n",
    "    check_eval(func, [i_data, j_data], True)\n",
    "\n",
    "\n",
    "def test_subtract():\n",
    "    i = relay.var(\"i\", shape=[], dtype=\"int32\")\n",
    "    sub = relay.subtract(i, relay.const(1, dtype=\"int32\"))\n",
    "    func = relay.Function([i], sub, ret_type=relay.TensorType([], \"int32\"))\n",
    "    i_data = np.array(1, dtype=\"int32\")\n",
    "    check_eval(func, [i_data], 0)\n",
    "\n",
    "\n",
     "def test_simple_loop():\n",
     "    \"\"\"Recursive global function: sum_up(i) = i if i == 0 else sum_up(i - 1) + i.\n",
     "\n",
     "    Checks sum_up(10) == sum(range(1, 11)) == 55.\n",
     "    \"\"\"\n",
     "    mod = tvm.IRModule({})\n",
     "    sum_up = relay.GlobalVar(\"sum_up\")\n",
     "    i = relay.var(\"i\", shape=[], dtype=\"int32\")\n",
     "    sb = ScopeBuilder()\n",
     "    # Base case: return i itself (== 0) when the counter reaches zero.\n",
     "    with sb.if_scope(relay.equal(i, relay.const(0, dtype=\"int32\"))):\n",
     "        sb.ret(i)\n",
     "    with sb.else_scope():\n",
     "        one_less = relay.subtract(i, relay.const(1, dtype=\"int32\"))\n",
     "        # Recursive call through the GlobalVar; the function is registered below.\n",
     "        rec_call = relay.Call(sum_up, [one_less])\n",
     "        sb.ret(relay.add(rec_call, i))\n",
     "    func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], \"int32\"))\n",
     "    mod[sum_up] = func\n",
     "    i_data = np.array(10, dtype=\"int32\")\n",
     "    check_eval(sum_up, [i_data], sum(range(1, 11)), mod=mod)\n",
    "\n",
    "\n",
     "def test_loop():\n",
     "    \"\"\"Tail-recursive accumulator loop: sum_up(i, accum) folds i into accum until i == 0.\n",
     "\n",
     "    Checks sum_up(10, 0) == sum(range(1, 11)) == 55.\n",
     "    \"\"\"\n",
     "    mod = tvm.IRModule({})\n",
     "    sum_up = relay.GlobalVar(\"sum_up\")\n",
     "    i = relay.var(\"i\", shape=[], dtype=\"int32\")\n",
     "    accum = relay.var(\"accum\", shape=[], dtype=\"int32\")\n",
     "    sb = ScopeBuilder()\n",
     "    # Base case: the accumulator already holds the final sum.\n",
     "    with sb.if_scope(relay.equal(i, relay.const(0, \"int32\"))):\n",
     "        sb.ret(accum)\n",
     "    with sb.else_scope():\n",
     "        one_less = relay.subtract(i, relay.const(1, \"int32\"))\n",
     "        new_accum = relay.add(accum, i)\n",
     "        # Tail call: no work remains after the recursive call returns.\n",
     "        sb.ret(relay.Call(sum_up, [one_less, new_accum]))\n",
     "    func = relay.Function([i, accum], sb.get())\n",
     "    mod[sum_up] = func\n",
     "    i_data = np.array(10, dtype=\"int32\")\n",
     "    accum_data = np.array(0, dtype=\"int32\")\n",
     "    check_eval(sum_up, [i_data, accum_data], sum(range(1, 11)), mod=mod)\n",
    "\n",
    "\n",
     "def test_ref():\n",
     "    \"\"\"Mutable references: read 1, write 2, read again; 1 + 2 == 3.\n",
     "\n",
     "    The let-chain is built innermost-first, so the evaluated program reads:\n",
     "    let i = ref(1); let iv = !i; let u = (i := 2); let uv = !i; iv + uv\n",
     "    \"\"\"\n",
     "    mod = tvm.IRModule()\n",
     "    three_with_ref = relay.GlobalVar(\"three_with_ref\")\n",
     "    i = relay.Var(\"i\")\n",
     "    iv = relay.Var(\"iv\")\n",
     "    u = relay.Var(\"u\")\n",
     "    uv = relay.Var(\"uv\")\n",
     "    # Innermost expression first; each Let wraps the body built so far.\n",
     "    body = relay.add(iv, uv)\n",
     "    body = relay.Let(uv, relay.RefRead(i), body)\n",
     "    body = relay.Let(u, relay.RefWrite(i, relay.const(2)), body)\n",
     "    body = relay.Let(iv, relay.RefRead(i), body)\n",
     "    body = relay.Let(i, relay.RefCreate(relay.const(1)), body)\n",
     "    mod[three_with_ref] = relay.Function([], body)\n",
     "    check_eval(three_with_ref, [], 3, mod=mod)\n",
    "\n",
    "\n",
    "def test_binds():\n",
    "    x = relay.var(\"x\")\n",
    "    y = relay.add(x, x)\n",
    "    xx = np.ones((10, 20))\n",
    "    res = relay.create_executor().evaluate(y, binds={x: xx}).numpy()\n",
    "    np.testing.assert_allclose(xx + xx, res)\n",
    "\n",
    "\n",
    "def test_kwargs_params():\n",
    "    x = relay.var(\"x\", shape=(1, 10))\n",
    "    y = relay.var(\"y\", shape=(1, 10))\n",
    "    z = relay.var(\"z\", shape=(1, 10))\n",
    "    f = relay.Function([x, y, z], x + y + z)\n",
    "    x_data = np.random.rand(1, 10).astype(\"float32\")\n",
    "    y_data = np.random.rand(1, 10).astype(\"float32\")\n",
    "    z_data = np.random.rand(1, 10).astype(\"float32\")\n",
    "    params = {\"y\": y_data, \"z\": z_data}\n",
    "    res = relay.create_executor().evaluate(f)(x_data, **params)\n",
    "    np.testing.assert_allclose(res.numpy(), x_data + y_data + z_data)\n",
    "\n",
    "\n",
     "def test_function_taking_adt_ref_tuple():\n",
     "    \"\"\"The Prelude `id` function round-trips ADT, reference and tuple values intact.\"\"\"\n",
     "    mod = tvm.IRModule()\n",
     "    prelude = relay.prelude.Prelude(mod)\n",
     "    _, cons, nil = prelude.mod.get_type(\"List\")\n",
     "\n",
     "    # Build List ADT values: nil and cons(<random tensor>, nil).\n",
     "    nil_value = ConstructorValue(nil.tag, [], nil)\n",
     "    cons_value = ConstructorValue(\n",
     "        cons.tag,\n",
     "        [nd.array(np.random.rand(1, 10).astype(\"float32\")), nil_value],\n",
     "        cons,\n",
     "    )\n",
     "\n",
     "    # A mutable reference cell and a 10-field tuple of random tensors.\n",
     "    ref_value = RefValue(nd.array(np.random.rand(1, 10).astype(\"float32\")))\n",
     "    tuple_value = container.tuple_object(\n",
     "        [nd.array(np.random.rand(1, 10).astype(\"float32\")) for _ in range(10)]\n",
     "    )\n",
     "\n",
     "    id_func = relay.create_executor(mod=mod).evaluate(prelude.id)\n",
     "\n",
     "    # nil passes through with the same tag and no fields.\n",
     "    res_nil = id_func(nil_value)\n",
     "    assert res_nil.tag == nil_value.tag\n",
     "    assert len(res_nil.fields) == 0\n",
     "\n",
     "    # cons keeps its payload tensor and its nil tail.\n",
     "    res_cons = id_func(cons_value)\n",
     "    assert res_cons.tag == cons_value.tag\n",
     "    assert len(res_cons.fields) == len(cons_value.fields)\n",
     "    np.testing.assert_allclose(res_cons.fields[0].numpy(), cons_value.fields[0].numpy())\n",
     "    assert isinstance(res_cons.fields[1], ConstructorValue)\n",
     "    assert res_cons.fields[1].tag == nil.tag\n",
     "    assert len(res_cons.fields[1].fields) == 0\n",
     "\n",
     "    # The reference still holds the same tensor.\n",
     "    res_ref = id_func(ref_value)\n",
     "    np.testing.assert_allclose(res_ref.value.numpy(), ref_value.value.numpy())\n",
     "\n",
     "    # Every tuple field survives unchanged.\n",
     "    res_tuple = id_func(tuple_value)\n",
     "    for i in range(10):\n",
     "        np.testing.assert_allclose(res_tuple[i].numpy(), tuple_value[i].numpy())\n",
    "\n",
    "\n",
     "def test_tuple_passing():\n",
     "    \"\"\"A tuple-typed parameter accepts both a Python tuple and a runtime tuple object.\"\"\"\n",
     "    x = relay.var(\n",
     "        \"x\",\n",
     "        type_annotation=relay.ty.TupleType(\n",
     "            [relay.ty.TensorType((), \"int64\"), relay.ty.TensorType((), \"int64\")]\n",
     "        ),\n",
     "    )\n",
     "\n",
     "    # main(x) returns the first field of the pair.\n",
     "    fn = relay.Function([x], relay.expr.TupleGetItem(x, 0))\n",
     "    mod = tvm.IRModule({})\n",
     "    gv = relay.GlobalVar(\"main\")\n",
     "    mod[gv] = fn\n",
     "    mod = relay.transform.InferType()(mod)\n",
     "\n",
     "    dev = tvm.cpu()\n",
     "    target = tvm.target.Target(\"llvm\")\n",
     "    f = relay.create_executor(mod=mod, device=dev, target=target).evaluate(gv)\n",
     "    # First use a Python tuple.\n",
     "    out = f((10, 8))\n",
     "    np.testing.assert_allclose(out.numpy(), np.array(10))\n",
     "    # Second use a tuple value.\n",
     "    value_tuple = container.tuple_object([nd.array(np.array(11)), nd.array(np.array(12))])\n",
     "    out = f(value_tuple)\n",
     "    np.testing.assert_allclose(out.numpy(), np.array(11))\n",
    "\n",
    "\n",
    "def test_dynamic():\n",
    "    n = 3\n",
    "    m = 2\n",
    "    x = relay.Var(\"x\", relay.TensorType([relay.Any(), m], \"float32\"))\n",
    "    y = relay.Var(\"y\", relay.TensorType([relay.Any(), m], \"float32\"))\n",
    "    xx = x - relay.expr.const(3.0)\n",
    "    yy = y * relay.expr.const(5.0)\n",
    "    z = relay.op.concatenate([xx, yy], axis=0)\n",
    "    mod = tvm.IRModule()\n",
    "    mod[\"main\"] = relay.Function([x, y], z)\n",
    "    x_np = np.random.uniform(size=(n, m)).astype(\"float32\")\n",
    "    y_np = np.random.uniform(size=(n, m)).astype(\"float32\")\n",
    "    expected = np.concatenate([x_np - 3.0, y_np * 5.0], axis=0)\n",
    "    check_eval(None, [x_np, y_np], expected, mod)\n",
    "\n",
    "\n",
    "def test_ref_global_from_expr():\n",
    "    n = 3\n",
    "    x = relay.Var(\"x\", relay.TensorType([n], \"float32\"))\n",
    "    y = relay.Var(\"y\", relay.TensorType([n], \"float32\"))\n",
    "    mod = tvm.IRModule()\n",
    "    mod[\"add\"] = relay.Function([x, y], relay.add(x, y))\n",
    "    x_np = np.random.uniform(size=(n,)).astype(\"float32\")\n",
    "    y_np = np.random.uniform(size=(n,)).astype(\"float32\")\n",
    "    expected = np.add(x_np, y_np)\n",
    "    expr = relay.Call(mod.get_global_var(\"add\"), [relay.const(x_np), relay.const(y_np)])\n",
    "    check_eval(expr, None, expected, mod)\n",
    "\n",
    "\n",
    "def test_keyword_args():\n",
    "    n = 3\n",
    "    x = relay.Var(\"x\", relay.TensorType([n], \"float32\"))\n",
    "    y = relay.Var(\"y\", relay.TensorType([n], \"float32\"))\n",
    "    z = relay.add(x, y)\n",
    "    mod = tvm.IRModule()\n",
    "    mod[\"main\"] = relay.Function([x, y], z)\n",
    "    x_np = np.random.uniform(size=(n,)).astype(\"float32\")\n",
    "    y_np = np.random.uniform(size=(n,)).astype(\"float32\")\n",
    "    expected = np.add(x_np, y_np)\n",
    "    actual = relay.create_executor(mod=mod).evaluate()(y=y_np, x=x_np)\n",
    "    np.testing.assert_allclose(actual.numpy(), expected)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "ename": "TypeError",
     "evalue": "'Object' object is not callable",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[5], line 7\u001b[0m\n\u001b[1;32m      5\u001b[0m c \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mrandom\u001b[38;5;241m.\u001b[39mrand(n)\u001b[38;5;241m.\u001b[39mastype(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfloat32\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m      6\u001b[0m result1, result2 \u001b[38;5;241m=\u001b[39m relay\u001b[38;5;241m.\u001b[39mcreate_executor()\u001b[38;5;241m.\u001b[39mevaluate(t)\n\u001b[0;32m----> 7\u001b[0m np\u001b[38;5;241m.\u001b[39mtesting\u001b[38;5;241m.\u001b[39massert_allclose(\u001b[43mresult1\u001b[49m\u001b[43m(\u001b[49m\u001b[43mc\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mnumpy(), c)\n\u001b[1;32m      8\u001b[0m np\u001b[38;5;241m.\u001b[39mtesting\u001b[38;5;241m.\u001b[39massert_allclose(result2(c)\u001b[38;5;241m.\u001b[39mnumpy(), c)\n",
      "\u001b[0;31mTypeError\u001b[0m: 'Object' object is not callable"
     ]
    }
   ],
   "source": [
    "# TODO(mbs): Support? Would help reduce wasted work when we need to prepare\n",
    "# multiple functions w.r.t. the same module.\n",
    "# \"closures are currently not directly Python callable\"\n",
    "def test_functional_returns():\n",
    "    n = 3\n",
    "    x = relay.Var(\"x\", relay.TensorType([n], \"float32\"))\n",
    "    f = relay.Function([x], x)\n",
    "    t = relay.Tuple([f, f])\n",
    "    c = np.random.rand(n).astype(\"float32\")\n",
    "    result1, result2 = relay.create_executor().evaluate(t)\n",
    "    testing.assert_allclose(result1(c).numpy(), c)\n",
    "    testing.assert_allclose(result2(c).numpy(), c)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py312x",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
