{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "649b75bd",
   "metadata": {},
   "source": [
    "# 测试优化器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1abe5348",
   "metadata": {
    "tags": [
     "hide-cell"
    ]
   },
   "outputs": [],
   "source": [
    "from typing import Callable\n",
    "\n",
    "import numpy as np\n",
    "import tvm\n",
    "from tvm import relax\n",
    "from tvm import IRModule\n",
    "from tvm.relax.training.optimizer import Adam, SGD, MomentumSGD\n",
    "from tvm.script.parser import relax as R\n",
    "from tvm.runtime.relax_vm import VirtualMachine\n",
    "from tvm.testing import assert_allclose\n",
    "\n",
    "def _legalize_and_build(mod: IRModule, target, dev):\n",
    "    \"\"\"Compile ``mod`` for ``target`` and return a VirtualMachine bound to ``dev``.\"\"\"\n",
    "    ex = tvm.compile(mod, target)\n",
    "    vm = VirtualMachine(ex, dev)\n",
    "    return vm\n",
    "\n",
    "\n",
    "def _numpy_to_tvm(data):\n",
    "    \"\"\"Recursively convert numpy arrays (possibly nested in lists/tuples) to tvm.nd.array.\"\"\"\n",
    "    if isinstance(data, (list, tuple)):\n",
    "        return [_numpy_to_tvm(_data) for _data in data]\n",
    "    return tvm.nd.array(data)\n",
    "\n",
    "\n",
    "def _tvm_to_numpy(data):\n",
    "    \"\"\"Recursively convert TVM arrays (nested lists/tuples/tvm.ir.Array) to numpy.\"\"\"\n",
    "    if isinstance(data, (list, tuple, tvm.ir.Array)):\n",
    "        return [_tvm_to_numpy(_data) for _data in data]\n",
    "    return data.numpy()\n",
    "\n",
    "\n",
    "def _assert_allclose_nested(data1, data2):\n",
    "    \"\"\"assert_allclose over two identically-structured nested containers of arrays.\"\"\"\n",
    "    if isinstance(data1, (list, tuple)):\n",
    "        assert isinstance(data2, (list, tuple))\n",
    "        assert len(data1) == len(data2)\n",
    "        for x, y in zip(data1, data2):\n",
    "            _assert_allclose_nested(x, y)\n",
    "    else:\n",
    "        assert_allclose(data1, data2)\n",
    "\n",
    "\n",
    "def _assert_run_result_same(tvm_func: Callable, np_func: Callable, np_inputs: list):\n",
    "    \"\"\"Run ``tvm_func`` and ``np_func`` on the same inputs and compare their outputs.\"\"\"\n",
    "    result = _tvm_to_numpy(tvm_func(*[_numpy_to_tvm(i) for i in np_inputs]))\n",
    "    expected = np_func(*np_inputs)\n",
    "    _assert_allclose_nested(result, expected)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8acd047c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): parametrize_targets registers pytest marks; in this notebook the\n",
    "# helper is called directly with explicit (target, dev), so the decorator is inert.\n",
    "@tvm.testing.parametrize_targets(\"llvm\")\n",
    "def _test_optimizer(target, dev, np_func, opt_type, *args, **kwargs):\n",
    "    \"\"\"Check one update step of a TVM optimizer against a NumPy reference.\"\"\"\n",
    "    # Create two tensor variables to optimize (a 3x3 matrix and a 3-vector).\n",
    "    x = relax.Var(\"x\", R.Tensor((3, 3), \"float32\"))\n",
    "    y = relax.Var(\"y\", R.Tensor((3,), \"float32\"))\n",
    "    \n",
    "    # Instantiate the optimizer (e.g. SGD/Adam); hyper-parameters arrive via *args/**kwargs.\n",
    "    opt = opt_type(*args, **kwargs).init([x, y])\n",
    "    \n",
    "    # Compile the optimizer's update function into an executable module.\n",
    "    mod = IRModule.from_expr(opt.get_function().with_attr(\"global_symbol\", \"main\"))\n",
    "    tvm_func = _legalize_and_build(mod, target, dev)[\"main\"]\n",
    "\n",
    "    # Generate random test data.\n",
    "    param_arr = [np.random.rand(3, 3).astype(np.float32), # initial parameter values\n",
    "                np.random.rand(3).astype(np.float32)]\n",
    "    grad_arr = [np.random.rand(3, 3).astype(np.float32),  # gradient values\n",
    "               np.random.rand(3).astype(np.float32)]\n",
    "    state_arr = _tvm_to_numpy(opt.state)  # optimizer state converted to numpy\n",
    "\n",
    "    # Compare the TVM result against the NumPy reference implementation.\n",
    "    _assert_run_result_same(tvm_func, np_func, [param_arr, grad_arr, state_arr])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "0da69051",
   "metadata": {},
   "outputs": [],
   "source": [
    "# (learning_rate, weight_decay) pairs exercised by the SGD test below.\n",
    "args = (\n",
    "    (0.01, 0),\n",
    "    (0.01, 0.02),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c87ee5a8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compilation target and device shared by every test in this notebook.\n",
    "target = \"llvm\"\n",
    "dev = tvm.device(target, 0)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5c9d9bc1",
   "metadata": {},
   "source": [
    "## 测试 SGD 优化器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "12f8172c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def make_sgd_np_func(lr, weight_decay):\n",
    "    \"\"\"Build the NumPy reference for one vanilla-SGD step with weight decay.\n",
    "\n",
    "    The returned function maps (params, grads, [num_steps]) to\n",
    "    (updated params, [num_steps + 1]), mirroring TVM's SGD update rule.\n",
    "    Binding lr/weight_decay here (instead of reading globals assigned by the\n",
    "    loop below the original definition) keeps the reference self-contained.\n",
    "    \"\"\"\n",
    "    def np_func(param_tuple, grad_tuple, state_tuple):\n",
    "        # param <- param - lr * (grad + weight_decay * param)\n",
    "        param_tuple_new = [\n",
    "            param - lr * (grad + weight_decay * param)\n",
    "            for param, grad in zip(param_tuple, grad_tuple)\n",
    "        ]\n",
    "        return param_tuple_new, [state_tuple[0] + 1]\n",
    "    return np_func\n",
    "\n",
    "for lr, weight_decay in args:\n",
    "    _test_optimizer(target, dev, make_sgd_np_func(lr, weight_decay), SGD, lr, weight_decay)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "26d351b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# NOTE(review): tvm.testing.parameters returns pytest fixtures, and the test_*\n",
    "# functions below are only executed under pytest -- running this cell in the\n",
    "# notebook defines them but does not run them. Also note this rebinds `lr` and\n",
    "# `weight_decay` left over from the SGD loop in the earlier cell.\n",
    "lr, momentum, dampening, weight_decay, nesterov = tvm.testing.parameters(\n",
    "    (0.01, 0.9, 0, 0, False),\n",
    "    (0.01, 0.9, 0.85, 0.02, False),\n",
    "    (0.01, 0.9, 0.85, 0.02, True),\n",
    ")\n",
    "\n",
    "\n",
    "@tvm.testing.parametrize_targets(\"llvm\")\n",
    "def test_momentum_sgd(target, dev, lr, momentum, dampening, weight_decay, nesterov):\n",
    "    \"\"\"Compare TVM's MomentumSGD update against a NumPy reference.\"\"\"\n",
    "    def np_func(param_tuple, grad_tuple, state_tuple):\n",
    "        # State layout used here: [num_steps, velocity_0, velocity_1, ...]\n",
    "        num_steps = state_tuple[0]\n",
    "        param_tuple_new, state_tuple_new = [], []\n",
    "        state_tuple_new.append(num_steps + 1)\n",
    "\n",
    "        for i in range(len(param_tuple)):\n",
    "            param = param_tuple[i]\n",
    "            grad = grad_tuple[i]\n",
    "            velocity = state_tuple[i + 1]\n",
    "            grad = param * weight_decay + grad\n",
    "            velocity = momentum * velocity + grad * (1 - dampening)\n",
    "            if nesterov:\n",
    "                param = param - (grad + momentum * velocity) * lr\n",
    "            else:\n",
    "                param = param - velocity * lr\n",
    "            param_tuple_new.append(param)\n",
    "            state_tuple_new.append(velocity)\n",
    "\n",
    "        return param_tuple_new, state_tuple_new\n",
    "\n",
    "    _test_optimizer(\n",
    "        target, dev, np_func, MomentumSGD, lr, momentum, dampening, weight_decay, nesterov\n",
    "    )\n",
    "\n",
    "\n",
    "lr, betas, eps, weight_decay = tvm.testing.parameters(\n",
    "    (0.01, (0.9, 0.999), 1e-08, 0),\n",
    "    (0.01, (0.8, 0.85), 1e-07, 0.1),\n",
    ")\n",
    "\n",
    "\n",
    "@tvm.testing.parametrize_targets(\"llvm\")\n",
    "def test_adam(target, dev, lr, betas, eps, weight_decay):\n",
    "    \"\"\"Compare TVM's Adam update against a NumPy reference.\"\"\"\n",
    "    def np_func(param_tuple, grad_tuple, state_tuple):\n",
    "        # State layout used here:\n",
    "        # [num_steps, prod(beta0), prod(beta1), m_0..m_{k-1}, v_0..v_{k-1}]\n",
    "        num_steps = state_tuple[0]\n",
    "        num_steps_new = num_steps + 1\n",
    "\n",
    "        param_tuple_new = []\n",
    "        state_tuple_new = [None] * len(state_tuple)  # type: ignore\n",
    "        state_tuple_new[0] = num_steps_new\n",
    "        # Advance the running products of the beta coefficients.\n",
    "        state_tuple_new[1] = state_tuple[1] * betas[0]\n",
    "        state_tuple_new[2] = state_tuple[2] * betas[1]\n",
    "\n",
    "        for i in range(len(param_tuple)):\n",
    "            param = param_tuple[i]\n",
    "            grad = grad_tuple[i]\n",
    "            m = state_tuple[i + 3]\n",
    "            v = state_tuple[i + 3 + len(param_tuple)]\n",
    "            grad = grad + weight_decay * param\n",
    "            m = betas[0] * m + (1 - betas[0]) * grad\n",
    "            v = betas[1] * v + (1 - betas[1]) * grad * grad\n",
    "            # Bias-corrected first and second moment estimates.\n",
    "            m_hat = m / (1 - betas[0] ** num_steps_new)\n",
    "            v_hat = v / (1 - betas[1] ** num_steps_new)\n",
    "            param = param - lr * m_hat / (np.sqrt(v_hat) + eps)\n",
    "            param_tuple_new.append(param)\n",
    "            state_tuple_new[i + 3] = m\n",
    "            state_tuple_new[i + 3 + len(param_tuple)] = v\n",
    "\n",
    "        return param_tuple_new, state_tuple_new\n",
    "\n",
    "    _test_optimizer(target, dev, np_func, Adam, lr, betas, eps, weight_decay)\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ai",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
