{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "dfc9f73c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\nclass torch.optim.Adam(params, lr=0.001, \\n                       betas=(0.9, 0.999),\\n                       eps=1e-08, weight_decay=0,\\n                       amsgrad=False)\\nparams (iterable)：需要优化的网络参数，传进来的网络参数必须是Iterable\\n（官网对这个参数用法讲的不太清楚，下面有例子清楚的说明param具体用法）\\nlr (float, optional)：学习率\\nbetas 这个就是对计算过程中的梯度的平均值和标准差进行的操作\\nweight_decay L2 正则化对应的参数\\n\\n'"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# First, an introduction to the parameters of torch.optim.Adam\n",
    "\"\"\"\n",
    "class torch.optim.Adam(params, lr=0.001, \n",
    "                       betas=(0.9, 0.999),\n",
    "                       eps=1e-08, weight_decay=0,\n",
    "                       amsgrad=False)\n",
    "params (iterable)：需要优化的网络参数，传进来的网络参数必须是Iterable\n",
    "（官网对这个参数用法讲的不太清楚，下面有例子清楚的说明param具体用法）\n",
    "lr (float, optional)：学习率\n",
    "betas 这个就是对计算过程中的梯度的平均值和标准差进行的操作\n",
    "weight_decay L2 正则化对应的参数\n",
    "\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "fa6f3887",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch \n",
    "import torch.nn as nn\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "import itertools\n",
    "\n",
    "init_LR = 0.1\n",
    "\n",
    "\n",
    "class model(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.conv1 = nn.Conv2d(\n",
    "            in_channels=3,out_channels=3,kernel_size=3\n",
    "        )\n",
    "        self.conv2 = nn.Conv2d(\n",
    "            in_channels=3,out_channels=3, kernel_size=3\n",
    "        )\n",
    "    def forward(self, x):\n",
    "        ...\n",
    "        \n",
    "\n",
    "net1 = model()\n",
    "net2 = model()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "43dbf5da",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Parameter containing:\n",
      "tensor([[[[ 1.1173e-01,  1.3834e-01, -1.0241e-01],\n",
      "          [ 9.7670e-02, -1.6146e-02,  9.6993e-02],\n",
      "          [ 6.7396e-02,  1.1824e-01, -7.0258e-02]],\n",
      "\n",
      "         [[ 1.1969e-01, -6.1980e-02,  5.7361e-02],\n",
      "          [ 1.1529e-01,  6.9888e-02, -1.4224e-01],\n",
      "          [ 7.2615e-02, -1.2732e-01, -1.0465e-01]],\n",
      "\n",
      "         [[ 1.2901e-01,  1.8008e-01,  8.5272e-02],\n",
      "          [ 1.5156e-01,  3.7380e-02,  1.4759e-01],\n",
      "          [-9.5120e-02,  1.1713e-01,  9.4658e-02]]],\n",
      "\n",
      "\n",
      "        [[[ 7.0259e-02, -2.0099e-02,  9.9235e-02],\n",
      "          [ 1.3246e-01, -3.6956e-02,  6.2281e-02],\n",
      "          [ 1.6534e-01, -1.7869e-01, -7.0579e-02]],\n",
      "\n",
      "         [[ 1.7990e-01,  1.6560e-02, -1.5501e-01],\n",
      "          [-1.7700e-01,  5.2562e-02, -4.7751e-02],\n",
      "          [ 4.8298e-02, -1.8070e-01, -3.4908e-02]],\n",
      "\n",
      "         [[-1.4132e-01,  1.5364e-01, -2.4115e-02],\n",
      "          [-4.9262e-02,  1.8072e-01,  3.3784e-02],\n",
      "          [-1.8307e-01, -1.3055e-01,  1.7173e-01]]],\n",
      "\n",
      "\n",
      "        [[[ 5.3951e-02,  1.7411e-04,  7.1757e-02],\n",
      "          [-1.0932e-01,  6.9550e-02,  1.4545e-01],\n",
      "          [ 1.4439e-01, -3.1089e-02,  1.1409e-01]],\n",
      "\n",
      "         [[-1.7693e-01,  3.5921e-02, -9.7103e-02],\n",
      "          [ 8.6660e-02,  1.8328e-01,  1.7036e-01],\n",
      "          [-1.4501e-01, -1.1895e-01,  1.1571e-01]],\n",
      "\n",
      "         [[ 1.1315e-01, -1.2834e-01, -1.7029e-03],\n",
      "          [ 1.0730e-01,  5.9267e-03, -1.4942e-01],\n",
      "          [-1.0651e-02, -1.6442e-01, -1.8818e-01]]]], requires_grad=True)\n",
      "Parameter containing:\n",
      "tensor([-0.1519,  0.1068, -0.1313], requires_grad=True)\n",
      "Parameter containing:\n",
      "tensor([[[[ 0.1602, -0.1702,  0.0637],\n",
      "          [-0.1827,  0.0524, -0.1452],\n",
      "          [ 0.0840,  0.0874,  0.0297]],\n",
      "\n",
      "         [[-0.0403,  0.0669,  0.0487],\n",
      "          [-0.1667, -0.0084,  0.0660],\n",
      "          [ 0.0346, -0.1161,  0.1835]],\n",
      "\n",
      "         [[-0.0559,  0.1041, -0.1057],\n",
      "          [ 0.0391, -0.0528, -0.1549],\n",
      "          [-0.1007,  0.0554, -0.0912]]],\n",
      "\n",
      "\n",
      "        [[[ 0.1308,  0.0439,  0.1861],\n",
      "          [ 0.1256,  0.0869,  0.1551],\n",
      "          [ 0.0837,  0.0960,  0.0207]],\n",
      "\n",
      "         [[-0.0995, -0.0821,  0.0808],\n",
      "          [-0.1217,  0.1306, -0.0174],\n",
      "          [ 0.1036,  0.0449, -0.0681]],\n",
      "\n",
      "         [[-0.0918,  0.0058, -0.0511],\n",
      "          [ 0.1899, -0.0921,  0.0687],\n",
      "          [ 0.1864, -0.0447, -0.1676]]],\n",
      "\n",
      "\n",
      "        [[[-0.0876, -0.1297,  0.1730],\n",
      "          [ 0.0617,  0.1452,  0.1470],\n",
      "          [ 0.1873,  0.1385,  0.0586]],\n",
      "\n",
      "         [[-0.1832, -0.0367,  0.1918],\n",
      "          [ 0.0366, -0.0680, -0.0874],\n",
      "          [-0.0920, -0.0193,  0.1198]],\n",
      "\n",
      "         [[ 0.1311, -0.1709,  0.0277],\n",
      "          [ 0.1521,  0.1184,  0.0458],\n",
      "          [-0.0961, -0.0070,  0.0162]]]], requires_grad=True)\n",
      "Parameter containing:\n",
      "tensor([-0.1452, -0.0170,  0.0869], requires_grad=True)\n"
     ]
    }
   ],
   "source": [
    "# Show every learnable tensor registered on net1 (conv weights and biases).\n",
    "for param in net1.parameters():\n",
    "    print(param)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "fdc5d19a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "----optimizer 1----\n",
      "{'lr': 0.1, 'betas': (0.9, 0.999), 'eps': 1e-08, 'weight_decay': 0, 'amsgrad': False}\n"
     ]
    }
   ],
   "source": [
    "# Build an Adam optimizer over net1's parameters with the configured initial LR.\n",
    "optimizer_1 = torch.optim.Adam(net1.parameters(), lr=init_LR)\n",
    "# .defaults records the constructor's keyword defaults for this optimizer instance.\n",
    "print(\"----optimizer 1----\", optimizer_1.defaults, sep=\"\\n\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "6b4dfeaa",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# A single iterable of params was passed to Adam, so it holds exactly one param group.\n",
    "len(optimizer_1.param_groups)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "998bc5bd",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "dict_items([('params', [Parameter containing:\n",
       "tensor([[[[ 1.1173e-01,  1.3834e-01, -1.0241e-01],\n",
       "          [ 9.7670e-02, -1.6146e-02,  9.6993e-02],\n",
       "          [ 6.7396e-02,  1.1824e-01, -7.0258e-02]],\n",
       "\n",
       "         [[ 1.1969e-01, -6.1980e-02,  5.7361e-02],\n",
       "          [ 1.1529e-01,  6.9888e-02, -1.4224e-01],\n",
       "          [ 7.2615e-02, -1.2732e-01, -1.0465e-01]],\n",
       "\n",
       "         [[ 1.2901e-01,  1.8008e-01,  8.5272e-02],\n",
       "          [ 1.5156e-01,  3.7380e-02,  1.4759e-01],\n",
       "          [-9.5120e-02,  1.1713e-01,  9.4658e-02]]],\n",
       "\n",
       "\n",
       "        [[[ 7.0259e-02, -2.0099e-02,  9.9235e-02],\n",
       "          [ 1.3246e-01, -3.6956e-02,  6.2281e-02],\n",
       "          [ 1.6534e-01, -1.7869e-01, -7.0579e-02]],\n",
       "\n",
       "         [[ 1.7990e-01,  1.6560e-02, -1.5501e-01],\n",
       "          [-1.7700e-01,  5.2562e-02, -4.7751e-02],\n",
       "          [ 4.8298e-02, -1.8070e-01, -3.4908e-02]],\n",
       "\n",
       "         [[-1.4132e-01,  1.5364e-01, -2.4115e-02],\n",
       "          [-4.9262e-02,  1.8072e-01,  3.3784e-02],\n",
       "          [-1.8307e-01, -1.3055e-01,  1.7173e-01]]],\n",
       "\n",
       "\n",
       "        [[[ 5.3951e-02,  1.7411e-04,  7.1757e-02],\n",
       "          [-1.0932e-01,  6.9550e-02,  1.4545e-01],\n",
       "          [ 1.4439e-01, -3.1089e-02,  1.1409e-01]],\n",
       "\n",
       "         [[-1.7693e-01,  3.5921e-02, -9.7103e-02],\n",
       "          [ 8.6660e-02,  1.8328e-01,  1.7036e-01],\n",
       "          [-1.4501e-01, -1.1895e-01,  1.1571e-01]],\n",
       "\n",
       "         [[ 1.1315e-01, -1.2834e-01, -1.7029e-03],\n",
       "          [ 1.0730e-01,  5.9267e-03, -1.4942e-01],\n",
       "          [-1.0651e-02, -1.6442e-01, -1.8818e-01]]]], requires_grad=True), Parameter containing:\n",
       "tensor([-0.1519,  0.1068, -0.1313], requires_grad=True), Parameter containing:\n",
       "tensor([[[[ 0.1602, -0.1702,  0.0637],\n",
       "          [-0.1827,  0.0524, -0.1452],\n",
       "          [ 0.0840,  0.0874,  0.0297]],\n",
       "\n",
       "         [[-0.0403,  0.0669,  0.0487],\n",
       "          [-0.1667, -0.0084,  0.0660],\n",
       "          [ 0.0346, -0.1161,  0.1835]],\n",
       "\n",
       "         [[-0.0559,  0.1041, -0.1057],\n",
       "          [ 0.0391, -0.0528, -0.1549],\n",
       "          [-0.1007,  0.0554, -0.0912]]],\n",
       "\n",
       "\n",
       "        [[[ 0.1308,  0.0439,  0.1861],\n",
       "          [ 0.1256,  0.0869,  0.1551],\n",
       "          [ 0.0837,  0.0960,  0.0207]],\n",
       "\n",
       "         [[-0.0995, -0.0821,  0.0808],\n",
       "          [-0.1217,  0.1306, -0.0174],\n",
       "          [ 0.1036,  0.0449, -0.0681]],\n",
       "\n",
       "         [[-0.0918,  0.0058, -0.0511],\n",
       "          [ 0.1899, -0.0921,  0.0687],\n",
       "          [ 0.1864, -0.0447, -0.1676]]],\n",
       "\n",
       "\n",
       "        [[[-0.0876, -0.1297,  0.1730],\n",
       "          [ 0.0617,  0.1452,  0.1470],\n",
       "          [ 0.1873,  0.1385,  0.0586]],\n",
       "\n",
       "         [[-0.1832, -0.0367,  0.1918],\n",
       "          [ 0.0366, -0.0680, -0.0874],\n",
       "          [-0.0920, -0.0193,  0.1198]],\n",
       "\n",
       "         [[ 0.1311, -0.1709,  0.0277],\n",
       "          [ 0.1521,  0.1184,  0.0458],\n",
       "          [-0.0961, -0.0070,  0.0162]]]], requires_grad=True), Parameter containing:\n",
       "tensor([-0.1452, -0.0170,  0.0869], requires_grad=True)]), ('lr', 0.1), ('betas', (0.9, 0.999)), ('eps', 1e-08), ('weight_decay', 0), ('amsgrad', False)])"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Each param group is a dict: the 'params' list plus per-group hyperparameters\n",
    "# (lr, betas, eps, weight_decay, amsgrad), as seen in the output below.\n",
    "optimizer_1.param_groups[0].items()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "e3bdf6cc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Most schedulers in torch.optim.lr_scheduler adjust the learning rate based on the epoch count.\n",
    "# Each scheduler defines its update policy in its get_lr method."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "536ac245",
   "metadata": {},
   "outputs": [],
   "source": [
    "## How to use warmup: the learning rate ramps up over the first few epochs,\n",
    "## then is decayed toward zero by the wrapped scheduler over the following epochs.\n",
    "import torch\n",
    "from torch.optim.lr_scheduler import StepLR, ExponentialLR\n",
    "from torch.optim import SGD  # public API path; torch.optim.sgd is a private module\n",
    "\n",
    "from warmup_scheduler import GradualWarmupScheduler\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    # A minimal \"model\": one learnable 2x2 tensor is enough to drive the optimizer.\n",
    "    model = [torch.nn.Parameter(torch.randn(2, 2, requires_grad=True))]\n",
    "    optim = SGD(model, 0.1)\n",
    "\n",
    "    # scheduler_warmup is chained with scheduler_steplr:\n",
    "    # warm up over 5 epochs, then hand control to StepLR.\n",
    "    scheduler_steplr = StepLR(optim, step_size=10, gamma=0.1)\n",
    "    scheduler_warmup = GradualWarmupScheduler(optim, multiplier=1, total_epoch=5, after_scheduler=scheduler_steplr)\n",
    "\n",
    "    # this zero gradient update is needed to avoid a warning message, issue #8.\n",
    "    optim.zero_grad()\n",
    "    optim.step()\n",
    "\n",
    "    for epoch in range(1, 20):\n",
    "        scheduler_warmup.step(epoch)\n",
    "        print(epoch, optim.param_groups[0]['lr'])\n",
    "\n",
    "        optim.step()    # optimizer update; in real training a backward pass precedes this"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
