{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "2d6f3444-e70a-4b91-a091-c6f861b0e662",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\ProgramData\\Anaconda3\\envs\\torch_gpu\\lib\\site-packages\\tqdm\\auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "3c591ece-a93e-4eee-b23d-50202bfac53a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 649.4127807617188\n",
      "20 148.45880126953125\n",
      "40 164.07215881347656\n"
     ]
    }
   ],
   "source": [
    "import torch.nn as nn\n",
    "import torch\n",
    "\n",
    "# Problem sizes: batch N, input dim D_in, hidden dim H, output dim D_out\n",
    "N, D_in, H, D_out = 64, 1000, 100, 10\n",
    "# Create random input and output data\n",
    "x = torch.randn(N, D_in)   # x = np.random.randn(N, D_in)\n",
    "y = torch.randn(N, D_out)\n",
    "\n",
    "class TwoLayerNet(torch.nn.Module):\n",
    "    \"\"\"Two-layer fully connected net; clamp(min=0) acts as ReLU.\"\"\"\n",
    "    def __init__(self, D_in, H, D_out):\n",
    "        super(TwoLayerNet, self).__init__()\n",
    "        self.linear1 = torch.nn.Linear(D_in, H, bias=False)\n",
    "        self.linear2 = torch.nn.Linear(H, D_out, bias=False)\n",
    "\n",
    "    def forward(self, x):\n",
    "        y_pred = self.linear2(self.linear1(x).clamp(min=0))\n",
    "        return y_pred\n",
    "\n",
    "model = TwoLayerNet(D_in, H, D_out)\n",
    "loss_fn = nn.MSELoss(reduction=\"sum\")\n",
    "learning_rate = 1e-4\n",
    "optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n",
    "\n",
    "for t in range(50):\n",
    "    # Forward pass: compute predicted y (this calls model.forward(x))\n",
    "    y_pred = model(x)\n",
    "    # Compute and print loss; loss is a scalar stored in a PyTorch Tensor\n",
    "    # of shape (); we can get its value as a Python number with loss.item().\n",
    "    loss = loss_fn(y_pred, y)\n",
    "    if t % 20 == 0:\n",
    "        print(t, loss.item())\n",
    "    # BUG FIX: zero_grad must be *called*. The original bare attribute access\n",
    "    # `optimizer.zero_grad` was a no-op, so gradients accumulated each step.\n",
    "    optimizer.zero_grad()\n",
    "    # Backprop to compute gradients of the weights with respect to loss\n",
    "    loss.backward()\n",
    "    optimizer.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "c449a802-e8ea-45f6-aa9a-2736fc6f5d77",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.1\n"
     ]
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "b82164a2-46ce-438a-b510-f0e88978c808",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "初始的学习率：0.1\n",
      "第 1 epoch的lr:0.1\n",
      "第 2 epoch的lr:0.1\n",
      "第 3 epoch的lr:0.1\n",
      "第 4 epoch的lr:0.010000000000000002\n",
      "第 5 epoch的lr:0.010000000000000002\n",
      "第 6 epoch的lr:0.010000000000000002\n",
      "第 7 epoch的lr:0.0010000000000000002\n",
      "第 8 epoch的lr:0.0010000000000000002\n",
      "第 9 epoch的lr:0.0010000000000000002\n",
      "第 10 epoch的lr:0.00010000000000000003\n"
     ]
    }
   ],
   "source": [
    "# StepLR: decay the lr of each param group by `gamma` every `step_size` epochs.\n",
    "optimizer_1 = torch.optim.Adam(model.parameters(), lr = 0.1)\n",
    "scheduler_1 = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=3, gamma=0.1)\n",
    "print(f\"初始的学习率：{optimizer_1.defaults['lr']}\")\n",
    "for epoch in range(1,11):\n",
    "    optimizer_1.zero_grad()\n",
    "    optimizer_1.step()\n",
    "    # param_groups holds the lr actually in effect this epoch\n",
    "    print(f\"第 {epoch} epoch的lr:{optimizer_1.param_groups[0]['lr']}\")\n",
    "    scheduler_1.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eba36118-6f96-41b5-a407-17e0b8e3d845",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "31ef8e76-95c9-489d-975b-77ebd3efd2bb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "初始的学习率：0.1\n",
      "第 1 epoch的lr:0.1\n",
      "第 2 epoch的lr:0.1\n",
      "第 3 epoch的lr:0.1\n",
      "第 4 epoch的lr:0.010000000000000002\n",
      "第 5 epoch的lr:0.010000000000000002\n",
      "第 6 epoch的lr:0.010000000000000002\n",
      "第 7 epoch的lr:0.010000000000000002\n",
      "第 8 epoch的lr:0.010000000000000002\n",
      "第 9 epoch的lr:0.0010000000000000002\n",
      "第 10 epoch的lr:0.0010000000000000002\n"
     ]
    }
   ],
   "source": [
    "# MultiStepLR: decay the lr by `gamma` once the epoch count reaches each milestone.\n",
    "optimizer_1 = torch.optim.Adam(model.parameters(), lr = 0.1)\n",
    "scheduler_1 = torch.optim.lr_scheduler.MultiStepLR(optimizer_1, milestones=[3,8], gamma=0.1)\n",
    "print(f\"初始的学习率：{optimizer_1.defaults['lr']}\")\n",
    "for epoch in range(1,11):\n",
    "    optimizer_1.zero_grad()\n",
    "    optimizer_1.step()\n",
    "    # param_groups holds the lr actually in effect this epoch\n",
    "    print(f\"第 {epoch} epoch的lr:{optimizer_1.param_groups[0]['lr']}\")\n",
    "    scheduler_1.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "60002470-543a-45c6-9e2f-856461bb51af",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "ded23001-10f1-4a01-9c01-85ea003ee60a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "初始的学习率：0.1\n",
      "第 1 epoch的lr:0.1\n",
      "第 2 epoch的lr:0.010000000000000002\n",
      "第 3 epoch的lr:0.0010000000000000002\n",
      "第 4 epoch的lr:0.00010000000000000003\n",
      "第 5 epoch的lr:1.0000000000000004e-05\n",
      "第 6 epoch的lr:1.0000000000000004e-06\n",
      "第 7 epoch的lr:1.0000000000000005e-07\n",
      "第 8 epoch的lr:1.0000000000000005e-08\n",
      "第 9 epoch的lr:1.0000000000000005e-09\n",
      "第 10 epoch的lr:1.0000000000000006e-10\n"
     ]
    }
   ],
   "source": [
    "# ExponentialLR: multiply the lr of each param group by `gamma` every epoch.\n",
    "optimizer_1 = torch.optim.Adam(model.parameters(), lr = 0.1)\n",
    "scheduler_1 = torch.optim.lr_scheduler.ExponentialLR(optimizer_1, gamma=0.1)\n",
    "print(f\"初始的学习率：{optimizer_1.defaults['lr']}\")\n",
    "for epoch in range(1,11):\n",
    "    optimizer_1.zero_grad()\n",
    "    optimizer_1.step()\n",
    "    # param_groups holds the lr actually in effect this epoch\n",
    "    print(f\"第 {epoch} epoch的lr:{optimizer_1.param_groups[0]['lr']}\")\n",
    "    scheduler_1.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fc739ee1-383c-4ccf-afe9-0573822410cc",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "d9225b13-6e4f-48ce-8fd4-c588ae0418d4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "初始的学习率：0.1\n",
      "第 1 epoch的lr:0.1\n",
      "第 2 epoch的lr:0.08535533905932738\n",
      "第 3 epoch的lr:0.05\n",
      "第 4 epoch的lr:0.014644660940672627\n",
      "第 5 epoch的lr:0.0\n",
      "第 6 epoch的lr:0.014644660940672622\n",
      "第 7 epoch的lr:0.05000000000000001\n",
      "第 8 epoch的lr:0.0853553390593274\n",
      "第 9 epoch的lr:0.10000000000000003\n",
      "第 10 epoch的lr:0.0853553390593274\n",
      "第 11 epoch的lr:0.05000000000000003\n",
      "第 12 epoch的lr:0.014644660940672672\n",
      "第 13 epoch的lr:0.0\n",
      "第 14 epoch的lr:0.014644660940672622\n",
      "第 15 epoch的lr:0.0499999999999999\n"
     ]
    }
   ],
   "source": [
    "# CosineAnnealingLR: anneal the lr following a cosine curve with half-period\n",
    "# T_max epochs, between the base lr and eta_min (default 0); after T_max the\n",
    "# lr rises back up again, giving the oscillation visible in the output.\n",
    "optimizer_1 = torch.optim.Adam(model.parameters(), lr = 0.1)\n",
    "scheduler_1 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_1, T_max=4)\n",
    "print(f\"初始的学习率：{optimizer_1.defaults['lr']}\")\n",
    "for epoch in range(1,16):\n",
    "    optimizer_1.zero_grad()\n",
    "    optimizer_1.step()\n",
    "    print(f\"第 {epoch} epoch的lr:{optimizer_1.param_groups[0]['lr']}\")\n",
    "    scheduler_1.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bb1a7984-0542-4fcf-9690-43d0259a5fa4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "id": "ec561fee-ba7b-49e4-b194-4954da16d03f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "初始的学习率：0.1\n",
      "第 1 epoch的lr:0.1\n",
      "第 2 epoch的lr:0.1\n",
      "第 3 epoch的lr:0.1\n",
      "第 4 epoch的lr:0.1\n",
      "第 5 epoch的lr:0.1\n",
      "第 6 epoch的lr:0.010000000000000002\n",
      "第 7 epoch的lr:0.010000000000000002\n",
      "第 8 epoch的lr:0.010000000000000002\n",
      "第 9 epoch的lr:0.010000000000000002\n",
      "第 10 epoch的lr:0.0010000000000000002\n",
      "第 11 epoch的lr:0.0010000000000000002\n",
      "第 12 epoch的lr:0.0010000000000000002\n",
      "第 13 epoch的lr:0.0010000000000000002\n",
      "第 14 epoch的lr:0.00010000000000000003\n",
      "第 15 epoch的lr:0.00010000000000000003\n"
     ]
    }
   ],
   "source": [
    "# ReduceLROnPlateau: cut the lr by `factor` after `patience` epochs without\n",
    "# improvement in the monitored metric. A constant metric (2) is passed below,\n",
    "# so it never improves and the lr decays once every patience+1 epochs.\n",
    "optimizer_1 = torch.optim.Adam(model.parameters(), lr = 0.1)\n",
    "scheduler_1 = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_1, mode='min', factor=0.1, patience=3)\n",
    "print(f\"初始的学习率：{optimizer_1.defaults['lr']}\")\n",
    "for epoch in range(1,16):\n",
    "    optimizer_1.zero_grad()\n",
    "    optimizer_1.step()\n",
    "    print(f\"第 {epoch} epoch的lr:{optimizer_1.param_groups[0]['lr']}\")\n",
    "    scheduler_1.step(2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "id": "623abfe7-66b3-40fe-98b5-9d9b534bb672",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<filter at 0x2d71c886250>"
      ]
     },
     "execution_count": 58,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# BUG FIX: define ignored_params here so this cell runs on a fresh kernel;\n",
    "# it previously relied on a *later* cell having been executed first\n",
    "# (out-of-order execution counts 58 vs 62).\n",
    "ignored_params = list(map(id, model.linear2.parameters()))\n",
    "base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())\n",
    "base_params"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "id": "e2750ddd-dab3-450d-9bd2-673d0cb4b1e4",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<generator object Module.parameters at 0x000002D7257BBF90>"
      ]
     },
     "execution_count": 55,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# parameters() returns a lazy generator; nothing is materialized here\n",
    "model.linear2.parameters()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "id": "a8ef3821-45e4-48a5-bb36-9b49ddb9f632",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch:  0 lr:  [0.0, 0.1]\n",
      "epoch:  1 lr:  [0.0, 0.095]\n",
      "epoch:  2 lr:  [0.0, 0.09025]\n",
      "epoch:  3 lr:  [0.001, 0.0857375]\n",
      "epoch:  4 lr:  [0.001, 0.081450625]\n",
      "epoch:  5 lr:  [0.001, 0.07737809374999999]\n",
      "epoch:  6 lr:  [0.002, 0.07350918906249998]\n",
      "epoch:  7 lr:  [0.002, 0.06983372960937498]\n",
      "epoch:  8 lr:  [0.002, 0.06634204312890622]\n",
      "epoch:  9 lr:  [0.003, 0.0630249409724609]\n",
      "epoch:  10 lr:  [0.003, 0.05987369392383787]\n",
      "epoch:  11 lr:  [0.003, 0.05688000922764597]\n",
      "epoch:  12 lr:  [0.004, 0.05403600876626367]\n",
      "epoch:  13 lr:  [0.004, 0.051334208327950485]\n",
      "epoch:  14 lr:  [0.004, 0.04876749791155296]\n",
      "epoch:  15 lr:  [0.005, 0.046329123015975304]\n",
      "epoch:  16 lr:  [0.005, 0.04401266686517654]\n",
      "epoch:  17 lr:  [0.005, 0.04181203352191771]\n",
      "epoch:  18 lr:  [0.006, 0.039721431845821824]\n",
      "epoch:  19 lr:  [0.006, 0.03773536025353073]\n"
     ]
    }
   ],
   "source": [
    "# LambdaLR: per-param-group lr = base_lr * lr_lambda(epoch).\n",
    "# Split parameters into two groups so each gets its own schedule.\n",
    "ignored_params = list(map(id, model.linear2.parameters()))\n",
    "base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())\n",
    "\n",
    "optimizer = torch.optim.SGD([\n",
    "    {'params': base_params},\n",
    "    {'params': model.linear2.parameters(), 'lr': 0.1}],\n",
    "    0.001, momentum=0.9, weight_decay=1e-4)\n",
    "\n",
    "# One multiplicative lambda per param group, in group order.\n",
    "lambda1 = lambda epoch: epoch // 3\n",
    "lambda2 = lambda epoch: 0.95 ** epoch\n",
    "\n",
    "scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda1, lambda2])\n",
    "for epoch in range(20):\n",
    "    # Step the optimizer before the scheduler (required order since PyTorch 1.1;\n",
    "    # with no gradients present this is a no-op but avoids the UserWarning).\n",
    "    optimizer.step()\n",
    "    # FIX: get_last_lr() is the public API; get_lr() is internal to step()\n",
    "    # and emits a warning (and can return wrong values) when called directly.\n",
    "    print('epoch: ', epoch, 'lr: ', scheduler.get_last_lr())\n",
    "    scheduler.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2c004953-1fe8-4ae8-88ab-670661cbc7f3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1f4ad9a6-ee82-430a-890b-e2dcfc5c4cf4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "714b6f02-5f2a-4755-bdcf-3d16da10788a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "663405ec-93a5-45d0-adb7-1f5ab422a771",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c25c9ede-6945-4e01-b876-0329e9f0f67d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f5ff8695-2b2b-44fe-a0db-4f6cce99d925",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
