{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "871d6d7c-a048-4972-ad98-a889c60304f9",
   "metadata": {},
   "source": [
    "# 权重衰减\n",
    "当出现过拟合现象（训练集的误差远小于测试集的误差），且难以获取更多训练数据时，可以采用权重衰减来缓解过拟合。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "249b516b-31d0-4bcf-bab0-4a91db00d144",
   "metadata": {},
   "source": [
    "权重衰减等价于 $L_2$ 范数正则化（regularization）。正则化通过为模型损失函数添加惩罚项，使学出的\n",
    "模型参数值较小，是应对过拟合的常用手段。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9f2a9498-b82a-4d07-be51-d8bea3d505a9",
   "metadata": {},
   "source": [
    "具体做法是将权重向量的平方和乘以超参数 $\\lambda$ 后加入损失函数。超参数 $\\lambda$ 越大，惩罚项在损失函数中的比重越大，\n",
    "对权重绝对值较大的惩罚越强，相当于为需要学习的模型增加了限制，从而可能更好地处理过拟合现象。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "1048576f-d524-472d-bb95-d1a0f925b00f",
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import numpy as np\n",
    "import sys\n",
    "sys.path.append(\"..\") \n",
    "import d2lzh_pytorch as d2l\n",
    "# Synthetic high-dimensional linear regression: few training samples (20)\n",
    "# versus many features (200) deliberately invites overfitting.\n",
    "n_train, n_test, num_inputs = 20, 100, 200\n",
    "# Ground-truth model: y = 0.01 * sum(x_i) + 0.05\n",
    "true_w, true_b = torch.ones(num_inputs, 1) * 0.01, 0.05\n",
    "features = torch.randn((n_train + n_test, num_inputs))\n",
    "labels = torch.matmul(features, true_w) + true_b\n",
    "# Add Gaussian noise (mean 0, std 0.01) to the labels\n",
    "labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)\n",
    "# First n_train rows are the training split; the rest are the test split\n",
    "train_features, test_features = features[:n_train, :], features[n_train:, :]\n",
    "train_labels, test_labels = labels[:n_train], labels[n_train:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "366679a2-e5dd-476c-88be-3f24db48ff9b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize model parameters\n",
    "def init_params():\n",
    "    \"\"\"Return [w, b]: randomly initialized weight (num_inputs x 1) and zero bias,\n",
    "    both with requires_grad=True so autograd tracks them.\"\"\"\n",
    "    w = torch.randn((num_inputs, 1), requires_grad=True)\n",
    "    b = torch.zeros(1, requires_grad=True)\n",
    "    return [w, b]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "bb3a011f-22f0-4515-a915-51928b480b4b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define the L2-norm penalty term\n",
    "def l2_penalty(w):\n",
    "    \"\"\"L2 penalty: half the sum of squared weights, (1/2) * ||w||^2.\"\"\"\n",
    "    return (w**2).sum() / 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "445b15ab-4da8-4d56-bcb0-f4fa79c40765",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define training and evaluation (from-scratch implementation)\n",
    "batch_size, num_epochs, lr = 1, 100, 0.003\n",
    "net, loss = d2l.linreg, d2l.squared_loss\n",
    "dataset = torch.utils.data.TensorDataset(train_features, train_labels)\n",
    "train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)\n",
    "def fit_and_plot(lambd):\n",
    "    \"\"\"Train linear regression with an explicit L2 penalty weighted by lambd,\n",
    "    plot train/test loss per epoch, and print the final L2 norm of w.\n",
    "    lambd=0 disables weight decay.\"\"\"\n",
    "    w, b = init_params()\n",
    "    train_ls, test_ls = [], []\n",
    "    for _ in range(num_epochs):\n",
    "        for X, y in train_iter:\n",
    "            # Add the L2-norm penalty term to the loss\n",
    "            l = loss(net(X, w, b), y) + lambd * l2_penalty(w)\n",
    "            l = l.sum()\n",
    "            \n",
    "            # Zero accumulated gradients; grads are None before the first backward\n",
    "            if w.grad is not None:\n",
    "                w.grad.data.zero_()\n",
    "                b.grad.data.zero_()\n",
    "            l.backward()\n",
    "            d2l.sgd([w, b], lr, batch_size)\n",
    "        # Record full train/test losses once per epoch\n",
    "        train_ls.append(loss(net(train_features, w, b), train_labels).mean().item())\n",
    "        test_ls.append(loss(net(test_features, w, b), test_labels).mean().item())\n",
    "    d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',range(1, num_epochs + 1), test_ls, ['train', 'test'])\n",
    "    print('L2 norm of w:', w.norm().item())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "482758f3-08e5-4561-87ac-1ebb4ce7d579",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Observe overfitting: lambd=0 means no weight decay at all\n",
    "fit_and_plot(lambd=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "46521673-72db-49de-98ec-7c47fb52d9c3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Concise implementation using torch.optim's built-in weight_decay\n",
    "def fit_and_plot_pytorch(wd):\n",
    "    \"\"\"Train nn.Linear with weight decay `wd` applied to the weight only,\n",
    "    plot train/test loss per epoch, and print the final L2 norm of the weight.\"\"\"\n",
    "    # Decay only the weight parameter (weight tensors are typically named with the suffix 'weight')\n",
    "    net = nn.Linear(num_inputs, 1)\n",
    "    nn.init.normal_(net.weight, mean=0, std=1)\n",
    "    nn.init.normal_(net.bias, mean=0, std=1)\n",
    "    optimizer_w = torch.optim.SGD(params=[net.weight], lr=lr, weight_decay=wd) # weight is decayed\n",
    "    optimizer_b = torch.optim.SGD(params=[net.bias], lr=lr) # bias is NOT decayed\n",
    "    \n",
    "    train_ls, test_ls = [], []\n",
    "    for _ in range(num_epochs):\n",
    "        for X, y in train_iter:\n",
    "            l = loss(net(X), y).mean()\n",
    "            optimizer_w.zero_grad()\n",
    "            optimizer_b.zero_grad()\n",
    "            \n",
    "            l.backward()\n",
    "            \n",
    "            # Call step() on both optimizers to update weight and bias separately\n",
    "            optimizer_w.step()\n",
    "            optimizer_b.step()\n",
    "        train_ls.append(loss(net(train_features), train_labels).mean().item())\n",
    "        test_ls.append(loss(net(test_features), test_labels).mean().item())\n",
    "    d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss', range(1, num_epochs + 1), test_ls, ['train', 'test'])\n",
    "    print('L2 norm of w:', net.weight.data.norm().item())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5c95b43c-b949-4466-8855-762153bfe70a",
   "metadata": {},
   "source": [
    "为什么使用随机梯度下降算法？\n",
    "\n",
    "1. 计算效率高：每次只使用一个样本（或一个小批量）进行计算，显著降低了单次更新的计算成本\n",
    "2. 更新中的随机性有助于跳出局部最小值\n",
    "3. 通常具有更快的收敛速度，可以较快地找到较优解\n",
    "4. 可以用于在线学习与动态学习\n",
    "5. 内存需求更低"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:pytorch]",
   "language": "python",
   "name": "deep_learning"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
