{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ca7c20b6-917e-408a-95ff-210ba8da491a",
   "metadata": {},
   "outputs": [],
   "source": [
    "训练误差与泛化误差"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ec89e847-a72d-4d73-9df0-d2cfb7a60026",
   "metadata": {},
   "outputs": [],
   "source": [
    "需要区分训练误差（training error）和泛化误差（generalization\n",
    "error）。通俗来讲，前者指模型在训练数据集上表现出的误差，后者指模型在任意一个测试数据样本上\n",
    "表现出的误差的期望，并常常通过测试数据集上的误差来近似。\n",
    "一般情况下，由训练数据集学到的模型参数会使模型在训练数据集上的表\n",
    "现优于或等于在测试数据集上的表现。由于无法从训练误差估计泛化误差，一味地降低训练误差并不意\n",
    "味着泛化误差一定会降低。\n",
    "机器学习模型应该关注降低泛化误差"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "70b71a44-2130-4f51-9b8b-da1cac7f57ba",
   "metadata": {},
   "outputs": [],
   "source": [
    "模型选择\n",
    "可供选择的候选模型可以是有着不同超参数的同类模型。以多层感知机为例，我们可以选\n",
    "择隐藏层的个数，以及每个隐藏层中隐藏单元个数和激活函数。\n",
    "我们可以预留一部分在训练数据集和测试数据集以外的数据来进行模型选择。这部分数据被称为验证数据\n",
    "集，简称验证集（validation set）。\n",
    "我们可以从给定的训练集中随机选取一小部分作为验证集，而将剩余部分作为真正的训练集。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0459257d-caf2-42fc-9260-d41c53821f4a",
   "metadata": {},
   "outputs": [],
   "source": [
    "K折交叉验证\n",
    "由于验证数据集不参与模型训练，当训练数据不够用时，预留大量的验证数据显得太奢侈。一种改善的\n",
    "方法是K折交叉验证（K-fold cross-validation）。在K折交叉验证中，我们把原始训练数据集分割成\n",
    "K个不重合的子数据集，然后我们做K次模型训练和验证。每一次，我们使用一个子数据集验证模型，\n",
    "并使用其他K-1个子数据集来训练模型。在这K次训练和验证中，每次用来验证模型的子数据集都不\n",
    "同。最后，我们对这K次训练误差和验证误差分别求平均。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "58a2b070-6699-445f-9ccb-d2c08f6c3508",
   "metadata": {},
   "outputs": [],
   "source": [
    "欠拟合、过拟合\n",
    "一类是模型无法得到较低的训练误差，我们将这一现象称作欠拟合（underfitting）；\n",
    "另一类是模型的训练误差远小于它在测试数据集上的误差，我们称该现象为过拟合（overfitting）。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0f007d79-4953-47bf-98c9-83168f518b5b",
   "metadata": {},
   "outputs": [],
   "source": [
    "模型复杂度\n",
    "    因为高阶多项式函数模型参数更多，模型函数的选择空间更大，所以高阶多项式函数比低阶多项式函数\n",
    "的复杂度更高。因此，高阶多项式函数比低阶多项式函数更容易在相同的训练数据集上得到更低的训练误差。\n",
    "    一般来说，如果训练数据集中样本数过\n",
    "少，特别是比模型参数数量（按元素计）更少时，过拟合更容易发生。此外，泛化误差不会随训练数据\n",
    "集里样本数量增加而增大。因此，在计算资源允许的范围之内，我们通常希望训练数据集大一些，特别\n",
    "是在模型复杂度较高时，例如层数较多的深度学习模型。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "7062d9c5-1330-4b00-aec9-62533f27ebe1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "D:\\py_deep_learn\\deep_learning\n"
     ]
    }
   ],
   "source": [
    "%matplotlib inline\n",
    "import torch\n",
    "import numpy as np\n",
    "# 导入必要的库\n",
    "import os\n",
    "import sys\n",
    "# 获取当前路径的上一级目录\n",
    "parent_dir = os.path.abspath(os.path.join(os.getcwd(), \"..\"))\n",
    "print(parent_dir)\n",
    "# 将上一级目录添加到 sys.path\n",
    "sys.path.append(parent_dir)\n",
    "# 现在可以导入 d2lzh_pytorch 模块\n",
    "import d2lzh_pytorch as d2l\n",
    "# 其他必要的库"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "62afd6a4-b456-4b8a-96d6-9bcb0c8a124e",
   "metadata": {},
   "outputs": [],
   "source": [
    "n_train, n_test, true_w, true_b = 100, 100, [1.2, -3.4, 5.6], 5\n",
    "features = torch.randn((n_train + n_test, 1))\n",
    "poly_features = torch.cat((features, torch.pow(features, 2), torch.pow(features, 3)), 1) \n",
    "labels = (true_w[0] * poly_features[:, 0] + true_w[1] * poly_features[:, 1] + true_w[2] * poly_features[:, 2] + true_b)\n",
    "labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "3cd47d31-ddaf-4f08-bdf8-fbc86cdb8754",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[1.7224],\n",
       "         [0.7588]]),\n",
       " tensor([[1.7224, 2.9666, 5.1095],\n",
       "         [0.7588, 0.5758, 0.4369]]),\n",
       " tensor([25.6011,  6.4038]))"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "features[:2], poly_features[:2], labels[:2]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "163055ce-5829-4044-8f81-b17fe320a09b",
   "metadata": {},
   "outputs": [],
   "source": [
    "num_epochs, loss = 100, torch.nn.MSELoss()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "3ed902de-5656-4891-84f9-d23cff706149",
   "metadata": {},
   "outputs": [],
   "source": [
    "def semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None,legend=None, figsize=(3.5, 2.5)):\n",
    "    d2l.set_figsize(figsize)\n",
    "    d2l.plt.xlabel(x_label)\n",
    "    d2l.plt.ylabel(y_label)\n",
    "    d2l.plt.semilogy(x_vals, y_vals)\n",
    "    if x2_vals and y2_vals:\n",
    "        d2l.plt.semilogy(x2_vals, y2_vals, linestyle=':')\n",
    "        d2l.plt.legend(legend)\n",
    "\n",
    "def fit_and_plot(train_features, test_features, train_labels, test_labels):\n",
    "    net = torch.nn.Linear(train_features.shape[-1], 1)\n",
    "# 通过Linear⽂档可知，pytorch已经将参数初始化了，所以我们这⾥就不⼿动初始化了\n",
    "\n",
    "    batch_size = min(10, train_labels.shape[0]) \n",
    "    dataset = torch.utils.data.TensorDataset(train_features, train_labels)\n",
    "    train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)\n",
    "\n",
    "    optimizer = torch.optim.SGD(net.parameters(), lr=0.01)\n",
    "    train_ls, test_ls = [], []\n",
    "    for _ in range(num_epochs):\n",
    "        for X, y in train_iter:\n",
    "            l = loss(net(X), y.view(-1, 1))\n",
    "            optimizer.zero_grad()\n",
    "            l.backward()\n",
    "            optimizer.step()\n",
    "        train_labels = train_labels.view(-1, 1)\n",
    "        test_labels = test_labels.view(-1, 1)\n",
    "        train_ls.append(loss(net(train_features), train_labels).item())\n",
    "        test_ls.append(loss(net(test_features), test_labels).item())\n",
    "    print('final epoch: train loss', train_ls[-1], 'test loss', test_ls[-1])\n",
    "    semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',range(1, num_epochs + 1), test_ls, ['train', 'test'])\n",
    "    print('weight:', net.weight.data,'\\nbias:', net.bias.data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "db4fa71d-dee6-494e-87a0-a5ce88b213dd",
   "metadata": {},
   "outputs": [],
   "source": [
    "三阶多项式函数拟合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "b7d5dd33-02cc-4828-a27c-19d927a7cf0a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "final epoch: train loss 0.00011437509238021448 test loss 0.0001551488385302946\n",
      "weight: tensor([[ 1.1930, -3.3980,  5.6016]]) \n",
      "bias: tensor([4.9981])\n"
     ]
    }
   ],
   "source": [
    "fit_and_plot(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3ea251d6-e91b-40df-a38c-ae7bbb15a37c",
   "metadata": {},
   "outputs": [],
   "source": [
    "线性函数拟合（欠拟合）\n",
    "模型的训练误差在迭代早期下降后便很难继续下降，完成迭代周期后仍然有很高的误差。\n",
    "线性模型在非线性的模型（三阶多项式函数）生成的数据集中容易欠拟合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "da609180-fce3-49ca-a1fd-8914549e2a01",
   "metadata": {},
   "outputs": [],
   "source": [
    "过拟合\n",
    "训练样本不足时，模型相对数据而言显得过于复杂，容易受训练数据中噪声的影响：\n",
    "模型在训练集上的误差很低，但在测试集上的误差很高，这种现象属于过拟合。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:pytorch]",
   "language": "python",
   "name": "deep_learning"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
