{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "init_cell": true,
    "slideshow": {
     "slide_type": "notes"
    }
   },
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "import numpy as np\n",
    "import random\n",
    "import torch\n",
    "from torch.utils import data\n",
    "import matplotlib.pyplot as plt\n",
    "from IPython.core.interactiveshell import InteractiveShell\n",
    "InteractiveShell.ast_node_interactivity = \"all\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "<div class=\"jumbotron\">\n",
    "    <h1 class=\"display-1\">线性回归的简洁实现</h1>\n",
    "    <hr class=\"my-4\">\n",
    "    <p>主讲：李岩</p>\n",
    "    <p>管理学院</p>\n",
    "    <p>liyan@cumtb.edu.cn</p>\n",
    "</div>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "## 准备批量化数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "outputs": [],
   "source": [
     "def synthetic_data(w, b, num_examples):  \n",
     "    \"\"\"Generate y = Xw + b + noise.\n",
     "\n",
     "    w: true weight vector, shape (num_features,)\n",
     "    b: true scalar bias\n",
     "    num_examples: number of samples to draw\n",
     "    Returns (X, y): X of shape (num_examples, num_features),\n",
     "    y of shape (num_examples, 1).\n",
     "    \"\"\"\n",
     "    X = torch.normal(0, 1, (num_examples, len(w)))\n",
     "    y = torch.matmul(X, w) + b\n",
     "    y += torch.normal(0, 0.01, y.shape) # y is still 1-D here: torch.Size([num_examples])\n",
     "    return X, y.reshape(-1,1) # reshape to a column vector: torch.Size([num_examples, 1])\n",
     "    # Why reshape? The labels must match the (num_examples, 1) shape of the model output\n",
     "    # so the loss function compares tensors of identical shape."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-07-31T02:22:41.095796Z",
     "iopub.status.busy": "2022-07-31T02:22:41.095316Z",
     "iopub.status.idle": "2022-07-31T02:22:41.099772Z",
     "shell.execute_reply": "2022-07-31T02:22:41.099104Z"
    },
    "origin_pos": 4,
    "slideshow": {
     "slide_type": "fragment"
    },
    "tab": [
     "pytorch"
    ]
   },
   "outputs": [],
   "source": [
     "true_w = torch.tensor([2, -3.4])  # ground-truth weights\n",
     "true_b = 4.2                      # ground-truth bias\n",
     "features, labels = synthetic_data(true_w, true_b, 1000)   # generate the feature matrix and label vector"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "- 调用torch框架中现有的API来读取数据，并构造数据迭代器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-07-31T02:22:41.115436Z",
     "iopub.status.busy": "2022-07-31T02:22:41.114992Z",
     "iopub.status.idle": "2022-07-31T02:22:41.123304Z",
     "shell.execute_reply": "2022-07-31T02:22:41.122663Z"
    },
    "origin_pos": 11,
    "slideshow": {
     "slide_type": "fragment"
    },
    "tab": [
     "pytorch"
    ]
   },
   "outputs": [],
   "source": [
     "def load_array(data_arrays, batch_size, is_train=True):  \n",
     "    \"\"\"Construct a PyTorch data iterator.\n",
     "\n",
     "    data_arrays: tuple of tensors, e.g. (features, labels), whose first dimensions match\n",
     "    batch_size: number of samples per mini-batch\n",
     "    is_train: whether to reshuffle the data at every epoch\n",
     "    \"\"\"\n",
     "    dataset = data.TensorDataset(*data_arrays) # data_arrays: (features, labels), the full sample set\n",
     "    return data.DataLoader(dataset, batch_size, shuffle=is_train) # torch.utils.data.dataloader.DataLoader"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- ```python\n",
    "data.TensorDataset(*tensors)\n",
    "```\n",
    "    - `tensors`：一系列张量，这些张量的第一维度拥有相同的大小\n",
    "    - 类似于`Python`的`zip`函数"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- ```python\n",
    "from torch.utils import data\n",
    "data.DataLoader(dataset, batch_size=1, shuffle=False)\n",
    "```\n",
    "\n",
    "    - `dataset`：需要加载的数据集\n",
    "    - `batch_size`：一次加载的样本数量（批量），是`int`类型\n",
     "    - `shuffle`：每代（epoch）训练是否随机打乱样本顺序，是`bool`类型，降低对数据的过拟合\n",
    "    - 返回一个可迭代对象`iterable`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([1, 2, 4]), tensor(10))"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "a = torch.tensor([[1,2,4],[4,5,6],[7,8,9]])\n",
     "b = torch.tensor([10,11,12])\n",
     "c = data.TensorDataset(a,b)  # pairs rows of a with elements of b along the first dimension, like zip\n",
     "c[0]  # the first (feature_row, label) pair"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 100,
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "outputs": [],
   "source": [
     "batch_size = 10\n",
     "data_iter = load_array((features, labels), batch_size)  # (features, labels) forms a tuple passed as the first argument of load_array"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    ">【注意】data_iter并不是一个真的iterator, 要想当iterator来用，方法是："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[tensor([[ 1.2509, -0.6020],\n",
       "         [ 1.0021,  0.4213],\n",
       "         [-0.1978, -0.1003],\n",
       "         [-0.0545, -0.6706],\n",
       "         [-1.4072,  0.7688],\n",
       "         [ 1.6437,  0.6058],\n",
       "         [-1.8432, -1.5889],\n",
       "         [ 0.0451,  0.8303],\n",
       "         [-0.2651, -0.1089],\n",
       "         [ 0.2316, -1.2513]]),\n",
       " tensor([[ 8.7474],\n",
       "         [ 4.7749],\n",
       "         [ 4.1285],\n",
       "         [ 6.3988],\n",
       "         [-1.2307],\n",
       "         [ 5.4103],\n",
       "         [ 5.9093],\n",
       "         [ 1.4725],\n",
       "         [ 4.0492],\n",
       "         [ 8.9318]])]"
      ]
     },
     "execution_count": 101,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "my_iter = iter(data_iter)  # Python's built-in iter() turns the iterable DataLoader into a true iterator\n",
     "next(my_iter)  # fetch one mini-batch: [features_batch, labels_batch]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "## 构建拟合数据的模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "### 容器（containers）"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 容器：用`pytorch`构建网络图的基本组件"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 容器的子类：\n",
    "    - `Module`：所有神经网络的基类\n",
    "    - `Sequential`：顺序构成的容器组合\n",
    "    - 。。。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "- `Module`：通过继承该类建立自己的模型\n",
    "- `Module`可以包含其他`Module`，即允许嵌套"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "outputs": [],
   "source": [
     "# A minimal linear model built by subclassing nn.Module\n",
     "import torch.nn as nn\n",
     "\n",
     "\n",
     "class Linear_Net(nn.Module):\n",
     "    def __init__(self, n_features, n_out):\n",
     "        # n_features: number of input features; n_out: number of predicted outputs\n",
     "        super().__init__()\n",
     "        # Build the network structure: a single fully connected layer\n",
     "        self.predict = nn.Linear(n_features, n_out)\n",
     "\n",
     "    def forward(self, x):\n",
     "        # Forward pass; x is a batch of input samples\n",
     "        x = self.predict(x)\n",
     "        return x"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "subslide"
    }
   },
   "source": [
    "- ```python\n",
    "torch.nn.Linear(in_features,out_features,bias=True)\n",
    "```\n",
    "    - `in_features`：输入样本的维度，`int`类型\n",
    "    - `out_features`：输出样本的维度，`int`类型\n",
    "    - `bias`：是否学习偏置参数，`bool`类型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 对输入数据$\\mathbf{X}$进行线性变换\n",
    "\n",
    "$$\n",
     "\\boldsymbol{y} = \\mathbf{X}\\boldsymbol{w}^\\top+b\n",
    "$$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- $\\mathbf{X}$的形状为 (*, in_features)\n",
     "- $\\boldsymbol{y}$的形状为(*, out_features)\n",
    "- *代表数据包含的样本数量"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 权重$\\boldsymbol{w}$的形状为(out_features,in_features)，权重初始值从均匀分布$U(-\\sqrt{k},\\sqrt{k})$中随机产生，其中$k=\\tfrac{1}{\\text {in_features}}$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 偏置$b$的形状为(out_features)，偏置的初始值也从均匀分布$U(-\\sqrt{k},\\sqrt{k})$中随机产生，其中$k=\\tfrac{1}{\\text{in_features}}$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "- `Sequential`将多个`Module`**串联**在一起\n",
    "    - 包含的`Module`的顺序与构建该`Sequential`时候输入的`Module`顺序一致"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "<center><img src=\"../img/3_linear_network/sequential.png\" width=80%></center>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- `Sequential`的`forward()`方法将接收的输入张量传递给第一个`Module`，\n",
    "- 然后将其计算结果作为第二个`Module`的输入，\n",
    "- 依次向后续`Module`传递"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "### 使用框架预定义的层构建模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 定义模型变量`net`，是`Sequential`类的一个实例"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 在`PyTorch`中，通过`Linear`类定义全连接层\n",
    "- 向`Linear`传递两个参数\n",
    "    - 输入特征形状，即2\n",
    "    - 输出特征形状，输出特征形状为单个标量，因此为1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-07-31T02:22:41.126448Z",
     "iopub.status.busy": "2022-07-31T02:22:41.125870Z",
     "iopub.status.idle": "2022-07-31T02:22:41.129793Z",
     "shell.execute_reply": "2022-07-31T02:22:41.129180Z"
    },
    "origin_pos": 17,
    "slideshow": {
     "slide_type": "slide"
    },
    "tab": [
     "pytorch"
    ]
   },
   "outputs": [],
   "source": [
     "from torch import nn    ## nn stands for neural network\n",
     "\n",
     "net = nn.Sequential(nn.Linear(2, 1))  # one fully connected layer: 2 input features -> 1 output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Sequential(\n",
       "  (0): Linear(in_features=2, out_features=1, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 67,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "net"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "### 初始化模型参数"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 深度学习框架通常用预定义的方法初始化参数"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 初始化参数的方法\n",
     "    - 用`net[0]`选择网络的第一层，如果是多层网络，用序号选择相应的层\n",
    "    - `weight.data`访问权重参数，`bias.data`访问偏置参数\n",
    "    - `normal_()`和`fill_()`重新填写参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-07-31T02:22:41.132837Z",
     "iopub.status.busy": "2022-07-31T02:22:41.132403Z",
     "iopub.status.idle": "2022-07-31T02:22:41.137625Z",
     "shell.execute_reply": "2022-07-31T02:22:41.137014Z"
    },
    "origin_pos": 24,
    "slideshow": {
     "slide_type": "fragment"
    },
    "tab": [
     "pytorch"
    ]
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "net网络的权重初始化从均值为0、标准差为0.01的正态分布随机采样，      \n",
      "tensor([[-0.0032, -0.0032]])\n",
      "net网络的偏置初始化为0，\n",
      "tensor([0.])\n"
     ]
    }
   ],
   "source": [
     "# normal_(0, 0.01) resamples the weights in place; fill_(0) zeroes the bias in place\n",
     "print(f'net网络的权重初始化从均值为0、标准差为0.01的正态分布随机采样，\\\n",
     "      \\n{net[0].weight.data.normal_(0, 0.01)}')\n",
     "print(f'net网络的偏置初始化为0，\\n{net[0].bias.data.fill_(0)}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "- 查看网络的参数\n",
    "\n",
    "- ```python\n",
    "net.parameters()\n",
    "```\n",
    "\n",
    "    - `net`：建立的网络模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"网络参数的类型为<class 'generator'>\""
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "text/plain": [
       "Parameter containing:\n",
       "tensor([[-0.0208,  0.0055]], requires_grad=True)"
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "text/plain": [
       "Parameter containing:\n",
       "tensor([0.], requires_grad=True)"
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "text/plain": [
       "'end'"
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "f'网络参数的类型为{type(net.parameters())}'\n",
     "paras = net.parameters()  # a generator over the model's parameters\n",
     "next(paras,'end')  # next() takes a default returned when the iterator is exhausted, instead of raising StopIteration\n",
     "next(paras,'end')\n",
     "next(paras,'end')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "## 定义损失函数"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 计算均方误差使用的是`torch.nn`的`MSELoss`类，也称为平方$L_2$范数\n",
    "    - 返回所有样本损失的平均值"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-07-31T02:22:41.140732Z",
     "iopub.status.busy": "2022-07-31T02:22:41.140224Z",
     "iopub.status.idle": "2022-07-31T02:22:41.143455Z",
     "shell.execute_reply": "2022-07-31T02:22:41.142815Z"
    },
    "origin_pos": 34,
    "slideshow": {
     "slide_type": "fragment"
    },
    "tab": [
     "pytorch"
    ]
   },
   "outputs": [],
   "source": [
    "loss = nn.MSELoss() # MeanSquaredError"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "## 定义优化算法"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
     "- 小批量随机梯度下降算法（stochastic gradient descent, SGD）是一种优化神经网络的标准工具\n",
    "- `PyTorch`在`optim`模块中实现了该算法的许多变种"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 使用优化算法的方法：\n",
    "\n",
    "\n",
    "1. 需要实例化一个`SGD`对象\n",
    "1. 指定参数\n",
    "    - 需要优化的参数，可以通过`net.parameters()`获得\n",
    "    - 算法的超参，例如学习速率`lr`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 104,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-07-31T02:22:41.146539Z",
     "iopub.status.busy": "2022-07-31T02:22:41.145974Z",
     "iopub.status.idle": "2022-07-31T02:22:41.149489Z",
     "shell.execute_reply": "2022-07-31T02:22:41.148897Z"
    },
    "origin_pos": 41,
    "slideshow": {
     "slide_type": "fragment"
    },
    "tab": [
     "pytorch"
    ]
   },
   "outputs": [],
   "source": [
    "trainer = torch.optim.SGD(net.parameters(), lr=0.03)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "3. 进行单次优化\n",
    "    - 一旦梯度经过backward()计算后，调用`step()`函数进行优化\n",
    "```python\n",
    "trainer.step()\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "## 正式开始训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 每个迭代周期将完整遍历一次数据集（`train_data`），不停地从中获取一个小批量的输入和相应的标签"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "fragment"
    }
   },
   "source": [
    "- 对于每一个小批量，我们会进行以下步骤:\n",
    "    * 通过调用`net(X)`生成预测并计算损失$l$（前向传播）\n",
    "    * 通过进行反向传播来计算梯度\n",
    "    * 通过调用优化器来更新模型参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-07-31T02:22:41.152472Z",
     "iopub.status.busy": "2022-07-31T02:22:41.151984Z",
     "iopub.status.idle": "2022-07-31T02:22:41.303819Z",
     "shell.execute_reply": "2022-07-31T02:22:41.303070Z"
    },
    "origin_pos": 45,
    "slideshow": {
     "slide_type": "slide"
    },
    "tab": [
     "pytorch"
    ]
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch 1, loss 0.000247\n",
      "epoch 2, loss 0.000104\n",
      "epoch 3, loss 0.000104\n",
      "epoch 4, loss 0.000104\n",
      "epoch 5, loss 0.000105\n",
      "epoch 6, loss 0.000104\n",
      "epoch 7, loss 0.000104\n",
      "epoch 8, loss 0.000105\n",
      "epoch 9, loss 0.000105\n",
      "epoch 10, loss 0.000104\n"
     ]
    }
   ],
   "source": [
     "num_epochs = 10    # number of training epochs\n",
     "for epoch in range(num_epochs):\n",
     "    for X, y in data_iter:  # iterate over all mini-batches\n",
     "        trainer.zero_grad() # reset gradients to zero\n",
     "        l = loss(net(X) ,y) # forward pass: loss takes predictions and targets\n",
     "        l.backward()        # backward pass: compute gradients\n",
     "        trainer.step()      # update the parameters with SGD\n",
     "\n",
     "    l = loss(net(features), labels) # per-epoch loss, computed on the full dataset\n",
     "    print(f'epoch {epoch + 1}, loss {l:f}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   },
   "source": [
    "- 比较生成数据集的真实参数和通过有限数据训练获得的模型参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-07-31T02:22:41.307429Z",
     "iopub.status.busy": "2022-07-31T02:22:41.306897Z",
     "iopub.status.idle": "2022-07-31T02:22:41.312606Z",
     "shell.execute_reply": "2022-07-31T02:22:41.311914Z"
    },
    "origin_pos": 49,
    "slideshow": {
     "slide_type": "fragment"
    },
    "tab": [
     "pytorch"
    ]
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "w的估计误差：, tensor([0.0003, 0.0004])\n",
      "b的估计误差：, tensor([4.9114e-05])\n"
     ]
    }
   ],
   "source": [
     "w = net[0].weight.data  # learned weights, shape (1, 2) for Linear(2, 1)\n",
     "print(f'w的估计误差：, {true_w - w.reshape(true_w.shape)}')\n",
     "b = net[0].bias.data  # learned bias\n",
     "print(f'b的估计误差：, {true_b - b}')"
   ]
  }
 ],
 "metadata": {
  "celltoolbar": "幻灯片",
  "hide_input": false,
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  },
  "latex_envs": {
   "LaTeX_envs_menu_present": true,
   "autoclose": true,
   "autocomplete": true,
   "bibliofile": "biblio.bib",
   "cite_by": "apalike",
   "current_citInitial": 1,
   "eqLabelWithNumbers": true,
   "eqNumInitial": 1,
   "hotkeys": {
    "equation": "Ctrl-E",
    "itemize": "Ctrl-I"
   },
   "labels_anchors": false,
   "latex_user_defs": false,
   "report_style_numbering": false,
   "user_envs_cfg": false
  },
  "rise": {
   "autolaunch": false,
   "enable_chalkboard": true,
   "scroll": true
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "vscode": {
   "interpreter": {
    "hash": "34418ffb6f02e522390adb0e13441cc75f901cd11cccb4f6f613643b4b4d2a0b"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
