{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": []
    }
   ],
   "source": [
    "import numpy as np\n",
    "import random\n",
    "\n",
    "# 1. Generate the dataset\n",
    "def synthetic_data(w, b, num_examples):\n",
    "    \"\"\"Generate y = Xw + b + Gaussian noise; returns (X, y) with y shaped (n, 1).\"\"\"\n",
    "    X = np.random.normal(0, 1, (num_examples, len(w)))  # feature matrix\n",
    "    y = np.dot(X, w) + b  # linear combination\n",
    "    y += np.random.normal(0, 0.01, y.shape)  # additive noise\n",
    "    return X, y.reshape(-1, 1)\n",
    "\n",
    "# Ground-truth weight and bias used to synthesize the data\n",
    "true_w = np.array([2.0])  # true weight\n",
    "true_b = 4                # true bias\n",
    "features, labels = synthetic_data(true_w, true_b, 1000)\n",
    "\n",
    "# 2. Read the dataset\n",
    "def data_iter(batch_size, features, labels):\n",
    "    \"\"\"Yield shuffled minibatches of (features, labels).\"\"\"\n",
    "    num_examples = len(features)\n",
    "    indices = list(range(num_examples))\n",
    "    random.shuffle(indices)  # visit examples in random order each epoch\n",
    "    for i in range(0, num_examples, batch_size):\n",
    "        batch_indices = indices[i: min(i + batch_size, num_examples)]\n",
    "        yield features[batch_indices], labels[batch_indices]\n",
    "\n",
    "# 3. Initialize model parameters\n",
    "# Blank 1: small random weights, zero bias. Both MUST be ndarrays so that\n",
    "# sgd() can update them in place via `param -= ...`.\n",
    "w = np.random.normal(0, 0.01, (len(true_w), 1))\n",
    "b = np.zeros(1)\n",
    "\n",
    "# 4. Define the linear regression model\n",
    "def linreg_model(X, w, b):\n",
    "    \"\"\"Linear regression model: X @ w + b.\"\"\"\n",
    "    return np.dot(X, w) + b\n",
    "\n",
    "# 5. Define the loss function\n",
    "def squared_loss(y_hat, y):\n",
    "    \"\"\"Elementwise squared loss: (y_hat - y)**2 / 2.\"\"\"\n",
    "    # Blank 2: reshape y to y_hat's shape so subtraction cannot broadcast\n",
    "    # (1, n) against (n, 1) by accident.\n",
    "    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2\n",
    "\n",
    "# 6. Define the optimization algorithm\n",
    "def sgd(params, grads, lr, batch_size):\n",
    "    \"\"\"Minibatch SGD. Expects `grads` to be batch SUMS; dividing by\n",
    "    batch_size here converts them to per-example averages.\"\"\"\n",
    "    for param, grad in zip(params, grads):\n",
    "        param -= lr * grad / batch_size\n",
    "\n",
    "# 7. Train the model\n",
    "lr = 0.03              # learning rate\n",
    "num_epochs = 50        # number of epochs\n",
    "batch_size = 10        # minibatch size\n",
    "\n",
    "# Training loop\n",
    "for epoch in range(num_epochs):\n",
    "    for X_batch, y_batch in data_iter(batch_size, features, labels):\n",
    "        # Blank 3: forward pass and per-example loss\n",
    "        y_hat = linreg_model(X_batch, w, b)\n",
    "        loss = squared_loss(y_hat, y_batch)\n",
    "\n",
    "        # Gradients SUMMED over the batch; sgd() averages by batch_size.\n",
    "        # (Dividing here as well would shrink the effective lr to lr/batch_size.)\n",
    "        grad_w = np.dot(X_batch.T, (y_hat - y_batch))\n",
    "        grad_b = np.sum(y_hat - y_batch)\n",
    "\n",
    "        # Update parameters in place\n",
    "        sgd([w, b], [grad_w, grad_b], lr, batch_size)\n",
    "\n",
    "    # Blank 4: mean loss over the full dataset with the current parameters\n",
    "    train_l = squared_loss(linreg_model(features, w, b), labels).mean()\n",
    "    print(f'epoch {epoch + 1}, loss {train_l:.6f}')\n",
    "\n",
    "# 8. Evaluate the fit against the ground truth\n",
    "print(f'w的估计误差: {true_w - w.flatten()}')\n",
    "print(f'b的估计误差: {true_b - b.item()}')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torch",
   "language": "python",
   "name": "torch"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
