{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "\n",
    "import torch\n",
    "\n",
    "import matplotlib.pyplot as plt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameters for the linear-regression training run.\n",
    "\n",
    "# Number of full passes over the dataset.\n",
    "EPOCH = 40\n",
    "\n",
    "# Mini-batch size used by data_iteration.\n",
    "BATCH_SIZE = 32\n",
    "\n",
    "# Number of input features (length of the weight vector).\n",
    "m = 3\n",
    "\n",
    "# Number of synthetic examples to generate.\n",
    "NUM = 1000\n",
    "\n",
    "# Learning rate for the manual gradient-descent update.\n",
    "LR = 0.03\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "'''\n",
    "\n",
    "实际上的w和b\n",
    "\n",
    "'''\n",
    "\n",
    "# w_acc.shape [m, 1]\n",
    "\n",
    "w_acc = torch.tensor([[-2.0], [2.5], [-1.9]])\n",
    "\n",
    "# b_acc.shape [1]\n",
    "\n",
    "b_acc = torch.tensor([8.1])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 创造数据\n",
    "\n",
    "def create_data(w, b, num_examples):\n",
    "\n",
    "  # w.shape [m, 1]\n",
    "\n",
    "  # b.shape [1]\n",
    "\n",
    "\n",
    "  # X.shape [num_examples, m]\n",
    "\n",
    "  X = torch.normal(0, 1, (num_examples, len(w)))\n",
    "\n",
    "\n",
    "  # y = X*w = [num_examples, 1]\n",
    "\n",
    "  y = torch.matmul(X, w) + b\n",
    "\n",
    "\n",
    "  # Y.shape [num_examples, 1]\n",
    "\n",
    "  Y = torch.normal(0, 0.01, y.shape)+y\n",
    "\n",
    "  return X, Y\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Build the noisy synthetic dataset from the ground-truth parameters.\n",
    "\n",
    "features, labels = create_data(w_acc, b_acc, NUM)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([1000, 1])"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: labels should have shape [NUM, 1].\n",
    "labels.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Learnable weights, shape [m, 1]; requires_grad enables autograd.\n",
    "\n",
    "w = torch.normal(0, 0.01, (m,1), requires_grad=True)\n",
    "\n",
    "# Learnable bias, shape [1]; requires_grad enables autograd.\n",
    "\n",
    "b = torch.zeros(1, requires_grad=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# batch数据截取\n",
    "\n",
    "def data_iteration(batch_size, features, labels):\n",
    "\n",
    "  # features.shape [num_examples, m]\n",
    "\n",
    "  # labels.shape [num_examples, 1]\n",
    "\n",
    "\n",
    "  num_examples = len(features)\n",
    "\n",
    "  indices_num = list(range(num_examples))\n",
    "\n",
    "  random.shuffle(indices_num)\n",
    "\n",
    "  for i in range(0, num_examples, batch_size):\n",
    "\n",
    "    data_indices = torch.tensor(\n",
    "\n",
    "      # 当数据不足切片时，取到num_examples\n",
    "      \n",
    "\n",
    "      indices_num[i:min(i+batch_size, num_examples)]\n",
    "\n",
    "    )\n",
    "\n",
    "    # features.shape [batch_size, m]\n",
    "\n",
    "    # labels.shape [batch_size, 1]\n",
    "\n",
    "    yield features[data_indices], labels[data_indices]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "def SGD(y_acc, y_pre):\n",
    "    \n",
    "  loss = ((y_acc - y_pre)**2/2).sum()\n",
    "\n",
    "  return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "def linear_predict(x, w, b):\n",
    "    \n",
    "  y = torch.matmul(x, w)+b\n",
    "\n",
    "  return y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch:0,test_loss=5765.787109375\n",
      "epoch:1,test_loss=819.8209838867188\n",
      "epoch:2,test_loss=116.76295471191406\n",
      "epoch:3,test_loss=16.695026397705078\n",
      "epoch:4,test_loss=2.4258999824523926\n",
      "epoch:5,test_loss=0.3864735960960388\n",
      "epoch:6,test_loss=0.09552615880966187\n",
      "epoch:7,test_loss=0.05404534935951233\n",
      "epoch:8,test_loss=0.04797201231122017\n",
      "epoch:9,test_loss=0.04704342037439346\n",
      "epoch:10,test_loss=0.046905435621738434\n",
      "epoch:11,test_loss=0.04687980189919472\n",
      "epoch:12,test_loss=0.04687759280204773\n",
      "epoch:13,test_loss=0.04688816890120506\n",
      "epoch:14,test_loss=0.04687681421637535\n",
      "epoch:15,test_loss=0.04688011109828949\n",
      "epoch:16,test_loss=0.04688210412859917\n",
      "epoch:17,test_loss=0.04688197001814842\n",
      "epoch:18,test_loss=0.0468779020011425\n",
      "epoch:19,test_loss=0.046880919486284256\n",
      "epoch:20,test_loss=0.046879302710294724\n",
      "epoch:21,test_loss=0.04687552526593208\n",
      "epoch:22,test_loss=0.0468769408762455\n",
      "epoch:23,test_loss=0.04688415676355362\n",
      "epoch:24,test_loss=0.04687477648258209\n",
      "epoch:25,test_loss=0.04688217490911484\n",
      "epoch:26,test_loss=0.04687783867120743\n",
      "epoch:27,test_loss=0.046881068497896194\n",
      "epoch:28,test_loss=0.04687562584877014\n",
      "epoch:29,test_loss=0.046876661479473114\n",
      "epoch:30,test_loss=0.04687703773379326\n",
      "epoch:31,test_loss=0.04687459021806717\n",
      "epoch:32,test_loss=0.04687558487057686\n",
      "epoch:33,test_loss=0.04687454551458359\n",
      "epoch:34,test_loss=0.046879447996616364\n",
      "epoch:35,test_loss=0.04687757045030594\n",
      "epoch:36,test_loss=0.046876538544893265\n",
      "epoch:37,test_loss=0.04688694328069687\n",
      "epoch:38,test_loss=0.046882010996341705\n",
      "epoch:39,test_loss=0.046885229647159576\n",
      "预测的w=tensor([[-1.9999],\n",
      "        [ 2.4998],\n",
      "        [-1.9000]], requires_grad=True),实际的w=tensor([[-2.0000],\n",
      "        [ 2.5000],\n",
      "        [-1.9000]])\n",
      "预测的b=tensor([8.1001], requires_grad=True),实际的b=tensor([8.1000])\n"
     ]
    }
   ],
   "source": [
    "# Training loop: manual mini-batch gradient descent on (w, b).\n",
    "for epoch in range(EPOCH):\n",
    "\n",
    "    # f.shape [batch_size, m]; l.shape [batch_size, 1]\n",
    "    for f, l in data_iteration(BATCH_SIZE, features, labels):\n",
    "\n",
    "      # Forward pass: y_pre.shape [batch_size, 1]\n",
    "      y_pre = linear_predict(f, w, b)\n",
    "\n",
    "      # Summed squared-error loss over this batch.\n",
    "      loss = SGD(l, y_pre)\n",
    "\n",
    "      # Backward pass: accumulates gradients into w.grad and b.grad.\n",
    "      loss.backward()\n",
    "\n",
    "      # Manual gradient-descent step (no optimizer object).\n",
    "      with torch.no_grad():\n",
    "\n",
    "        for param in [w, b]:\n",
    "\n",
    "          # Divide by the actual batch length: the final batch can be\n",
    "          # shorter than BATCH_SIZE (e.g. 1000 % 32 == 8), and dividing\n",
    "          # by the constant would shrink its effective step.\n",
    "          param -= LR * param.grad / len(f)\n",
    "\n",
    "          # Clear accumulated gradients before the next batch.\n",
    "          param.grad.zero_()\n",
    "\n",
    "    # Evaluate on the full dataset (loss is summed over all examples;\n",
    "    # the previous .mean() on this 0-dim tensor was a no-op and is removed).\n",
    "    with torch.no_grad():\n",
    "\n",
    "      test_loss = SGD(labels, linear_predict(features, w, b))\n",
    "\n",
    "      print(f'epoch:{epoch},test_loss={test_loss}')\n",
    "\n",
    "print(f'预测的w={w},实际的w={w_acc}\\n预测的b={b},实际的b={b_acc}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "8609a2ff3146babfae3f245e7d74cdf153d6410b24df464c2d1c458659fe584c"
  },
  "kernelspec": {
   "display_name": "Python 3.6.13 64-bit ('ai-sport': conda)",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": ""
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}