{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "9074a269",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn.functional as F\n",
    "import torch.nn as nn\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "f9ed7f18",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.7805, -1.3118,  0.4155,  0.6380],\n",
       "        [ 0.8244, -1.0857, -0.6530, -0.4182],\n",
       "        [ 1.8305, -0.3150,  1.8406, -0.4592],\n",
       "        [ 0.3168,  1.8575, -0.2420,  0.8173],\n",
       "        [-1.0945, -0.2184, -1.2477, -0.2104]])"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "w=torch.randn(size=(5,4))\n",
    "w"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "12554bb2",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.1108, 0.0651, 0.3664, 0.4577],\n",
       "        [0.6006, 0.0889, 0.1371, 0.1734],\n",
       "        [0.4488, 0.0525, 0.4533, 0.0455],\n",
       "        [0.1268, 0.5917, 0.0725, 0.2091],\n",
       "        [0.1497, 0.3595, 0.1284, 0.3624]])"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "res = F.softmax(w,dim=1)\n",
    "res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "012751fd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.11080252, 0.06512825, 0.366383, 0.45768625]\n",
      "[0.6006278, 0.088935226, 0.13707843, 0.1733586]\n",
      "[0.44876182, 0.05250856, 0.45327505, 0.045454584]\n",
      "[0.12675352, 0.59168255, 0.07248601, 0.20907786]\n",
      "[0.14969723, 0.35949543, 0.12843065, 0.36237666]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([[0.1108, 0.0651, 0.3664, 0.4577],\n",
       "        [0.6006, 0.0889, 0.1371, 0.1734],\n",
       "        [0.4488, 0.0525, 0.4533, 0.0455],\n",
       "        [0.1268, 0.5917, 0.0725, 0.2091],\n",
       "        [0.1497, 0.3595, 0.1284, 0.3624]])"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 手动计算softmax\n",
    "def softmax_(x):\n",
    "    return np.exp(x)\n",
    "w\n",
    "w2=[]\n",
    "for _ in w:\n",
    "    d = _.detach().numpy()\n",
    "    d_ = [softmax_(i) for i in d]\n",
    "    d_sum = np.sum(d_)\n",
    "    d_softmax = [j/d_sum for j in d_]\n",
    "    print(d_softmax)\n",
    "    w2.append(d_softmax)\n",
    "\n",
    "torch.from_numpy(np.array(w2))\n",
    "    \n",
    "\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "0ce4848e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.0000],\n",
       "        [1.0000],\n",
       "        [1.0000],\n",
       "        [1.0000],\n",
       "        [1.0000]])"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "res.sum(1,keepdim=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "2128a189",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.1108, 0.0651, 0.3664, 0.4577],\n",
       "        [0.6006, 0.0889, 0.1371, 0.1734],\n",
       "        [0.4488, 0.0525, 0.4533, 0.0455],\n",
       "        [0.1268, 0.5917, 0.0725, 0.2091],\n",
       "        [0.1497, 0.3595, 0.1284, 0.3624]])"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "e93fcf3d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[0.1108, 0.0651, 0.3664, 0.4577],\n",
       "         [0.6006, 0.0889, 0.1371, 0.1734],\n",
       "         [0.4488, 0.0525, 0.4533, 0.0455],\n",
       "         [0.1268, 0.5917, 0.0725, 0.2091],\n",
       "         [0.1497, 0.3595, 0.1284, 0.3624]]),\n",
       " tensor([2., 1., 1., 1., 2.]))"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "res,torch.empty(5,).random_(3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "id": "07d33d77",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([2, 2, 1, 2, 0])"
      ]
     },
     "execution_count": 105,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.empty(5,).random_(3).long()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "dbf23702",
   "metadata": {},
   "source": [
    "使用pytorch中的函数计算 交叉熵损失"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 107,
   "id": "514206d1",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([2, 3, 1, 1, 1])"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "tensor([[0.1108, 0.0651, 0.3664, 0.4577],\n",
       "        [0.6006, 0.0889, 0.1371, 0.1734],\n",
       "        [0.4488, 0.0525, 0.4533, 0.0455],\n",
       "        [0.1268, 0.5917, 0.0725, 0.2091],\n",
       "        [0.1497, 0.3595, 0.1284, 0.3624]])"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "tensor(1.4502)"
      ]
     },
     "execution_count": 107,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# nn.CrossEntropyLoss()(res,torch.randn(size=(5,4)))\n",
    "# target = torch.empty(5,4).random_(4).float()\n",
    "\n",
    "target = torch.empty(5,).random_(4).long()\n",
    "\n",
    "display(target,res)\n",
    "nn.CrossEntropyLoss()(w,target)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "id": "add805eb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[2, 1, 0, 0],\n",
       "        [3, 0, 1, 0],\n",
       "        [2, 3, 0, 3],\n",
       "        [0, 3, 2, 2],\n",
       "        [2, 1, 0, 1]])"
      ]
     },
     "execution_count": 101,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "target"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "72c59383",
   "metadata": {},
   "outputs": [
    {
     "ename": "IndexError",
     "evalue": "arrays used as indices must be of integer (or boolean) type",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mIndexError\u001b[0m                                Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[106], line 7\u001b[0m\n\u001b[0;32m      4\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m exp_x \u001b[38;5;241m/\u001b[39m np\u001b[38;5;241m.\u001b[39msum(exp_x, axis\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m, keepdims\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[0;32m      6\u001b[0m res1 \u001b[38;5;241m=\u001b[39m res\u001b[38;5;241m.\u001b[39mdetach()\u001b[38;5;241m.\u001b[39mnumpy()\n\u001b[1;32m----> 7\u001b[0m r \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m-\u001b[39mnp\u001b[38;5;241m.\u001b[39mlog(\u001b[43mres1\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;28;43mrange\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mres1\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mshape\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43mtarget\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdetach\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mnumpy\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m)\n\u001b[0;32m      8\u001b[0m np\u001b[38;5;241m.\u001b[39mmean(r)\n",
      "\u001b[1;31mIndexError\u001b[0m: arrays used as indices must be of integer (or boolean) type"
     ]
    }
   ],
   "source": [
    "# 手动计算   只有在 target是一维数组才可以执行\n",
    "def softmax(x):\n",
    "    exp_x = np.exp(x - np.max(x, axis=1, keepdims=True))\n",
    "    return exp_x / np.sum(exp_x, axis=1, keepdims=True)\n",
    "\n",
    "res1 = res.detach().numpy()\n",
    "r = -np.log(res1[range(res1.shape[0]),target.detach().numpy()])\n",
    "np.mean(r)\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4229c9ac",
   "metadata": {},
   "source": [
    "-- 注意   CrossEntropyLoss  input不需要进行 softmax"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dec813b4",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[[tensor(6.6000), tensor(2.7314), tensor(0.), tensor(1.5631)],\n",
       " [tensor(1.0196), tensor(2.4198), tensor(5.9616), tensor(3.5048)],\n",
       " [tensor(2.4038), tensor(8.8403), tensor(1.5825), tensor(0.)],\n",
       " [tensor(2.0655), tensor(1.5744), tensor(5.2487), tensor(4.6951)],\n",
       " [tensor(3.7983), tensor(3.0692), tensor(2.0524), tensor(2.0301)]]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "array([10.894557, 12.905798, 12.826639, 13.583735, 10.949951],\n",
       "      dtype=float32)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "12.232136"
      ]
     },
     "execution_count": 104,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 手动计算损失 \n",
    "all_sum = []\n",
    "for i,_ in enumerate(res):\n",
    "    sum_ = []\n",
    "    for j,__ in enumerate(_):\n",
    "        # print(target[i,j])\n",
    "        # print(__)\n",
    "        sum_.append(-target[i,j]*np.log(__))\n",
    "    all_sum.append(sum_)\n",
    "\n",
    "all_sum\n",
    "r = np.sum(all_sum,axis=1)\n",
    "display(all_sum,r)\n",
    "# display(r)\n",
    "np.mean(r)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "id": "2fc95b87",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.0723, 0.1966, 0.5344, 0.1966],\n",
       "        [0.0432, 0.3189, 0.3189, 0.3189],\n",
       "        [0.0723, 0.1966, 0.1966, 0.5344],\n",
       "        [0.2060, 0.5601, 0.2060, 0.0279],\n",
       "        [0.1470, 0.3995, 0.3995, 0.0541]])"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "tensor([2, 1, 3, 2, 2])"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "res = torch.empty(5,4).random_(4).float()\n",
    "res = F.softmax(res,dim=1)\n",
    "target = torch.empty(5,).random_(4).long()\n",
    "display(res,target)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6410cfd1",
   "metadata": {},
   "source": [
    "注意：在 PyTorch 中，nn.CrossEntropyLoss 要求目标标签（target）为 torch.long 类型，这主要与该损失函数的实现原理和底层计算逻辑有关"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "id": "7d1f7a11",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "3"
      ]
     },
     "execution_count": 84,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "(res.argmax(1) == target).sum().item()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8e2df972",
   "metadata": {},
   "outputs": [],
   "source": [
    "loss = nn.MSELoss()\n",
    "w1 = torch.randn(size=(5,),requires_grad=True)\n",
    "target = torch.randn(size=(5,))\n",
    "loss_calc = loss(w1,target)\n",
    "loss_calc.backward()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 129,
   "id": "c5dcf840",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.3883,  2.6746,  0.1608, -0.3933,  0.5442], requires_grad=True)"
      ]
     },
     "execution_count": 129,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "w1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 146,
   "id": "1808796e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.2709, 0.1248, 0.2790, 0.1998, 0.0765])"
      ]
     },
     "execution_count": 146,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "w1.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 131,
   "id": "5a820b7d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.4109,  0.5509,  0.3199,  0.2338,  0.3887], grad_fn=<DivBackward0>)"
      ]
     },
     "execution_count": 131,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 手动计算提取\n",
    "2*(w1-target)/5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 132,
   "id": "1b9fc849",
   "metadata": {},
   "outputs": [],
   "source": [
    "loss.zero_grad()\n",
    "loss_calc = loss(w1,target)\n",
    "loss_calc.backward()\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 133,
   "id": "9d730465",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.8218,  1.1018,  0.6398,  0.4676,  0.7775])"
      ]
     },
     "execution_count": 133,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "w1.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 123,
   "id": "72cbad92",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.0278"
      ]
     },
     "execution_count": 123,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "0.0139*2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "019bd4e5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss 1.729964256286621\n",
      "loss 1.7313482761383057\n",
      "loss 1.732733964920044\n",
      "loss 1.7341203689575195\n",
      "loss 1.7355079650878906\n",
      "loss 1.7368965148925781\n",
      "loss 1.7382863759994507\n",
      "loss 1.7396774291992188\n",
      "loss 1.7410694360733032\n",
      "loss 1.742462396621704\n",
      "loss 1.7438567876815796\n",
      "loss 1.745252013206482\n",
      "loss 1.7466485500335693\n",
      "loss 1.7480461597442627\n",
      "loss 1.7494449615478516\n",
      "loss 1.7508447170257568\n",
      "loss 1.7522456645965576\n",
      "loss 1.753647804260254\n",
      "loss 1.7550510168075562\n",
      "loss 1.7564551830291748\n",
      "loss 1.757860779762268\n",
      "loss 1.7592674493789673\n",
      "loss 1.760675072669983\n",
      "loss 1.7620837688446045\n",
      "loss 1.7634937763214111\n",
      "loss 1.7649047374725342\n",
      "loss 1.7663170099258423\n",
      "loss 1.7677303552627563\n",
      "loss 1.7691450119018555\n",
      "loss 1.7705605030059814\n"
     ]
    }
   ],
   "source": [
    "loss = nn.MSELoss()\n",
    "w1 = torch.randn(size=(5,),requires_grad=True)\n",
    "target = torch.randn(size=(5,))\n",
    "lr = 0.001\n",
    "num_iterations = 30\n",
    "for x in range(num_iterations):\n",
    "    loss_calc = loss(w1,target)\n",
    "    loss_calc.backward()\n",
    "    # print(w1)\n",
    "    # with torch.no_grad():\n",
    "    \n",
    "    print('loss',loss_calc.item())\n",
    "    # print('w1',w1)\n",
    "    '''\n",
    "        在 PyTorch 里，叶子张量是直接创建的张量，例如通过 torch.randn 创建的 w1。在进行参数更新时，如果你使用 w1 -= - lr*w1.grad 这种原位操作（in-place operation），\n",
    "        它实际上会创建一个新的计算图节点，导致 w1 变成非叶子张量。之后再次调用 backward() 时，由于 w1 已经不是叶子张量，它的 .grad 属性默认不会被填充，进而触发警告。\n",
    "    \n",
    "    '''\n",
    "    with torch.no_grad(): # \n",
    "        w1  -=  - lr*w1.grad\n",
    "    if w1.grad is not None:\n",
    "        w1.grad.zero_()\n",
    "        \n",
    "    \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 165,
   "id": "636973fa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "w1 tensor([-0.0337, -0.3933, -0.8400, -0.8628,  1.4893], requires_grad=True) loss 2.3302688598632812\n",
      "w1 tensor([-0.0257, -0.3930, -0.8416, -0.8565,  1.4804], requires_grad=True) loss 2.311663866043091\n",
      "w1 tensor([-0.0177, -0.3928, -0.8431, -0.8503,  1.4715], requires_grad=True) loss 2.2932076454162598\n",
      "w1 tensor([-0.0098, -0.3925, -0.8447, -0.8441,  1.4627], requires_grad=True) loss 2.2748990058898926\n",
      "w1 tensor([-0.0019, -0.3923, -0.8462, -0.8379,  1.4539], requires_grad=True) loss 2.2567358016967773\n",
      "w1 tensor([ 0.0060, -0.3920, -0.8478, -0.8318,  1.4451], requires_grad=True) loss 2.2387185096740723\n",
      "w1 tensor([ 0.0138, -0.3918, -0.8493, -0.8257,  1.4364], requires_grad=True) loss 2.220844268798828\n",
      "w1 tensor([ 0.0216, -0.3915, -0.8508, -0.8196,  1.4277], requires_grad=True) loss 2.203113079071045\n",
      "w1 tensor([ 0.0294, -0.3913, -0.8523, -0.8135,  1.4190], requires_grad=True) loss 2.185523271560669\n",
      "w1 tensor([ 0.0371, -0.3910, -0.8539, -0.8074,  1.4104], requires_grad=True) loss 2.168074131011963\n",
      "w1 tensor([ 0.0448, -0.3908, -0.8554, -0.8014,  1.4018], requires_grad=True) loss 2.1507644653320312\n",
      "w1 tensor([ 0.0525, -0.3905, -0.8569, -0.7954,  1.3932], requires_grad=True) loss 2.1335926055908203\n",
      "w1 tensor([ 0.0602, -0.3903, -0.8584, -0.7894,  1.3847], requires_grad=True) loss 2.1165578365325928\n",
      "w1 tensor([ 0.0678, -0.3900, -0.8599, -0.7835,  1.3762], requires_grad=True) loss 2.0996594429016113\n",
      "w1 tensor([ 0.0754, -0.3898, -0.8613, -0.7775,  1.3677], requires_grad=True) loss 2.0828957557678223\n",
      "w1 tensor([ 0.0829, -0.3896, -0.8628, -0.7716,  1.3593], requires_grad=True) loss 2.066265821456909\n",
      "w1 tensor([ 0.0905, -0.3893, -0.8643, -0.7657,  1.3509], requires_grad=True) loss 2.0497689247131348\n",
      "w1 tensor([ 0.0980, -0.3891, -0.8658, -0.7599,  1.3425], requires_grad=True) loss 2.0334036350250244\n",
      "w1 tensor([ 0.1054, -0.3888, -0.8672, -0.7541,  1.3342], requires_grad=True) loss 2.0171685218811035\n",
      "w1 tensor([ 0.1129, -0.3886, -0.8687, -0.7482,  1.3259], requires_grad=True) loss 2.001063585281372\n",
      "w1 tensor([ 0.1203, -0.3884, -0.8701, -0.7425,  1.3177], requires_grad=True) loss 1.9850871562957764\n",
      "w1 tensor([ 0.1277, -0.3881, -0.8716, -0.7367,  1.3094], requires_grad=True) loss 1.96923828125\n",
      "w1 tensor([ 0.1350, -0.3879, -0.8730, -0.7309,  1.3012], requires_grad=True) loss 1.9535157680511475\n",
      "w1 tensor([ 0.1423, -0.3877, -0.8744, -0.7252,  1.2931], requires_grad=True) loss 1.937919020652771\n",
      "w1 tensor([ 0.1496, -0.3874, -0.8759, -0.7195,  1.2849], requires_grad=True) loss 1.922446608543396\n",
      "w1 tensor([ 0.1569, -0.3872, -0.8773, -0.7138,  1.2768], requires_grad=True) loss 1.9070980548858643\n",
      "w1 tensor([ 0.1641, -0.3870, -0.8787, -0.7082,  1.2688], requires_grad=True) loss 1.891871690750122\n",
      "w1 tensor([ 0.1713, -0.3867, -0.8801, -0.7026,  1.2608], requires_grad=True) loss 1.8767668008804321\n",
      "w1 tensor([ 0.1785, -0.3865, -0.8815, -0.6970,  1.2528], requires_grad=True) loss 1.8617827892303467\n",
      "w1 tensor([ 0.1856, -0.3863, -0.8829, -0.6914,  1.2448], requires_grad=True) loss 1.8469183444976807\n",
      "w1 tensor([ 0.1928, -0.3860, -0.8843, -0.6858,  1.2369], requires_grad=True) loss 1.8321723937988281\n",
      "w1 tensor([ 0.1999, -0.3858, -0.8857, -0.6803,  1.2289], requires_grad=True) loss 1.8175443410873413\n",
      "w1 tensor([ 0.2069, -0.3856, -0.8871, -0.6748,  1.2211], requires_grad=True) loss 1.8030331134796143\n",
      "w1 tensor([ 0.2139, -0.3854, -0.8885, -0.6693,  1.2132], requires_grad=True) loss 1.7886377573013306\n",
      "w1 tensor([ 0.2210, -0.3851, -0.8898, -0.6638,  1.2054], requires_grad=True) loss 1.7743571996688843\n",
      "w1 tensor([ 0.2279, -0.3849, -0.8912, -0.6583,  1.1976], requires_grad=True) loss 1.760190725326538\n",
      "w1 tensor([ 0.2349, -0.3847, -0.8926, -0.6529,  1.1899], requires_grad=True) loss 1.7461373805999756\n",
      "w1 tensor([ 0.2418, -0.3845, -0.8939, -0.6475,  1.1822], requires_grad=True) loss 1.7321962118148804\n",
      "w1 tensor([ 0.2487, -0.3843, -0.8953, -0.6421,  1.1745], requires_grad=True) loss 1.7183666229248047\n",
      "w1 tensor([ 0.2556, -0.3840, -0.8966, -0.6367,  1.1668], requires_grad=True) loss 1.7046470642089844\n",
      "w1 tensor([ 0.2624, -0.3838, -0.8979, -0.6314,  1.1592], requires_grad=True) loss 1.6910371780395508\n",
      "w1 tensor([ 0.2692, -0.3836, -0.8993, -0.6261,  1.1516], requires_grad=True) loss 1.6775357723236084\n",
      "w1 tensor([ 0.2760, -0.3834, -0.9006, -0.6208,  1.1441], requires_grad=True) loss 1.6641426086425781\n",
      "w1 tensor([ 0.2828, -0.3832, -0.9019, -0.6155,  1.1365], requires_grad=True) loss 1.6508560180664062\n",
      "w1 tensor([ 0.2895, -0.3829, -0.9033, -0.6102,  1.1290], requires_grad=True) loss 1.6376755237579346\n",
      "w1 tensor([ 0.2962, -0.3827, -0.9046, -0.6050,  1.1216], requires_grad=True) loss 1.6246004104614258\n",
      "w1 tensor([ 0.3029, -0.3825, -0.9059, -0.5998,  1.1141], requires_grad=True) loss 1.6116294860839844\n",
      "w1 tensor([ 0.3095, -0.3823, -0.9072, -0.5946,  1.1067], requires_grad=True) loss 1.5987622737884521\n",
      "w1 tensor([ 0.3161, -0.3821, -0.9085, -0.5894,  1.0993], requires_grad=True) loss 1.5859978199005127\n",
      "w1 tensor([ 0.3227, -0.3819, -0.9098, -0.5843,  1.0920], requires_grad=True) loss 1.57333505153656\n",
      "w1 tensor([ 0.3293, -0.3817, -0.9110, -0.5791,  1.0846], requires_grad=True) loss 1.5607736110687256\n",
      "w1 tensor([ 0.3358, -0.3815, -0.9123, -0.5740,  1.0773], requires_grad=True) loss 1.5483125448226929\n",
      "w1 tensor([ 0.3424, -0.3813, -0.9136, -0.5689,  1.0701], requires_grad=True) loss 1.5359508991241455\n",
      "w1 tensor([ 0.3488, -0.3810, -0.9149, -0.5638,  1.0628], requires_grad=True) loss 1.5236878395080566\n",
      "w1 tensor([ 0.3553, -0.3808, -0.9161, -0.5588,  1.0556], requires_grad=True) loss 1.511522650718689\n",
      "w1 tensor([ 0.3618, -0.3806, -0.9174, -0.5538,  1.0485], requires_grad=True) loss 1.4994546175003052\n",
      "w1 tensor([ 0.3682, -0.3804, -0.9187, -0.5487,  1.0413], requires_grad=True) loss 1.487483024597168\n",
      "w1 tensor([ 0.3746, -0.3802, -0.9199, -0.5438,  1.0342], requires_grad=True) loss 1.47560715675354\n",
      "w1 tensor([ 0.3809, -0.3800, -0.9211, -0.5388,  1.0271], requires_grad=True) loss 1.4638259410858154\n",
      "w1 tensor([ 0.3873, -0.3798, -0.9224, -0.5338,  1.0200], requires_grad=True) loss 1.4521386623382568\n",
      "w1 tensor([ 0.3936, -0.3796, -0.9236, -0.5289,  1.0130], requires_grad=True) loss 1.440544843673706\n",
      "w1 tensor([ 0.3999, -0.3794, -0.9249, -0.5240,  1.0060], requires_grad=True) loss 1.4290437698364258\n",
      "w1 tensor([ 0.4061, -0.3792, -0.9261, -0.5191,  0.9990], requires_grad=True) loss 1.417634129524231\n",
      "w1 tensor([ 0.4124, -0.3790, -0.9273, -0.5142,  0.9921], requires_grad=True) loss 1.4063156843185425\n",
      "w1 tensor([ 0.4186, -0.3788, -0.9285, -0.5094,  0.9851], requires_grad=True) loss 1.3950875997543335\n",
      "w1 tensor([ 0.4247, -0.3786, -0.9297, -0.5045,  0.9782], requires_grad=True) loss 1.3839492797851562\n",
      "w1 tensor([ 0.4309, -0.3784, -0.9309, -0.4997,  0.9714], requires_grad=True) loss 1.3728998899459839\n",
      "w1 tensor([ 0.4370, -0.3782, -0.9321, -0.4949,  0.9645], requires_grad=True) loss 1.361938714981079\n",
      "w1 tensor([ 0.4432, -0.3780, -0.9333, -0.4901,  0.9577], requires_grad=True) loss 1.351064920425415\n",
      "w1 tensor([ 0.4492, -0.3778, -0.9345, -0.4854,  0.9509], requires_grad=True) loss 1.3402780294418335\n",
      "w1 tensor([ 0.4553, -0.3776, -0.9357, -0.4806,  0.9442], requires_grad=True) loss 1.3295772075653076\n",
      "w1 tensor([ 0.4614, -0.3774, -0.9369, -0.4759,  0.9374], requires_grad=True) loss 1.3189618587493896\n",
      "w1 tensor([ 0.4674, -0.3773, -0.9381, -0.4712,  0.9307], requires_grad=True) loss 1.3084312677383423\n",
      "w1 tensor([ 0.4734, -0.3771, -0.9392, -0.4665,  0.9240], requires_grad=True) loss 1.2979848384857178\n",
      "w1 tensor([ 0.4793, -0.3769, -0.9404, -0.4619,  0.9174], requires_grad=True) loss 1.2876217365264893\n",
      "w1 tensor([ 0.4853, -0.3767, -0.9416, -0.4572,  0.9108], requires_grad=True) loss 1.2773412466049194\n",
      "w1 tensor([ 0.4912, -0.3765, -0.9427, -0.4526,  0.9042], requires_grad=True) loss 1.2671430110931396\n",
      "w1 tensor([ 0.4971, -0.3763, -0.9439, -0.4480,  0.8976], requires_grad=True) loss 1.2570260763168335\n",
      "w1 tensor([ 0.5030, -0.3761, -0.9450, -0.4434,  0.8910], requires_grad=True) loss 1.2469902038574219\n",
      "w1 tensor([ 0.5088, -0.3759, -0.9462, -0.4388,  0.8845], requires_grad=True) loss 1.2370340824127197\n",
      "w1 tensor([ 0.5146, -0.3757, -0.9473, -0.4343,  0.8780], requires_grad=True) loss 1.2271575927734375\n",
      "w1 tensor([ 0.5204, -0.3756, -0.9485, -0.4298,  0.8716], requires_grad=True) loss 1.2173599004745483\n",
      "w1 tensor([ 0.5262, -0.3754, -0.9496, -0.4252,  0.8651], requires_grad=True) loss 1.207640528678894\n",
      "w1 tensor([ 0.5320, -0.3752, -0.9507, -0.4207,  0.8587], requires_grad=True) loss 1.1979987621307373\n",
      "w1 tensor([ 0.5377, -0.3750, -0.9518, -0.4163,  0.8523], requires_grad=True) loss 1.1884340047836304\n",
      "w1 tensor([ 0.5434, -0.3748, -0.9530, -0.4118,  0.8459], requires_grad=True) loss 1.1789453029632568\n",
      "w1 tensor([ 0.5491, -0.3746, -0.9541, -0.4074,  0.8396], requires_grad=True) loss 1.1695327758789062\n",
      "w1 tensor([ 0.5548, -0.3745, -0.9552, -0.4029,  0.8333], requires_grad=True) loss 1.160195231437683\n",
      "w1 tensor([ 0.5604, -0.3743, -0.9563, -0.3985,  0.8270], requires_grad=True) loss 1.1509321928024292\n",
      "w1 tensor([ 0.5660, -0.3741, -0.9574, -0.3941,  0.8207], requires_grad=True) loss 1.1417431831359863\n",
      "w1 tensor([ 0.5716, -0.3739, -0.9585, -0.3898,  0.8145], requires_grad=True) loss 1.1326273679733276\n",
      "w1 tensor([ 0.5772, -0.3737, -0.9596, -0.3854,  0.8083], requires_grad=True) loss 1.1235846281051636\n",
      "w1 tensor([ 0.5827, -0.3736, -0.9607, -0.3811,  0.8021], requires_grad=True) loss 1.1146138906478882\n",
      "w1 tensor([ 0.5883, -0.3734, -0.9617, -0.3767,  0.7959], requires_grad=True) loss 1.1057147979736328\n",
      "w1 tensor([ 0.5938, -0.3732, -0.9628, -0.3724,  0.7898], requires_grad=True) loss 1.0968867540359497\n",
      "w1 tensor([ 0.5993, -0.3730, -0.9639, -0.3682,  0.7837], requires_grad=True) loss 1.0881292819976807\n",
      "w1 tensor([ 0.6047, -0.3729, -0.9650, -0.3639,  0.7776], requires_grad=True) loss 1.079441785812378\n",
      "w1 tensor([ 0.6102, -0.3727, -0.9660, -0.3596,  0.7715], requires_grad=True) loss 1.0708234310150146\n",
      "w1 tensor([ 0.6156, -0.3725, -0.9671, -0.3554,  0.7655], requires_grad=True) loss 1.0622739791870117\n",
      "w1 tensor([ 0.6210, -0.3723, -0.9681, -0.3512,  0.7595], requires_grad=True) loss 1.0537927150726318\n",
      "w1 tensor([ 0.6264, -0.3722, -0.9692, -0.3470,  0.7535], requires_grad=True) loss 1.0453790426254272\n",
      "w1 tensor([ 0.6317, -0.3720, -0.9702, -0.3428,  0.7475], requires_grad=True) loss 1.037032961845398\n",
      "w1 tensor([ 0.6371, -0.3718, -0.9713, -0.3386,  0.7415], requires_grad=True) loss 1.0287532806396484\n",
      "w1 tensor([ 0.6424, -0.3717, -0.9723, -0.3345,  0.7356], requires_grad=True) loss 1.02053964138031\n",
      "w1 tensor([ 0.6477, -0.3715, -0.9734, -0.3303,  0.7297], requires_grad=True) loss 1.0123916864395142\n",
      "w1 tensor([ 0.6529, -0.3713, -0.9744, -0.3262,  0.7238], requires_grad=True) loss 1.0043089389801025\n",
      "w1 tensor([ 0.6582, -0.3711, -0.9754, -0.3221,  0.7180], requires_grad=True) loss 0.9962903261184692\n",
      "w1 tensor([ 0.6634, -0.3710, -0.9764, -0.3180,  0.7122], requires_grad=True) loss 0.9883359670639038\n",
      "w1 tensor([ 0.6686, -0.3708, -0.9775, -0.3140,  0.7064], requires_grad=True) loss 0.9804452061653137\n",
      "w1 tensor([ 0.6738, -0.3706, -0.9785, -0.3099,  0.7006], requires_grad=True) loss 0.9726172685623169\n",
      "w1 tensor([ 0.6790, -0.3705, -0.9795, -0.3059,  0.6948], requires_grad=True) loss 0.964851975440979\n",
      "w1 tensor([ 0.6841, -0.3703, -0.9805, -0.3019,  0.6891], requires_grad=True) loss 0.957148551940918\n",
      "w1 tensor([ 0.6892, -0.3702, -0.9815, -0.2978,  0.6834], requires_grad=True) loss 0.9495066404342651\n",
      "w1 tensor([ 0.6943, -0.3700, -0.9825, -0.2939,  0.6777], requires_grad=True) loss 0.9419257044792175\n",
      "w1 tensor([ 0.6994, -0.3698, -0.9835, -0.2899,  0.6720], requires_grad=True) loss 0.9344054460525513\n",
      "w1 tensor([ 0.7045, -0.3697, -0.9845, -0.2859,  0.6664], requires_grad=True) loss 0.9269450902938843\n",
      "w1 tensor([ 0.7095, -0.3695, -0.9855, -0.2820,  0.6607], requires_grad=True) loss 0.9195443987846375\n",
      "w1 tensor([ 0.7146, -0.3693, -0.9864, -0.2781,  0.6551], requires_grad=True) loss 0.9122027158737183\n",
      "w1 tensor([ 0.7196, -0.3692, -0.9874, -0.2742,  0.6496], requires_grad=True) loss 0.9049198031425476\n",
      "w1 tensor([ 0.7245, -0.3690, -0.9884, -0.2703,  0.6440], requires_grad=True) loss 0.8976948857307434\n",
      "w1 tensor([ 0.7295, -0.3689, -0.9894, -0.2664,  0.6385], requires_grad=True) loss 0.8905277252197266\n",
      "w1 tensor([ 0.7344, -0.3687, -0.9903, -0.2625,  0.6330], requires_grad=True) loss 0.8834177851676941\n",
      "w1 tensor([ 0.7394, -0.3685, -0.9913, -0.2587,  0.6275], requires_grad=True) loss 0.876364529132843\n",
      "w1 tensor([ 0.7443, -0.3684, -0.9923, -0.2548,  0.6220], requires_grad=True) loss 0.8693675994873047\n",
      "w1 tensor([ 0.7492, -0.3682, -0.9932, -0.2510,  0.6166], requires_grad=True) loss 0.8624265789985657\n",
      "w1 tensor([ 0.7540, -0.3681, -0.9942, -0.2472,  0.6111], requires_grad=True) loss 0.855540931224823\n",
      "w1 tensor([ 0.7589, -0.3679, -0.9951, -0.2434,  0.6057], requires_grad=True) loss 0.8487103581428528\n",
      "w1 tensor([ 0.7637, -0.3678, -0.9961, -0.2397,  0.6004], requires_grad=True) loss 0.8419342041015625\n",
      "w1 tensor([ 0.7685, -0.3676, -0.9970, -0.2359,  0.5950], requires_grad=True) loss 0.835212230682373\n",
      "w1 tensor([ 0.7733, -0.3675, -0.9979, -0.2322,  0.5897], requires_grad=True) loss 0.8285438418388367\n",
      "w1 tensor([ 0.7780, -0.3673, -0.9989, -0.2284,  0.5844], requires_grad=True) loss 0.821928858757019\n",
      "w1 tensor([ 0.7828, -0.3672, -0.9998, -0.2247,  0.5791], requires_grad=True) loss 0.8153665661811829\n",
      "w1 tensor([ 0.7875, -0.3670, -1.0007, -0.2210,  0.5738], requires_grad=True) loss 0.8088566660881042\n",
      "w1 tensor([ 0.7922, -0.3669, -1.0017, -0.2174,  0.5685], requires_grad=True) loss 0.802398681640625\n",
      "w1 tensor([ 0.7969, -0.3667, -1.0026, -0.2137,  0.5633], requires_grad=True) loss 0.795992374420166\n",
      "w1 tensor([ 0.8016, -0.3666, -1.0035, -0.2100,  0.5581], requires_grad=True) loss 0.7896372079849243\n",
      "w1 tensor([ 0.8063, -0.3664, -1.0044, -0.2064,  0.5529], requires_grad=True) loss 0.7833327651023865\n",
      "w1 tensor([ 0.8109, -0.3663, -1.0053, -0.2028,  0.5477], requires_grad=True) loss 0.7770785689353943\n",
      "w1 tensor([ 0.8155, -0.3661, -1.0062, -0.1992,  0.5426], requires_grad=True) loss 0.7708743810653687\n",
      "w1 tensor([ 0.8201, -0.3660, -1.0071, -0.1956,  0.5375], requires_grad=True) loss 0.7647197842597961\n",
      "w1 tensor([ 0.8247, -0.3658, -1.0080, -0.1920,  0.5324], requires_grad=True) loss 0.7586142420768738\n",
      "w1 tensor([ 0.8292, -0.3657, -1.0089, -0.1884,  0.5273], requires_grad=True) loss 0.7525573968887329\n",
      "w1 tensor([ 0.8338, -0.3655, -1.0098, -0.1849,  0.5222], requires_grad=True) loss 0.746549129486084\n",
      "w1 tensor([ 0.8383, -0.3654, -1.0107, -0.1813,  0.5172], requires_grad=True) loss 0.7405885457992554\n",
      "w1 tensor([ 0.8428, -0.3652, -1.0116, -0.1778,  0.5121], requires_grad=True) loss 0.7346757650375366\n",
      "w1 tensor([ 0.8473, -0.3651, -1.0124, -0.1743,  0.5071], requires_grad=True) loss 0.7288101315498352\n",
      "w1 tensor([ 0.8518, -0.3650, -1.0133, -0.1708,  0.5022], requires_grad=True) loss 0.7229912877082825\n",
      "w1 tensor([ 0.8562, -0.3648, -1.0142, -0.1673,  0.4972], requires_grad=True) loss 0.7172189950942993\n",
      "w1 tensor([ 0.8607, -0.3647, -1.0150, -0.1639,  0.4922], requires_grad=True) loss 0.711492657661438\n",
      "w1 tensor([ 0.8651, -0.3645, -1.0159, -0.1604,  0.4873], requires_grad=True) loss 0.7058120965957642\n",
      "w1 tensor([ 0.8695, -0.3644, -1.0168, -0.1570,  0.4824], requires_grad=True) loss 0.7001768350601196\n",
      "w1 tensor([ 0.8739, -0.3642, -1.0176, -0.1536,  0.4775], requires_grad=True) loss 0.6945866346359253\n",
      "w1 tensor([ 0.8782, -0.3641, -1.0185, -0.1501,  0.4727], requires_grad=True) loss 0.6890411376953125\n",
      "w1 tensor([ 0.8826, -0.3640, -1.0193, -0.1468,  0.4678], requires_grad=True) loss 0.6835399270057678\n",
      "w1 tensor([ 0.8869, -0.3638, -1.0202, -0.1434,  0.4630], requires_grad=True) loss 0.6780824065208435\n",
      "w1 tensor([ 0.8912, -0.3637, -1.0210, -0.1400,  0.4582], requires_grad=True) loss 0.6726686358451843\n",
      "w1 tensor([ 0.8955, -0.3636, -1.0219, -0.1366,  0.4534], requires_grad=True) loss 0.6672980785369873\n",
      "w1 tensor([ 0.8998, -0.3634, -1.0227, -0.1333,  0.4486], requires_grad=True) loss 0.661970317363739\n",
      "w1 tensor([ 0.9041, -0.3633, -1.0235, -0.1300,  0.4439], requires_grad=True) loss 0.6566851735115051\n",
      "w1 tensor([ 0.9083, -0.3631, -1.0244, -0.1267,  0.4391], requires_grad=True) loss 0.6514422297477722\n",
      "w1 tensor([ 0.9125, -0.3630, -1.0252, -0.1233,  0.4344], requires_grad=True) loss 0.6462410688400269\n",
      "w1 tensor([ 0.9168, -0.3629, -1.0260, -0.1201,  0.4297], requires_grad=True) loss 0.6410815119743347\n",
      "w1 tensor([ 0.9209, -0.3627, -1.0268, -0.1168,  0.4251], requires_grad=True) loss 0.6359631419181824\n",
      "w1 tensor([ 0.9251, -0.3626, -1.0277, -0.1135,  0.4204], requires_grad=True) loss 0.6308857202529907\n",
      "w1 tensor([ 0.9293, -0.3625, -1.0285, -0.1103,  0.4158], requires_grad=True) loss 0.625848650932312\n",
      "w1 tensor([ 0.9334, -0.3623, -1.0293, -0.1070,  0.4111], requires_grad=True) loss 0.6208518147468567\n",
      "w1 tensor([ 0.9376, -0.3622, -1.0301, -0.1038,  0.4065], requires_grad=True) loss 0.6158949732780457\n",
      "w1 tensor([ 0.9417, -0.3621, -1.0309, -0.1006,  0.4020], requires_grad=True) loss 0.6109777092933655\n",
      "w1 tensor([ 0.9458, -0.3619, -1.0317, -0.0974,  0.3974], requires_grad=True) loss 0.6060996055603027\n",
      "w1 tensor([ 0.9498, -0.3618, -1.0325, -0.0942,  0.3928], requires_grad=True) loss 0.6012605428695679\n",
      "w1 tensor([ 0.9539, -0.3617, -1.0333, -0.0910,  0.3883], requires_grad=True) loss 0.5964599847793579\n",
      "w1 tensor([ 0.9579, -0.3616, -1.0341, -0.0879,  0.3838], requires_grad=True) loss 0.5916978716850281\n",
      "w1 tensor([ 0.9620, -0.3614, -1.0349, -0.0847,  0.3793], requires_grad=True) loss 0.5869737863540649\n",
      "w1 tensor([ 0.9660, -0.3613, -1.0357, -0.0816,  0.3748], requires_grad=True) loss 0.5822873711585999\n",
      "w1 tensor([ 0.9700, -0.3612, -1.0364, -0.0785,  0.3704], requires_grad=True) loss 0.5776383280754089\n",
      "w1 tensor([ 0.9740, -0.3610, -1.0372, -0.0754,  0.3659], requires_grad=True) loss 0.5730265378952026\n",
      "w1 tensor([ 0.9779, -0.3609, -1.0380, -0.0723,  0.3615], requires_grad=True) loss 0.568451464176178\n",
      "w1 tensor([ 0.9819, -0.3608, -1.0388, -0.0692,  0.3571], requires_grad=True) loss 0.5639129877090454\n",
      "w1 tensor([ 0.9858, -0.3607, -1.0395, -0.0661,  0.3527], requires_grad=True) loss 0.5594106912612915\n",
      "w1 tensor([ 0.9897, -0.3605, -1.0403, -0.0630,  0.3484], requires_grad=True) loss 0.5549443364143372\n",
      "w1 tensor([ 0.9936, -0.3604, -1.0411, -0.0600,  0.3440], requires_grad=True) loss 0.5505136251449585\n",
      "w1 tensor([ 0.9975, -0.3603, -1.0418, -0.0569,  0.3397], requires_grad=True) loss 0.5461183786392212\n",
      "w1 tensor([ 1.0014, -0.3602, -1.0426, -0.0539,  0.3354], requires_grad=True) loss 0.5417581796646118\n",
      "w1 tensor([ 1.0052, -0.3600, -1.0433, -0.0509,  0.3311], requires_grad=True) loss 0.537432849407196\n",
      "w1 tensor([ 1.0091, -0.3599, -1.0441, -0.0479,  0.3268], requires_grad=True) loss 0.5331419110298157\n",
      "w1 tensor([ 1.0129, -0.3598, -1.0448, -0.0449,  0.3225], requires_grad=True) loss 0.5288853049278259\n",
      "w1 tensor([ 1.0167, -0.3597, -1.0456, -0.0419,  0.3183], requires_grad=True) loss 0.5246626734733582\n",
      "w1 tensor([ 1.0205, -0.3596, -1.0463, -0.0390,  0.3141], requires_grad=True) loss 0.520473837852478\n",
      "w1 tensor([ 1.0243, -0.3594, -1.0471, -0.0360,  0.3098], requires_grad=True) loss 0.5163183808326721\n",
      "w1 tensor([ 1.0280, -0.3593, -1.0478, -0.0331,  0.3056], requires_grad=True) loss 0.5121960043907166\n",
      "w1 tensor([ 1.0318, -0.3592, -1.0485, -0.0302,  0.3015], requires_grad=True) loss 0.5081066489219666\n",
      "w1 tensor([ 1.0355, -0.3591, -1.0493, -0.0272,  0.2973], requires_grad=True) loss 0.5040500164031982\n",
      "w1 tensor([ 1.0392, -0.3590, -1.0500, -0.0243,  0.2932], requires_grad=True) loss 0.5000256299972534\n",
      "w1 tensor([ 1.0430, -0.3588, -1.0507, -0.0214,  0.2890], requires_grad=True) loss 0.4960334300994873\n",
      "w1 tensor([ 1.0466, -0.3587, -1.0514, -0.0186,  0.2849], requires_grad=True) loss 0.49207305908203125\n",
      "w1 tensor([ 1.0503, -0.3586, -1.0522, -0.0157,  0.2808], requires_grad=True) loss 0.4881443977355957\n",
      "w1 tensor([ 1.0540, -0.3585, -1.0529, -0.0128,  0.2767], requires_grad=True) loss 0.4842470586299896\n",
      "w1 tensor([ 1.0576, -0.3584, -1.0536, -0.0100,  0.2727], requires_grad=True) loss 0.4803807735443115\n",
      "w1 tensor([ 1.0612, -0.3582, -1.0543, -0.0071,  0.2686], requires_grad=True) loss 0.47654542326927185\n",
      "w1 tensor([ 1.0649, -0.3581, -1.0550, -0.0043,  0.2646], requires_grad=True) loss 0.47274070978164673\n",
      "w1 tensor([ 1.0685, -0.3580, -1.0557, -0.0015,  0.2606], requires_grad=True) loss 0.4689663350582123\n",
      "w1 tensor([ 1.0721, -0.3579, -1.0564,  0.0013,  0.2566], requires_grad=True) loss 0.4652221202850342\n",
      "w1 tensor([ 1.0756, -0.3578, -1.0571,  0.0041,  0.2526], requires_grad=True) loss 0.46150779724121094\n",
      "w1 tensor([ 1.0792, -0.3577, -1.0578,  0.0069,  0.2486], requires_grad=True) loss 0.45782312750816345\n",
      "w1 tensor([ 1.0827, -0.3576, -1.0585,  0.0096,  0.2447], requires_grad=True) loss 0.454167902469635\n",
      "w1 tensor([ 1.0863, -0.3574, -1.0592,  0.0124,  0.2408], requires_grad=True) loss 0.45054179430007935\n",
      "w1 tensor([ 1.0898, -0.3573, -1.0599,  0.0152,  0.2368], requires_grad=True) loss 0.4469446539878845\n",
      "w1 tensor([ 1.0933, -0.3572, -1.0606,  0.0179,  0.2329], requires_grad=True) loss 0.44337624311447144\n",
      "w1 tensor([ 1.0968, -0.3571, -1.0613,  0.0206,  0.2290], requires_grad=True) loss 0.439836323261261\n",
      "w1 tensor([ 1.1002, -0.3570, -1.0619,  0.0233,  0.2252], requires_grad=True) loss 0.43632468581199646\n",
      "w1 tensor([ 1.1037, -0.3569, -1.0626,  0.0260,  0.2213], requires_grad=True) loss 0.43284112215042114\n",
      "w1 tensor([ 1.1071, -0.3568, -1.0633,  0.0287,  0.2175], requires_grad=True) loss 0.4293852746486664\n",
      "w1 tensor([ 1.1106, -0.3567, -1.0640,  0.0314,  0.2136], requires_grad=True) loss 0.4259570240974426\n",
      "w1 tensor([ 1.1140, -0.3566, -1.0646,  0.0341,  0.2098], requires_grad=True) loss 0.42255616188049316\n",
      "w1 tensor([ 1.1174, -0.3565, -1.0653,  0.0367,  0.2060], requires_grad=True) loss 0.4191824495792389\n",
      "w1 tensor([ 1.1208, -0.3563, -1.0660,  0.0394,  0.2023], requires_grad=True) loss 0.41583576798439026\n",
      "w1 tensor([ 1.1242, -0.3562, -1.0666,  0.0420,  0.1985], requires_grad=True) loss 0.41251564025878906\n",
      "w1 tensor([ 1.1275, -0.3561, -1.0673,  0.0447,  0.1947], requires_grad=True) loss 0.4092221260070801\n",
      "w1 tensor([ 1.1309, -0.3560, -1.0679,  0.0473,  0.1910], requires_grad=True) loss 0.40595489740371704\n",
      "w1 tensor([ 1.1342, -0.3559, -1.0686,  0.0499,  0.1873], requires_grad=True) loss 0.4027137756347656\n",
      "w1 tensor([ 1.1375, -0.3558, -1.0692,  0.0525,  0.1836], requires_grad=True) loss 0.39949852228164673\n",
      "w1 tensor([ 1.1409, -0.3557, -1.0699,  0.0551,  0.1799], requires_grad=True) loss 0.396308958530426\n",
      "w1 tensor([ 1.1442, -0.3556, -1.0705,  0.0576,  0.1762], requires_grad=True) loss 0.393144816160202\n",
      "w1 tensor([ 1.1474, -0.3555, -1.0712,  0.0602,  0.1726], requires_grad=True) loss 0.390005886554718\n",
      "w1 tensor([ 1.1507, -0.3554, -1.0718,  0.0628,  0.1689], requires_grad=True) loss 0.3868921399116516\n",
      "w1 tensor([ 1.1540, -0.3553, -1.0725,  0.0653,  0.1653], requires_grad=True) loss 0.38380318880081177\n",
      "w1 tensor([ 1.1572, -0.3552, -1.0731,  0.0678,  0.1617], requires_grad=True) loss 0.38073885440826416\n",
      "w1 tensor([ 1.1604, -0.3551, -1.0737,  0.0704,  0.1581], requires_grad=True) loss 0.3776990473270416\n",
      "w1 tensor([ 1.1637, -0.3550, -1.0744,  0.0729,  0.1545], requires_grad=True) loss 0.37468352913856506\n",
      "w1 tensor([ 1.1669, -0.3549, -1.0750,  0.0754,  0.1509], requires_grad=True) loss 0.371692031621933\n",
      "w1 tensor([ 1.1701, -0.3548, -1.0756,  0.0779,  0.1473], requires_grad=True) loss 0.36872443556785583\n",
      "w1 tensor([ 1.1732, -0.3547, -1.0762,  0.0804,  0.1438], requires_grad=True) loss 0.3657805323600769\n",
      "w1 tensor([ 1.1764, -0.3546, -1.0768,  0.0828,  0.1403], requires_grad=True) loss 0.3628601133823395\n",
      "w1 tensor([ 1.1796, -0.3545, -1.0775,  0.0853,  0.1367], requires_grad=True) loss 0.359963059425354\n",
      "w1 tensor([ 1.1827, -0.3544, -1.0781,  0.0878,  0.1332], requires_grad=True) loss 0.357089102268219\n",
      "w1 tensor([ 1.1858, -0.3543, -1.0787,  0.0902,  0.1297], requires_grad=True) loss 0.3542380928993225\n",
      "w1 tensor([ 1.1890, -0.3542, -1.0793,  0.0927,  0.1263], requires_grad=True) loss 0.3514098525047302\n",
      "w1 tensor([ 1.1921, -0.3541, -1.0799,  0.0951,  0.1228], requires_grad=True) loss 0.3486042618751526\n",
      "w1 tensor([ 1.1951, -0.3540, -1.0805,  0.0975,  0.1194], requires_grad=True) loss 0.34582096338272095\n",
      "w1 tensor([ 1.1982, -0.3539, -1.0811,  0.0999,  0.1159], requires_grad=True) loss 0.3430599272251129\n",
      "w1 tensor([ 1.2013, -0.3538, -1.0817,  0.1023,  0.1125], requires_grad=True) loss 0.3403209149837494\n",
      "w1 tensor([ 1.2044, -0.3537, -1.0823,  0.1047,  0.1091], requires_grad=True) loss 0.3376038074493408\n",
      "w1 tensor([ 1.2074, -0.3536, -1.0829,  0.1071,  0.1057], requires_grad=True) loss 0.3349083662033081\n",
      "w1 tensor([ 1.2104, -0.3535, -1.0835,  0.1094,  0.1023], requires_grad=True) loss 0.3322344422340393\n",
      "w1 tensor([ 1.2134, -0.3534, -1.0841,  0.1118,  0.0990], requires_grad=True) loss 0.3295819163322449\n",
      "w1 tensor([ 1.2165, -0.3533, -1.0847,  0.1141,  0.0956], requires_grad=True) loss 0.3269504904747009\n",
      "w1 tensor([ 1.2194, -0.3532, -1.0853,  0.1165,  0.0923], requires_grad=True) loss 0.3243401348590851\n",
      "w1 tensor([ 1.2224, -0.3531, -1.0859,  0.1188,  0.0889], requires_grad=True) loss 0.32175058126449585\n",
      "w1 tensor([ 1.2254, -0.3530, -1.0864,  0.1211,  0.0856], requires_grad=True) loss 0.31918174028396606\n",
      "w1 tensor([ 1.2284, -0.3529, -1.0870,  0.1234,  0.0823], requires_grad=True) loss 0.3166333734989166\n",
      "w1 tensor([ 1.2313, -0.3528, -1.0876,  0.1258,  0.0790], requires_grad=True) loss 0.31410539150238037\n",
      "w1 tensor([ 1.2342, -0.3527, -1.0882,  0.1280,  0.0758], requires_grad=True) loss 0.3115975260734558\n",
      "w1 tensor([ 1.2372, -0.3526, -1.0887,  0.1303,  0.0725], requires_grad=True) loss 0.30910977721214294\n",
      "w1 tensor([ 1.2401, -0.3525, -1.0893,  0.1326,  0.0693], requires_grad=True) loss 0.3066418468952179\n",
      "w1 tensor([ 1.2430, -0.3524, -1.0899,  0.1349,  0.0660], requires_grad=True) loss 0.3041936159133911\n",
      "w1 tensor([ 1.2459, -0.3523, -1.0904,  0.1371,  0.0628], requires_grad=True) loss 0.30176493525505066\n",
      "w1 tensor([ 1.2487, -0.3522, -1.0910,  0.1394,  0.0596], requires_grad=True) loss 0.2993556559085846\n",
      "w1 tensor([ 1.2516, -0.3522, -1.0916,  0.1416,  0.0564], requires_grad=True) loss 0.2969655394554138\n",
      "w1 tensor([ 1.2545, -0.3521, -1.0921,  0.1438,  0.0532], requires_grad=True) loss 0.2945946156978607\n",
      "w1 tensor([ 1.2573, -0.3520, -1.0927,  0.1461,  0.0501], requires_grad=True) loss 0.29224255681037903\n",
      "w1 tensor([ 1.2601, -0.3519, -1.0932,  0.1483,  0.0469], requires_grad=True) loss 0.2899092733860016\n",
      "w1 tensor([ 1.2630, -0.3518, -1.0938,  0.1505,  0.0438], requires_grad=True) loss 0.28759461641311646\n",
      "w1 tensor([ 1.2658, -0.3517, -1.0943,  0.1527,  0.0406], requires_grad=True) loss 0.28529849648475647\n",
      "w1 tensor([ 1.2686, -0.3516, -1.0949,  0.1549,  0.0375], requires_grad=True) loss 0.28302067518234253\n",
      "w1 tensor([ 1.2714, -0.3515, -1.0954,  0.1570,  0.0344], requires_grad=True) loss 0.28076106309890747\n",
      "w1 tensor([ 1.2741, -0.3514, -1.0960,  0.1592,  0.0313], requires_grad=True) loss 0.2785194516181946\n",
      "w1 tensor([ 1.2769, -0.3513, -1.0965,  0.1614,  0.0282], requires_grad=True) loss 0.2762957215309143\n",
      "w1 tensor([ 1.2796, -0.3513, -1.0971,  0.1635,  0.0252], requires_grad=True) loss 0.27408984303474426\n",
      "w1 tensor([ 1.2824, -0.3512, -1.0976,  0.1657,  0.0221], requires_grad=True) loss 0.2719014585018158\n",
      "w1 tensor([ 1.2851, -0.3511, -1.0981,  0.1678,  0.0191], requires_grad=True) loss 0.2697305977344513\n",
      "w1 tensor([ 1.2878, -0.3510, -1.0987,  0.1699,  0.0160], requires_grad=True) loss 0.26757708191871643\n",
      "w1 tensor([ 1.2905, -0.3509, -1.0992,  0.1720,  0.0130], requires_grad=True) loss 0.2654407322406769\n",
      "w1 tensor([ 1.2932, -0.3508, -1.0997,  0.1742,  0.0100], requires_grad=True) loss 0.2633214592933655\n",
      "w1 tensor([ 1.2959, -0.3507, -1.1002,  0.1763,  0.0070], requires_grad=True) loss 0.2612190842628479\n",
      "w1 tensor([ 1.2986, -0.3507, -1.1008,  0.1783,  0.0040], requires_grad=True) loss 0.2591334879398346\n",
      "w1 tensor([ 1.3013e+00, -3.5057e-01, -1.1013e+00,  1.8043e-01,  1.0439e-03],\n",
      "       requires_grad=True) loss 0.2570645809173584\n",
      "w1 tensor([ 1.3039, -0.3505, -1.1018,  0.1825, -0.0019], requires_grad=True) loss 0.255012184381485\n",
      "w1 tensor([ 1.3066, -0.3504, -1.1023,  0.1846, -0.0049], requires_grad=True) loss 0.2529761791229248\n",
      "w1 tensor([ 1.3092, -0.3503, -1.1028,  0.1866, -0.0078], requires_grad=True) loss 0.2509564161300659\n",
      "w1 tensor([ 1.3118, -0.3502, -1.1034,  0.1887, -0.0107], requires_grad=True) loss 0.24895277619361877\n",
      "w1 tensor([ 1.3145, -0.3501, -1.1039,  0.1907, -0.0136], requires_grad=True) loss 0.24696512520313263\n",
      "w1 tensor([ 1.3171, -0.3501, -1.1044,  0.1928, -0.0165], requires_grad=True) loss 0.24499337375164032\n",
      "w1 tensor([ 1.3196, -0.3500, -1.1049,  0.1948, -0.0194], requires_grad=True) loss 0.24303731322288513\n",
      "w1 tensor([ 1.3222, -0.3499, -1.1054,  0.1968, -0.0223], requires_grad=True) loss 0.24109692871570587\n",
      "w1 tensor([ 1.3248, -0.3498, -1.1059,  0.1988, -0.0252], requires_grad=True) loss 0.23917202651500702\n",
      "w1 tensor([ 1.3274, -0.3497, -1.1064,  0.2008, -0.0280], requires_grad=True) loss 0.23726248741149902\n",
      "w1 tensor([ 1.3299, -0.3496, -1.1069,  0.2028, -0.0309], requires_grad=True) loss 0.23536813259124756\n",
      "w1 tensor([ 1.3325, -0.3496, -1.1074,  0.2048, -0.0337], requires_grad=True) loss 0.23348896205425262\n",
      "w1 tensor([ 1.3350, -0.3495, -1.1079,  0.2068, -0.0365], requires_grad=True) loss 0.2316247969865799\n",
      "w1 tensor([ 1.3375, -0.3494, -1.1084,  0.2087, -0.0393], requires_grad=True) loss 0.22977547347545624\n",
      "w1 tensor([ 1.3400, -0.3493, -1.1089,  0.2107, -0.0421], requires_grad=True) loss 0.22794094681739807\n",
      "w1 tensor([ 1.3425, -0.3492, -1.1094,  0.2127, -0.0449], requires_grad=True) loss 0.22612106800079346\n",
      "w1 tensor([ 1.3450, -0.3492, -1.1098,  0.2146, -0.0477], requires_grad=True) loss 0.22431573271751404\n",
      "w1 tensor([ 1.3475, -0.3491, -1.1103,  0.2165, -0.0505], requires_grad=True) loss 0.22252479195594788\n",
      "w1 tensor([ 1.3500, -0.3490, -1.1108,  0.2185, -0.0532], requires_grad=True) loss 0.2207481861114502\n",
      "w1 tensor([ 1.3524, -0.3489, -1.1113,  0.2204, -0.0560], requires_grad=True) loss 0.21898572146892548\n",
      "w1 tensor([ 1.3549, -0.3489, -1.1118,  0.2223, -0.0587], requires_grad=True) loss 0.21723735332489014\n",
      "w1 tensor([ 1.3573, -0.3488, -1.1123,  0.2242, -0.0614], requires_grad=True) loss 0.21550293266773224\n",
      "w1 tensor([ 1.3597, -0.3487, -1.1127,  0.2261, -0.0641], requires_grad=True) loss 0.21378234028816223\n",
      "w1 tensor([ 1.3622, -0.3486, -1.1132,  0.2280, -0.0668], requires_grad=True) loss 0.21207550168037415\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "\n",
    "\n",
    "\n",
    "loss = nn.MSELoss()\n",
    "w1 = torch.randn(size=(5,),requires_grad=True)\n",
    "target = torch.randn(size=(5,))\n",
    "lr = 0.001\n",
    "num_iterations = 300\n",
    "\n",
    "op = optim.SGD([w1],lr=0.01)\n",
    "\n",
    "for x in range(num_iterations):\n",
    "    loss_calc = loss(w1,target)\n",
    "    op.zero_grad()\n",
    "    loss_calc.backward()\n",
    "\n",
    "    op.step()\n",
    "    print('w1',w1,'loss',loss_calc.item())\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 148,
   "id": "301307b3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 1.4934, -0.2904,  0.2928,  0.5571, -0.5768], grad_fn=<SubBackward0>)"
      ]
     },
     "execution_count": 148,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "w1-lr*w1.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 161,
   "id": "90f7fa69",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "更新后的 w1: tensor([ 0.0620,  0.5107, -0.0070,  0.5278,  1.6134], requires_grad=True)\n",
      "更新后的 w1: tensor([ 0.0645,  0.4979, -0.0153,  0.5176,  1.6094], requires_grad=True)\n",
      "更新后的 w1: tensor([ 0.0670,  0.4851, -0.0235,  0.5074,  1.6053], requires_grad=True)\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "\n",
    "# 定义均方误差损失函数\n",
    "loss = nn.MSELoss()\n",
    "\n",
    "# 初始化参数 w1 并开启梯度计算\n",
    "w1 = torch.randn(size=(5,), requires_grad=True)\n",
    "# 目标值\n",
    "target = torch.randn(size=(5,))\n",
    "# 定义优化器\n",
    "optimizer = optim.SGD([w1], lr=0.01)\n",
    "\n",
    "# 迭代次数\n",
    "num_iterations = 3\n",
    "\n",
    "for _ in range(num_iterations):\n",
    "    # 前向传播计算损失\n",
    "    loss_calc = loss(w1, target)\n",
    "    # 梯度清零\n",
    "    optimizer.zero_grad()\n",
    "    # 反向传播计算梯度\n",
    "    loss_calc.backward()\n",
    "    # 更新参数\n",
    "    optimizer.step()\n",
    "\n",
    "    print(f\"更新后的 w1: {w1}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torch_py38",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.7rc1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
