{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "9074a269",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn.functional as F\n",
    "import torch.nn as nn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8e2df972",
   "metadata": {},
   "outputs": [],
   "source": [
     "# MSE loss between a learnable tensor w1 and a fixed target.\n",
     "# backward() populates w1.grad with d(loss)/d(w1) = 2*(w1-target)/5\n",
     "# (mean reduction over 5 elements).\n",
     "loss = nn.MSELoss()\n",
     "w1 = torch.randn(size=(5,),requires_grad=True)\n",
     "target = torch.randn(size=(5,))\n",
     "loss_calc = loss(w1,target)\n",
     "loss_calc.backward()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 129,
   "id": "c5dcf840",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.3883,  2.6746,  0.1608, -0.3933,  0.5442], requires_grad=True)"
      ]
     },
     "execution_count": 129,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Display the current value of w1 (a leaf tensor with requires_grad=True)\n",
     "w1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 146,
   "id": "1808796e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.2709, 0.1248, 0.2790, 0.1998, 0.0765])"
      ]
     },
     "execution_count": 146,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Gradient of the loss w.r.t. w1, filled in by backward()\n",
     "w1.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 131,
   "id": "5a820b7d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.4109,  0.5509,  0.3199,  0.2338,  0.3887], grad_fn=<DivBackward0>)"
      ]
     },
     "execution_count": 131,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 手动计算提取\n",
    "2*(w1-target)/5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1b9fc849",
   "metadata": {},
   "outputs": [],
   "source": [
    "loss.zero_grad()\n",
    "loss_calc = loss(w1,target)\n",
    "loss_calc.backward()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 133,
   "id": "9d730465",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.8218,  1.1018,  0.6398,  0.4676,  0.7775])"
      ]
     },
     "execution_count": 133,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# w1.grad after a second backward() without zeroing - note the values\n",
     "# are twice the single-pass gradient shown above (accumulation)\n",
     "w1.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 123,
   "id": "72cbad92",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.0278"
      ]
     },
     "execution_count": 123,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Scratch check: doubling a value by hand to verify gradient accumulation\n",
     "0.0139*2"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "38ade99f",
   "metadata": {},
   "source": [
    "# 手动提取下降"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "019bd4e5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss 5.809220790863037\n",
      "loss 5.813869476318359\n",
      "loss 5.8185224533081055\n"
     ]
    }
   ],
   "source": [
    "loss = nn.MSELoss()\n",
    "w1 = torch.randn(size=(5,),requires_grad=True)\n",
    "target = torch.randn(size=(5,))\n",
    "lr = 0.001\n",
    "num_iterations = 3\n",
    "for x in range(num_iterations):\n",
    "    loss_calc = loss(w1,target)\n",
    "    loss_calc.backward()\n",
    "    # print(w1)\n",
    "    # with torch.no_grad():\n",
    "    \n",
    "    print('loss',loss_calc.item())\n",
    "    # print('w1',w1)\n",
    "    '''\n",
    "        在 PyTorch 里，叶子张量是直接创建的张量，例如通过 torch.randn 创建的 w1。在进行参数更新时，如果你使用 w1 -= - lr*w1.grad 这种原位操作（in-place operation），\n",
    "        它实际上会创建一个新的计算图节点，导致 w1 变成非叶子张量。之后再次调用 backward() 时，由于 w1 已经不是叶子张量，它的 .grad 属性默认不会被填充，进而触发警告。\n",
    "    \n",
    "    '''\n",
    "    with torch.no_grad(): # \n",
    "        w1  -=  - lr*w1.grad\n",
    "    if w1.grad is not None:\n",
    "        w1.grad.zero_()\n",
    "        \n",
    "    \n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e5864d5e",
   "metadata": {},
   "source": [
    "# optimizer 求梯度下降"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "636973fa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "w1 tensor([ 0.7578, -0.9004,  1.0370,  0.0199, -0.0182], requires_grad=True) loss 2.2796645164489746\n",
      "w1 tensor([ 0.7547, -0.9024,  1.0251,  0.0187, -0.0231], requires_grad=True) loss 2.2614638805389404\n",
      "w1 tensor([ 0.7516, -0.9044,  1.0132,  0.0175, -0.0280], requires_grad=True) loss 2.243408441543579\n",
      "w1 tensor([ 0.7486, -0.9064,  1.0014,  0.0163, -0.0328], requires_grad=True) loss 2.225496768951416\n",
      "w1 tensor([ 0.7456, -0.9084,  0.9896,  0.0151, -0.0376], requires_grad=True) loss 2.207728862762451\n",
      "w1 tensor([ 0.7425, -0.9104,  0.9779,  0.0139, -0.0424], requires_grad=True) loss 2.1901021003723145\n",
      "w1 tensor([ 0.7395, -0.9124,  0.9662,  0.0127, -0.0472], requires_grad=True) loss 2.172616481781006\n",
      "w1 tensor([ 0.7365, -0.9143,  0.9545,  0.0115, -0.0520], requires_grad=True) loss 2.1552700996398926\n",
      "w1 tensor([ 0.7335, -0.9163,  0.9429,  0.0104, -0.0567], requires_grad=True) loss 2.1380622386932373\n",
      "w1 tensor([ 0.7305, -0.9182,  0.9314,  0.0092, -0.0615], requires_grad=True) loss 2.1209921836853027\n",
      "w1 tensor([ 0.7276, -0.9201,  0.9199,  0.0080, -0.0662], requires_grad=True) loss 2.1040585041046143\n",
      "w1 tensor([ 0.7246, -0.9221,  0.9084,  0.0069, -0.0709], requires_grad=True) loss 2.087259292602539\n",
      "w1 tensor([ 0.7217, -0.9240,  0.8970,  0.0057, -0.0755], requires_grad=True) loss 2.070594549179077\n",
      "w1 tensor([ 0.7187, -0.9259,  0.8857,  0.0045, -0.0802], requires_grad=True) loss 2.054063081741333\n",
      "w1 tensor([ 0.7158, -0.9278,  0.8744,  0.0034, -0.0848], requires_grad=True) loss 2.037663459777832\n",
      "w1 tensor([ 0.7129, -0.9297,  0.8631,  0.0023, -0.0894], requires_grad=True) loss 2.021394729614258\n",
      "w1 tensor([ 0.7100, -0.9316,  0.8519,  0.0011, -0.0940], requires_grad=True) loss 2.005255937576294\n",
      "w1 tensor([ 7.0713e-01, -9.3348e-01,  8.4068e-01, -2.0378e-05, -9.8628e-02],\n",
      "       requires_grad=True) loss 1.989246129989624\n",
      "w1 tensor([ 0.7043, -0.9353,  0.8295, -0.0012, -0.1032], requires_grad=True) loss 1.9733638763427734\n",
      "w1 tensor([ 0.7014, -0.9372,  0.8185, -0.0023, -0.1077], requires_grad=True) loss 1.9576085805892944\n",
      "w1 tensor([ 0.6985, -0.9391,  0.8074, -0.0034, -0.1123], requires_grad=True) loss 1.9419790506362915\n",
      "w1 tensor([ 0.6957, -0.9409,  0.7964, -0.0045, -0.1168], requires_grad=True) loss 1.9264743328094482\n",
      "w1 tensor([ 0.6929, -0.9428,  0.7854, -0.0056, -0.1213], requires_grad=True) loss 1.9110933542251587\n",
      "w1 tensor([ 0.6901, -0.9446,  0.7745, -0.0067, -0.1257], requires_grad=True) loss 1.8958351612091064\n",
      "w1 tensor([ 0.6872, -0.9464,  0.7637, -0.0078, -0.1302], requires_grad=True) loss 1.880698561668396\n",
      "w1 tensor([ 0.6845, -0.9483,  0.7528, -0.0089, -0.1346], requires_grad=True) loss 1.8656831979751587\n",
      "w1 tensor([ 0.6817, -0.9501,  0.7420, -0.0100, -0.1390], requires_grad=True) loss 1.8507875204086304\n",
      "w1 tensor([ 0.6789, -0.9519,  0.7313, -0.0111, -0.1434], requires_grad=True) loss 1.8360109329223633\n",
      "w1 tensor([ 0.6761, -0.9537,  0.7206, -0.0122, -0.1478], requires_grad=True) loss 1.821352243423462\n",
      "w1 tensor([ 0.6734, -0.9555,  0.7100, -0.0133, -0.1522], requires_grad=True) loss 1.8068106174468994\n"
     ]
    }
   ],
   "source": [
    "'''\n",
    "手动求提取降低\n",
    "'''\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "\n",
    "loss = nn.MSELoss()\n",
    "w1 = torch.randn(size=(5,),requires_grad=True)\n",
    "target = torch.randn(size=(5,))\n",
    "lr = 0.001\n",
    "num_iterations = 30\n",
    "\n",
    "op = optim.SGD([w1],lr=0.01)\n",
    "\n",
    "for x in range(num_iterations):\n",
    "    loss_calc = loss(w1,target)\n",
    "    op.zero_grad()\n",
    "    loss_calc.backward()\n",
    "\n",
    "    op.step()\n",
    "    print('w1',w1,'loss',loss_calc.item())\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 148,
   "id": "301307b3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 1.4934, -0.2904,  0.2928,  0.5571, -0.5768], grad_fn=<SubBackward0>)"
      ]
     },
     "execution_count": 148,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Preview of one SGD step computed out-of-place (does not modify w1;\n",
     "# note the result is non-leaf - grad_fn=<SubBackward0>)\n",
     "w1-lr*w1.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 161,
   "id": "90f7fa69",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "更新后的 w1: tensor([ 0.0620,  0.5107, -0.0070,  0.5278,  1.6134], requires_grad=True)\n",
      "更新后的 w1: tensor([ 0.0645,  0.4979, -0.0153,  0.5176,  1.6094], requires_grad=True)\n",
      "更新后的 w1: tensor([ 0.0670,  0.4851, -0.0235,  0.5074,  1.6053], requires_grad=True)\n"
     ]
    }
   ],
   "source": [
     "import torch\n",
     "import torch.nn as nn\n",
     "import torch.optim as optim\n",
     "\n",
     "# Define the mean-squared-error loss\n",
     "loss = nn.MSELoss()\n",
     "\n",
     "# Initialize parameter w1 with gradient tracking enabled\n",
     "w1 = torch.randn(size=(5,), requires_grad=True)\n",
     "# Target values\n",
     "target = torch.randn(size=(5,))\n",
     "# Define the optimizer\n",
     "optimizer = optim.SGD([w1], lr=0.01)\n",
     "\n",
     "# Number of iterations\n",
     "num_iterations = 3\n",
     "\n",
     "for _ in range(num_iterations):\n",
     "    # Forward pass: compute the loss\n",
     "    loss_calc = loss(w1, target)\n",
     "    # Zero out gradients from the previous step\n",
     "    optimizer.zero_grad()\n",
     "    # Backward pass: compute gradients\n",
     "    loss_calc.backward()\n",
     "    # Update the parameters\n",
     "    optimizer.step()\n",
     "\n",
     "    print(f\"更新后的 w1: {w1}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ecbc4bc",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "331592bd",
   "metadata": {},
   "outputs": [],
   "source": [
     "class TextSentiment(nn.Module):\n",
     "    \"\"\"Bag-of-embeddings text classifier: embedding -> avg pool -> linear -> softmax.\"\"\"\n",
     "\n",
     "    def __init__(self, vocab_size, embed_dim, num_class, batch_size, cutlen):\n",
     "        \"\"\"\n",
     "        :param vocab_size: total number of distinct tokens in the corpus\n",
     "        :param embed_dim: dimensionality of the word embeddings\n",
     "        :param num_class: number of target text classes\n",
     "        :param batch_size: number of sentences packed into one input, e.g. 64\n",
     "        :param cutlen: tokens per sentence (fixed cut length), e.g. 64\n",
     "        \"\"\"\n",
     "        # Equivalent legacy form: super(TextSentiment, self).__init__()\n",
     "        super().__init__()\n",
     "        self.vocab_size = vocab_size  # vocabulary size\n",
     "        self.embed_dim = embed_dim  # embedding dimension, e.g. 128\n",
     "        self.num_class = num_class  # number of classes, e.g. 4\n",
     "        self.batch_size = batch_size  # e.g. 64\n",
     "        self.cutlen = cutlen  # tokens per sentence, e.g. 64\n",
     "\n",
     "        # sparse=True: only the embedding rows actually looked up receive\n",
     "        # gradient updates on each step\n",
     "        self.embedding = nn.Embedding(vocab_size, embed_dim, sparse=True)\n",
     "        self.fc = nn.Linear(embed_dim, num_class)\n",
     "\n",
     "        self.init_weights()\n",
     "\n",
     "    def init_weights(self, ):\n",
     "        \"\"\"Initialize embedding weights uniformly and zero the linear bias.\"\"\"\n",
     "        # Range for the uniform init - generally kept below 1\n",
     "        initrange = 0.5\n",
     "        # Tip: an all-zero initialization is very hard to train\n",
     "        # uniform_ draws from U(-initrange, initrange)\n",
     "        self.embedding.weight.data.uniform_(-initrange, initrange)\n",
     "        # Initializing the bias to 0 is harmless given its role\n",
     "        self.fc.bias.data.zero_()\n",
     "\n",
     "    def forward(self, text):\n",
     "        \"\"\"\n",
     "        :param text: 1-D tensor of token ids, e.g. shape (4096,)\n",
     "        :return: tensor of per-sentence class probabilities, shape (batch, num_class)\n",
     "        \"\"\"\n",
     "        # input: (batch * m) - m is the sentence length; the batch is\n",
     "        # concatenated horizontally, i.e. many sentences joined into one sequence\n",
     "        # label: (batch * 1)\n",
     "        # example m=4 : input = [ 1 2 3 4 1 2 3 4 1 2 3 4 ...], [ 1 2 1 ...]\n",
     "        embedded = self.embedding(text)  # ((batch * m), embed_dim)\n",
     "        embedded = embedded.transpose(1, 0).unsqueeze(0)  # (1, embed_dim, (batch * m))\n",
     "\n",
     "        # avg_pool1d acts along the last dim and requires a 3-D input;\n",
     "        # pooling with kernel_size=cutlen averages the embeddings of the\n",
     "        # words belonging to the same sentence\n",
     "        embedded = F.avg_pool1d(embedded, kernel_size=self.cutlen)\n",
     "\n",
     "        # NOTE(review): softmax is applied here, so pair this model with NLLLoss\n",
     "        # on log-probs rather than CrossEntropyLoss (which applies log-softmax\n",
     "        # itself) - confirm against the training code\n",
     "        return F.softmax(self.fc(embedded[0].transpose(1, 0)), dim=1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "49b1513c",
   "metadata": {},
   "outputs": [],
   "source": [
     "# vocab_size=1000, embed_dim=128, num_class=4, batch_size=64, cutlen=64\n",
     "model = TextSentiment(1000,128,4,64,64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f2ad130f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'torch.nn.parameter.Parameter'>\n",
      "True\n",
      "<class 'torch.nn.parameter.Parameter'>\n",
      "True\n"
     ]
    },
    {
     "ename": "",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31mThe Kernel crashed while executing code in the current cell or a previous cell. \n",
      "\u001b[1;31mPlease review the code in the cell(s) to identify a possible cause of the failure. \n",
      "\u001b[1;31mClick <a href='https://aka.ms/vscodeJupyterKernelCrash'>here</a> for more info. \n",
      "\u001b[1;31mView Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details."
     ]
    }
   ],
   "source": [
     "# Every registered parameter is an nn.Parameter with requires_grad=True\n",
     "for _ in model.parameters():\n",
     "    print(type(_))\n",
     "    print(_.requires_grad)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4515c3cd",
   "metadata": {},
   "source": [
    "# 模型手动 梯度下降"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "27576474",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/5, Loss: 1.306616187095642\n",
      "Epoch 2/5, Loss: 1.263529896736145\n",
      "Epoch 3/5, Loss: 1.2229125499725342\n",
      "Epoch 4/5, Loss: 1.184615135192871\n",
      "Epoch 5/5, Loss: 1.1484981775283813\n"
     ]
    }
   ],
   "source": [
     "import torch\n",
     "import torch.nn as nn\n",
     "\n",
     "# Define a simple linear model\n",
     "class SimpleModel(nn.Module):\n",
     "    def __init__(self):\n",
     "        super(SimpleModel, self).__init__()\n",
     "        self.linear = nn.Linear(10, 1)\n",
     "\n",
     "    def forward(self, x):\n",
     "        return self.linear(x)\n",
     "\n",
     "# Instantiate the model\n",
     "model = SimpleModel()\n",
     "\n",
     "# Generate some example data\n",
     "input_data = torch.randn(32, 10)\n",
     "target = torch.randn(32, 1)\n",
     "\n",
     "# Define the loss function\n",
     "loss_function = nn.MSELoss()\n",
     "\n",
     "# Learning rate\n",
     "learning_rate = 0.01\n",
     "\n",
     "# Number of epochs\n",
     "num_epochs = 5\n",
     "\n",
     "for epoch in range(num_epochs):\n",
     "    # Forward pass\n",
     "    output = model(input_data)\n",
     "    # Compute the loss\n",
     "    loss = loss_function(output, target)\n",
     "\n",
     "    # Backward pass (gradients accumulate into each param.grad)\n",
     "    loss.backward()\n",
     "\n",
     "    # Manually update the parameters; no_grad() keeps the in-place update\n",
     "    # out of the autograd graph so the params remain leaf tensors\n",
     "    with torch.no_grad():\n",
     "        for param in model.parameters():\n",
     "            param -= learning_rate * param.grad\n",
     "\n",
     "    # Zero the gradients for the next iteration\n",
     "    for param in model.parameters():\n",
     "        param.grad.zero_()\n",
     "\n",
     "    print(f'Epoch {epoch + 1}/{num_epochs}, Loss: {loss.item()}')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torch_py38",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.7rc1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
