{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "eecf0b9f",
   "metadata": {},
   "source": [
    "# 激活函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "d496c510",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "63120b81",
   "metadata": {},
   "source": [
    "### sigmoid 实现"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "9bfcbb58",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([-100.0000,  -77.7778,  -55.5556,  -33.3333,  -11.1111,   11.1111,\n",
      "          33.3333,   55.5556,   77.7778,  100.0000])\n",
      "tensor([0.0000e+00, 1.6655e-34, 7.4564e-25, 3.3382e-15, 1.4945e-05, 9.9999e-01,\n",
      "        1.0000e+00, 1.0000e+00, 1.0000e+00, 1.0000e+00])\n"
     ]
    }
   ],
   "source": [
    "# 得到 -100到100 之间等间距取出的 10 个点组成的向量（10个点，9个区间）\n",
    "a = torch.linspace(-100, 100, 10)\n",
    "print(a)\n",
    "\n",
    "print(torch.sigmoid(a))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "4f40061f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([0.0000e+00, 1.6655e-34, 7.4564e-25, 3.3382e-15, 1.4945e-05, 9.9999e-01,\n",
      "        1.0000e+00, 1.0000e+00, 1.0000e+00, 1.0000e+00])\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\python\\Anaconda3\\lib\\site-packages\\torch\\nn\\functional.py:1960: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n",
      "  warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n"
     ]
    }
   ],
   "source": [
    "# 或者使用 F.sigmoid（注意：nn.functional.sigmoid 已弃用，推荐使用 torch.sigmoid，见下方警告）\n",
    "from torch.nn import functional as F\n",
    "print(F.sigmoid(a))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5e5d9dd9",
   "metadata": {},
   "source": [
    "### tanh 实现"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "c2f4b227",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([-0.7616, -0.6514, -0.5047, -0.3215, -0.1107,  0.1107,  0.3215,  0.5047,\n",
      "         0.6514,  0.7616])\n"
     ]
    }
   ],
   "source": [
    "a = torch.linspace(-1, 1, 10)\n",
    "\n",
    "print(torch.tanh(a))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6da78d85",
   "metadata": {},
   "source": [
    "### ReLU 实现"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "eb0a673c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.1111, 0.3333, 0.5556, 0.7778,\n",
      "        1.0000])\n",
      "tensor([0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.1111, 0.3333, 0.5556, 0.7778,\n",
      "        1.0000])\n"
     ]
    }
   ],
   "source": [
    "from torch.nn import functional as F\n",
    "\n",
    "a = torch.linspace(-1, 1, 10)\n",
    "print(torch.relu(a))\n",
    "\n",
    "print(F.relu(a))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "36ca1244",
   "metadata": {},
   "source": [
    "# Loss\n",
    "\n",
    "- Mean Squared Error (MSE) 均方差\n",
    "\n",
    "$$\n",
    "\\mathrm{MSE} = \\frac{1}{n}\\sum_{i=1}^{n}{(y_i - \\hat{y}_i)^2}\n",
    "$$\n",
    "\n",
    "- Cross Entropy Loss 用于分类的误差\n",
    "  - binary 可用于二分类\n",
    "  \n",
    "  - multi-class 可用于多分类\n",
    "  \n",
    "  - +softmax 可以和softmax激活函数配合使用\n",
    "  \n",
    "  - Leave it to Logistic Regression Part"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "af37bfed",
   "metadata": {},
   "source": [
    "### Pytorch 自动求导\n",
    "\n",
    "- autograd.grad 求导"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "63d9298d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(1., grad_fn=<MseLossBackward0>)\n",
      "tensor([2.], requires_grad=True)\n",
      "(tensor([2.]),)\n",
      "(tensor([2.]),)\n"
     ]
    }
   ],
   "source": [
    "# pred = xw + b\n",
    "#        1 * 2 + 0\n",
    "x = torch.ones(1)\n",
    "# requires_grad=True 告诉pytorch需要grad信息\n",
    "w = torch.full([1], 2., requires_grad=True)\n",
    "\n",
    "# 目标值 label -> torch.ones(1)，预测值 pred -> x*w（对应上面 pred = xw + b）\n",
    "# mse_loss 对两个参数是对称的，参数顺序不影响数值结果\n",
    "mse = F.mse_loss(torch.ones(1), x * w)\n",
    "print(mse)\n",
    "\n",
    "# 若 w 初始化时未设置 requires_grad=True，则必须先调用 w.requires_grad_() 才能求导，\n",
    "# 末尾的 _ 表示 inplace 操作；本例中 w 创建时已带 requires_grad=True，下面的调用仅作演示\n",
    "# print(torch.autograd.grad(mse, [w]))\n",
    "\n",
    "print(w.requires_grad_())\n",
    "\n",
    "# 第一个参数为标量输出（这里是 mse 损失），第二个参数为需要对其求导的张量列表\n",
    "print(torch.autograd.grad(mse, [w]))\n",
    "\n",
    "# 求导后必须用mse_loss计算图过程更新一下，重新得到mse\n",
    "mse = F.mse_loss(torch.ones(1), x*w)\n",
    "\n",
    "print(torch.autograd.grad(mse, [w]))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bc12abf2",
   "metadata": {},
   "source": [
    "- loss.backward 求导"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "31d80ed4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(1., grad_fn=<MseLossBackward0>)\n",
      "tensor([2.], requires_grad=True)\n",
      "(tensor([2.]),)\n",
      "tensor([2.])\n"
     ]
    }
   ],
   "source": [
    "x = torch.ones(1)\n",
    "w = torch.full([1], 2., requires_grad=True)\n",
    "mse = F.mse_loss(torch.ones(1), x*w)\n",
    "print(mse)\n",
    "\n",
    "print(w.requires_grad_())\n",
    "\n",
    "print(torch.autograd.grad(mse, [w]))\n",
    "\n",
    "mse = F.mse_loss(torch.ones(1), x*w)\n",
    "# 反向传播，结果赋在每个需要梯度的tensor上面\n",
    "mse.backward()\n",
    "\n",
    "print(w.grad)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a72cc09b",
   "metadata": {},
   "source": [
    "### softmax激活函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "c7c487ea",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([0.4426, 0.2070, 0.2458], requires_grad=True)\n",
      "(tensor([-0.1159,  0.2110, -0.0952]),)\n",
      "(tensor([-0.1204, -0.0952,  0.2156]),)\n"
     ]
    }
   ],
   "source": [
    "a = torch.rand(3)\n",
    "# 标注为需要 grad信息\n",
    "print(a.requires_grad_())\n",
    "\n",
    "# dim=0 指定softmax的维度\n",
    "# p = F.softmax(a, dim=0)\n",
    "\n",
    "# print(p.backward())\n",
    "# RuntimeError: grad can be implicitly created only for scalar outputs\n",
    "# RuntimeError: Trying to backward through the graph a second time, but the buffers have already been freed.\n",
    "# Specify retain_graph=True when calling backward the first time.\n",
    "\n",
    "p = F.softmax(a, dim=0)\n",
    "\n",
    "# retain_graph=True 保存图信息\n",
    "# 当计算相同索引值时 p[1]结果为正，其他为负\n",
    "print(torch.autograd.grad(p[1], [a], retain_graph=True))\n",
    "\n",
    "print(torch.autograd.grad(p[2], [a]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bc9dbaa0",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
