{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# torch - autograd\n",
    "#\n",
    "''' \n",
    "In PyTorch, the core of all neural networks is the autograd package.\n",
    "\n",
    "torch.Tensor is the central class of this package. If you set its attribute .requires_grad to True, it will track all operations on that tensor.\n",
    "When the computation is finished, call .backward() to compute all the gradients automatically. The gradients for this tensor are accumulated into its .grad attribute. To stop\n",
    "a tensor from tracking history, call .detach() to detach it from the computation history and prevent future computation from being recorded.\n",
    "To prevent tracking history altogether, you can wrap a code block in with torch.no_grad():. This is especially useful when evaluating a model, because the model may have\n",
    "trainable parameters with requires_grad = True, but we do not need gradient computation for them during evaluation. \n",
    "There is one more class that is very important for the autograd implementation: Function. \n",
    "Tensor and Function are interconnected and build up an acyclic graph that encodes the complete history of computation.\n",
    "Each tensor has a .grad_fn attribute referencing the Function that created the Tensor (except for tensors created directly by the user, whose grad_fn is None).\n",
    "To compute derivatives, call .backward() on a Tensor. If the Tensor is a scalar (i.e. it holds a single-element value), backward() needs\n",
    "no arguments; if it has more elements, you must pass a gradient argument, a tensor of matching shape.\n",
    "'''\n",
    "import torch\n",
    "\n",
    "# Create a tensor and set requires_grad = True to track computation on it\n",
    "# x = 1\n",
    "x = torch.ones(2, 2, requires_grad=True)\n",
    "print(x)\n",
    "\n",
    "# Perform one operation on the tensor.\n",
    "# y is the result of a computation, so it has a grad_fn attribute.\n",
    "y = x + 2\n",
    "print(y)\n",
    "print(y.grad_fn)\n",
    "\n",
    "# More operations on y:  z = 3*(x+2)^2\n",
    "z = y * y * 3\n",
    "\n",
    "# out = sum(zi) / 4\n",
    "out = z.mean()\n",
    "print(z,'\\n', out)\n",
    "print()\n",
    "\n",
    "# .requires_grad_(...) changes the requires_grad flag of an existing tensor in place. If not specified, the flag defaults to False.\n",
    "print('.requires_grad_(...) 原地改变了现有张量的 requires_grad 标志。如果没有指定的话, 默认输入的这个标志是False')\n",
    "a = torch.randn(2, 2)\n",
    "a = ((a * 3) / (a - 1))\n",
    "print(a.requires_grad)\n",
    "a.requires_grad_(True)\n",
    "print(a.requires_grad)\n",
    "b = (a * a).sum()\n",
    "print(b.grad_fn)\n",
    "print()\n",
    "\n",
    "# Because out is a scalar, we can call backward() with no arguments;\n",
    "# out.backward() is equivalent to out.backward(torch.tensor(1.))\n",
    "out.backward()\n",
    "\n",
    "# Print the derivative d(out)/dx = (3/2)*(x+2); with x=1 this gives 9/2 = 4.5\n",
    "print(x.grad)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# torch - gradients\n",
    "#\n",
    "import torch\n",
    "\n",
    "x = torch.randn(3, requires_grad=True)\n",
    "y = x * 2\n",
    "# print(y)\n",
    "\n",
    "# Scale y until its norm reaches 1000. Use .detach() (instead of the\n",
    "# deprecated and unsafe .data attribute) so reading the norm for the loop\n",
    "# condition is not recorded in the autograd graph; the loop behavior is\n",
    "# identical.\n",
    "while y.detach().norm() < 1000:\n",
    "    y = y * 2\n",
    "\n",
    "print(y)\n",
    "\n",
    "# y is not a scalar, so backward() needs a gradient argument (a vector v of\n",
    "# matching shape); this computes the vector-Jacobian product v^T * J.\n",
    "v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)\n",
    "y.backward(v)\n",
    "print(x.grad)\n",
    "\n",
    "# To stop tracking history (and save memory), wrap a code block in with torch.no_grad():.\n",
    "# This is especially useful when evaluating a model, since it may have trainable\n",
    "# parameters with requires_grad = True whose gradients we do not need.\n",
    "print(x.requires_grad)\n",
    "print((x ** 2).requires_grad)\n",
    "with torch.no_grad():\n",
    "    print((x ** 2).requires_grad)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.7"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "60d0b561bcf179bd0cd76361af4313e00281c3c5daca1903769e6cc3dd82b37c"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
