{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 求导",
   "id": "d8e494a1e6af0c54"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "近似求导",
   "id": "761eb803c475a05d"
  },
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-03-07T13:58:15.843847Z",
     "start_time": "2025-03-07T13:58:15.840045Z"
    }
   },
   "source": [
    "def f(x):\n",
    "    \"\"\"Quadratic test function: f(x) = 3x^2 + 2x + 1.\"\"\"\n",
    "    return 3. * x ** 2 + 2. * x + 1.\n",
    "\n",
    "\n",
    "def approx_derivative(f, x, eps=1e-6):\n",
    "    \"\"\"Central-difference approximation of f'(x); eps is the half step.\"\"\"\n",
    "    rise = f(x + eps) - f(x - eps)\n",
    "    return rise / (2. * eps)\n",
    "\n",
    "\n",
    "# Analytic derivative is 6x + 2, so f'(2) = 14.\n",
    "print(approx_derivative(f, 2.))"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "14.000000001956892\n"
     ]
    }
   ],
   "execution_count": 1
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "偏导数",
   "id": "fb2830cbd2343d81"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-07T14:05:17.404852Z",
     "start_time": "2025-03-07T14:05:17.402001Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def g(x1, x2):\n",
    "    \"\"\"Two-variable function g(x1, x2) = (x1 + 5) * x2^2.\"\"\"\n",
    "    return (x1 + 5) * (x2 ** 2)\n",
    "\n",
    "\n",
    "def approx_partial_derivative(g, x1, x2, eps=1e-6):\n",
    "    \"\"\"Approximate both partials of g at (x1, x2).\n",
    "\n",
    "    Holds one argument fixed and differentiates along the other:\n",
    "    for the g above, dg/dx1 = x2^2 and dg/dx2 = 2 * x2 * (x1 + 5).\n",
    "    \"\"\"\n",
    "    dg_x1 = approx_derivative(lambda x: g(x, x2), x1, eps)\n",
    "    dg_x2 = approx_derivative(lambda x: g(x1, x), x2, eps)\n",
    "    return dg_x1, dg_x2\n",
    "\n",
    "\n",
    "# Expected: (3^2, 2*3*(2+5)) = (9, 42)\n",
    "print(approx_partial_derivative(g, 2., 3.))"
   ],
   "id": "ca07b03871b87692",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(8.999999998593466, 42.00000000409432)\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## torch 近似求导",
   "id": "a1018a0789441fb3"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-07T14:13:12.330666Z",
     "start_time": "2025-03-07T14:13:12.326578Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "\n",
    "x1 = torch.tensor([2.], requires_grad=True)\n",
    "x2 = torch.tensor([3.], requires_grad=True)\n",
    "\n",
    "\n",
    "def g(x1, x2):\n",
    "    \"\"\"g(x1, x2) = (x1 + 5) * x2^2, built from differentiable tensor ops.\"\"\"\n",
    "    return (x1 + 5) * (x2 ** 2)\n",
    "\n",
    "\n",
    "y = g(x1, x2)\n",
    "\n",
    "# retain_graph=True keeps the computation graph alive so y can be\n",
    "# differentiated again (autograd.grad frees the graph by default).\n",
    "(dy_dx1,) = torch.autograd.grad(y, x1, retain_graph=True)\n",
    "(dy_dx2,) = torch.autograd.grad(y, x2, retain_graph=True)\n",
    "print(dy_dx1)\n",
    "print(dy_dx2)"
   ],
   "id": "6103b1cd07a59ed1",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([9.])\n",
      "tensor([42.])\n"
     ]
    }
   ],
   "execution_count": 10
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-07T14:13:14.811278Z",
     "start_time": "2025-03-07T14:13:14.808376Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Because the earlier calls used retain_graph=True, the graph is still\n",
    "# alive and this extra grad call succeeds. Without retain_graph=True the\n",
    "# graph would already have been freed and autograd would raise an error.\n",
    "try:\n",
    "    (dy_dx2,) = torch.autograd.grad(y, x2, retain_graph=True)\n",
    "    print(dy_dx2)\n",
    "except Exception as e:\n",
    "    print(e)"
   ],
   "id": "7cfeadb6c9992038",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([42.])\n"
     ]
    }
   ],
   "execution_count": 11
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-07T14:14:04.860749Z",
     "start_time": "2025-03-07T14:14:04.857102Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Differentiate with respect to both inputs in a single call.\n",
    "\n",
    "x1 = torch.tensor([2.], requires_grad=True)\n",
    "x2 = torch.tensor([3.], requires_grad=True)\n",
    "y = g(x1, x2)\n",
    "\n",
    "# create_graph=True records a graph for the gradients themselves,\n",
    "# so they can be differentiated again later (higher-order derivatives).\n",
    "dy_dx1, dy_dx2 = torch.autograd.grad(y, [x1, x2], create_graph=True)\n",
    "\n",
    "print(dy_dx1, dy_dx2)"
   ],
   "id": "6644180b4436c16",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([9.], grad_fn=<MulBackward0>) tensor([42.], grad_fn=<MulBackward0>)\n"
     ]
    }
   ],
   "execution_count": 16
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-07T14:14:41.358814Z",
     "start_time": "2025-03-07T14:14:41.353540Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# In practice we usually call backward() instead of autograd.grad:\n",
    "# it accumulates each gradient into the leaf tensor's .grad attribute.\n",
    "\n",
    "x1 = torch.tensor([2.], requires_grad=True)\n",
    "x2 = torch.tensor([3.], requires_grad=True)\n",
    "y = g(x1, x2)\n",
    "\n",
    "y.backward()\n",
    "print(x1.grad, x2.grad)"
   ],
   "id": "3c7f78c9e518ea32",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([9.]) tensor([42.])\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([63.], grad_fn=<MulBackward0>)"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 20
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-07T14:14:52.493490Z",
     "start_time": "2025-03-07T14:14:52.489546Z"
    }
   },
   "cell_type": "code",
   "source": [
    "x1 = torch.tensor([2.], requires_grad=True)\n",
    "x2 = torch.tensor([3.], requires_grad=True)\n",
    "y = g(x1, x2)\n",
    "\n",
    "# Second-order partial derivatives (the Hessian entries of g).\n",
    "# create_graph=True lets us differentiate the first-order gradients again.\n",
    "dy_dx1, dy_dx2 = torch.autograd.grad(y, [x1, x2], create_graph=True)\n",
    "# allow_unused=True returns None instead of raising when an input does not\n",
    "# appear in the gradient's graph (here d2y/dx1^2 = 0, so x1 is unused).\n",
    "dy_dx1_dx1, dy_dx1_dx2 = torch.autograd.grad(dy_dx1, [x1, x2], allow_unused=True)\n",
    "dy_dx2_dx1, dy_dx2_dx2 = torch.autograd.grad(dy_dx2, [x1, x2], allow_unused=True)\n",
    "# Print all four entries (the original omitted dy_dx1_dx2, leaving it\n",
    "# unused); the mixed partials both equal 2 * x2 = 6, as expected.\n",
    "print(dy_dx1_dx1, dy_dx1_dx2, dy_dx2_dx1, dy_dx2_dx2)"
   ],
   "id": "d7ba07d1f5f3f793",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "None tensor([6.]) tensor([14.])\n"
     ]
    }
   ],
   "execution_count": 21
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-07T14:14:59.924982Z",
     "start_time": "2025-03-07T14:14:59.914318Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Gradient-descent demo: minimize f(x) = 3x^2 + 2x + 1 by hand.\n",
    "# The analytic minimum is at x = -1/3.\n",
    "import torch\n",
    "\n",
    "learning_rate = 0.3\n",
    "x = torch.tensor(2.0, requires_grad=True)  # the parameter we optimize\n",
    "for _ in range(100):\n",
    "    z = f(x)      # loss\n",
    "    z.backward()  # populate x.grad\n",
    "    # Update outside the autograd graph; mutating x.data (as the original\n",
    "    # did) is deprecated and can silently break gradient tracking.\n",
    "    with torch.no_grad():\n",
    "        x -= learning_rate * x.grad  # the equivalent of optimizer.step()\n",
    "    x.grad.zero_()  # clear the accumulated gradient (optimizer.zero_grad())\n",
    "print(x)"
   ],
   "id": "e94af754fc9a47f0",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(-0.3333, requires_grad=True)\n"
     ]
    }
   ],
   "execution_count": 22
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
