{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b0a3c030",
   "metadata": {},
   "outputs": [],
   "source": [
    "# PyTorch autograd walkthrough: build tensors, run backward(), inspect .grad.\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "08e7f833",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): `t` is never used again in this notebook — dead cell,\n",
    "# kept only as a plain (untracked) tensor construction example.\n",
    "t = torch.tensor([[1,2,3],[4,5,6]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "4c17a9db",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scalar tensors WITHOUT requires_grad — autograd will not track them.\n",
    "a = torch.tensor(1.0)\n",
    "b = torch.tensor(2.0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "c7da4d46",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "None\n"
     ]
    }
   ],
   "source": [
    "# .grad is None: no backward() has run (and `a` is not tracked anyway).\n",
    "print(a.grad)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "1c2d7ca2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Neither operand requires grad, so `c` carries no grad_fn.\n",
    "c = a + b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "5b5340b5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "element 0 of tensors does not require grad and does not have a grad_fn\n"
     ]
    }
   ],
   "source": [
    "# Demonstration: backward() on an untracked result raises RuntimeError;\n",
    "# the exception is caught deliberately so the message can be shown.\n",
    "try:\n",
    "    c.backward()\n",
    "except RuntimeError as e:\n",
    "    print(e)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "beed4610",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-create the leaf tensors with gradient tracking enabled.\n",
    "a = torch.tensor(1.0, requires_grad=True)\n",
    "b = torch.tensor(2.0, requires_grad=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e8df0d11",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "None None\n"
     ]
    }
   ],
   "source": [
    "# Still None: .grad is only populated by a backward() call.\n",
    "print(a.grad, b.grad)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "56bad030",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(3., grad_fn=<AddBackward0>)\n"
     ]
    }
   ],
   "source": [
    "# The result now carries grad_fn=<AddBackward0>, linking it into the graph.\n",
    "c = a + b\n",
    "print(c)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "a55ddbdb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scalar output, so backward() needs no explicit gradient argument.\n",
    "c.backward()\n",
    "# torch.autograd.backward(c)  # equivalent functional spelling\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "39954daa",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor(3., grad_fn=<AddBackward0>),\n",
       " tensor(1., requires_grad=True),\n",
       " tensor(2., requires_grad=True))"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Rich display of the graph output and its two leaves.\n",
    "c, a, b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "8d5ca08b",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor(1.), tensor(1.))"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# d(a+b)/da = d(a+b)/db = 1, as expected for addition.\n",
    "a.grad, b.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "191f9807",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fresh leaves for a multiplication example (also resets stale .grad).\n",
    "a = torch.tensor(42.0, requires_grad=True)\n",
    "b = torch.tensor(99.0, requires_grad=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "2c634492",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(4158., grad_fn=<MulBackward0>)"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Product node: grad_fn is MulBackward0.\n",
    "c = a * b\n",
    "c"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "cbebca79",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "c.backward()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "c91a5de3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor(99.), tensor(42.))"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# d(a*b)/da = b and d(a*b)/db = a.\n",
    "a.grad, b.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "16851927",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "None\n"
     ]
    }
   ],
   "source": [
    "# Leaf tensors are user-created, so they have no grad_fn.\n",
    "print(a.grad_fn)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "c554d1bb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "True\n"
     ]
    }
   ],
   "source": [
    "print(a.is_leaf)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "7a8e24c5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<MulBackward0 object at 0x000001E264B841F0> False\n"
     ]
    }
   ],
   "source": [
    "# `c` is an intermediate result: it has a grad_fn and is not a leaf.\n",
    "print(c.grad_fn, c.is_leaf)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a1649823",
   "metadata": {},
   "source": [
    "Composite expression — we will compute the gradient of $f$ with respect to every input:\n",
    "\n",
    "$$\n",
    "f = a \\times b + \\frac{c \\times d^2}{e}\n",
    "$$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "3f6b2ead",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Five scalar leaves for the composite expression above.\n",
    "a = torch.tensor(2., requires_grad=True)\n",
    "b = torch.tensor(4., requires_grad=True)\n",
    "c = torch.tensor(6., requires_grad=True)\n",
    "d = torch.tensor(8., requires_grad=True)\n",
    "e = torch.tensor(10., requires_grad=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "16a7c26e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "None\n"
     ]
    }
   ],
   "source": [
    "print(a.grad)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "272bf356",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Intermediates: g = a*b, h = c*d^2/e, f = g + h.\n",
    "g = a * b\n",
    "h = c*d**2/e\n",
    "f = g + h"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "6ee61687",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "False <MulBackward0 object at 0x000001E264C1F7F0>\n"
     ]
    }
   ],
   "source": [
    "# `g` is an intermediate node, not a leaf.\n",
    "print(g.is_leaf, g.grad_fn)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "ce646161",
   "metadata": {},
   "outputs": [],
   "source": [
    "f.backward()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "b33afd13",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(4.)\n",
      "tensor(2.)\n",
      "tensor(6.4000)\n",
      "tensor(9.6000)\n",
      "tensor(-3.8400)\n"
     ]
    }
   ],
   "source": [
    "# Analytic check: df/da=b=4, df/db=a=2, df/dc=d^2/e=6.4,\n",
    "# df/dd=2cd/e=9.6, df/de=-c*d^2/e^2=-3.84.\n",
    "for el in [a, b, c, d, e]:\n",
    "    print(el.grad)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "cc8d6d9a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Setting .grad to None clears the accumulated gradient — for `a` only;\n",
    "# b..e still hold theirs.\n",
    "a.grad = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "27dc4897",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "d:\\Users\\Min\\Anaconda3\\envs\\torch\\lib\\site-packages\\torch\\_tensor.py:1083: UserWarning: The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad attribute won't be populated during autograd.backward(). If you indeed want the .grad field to be populated for a non-leaf Tensor, use .retain_grad() on the non-leaf Tensor. If you access the non-leaf Tensor by mistake, make sure you access the leaf Tensor instead. See github.com/pytorch/pytorch/pull/30531 for more informations. (Triggered internally at  C:\\cb\\pytorch_1000000000000\\work\\build\\aten\\src\\ATen/core/TensorBody.h:482.)\n",
      "  return self._grad\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(None, None)"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Non-leaf tensors do not retain .grad by default (see the UserWarning);\n",
    "# call g.retain_grad() before backward() if it is needed.\n",
    "g.grad, h.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "0d7a6871",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same expression, now with 3x3 matrix leaves.\n",
    "a = torch.tensor([[2.,4.,6.],[2.,3.,4.],[9.,7.,8.]], requires_grad=True)\n",
    "b = torch.tensor([[4.,9.,2.],[7.,1.,1.],[6.,7.,3.]], requires_grad=True)\n",
    "c = torch.tensor([[6.,1.,3.],[2.,3.,2.],[4.,8.,6.]], requires_grad=True)\n",
    "d = torch.tensor([[2.,2.,4.],[4.,4.,6.],[3.,2.,8.]], requires_grad=True)\n",
    "e = torch.tensor([[1.,3.,6.],[7.,1.,4.],[1.,4.,6.]], requires_grad=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "b12e0d83",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Matrix variant: g = a@b (matmul), h = c@(d**2)/e (elementwise square/div).\n",
    "g = a @ b\n",
    "h = c@d**2/e\n",
    "f = g + h"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "7a84c171",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[ 72.,  64.,  26.],\n",
      "        [ 53.,  49.,  19.],\n",
      "        [133., 144.,  49.]], grad_fn=<MmBackward0>) tensor([[15.,  9., 16.],\n",
      "        [15.,  9., 16.],\n",
      "        [15.,  9., 16.]]) tensor([[13., 13., 13.],\n",
      "        [14., 14., 14.],\n",
      "        [18., 18., 18.]])\n"
     ]
    }
   ],
   "source": [
    "# Non-scalar output: backward() requires an upstream gradient of g's shape.\n",
    "# ones_like(g) makes this the gradient of sum(g) w.r.t. the leaves.\n",
    "grad_tensor = torch.ones_like(g)\n",
    "g.backward(grad_tensor)\n",
    "print(g, a.grad, b.grad)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "f0fbc37a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[ 72.,  64.,  26.],\n",
      "        [ 53.,  49.,  19.],\n",
      "        [133., 144.,  49.]], grad_fn=<MmBackward0>) tensor([[30., 18., 32.],\n",
      "        [39., 19., 39.],\n",
      "        [30., 18., 32.]]) tensor([[26., 28., 26.],\n",
      "        [28., 31., 28.],\n",
      "        [36., 40., 36.]])\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): a.grad/b.grad were NOT reset after the previous backward(),\n",
    "# so the values printed here are ACCUMULATED (previous gradients plus this\n",
    "# weighted vector-Jacobian product). Set a.grad = None and b.grad = None\n",
    "# first if accumulation is not intended.\n",
    "g = a @ b\n",
    "h = c@d**2/e\n",
    "f = g + h\n",
    "\n",
    "grad_tensor = torch.ones_like(g)\n",
    "grad_tensor[1][1] = 2\n",
    "g.backward(grad_tensor)\n",
    "print(g, a.grad, b.grad)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "a2855f5c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[139.0000,  81.3333,  80.0000],\n",
       "        [ 63.5714, 113.0000,  86.0000],\n",
       "        [331.0000, 186.0000, 171.6667]], grad_fn=<AddBackward0>)"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Display the composite matrix result.\n",
    "f"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "c7c7b8c6",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[45., 27., 48.],\n",
      "        [54., 28., 55.],\n",
      "        [45., 27., 48.]])\n",
      "tensor([[39., 41., 39.],\n",
      "        [42., 45., 42.],\n",
      "        [54., 58., 54.]])\n",
      "tensor([[ 8.0000, 27.3333, 21.0000],\n",
      "        [ 8.5714, 27.2857, 21.2857],\n",
      "        [ 7.6667, 26.0000, 20.6667]])\n",
      "tensor([[41.1429, 20.0000, 17.3333],\n",
      "        [75.4286, 42.6667, 27.0000],\n",
      "        [55.7143, 18.0000, 32.0000]])\n",
      "tensor([[ -67.0000,   -5.7778,   -9.0000],\n",
      "        [  -1.5102,  -64.0000,  -16.7500],\n",
      "        [-198.0000,  -10.5000,  -20.4444]])\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): a.grad and b.grad still carry the sums from the two earlier\n",
    "# g.backward() calls, so their printed values here are accumulated totals;\n",
    "# c, d, e receive gradients for the first time in this pass.\n",
    "g = a @ b\n",
    "h = c@d**2/e\n",
    "f = g + h\n",
    "\n",
    "f.backward(torch.ones_like(f))\n",
    "for el in [a, b, c, d, e]:\n",
    "    print(el.grad)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "45bb3394",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1-D leaf for a dot-product gradient example.\n",
    "x = torch.tensor([1., 2., 3.], requires_grad=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "4ca08e19",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(14., grad_fn=<DotBackward0>)\n",
      "tensor([2., 4., 6.])\n"
     ]
    }
   ],
   "source": [
    "# x @ x on 1-D tensors is the dot product (scalar), so plain backward()\n",
    "# works; d(x.x)/dx = 2x.\n",
    "y = x @ x\n",
    "print(y)\n",
    "y.backward()\n",
    "print(x.grad)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "602b0047",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(14., grad_fn=<DotBackward0>)\n",
      "tensor([ 4.,  8., 12.])\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Min\\AppData\\Local\\Temp\\ipykernel_15588\\3668660662.py:1: UserWarning: The use of `x.T` on tensors of dimension other than 2 to reverse their shape is deprecated and it will throw an error in a future release. Consider `x.mT` to transpose batches of matricesor `x.permute(*torch.arange(x.ndim - 1, -1, -1))` to reverse the dimensions of a tensor. (Triggered internally at  C:\\cb\\pytorch_1000000000000\\work\\aten\\src\\ATen\\native\\TensorShape.cpp:2985.)\n",
      "  y = x.T @ x\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): .T on a 1-D tensor is a no-op and is deprecated (see the\n",
    "# UserWarning); `x @ x` computes the same dot product. Also, x.grad was not\n",
    "# cleared before this second backward(), so [4., 8., 12.] is the previous\n",
    "# 2x plus this pass's 2x — accumulated, not the fresh gradient.\n",
    "y = x.T @ x\n",
    "print(y)\n",
    "y.backward()\n",
    "print(x.grad)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.4"
  },
  "vscode": {
   "interpreter": {
    "hash": "8b80c1faea7a717805d9ab9c3a93faacda9fe03403ef3183089e64d8f6820eec"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
