{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "881da64a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "30055557",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "-2.526979426209908"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x=0.0799\n",
    "np.log(x)  # -2.5270 (natural log of 0.0799)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "ee335819",
   "metadata": {},
   "outputs": [],
   "source": [
    "out_datas = [10,9,8,7,6,5,4,3,2,1]\n",
    "out_data = torch.Tensor(out_datas).long().view(-1,1)\n",
    "input = out_data[0]\n",
    "embed = nn.Embedding(10,25)\n",
    "# embed(input)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "44edb804",
   "metadata": {},
   "outputs": [
    {
     "ename": "IndexError",
     "evalue": "index out of range in self",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mIndexError\u001b[0m                                Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[25], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[43membed\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mTensor\u001b[49m\u001b[43m(\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m10\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlong\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32mc:\\Users\\COLORFUL\\python_virtualenv\\torch39\\Scripts\\torch11.3\\lib\\site-packages\\torch\\nn\\modules\\module.py:1130\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m   1126\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[0;32m   1127\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[0;32m   1128\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[0;32m   1129\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[1;32m-> 1130\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m forward_call(\u001b[38;5;241m*\u001b[39m\u001b[38;5;28minput\u001b[39m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m   1131\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[0;32m   1132\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
      "File \u001b[1;32mc:\\Users\\COLORFUL\\python_virtualenv\\torch39\\Scripts\\torch11.3\\lib\\site-packages\\torch\\nn\\modules\\sparse.py:158\u001b[0m, in \u001b[0;36mEmbedding.forward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m    157\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[1;32m--> 158\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43membedding\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    159\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpadding_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmax_norm\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    160\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mnorm_type\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mscale_grad_by_freq\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msparse\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32mc:\\Users\\COLORFUL\\python_virtualenv\\torch39\\Scripts\\torch11.3\\lib\\site-packages\\torch\\nn\\functional.py:2199\u001b[0m, in \u001b[0;36membedding\u001b[1;34m(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)\u001b[0m\n\u001b[0;32m   2193\u001b[0m     \u001b[38;5;66;03m# Note [embedding_renorm set_grad_enabled]\u001b[39;00m\n\u001b[0;32m   2194\u001b[0m     \u001b[38;5;66;03m# XXX: equivalent to\u001b[39;00m\n\u001b[0;32m   2195\u001b[0m     \u001b[38;5;66;03m# with torch.no_grad():\u001b[39;00m\n\u001b[0;32m   2196\u001b[0m     \u001b[38;5;66;03m#   torch.embedding_renorm_\u001b[39;00m\n\u001b[0;32m   2197\u001b[0m     \u001b[38;5;66;03m# remove once script supports set_grad_enabled\u001b[39;00m\n\u001b[0;32m   2198\u001b[0m     _no_grad_embedding_renorm_(weight, \u001b[38;5;28minput\u001b[39m, max_norm, norm_type)\n\u001b[1;32m-> 2199\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43membedding\u001b[49m\u001b[43m(\u001b[49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpadding_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mscale_grad_by_freq\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msparse\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[1;31mIndexError\u001b[0m: index out of range in self"
     ]
    }
   ],
   "source": [
    "embed(torch.Tensor([10]).long())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "83c86da6",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.3202,  0.8376, -0.1786, -0.0976,  0.7952, -0.2226,  0.6480,  0.4603,\n",
       "         -1.6852,  0.4876,  1.7347,  0.8975, -1.1534,  0.6016,  0.5979, -0.5186,\n",
       "          1.0096, -0.1450, -0.1667, -0.7726, -1.0566, -1.0717, -0.7197,  0.8306,\n",
       "         -0.1832]], grad_fn=<EmbeddingBackward0>)"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "embed(torch.Tensor([9]).long())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "c2cecd76",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([9.])"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.Tensor([9])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "87398863",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d0c4a9de",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "217e710a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "79630a59",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch.utils.data import Dataset,DataLoader\n",
    "from torchvision import datasets\n",
    "from torchvision.transforms import ToTensor\n",
    "import matplotlib.pyplot as plt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "6dbf27d6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz\n",
      "Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz to data\\FashionMNIST\\raw\\train-images-idx3-ubyte.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 26421880/26421880 [00:13<00:00, 1988356.63it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting data\\FashionMNIST\\raw\\train-images-idx3-ubyte.gz to data\\FashionMNIST\\raw\n",
      "\n",
      "Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz\n",
      "Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz to data\\FashionMNIST\\raw\\train-labels-idx1-ubyte.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 29515/29515 [00:00<00:00, 161289.96it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting data\\FashionMNIST\\raw\\train-labels-idx1-ubyte.gz to data\\FashionMNIST\\raw\n",
      "\n",
      "Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz\n",
      "Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz to data\\FashionMNIST\\raw\\t10k-images-idx3-ubyte.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 4422102/4422102 [00:02<00:00, 1474099.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting data\\FashionMNIST\\raw\\t10k-images-idx3-ubyte.gz to data\\FashionMNIST\\raw\n",
      "\n",
      "Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz\n",
      "Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz to data\\FashionMNIST\\raw\\t10k-labels-idx1-ubyte.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 5148/5148 [00:00<00:00, 5143467.60it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting data\\FashionMNIST\\raw\\t10k-labels-idx1-ubyte.gz to data\\FashionMNIST\\raw\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "training_data = datasets.FashionMNIST(\n",
    "    root=\"data\",\n",
    "    train=True,\n",
    "    download=True,\n",
    "    transform=ToTensor()\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d2173a9c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([1, 28, 28])"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "type(training_data)\n",
    "\n",
    "img,label = training_data[0]\n",
    "img.shape  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "7c7cab79",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "9"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "29a26e40",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([1, 28, 28])"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "9"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "img,label = next(iter(training_data))\n",
    "display(img.shape,label)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "3163d2bd",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0., 0., 0.],\n",
       "        [0., 0., 0.],\n",
       "        [0., 0., 0.]])"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.zeros(3,3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "9ada1648",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1., 1., 1.],\n",
       "        [1., 1., 1.],\n",
       "        [1., 1., 1.]])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.ones(3,3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "28908741",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1., 0., 0., 0., 0.],\n",
       "        [0., 1., 0., 0., 0.],\n",
       "        [0., 0., 1., 0., 0.],\n",
       "        [0., 0., 0., 1., 0.]])"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.eye(4,5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "e9df68b5",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([-0.8099,  0.5247,  1.7634, -0.9410,  0.3661]), 3)"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# dataloader\n",
    "\n",
    "(torch.randn(size=(5,)),3)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "63eb51ed",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "id": "7c73c7ba",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "i[0] tensor([ 1.0979,  1.0822, -0.5132])\n",
      "i[0] tensor([-0.5916,  0.0760,  0.2440])\n",
      "i[0] tensor([ 0.5238, -0.3976,  2.0093])\n",
      "i[0] tensor([-1.9309, -0.8518,  0.9492])\n",
      "tensor([ 1.0979,  1.0822, -0.5132, -0.5916,  0.0760,  0.2440,  0.5238, -0.3976,\n",
      "         2.0093, -1.9309, -0.8518,  0.9492])\n",
      "tensor([3., 1., 1., 0.])\n",
      "i[0] tensor([ 1.1019, -0.6866, -0.8328])\n",
      "i[0] tensor([ 1.2098, -0.0257,  0.2808])\n",
      "i[0] tensor([-2.2369,  1.2053, -0.4967])\n",
      "i[0] tensor([0.2790, 0.3103, 1.5442])\n",
      "tensor([ 1.1019, -0.6866, -0.8328,  1.2098, -0.0257,  0.2808, -2.2369,  1.2053,\n",
      "        -0.4967,  0.2790,  0.3103,  1.5442])\n",
      "tensor([6., 4., 3., 5.])\n",
      "i[0] tensor([0.5200, 0.8319, 1.5055])\n",
      "i[0] tensor([-1.1421, -1.4557, -0.5195])\n",
      "i[0] tensor([ 0.6503, -1.3362,  0.4240])\n",
      "i[0] tensor([ 0.4335,  0.6956, -0.7447])\n",
      "tensor([ 0.5200,  0.8319,  1.5055, -1.1421, -1.4557, -0.5195,  0.6503, -1.3362,\n",
      "         0.4240,  0.4335,  0.6956, -0.7447])\n",
      "tensor([2., 1., 3., 1.])\n",
      "i[0] tensor([0.4491, 0.1499, 0.9924])\n",
      "i[0] tensor([ 1.9008, -1.5779,  0.3754])\n",
      "i[0] tensor([-1.4883,  0.5996, -0.1784])\n",
      "i[0] tensor([ 0.3160, -1.4346, -0.2175])\n",
      "tensor([ 0.4491,  0.1499,  0.9924,  1.9008, -1.5779,  0.3754, -1.4883,  0.5996,\n",
      "        -0.1784,  0.3160, -1.4346, -0.2175])\n",
      "tensor([4., 0., 3., 7.])\n",
      "i[0] tensor([ 0.3649, -0.0753, -2.0647])\n",
      "i[0] tensor([-1.0011,  1.2656, -1.5388])\n",
      "i[0] tensor([-0.1734,  0.2019,  1.0021])\n",
      "i[0] tensor([0.2476, 0.4751, 1.0693])\n",
      "tensor([ 0.3649, -0.0753, -2.0647, -1.0011,  1.2656, -1.5388, -0.1734,  0.2019,\n",
      "         1.0021,  0.2476,  0.4751,  1.0693])\n",
      "tensor([8., 5., 4., 6.])\n",
      "i[0] tensor([-0.2212, -0.5240,  0.4741])\n",
      "i[0] tensor([0.0638, 1.1014, 0.7821])\n",
      "i[0] tensor([-0.6865,  0.1303, -0.0621])\n",
      "i[0] tensor([ 1.6973, -1.0482, -0.4661])\n",
      "tensor([-0.2212, -0.5240,  0.4741,  0.0638,  1.1014,  0.7821, -0.6865,  0.1303,\n",
      "        -0.0621,  1.6973, -1.0482, -0.4661])\n",
      "tensor([5., 0., 2., 6.])\n",
      "i[0] tensor([-0.0217, -0.6917, -0.8798])\n",
      "i[0] tensor([-2.0056, -0.1083, -1.2930])\n",
      "i[0] tensor([ 0.5309, -0.7592,  0.1997])\n",
      "i[0] tensor([-0.3656, -1.2760,  0.1557])\n",
      "tensor([-0.0217, -0.6917, -0.8798, -2.0056, -0.1083, -1.2930,  0.5309, -0.7592,\n",
      "         0.1997, -0.3656, -1.2760,  0.1557])\n",
      "tensor([5., 0., 8., 0.])\n",
      "i[0] tensor([ 0.2148,  0.2685, -0.2165])\n",
      "i[0] tensor([-0.7134,  0.8399, -0.3379])\n",
      "i[0] tensor([-0.3757,  0.2302, -0.2786])\n",
      "i[0] tensor([-0.0867, -1.0457,  0.2696])\n",
      "tensor([ 0.2148,  0.2685, -0.2165, -0.7134,  0.8399, -0.3379, -0.3757,  0.2302,\n",
      "        -0.2786, -0.0867, -1.0457,  0.2696])\n",
      "tensor([6., 2., 0., 7.])\n",
      "i[0] tensor([-0.5582, -1.8891,  0.1810])\n",
      "i[0] tensor([-1.1203,  0.2676,  0.1837])\n",
      "i[0] tensor([1.6812, 0.4131, 0.7593])\n",
      "i[0] tensor([-0.6991,  0.1999,  0.7065])\n",
      "tensor([-0.5582, -1.8891,  0.1810, -1.1203,  0.2676,  0.1837,  1.6812,  0.4131,\n",
      "         0.7593, -0.6991,  0.1999,  0.7065])\n",
      "tensor([3., 5., 0., 2.])\n",
      "i[0] tensor([1.6588, 0.5587, 0.9101])\n",
      "i[0] tensor([-0.2245, -0.7670,  1.3925])\n",
      "i[0] tensor([0.2684, 0.0746, 1.3480])\n",
      "i[0] tensor([ 0.9169, -0.7538, -0.0657])\n",
      "tensor([ 1.6588,  0.5587,  0.9101, -0.2245, -0.7670,  1.3925,  0.2684,  0.0746,\n",
      "         1.3480,  0.9169, -0.7538, -0.0657])\n",
      "tensor([1., 2., 4., 0.])\n",
      "i[0] tensor([1.1709, 0.5397, 0.3325])\n",
      "i[0] tensor([-0.9057,  0.7429,  0.3735])\n",
      "i[0] tensor([ 0.2713,  0.4848, -1.4314])\n",
      "i[0] tensor([-1.0243, -0.2450,  0.7309])\n",
      "tensor([ 1.1709,  0.5397,  0.3325, -0.9057,  0.7429,  0.3735,  0.2713,  0.4848,\n",
      "        -1.4314, -1.0243, -0.2450,  0.7309])\n",
      "tensor([1., 4., 4., 6.])\n",
      "i[0] tensor([-0.2466,  0.5513,  0.0980])\n",
      "i[0] tensor([-1.2484,  0.2448,  0.8555])\n",
      "i[0] tensor([ 1.0761, -0.6869, -0.3108])\n",
      "i[0] tensor([ 1.1451, -0.7828, -0.0289])\n",
      "tensor([-0.2466,  0.5513,  0.0980, -1.2484,  0.2448,  0.8555,  1.0761, -0.6869,\n",
      "        -0.3108,  1.1451, -0.7828, -0.0289])\n",
      "tensor([1., 1., 6., 0.])\n",
      "i[0] tensor([-1.2959,  0.3479,  0.5826])\n",
      "i[0] tensor([-1.8454, -0.7061, -0.1917])\n",
      "i[0] tensor([0.0031, 2.1839, 0.4287])\n",
      "i[0] tensor([ 0.1966, -1.7814, -0.0242])\n",
      "tensor([-1.2959,  0.3479,  0.5826, -1.8454, -0.7061, -0.1917,  0.0031,  2.1839,\n",
      "         0.4287,  0.1966, -1.7814, -0.0242])\n",
      "tensor([7., 7., 1., 4.])\n",
      "i[0] tensor([-0.5728,  0.3157,  1.6933])\n",
      "i[0] tensor([ 0.3707, -0.0315, -0.6598])\n",
      "i[0] tensor([ 0.2403, -0.9995,  0.4776])\n",
      "i[0] tensor([ 0.8092, -0.1454,  0.6447])\n",
      "tensor([-0.5728,  0.3157,  1.6933,  0.3707, -0.0315, -0.6598,  0.2403, -0.9995,\n",
      "         0.4776,  0.8092, -0.1454,  0.6447])\n",
      "tensor([6., 7., 8., 6.])\n",
      "i[0] tensor([ 0.5967,  1.2049, -2.0166])\n",
      "i[0] tensor([-1.5458, -1.3236, -0.9827])\n",
      "i[0] tensor([0.8163, 0.8255, 0.5564])\n",
      "i[0] tensor([-0.1254,  0.2044, -1.4791])\n",
      "tensor([ 0.5967,  1.2049, -2.0166, -1.5458, -1.3236, -0.9827,  0.8163,  0.8255,\n",
      "         0.5564, -0.1254,  0.2044, -1.4791])\n",
      "tensor([8., 4., 4., 6.])\n",
      "i[0] tensor([-0.3847, -0.0431, -0.4876])\n",
      "i[0] tensor([-1.2819, -0.1341, -1.3970])\n",
      "i[0] tensor([-0.3607,  1.2450, -1.1597])\n",
      "i[0] tensor([ 0.5758, -0.8985,  0.9703])\n",
      "tensor([-0.3847, -0.0431, -0.4876, -1.2819, -0.1341, -1.3970, -0.3607,  1.2450,\n",
      "        -1.1597,  0.5758, -0.8985,  0.9703])\n",
      "tensor([5., 0., 0., 1.])\n",
      "i[0] tensor([-0.5951,  1.8245, -1.4996])\n",
      "i[0] tensor([-1.1301, -0.8380, -2.0892])\n",
      "i[0] tensor([-2.0864,  0.2393, -0.8894])\n",
      "i[0] tensor([-0.0991,  0.7655,  1.5810])\n",
      "tensor([-0.5951,  1.8245, -1.4996, -1.1301, -0.8380, -2.0892, -2.0864,  0.2393,\n",
      "        -0.8894, -0.0991,  0.7655,  1.5810])\n",
      "tensor([2., 0., 2., 8.])\n",
      "i[0] tensor([-1.1579,  0.1125, -1.2487])\n",
      "i[0] tensor([0.4913, 0.5647, 1.3924])\n",
      "i[0] tensor([-0.0949, -0.6503, -0.9451])\n",
      "i[0] tensor([ 0.5879, -2.3908, -1.7331])\n",
      "tensor([-1.1579,  0.1125, -1.2487,  0.4913,  0.5647,  1.3924, -0.0949, -0.6503,\n",
      "        -0.9451,  0.5879, -2.3908, -1.7331])\n",
      "tensor([4., 3., 5., 0.])\n",
      "i[0] tensor([-1.2592,  1.2283, -1.4396])\n",
      "i[0] tensor([-0.2852,  0.1308,  1.2718])\n",
      "i[0] tensor([ 0.9512, -0.2840, -0.4489])\n",
      "i[0] tensor([0.6013, 0.0116, 1.0828])\n",
      "tensor([-1.2592,  1.2283, -1.4396, -0.2852,  0.1308,  1.2718,  0.9512, -0.2840,\n",
      "        -0.4489,  0.6013,  0.0116,  1.0828])\n",
      "tensor([7., 2., 0., 6.])\n",
      "i[0] tensor([0.5610, 0.5498, 0.4800])\n",
      "i[0] tensor([0.5064, 0.2266, 0.1601])\n",
      "i[0] tensor([0.2168, 0.6053, 0.3805])\n",
      "i[0] tensor([-2.0955, -0.4074, -0.2421])\n",
      "tensor([ 0.5610,  0.5498,  0.4800,  0.5064,  0.2266,  0.1601,  0.2168,  0.6053,\n",
      "         0.3805, -2.0955, -0.4074, -0.2421])\n",
      "tensor([3., 0., 3., 8.])\n",
      "i[0] tensor([-0.7168, -0.8265, -2.3400])\n",
      "i[0] tensor([-0.8107, -0.0839,  0.0243])\n",
      "i[0] tensor([-0.1958, -0.7594, -1.1143])\n",
      "i[0] tensor([-1.3228, -0.0304,  0.4720])\n",
      "tensor([-0.7168, -0.8265, -2.3400, -0.8107, -0.0839,  0.0243, -0.1958, -0.7594,\n",
      "        -1.1143, -1.3228, -0.0304,  0.4720])\n",
      "tensor([7., 7., 1., 8.])\n",
      "i[0] tensor([ 0.0914, -1.4212,  0.2835])\n",
      "i[0] tensor([-0.4125, -0.4265,  1.0195])\n",
      "i[0] tensor([-0.9416,  0.0709,  1.2310])\n",
      "i[0] tensor([-1.3377,  1.0040, -0.6639])\n",
      "tensor([ 0.0914, -1.4212,  0.2835, -0.4125, -0.4265,  1.0195, -0.9416,  0.0709,\n",
      "         1.2310, -1.3377,  1.0040, -0.6639])\n",
      "tensor([8., 5., 5., 8.])\n",
      "i[0] tensor([0.1585, 0.2568, 0.1178])\n",
      "i[0] tensor([ 1.1916,  1.9502, -0.8228])\n",
      "i[0] tensor([-0.5619,  2.1322,  1.2165])\n",
      "i[0] tensor([ 0.5782, -1.0168,  1.3139])\n",
      "tensor([ 0.1585,  0.2568,  0.1178,  1.1916,  1.9502, -0.8228, -0.5619,  2.1322,\n",
      "         1.2165,  0.5782, -1.0168,  1.3139])\n",
      "tensor([8., 7., 0., 1.])\n",
      "i[0] tensor([-1.6366,  0.4255,  1.1914])\n",
      "i[0] tensor([ 0.1709, -0.8711, -0.1425])\n",
      "i[0] tensor([ 1.4964, -0.4602, -1.5054])\n",
      "i[0] tensor([-1.6348,  1.7493,  0.4547])\n",
      "tensor([-1.6366,  0.4255,  1.1914,  0.1709, -0.8711, -0.1425,  1.4964, -0.4602,\n",
      "        -1.5054, -1.6348,  1.7493,  0.4547])\n",
      "tensor([2., 4., 6., 3.])\n",
      "i[0] tensor([0.1358, 1.6103, 0.2022])\n",
      "i[0] tensor([ 0.2439, -0.4692, -0.0792])\n",
      "i[0] tensor([-1.2053,  0.7782,  1.4852])\n",
      "i[0] tensor([-0.5674, -0.0779, -0.0210])\n",
      "tensor([ 0.1358,  1.6103,  0.2022,  0.2439, -0.4692, -0.0792, -1.2053,  0.7782,\n",
      "         1.4852, -0.5674, -0.0779, -0.0210])\n",
      "tensor([2., 6., 7., 4.])\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# dd = [(torch.randn(5,3),torch.randint(0,9,(1,))) for _ in range(10)]\n",
    "dd = [(torch.randn(3,),torch.randint(0,9,(1,)).item()) for _ in range(100)]\n",
    "\n",
    "\n",
    "def change(batch):\n",
    "    img = []\n",
    "    label=[]\n",
    "    for i in batch:\n",
    "        print('i[0]',i[0])\n",
    "        img.extend(i[0])\n",
    "        label.append(i[1])\n",
    "    return torch.Tensor(img),torch.Tensor(label)\n",
    "\n",
    "\n",
    "\n",
    "    \n",
    "    \n",
    "    \n",
    "# dataloader1 = DataLoader(dd,batch_size=4,shuffle=False,collate_fn=change)\n",
    "dataloader1 = DataLoader(dd,batch_size=4,shuffle=False,collate_fn=change)\n",
    "# dataloader1[0]\n",
    "# A,B = next(iter(dataloader1))\n",
    "\n",
    "for a,b in dataloader1:\n",
    "    print(a)\n",
    "    print(b)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "id": "67f26056",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[tensor(0.0803), tensor(0.3852), tensor(1.1239)]"
      ]
     },
     "execution_count": 78,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s=[]\n",
    "a=torch.randn(3,)\n",
    "s.extend(a)\n",
    "s"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "id": "d97a8181",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.0803, 0.3852, 1.1239])"
      ]
     },
     "execution_count": 79,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.Tensor(s)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "id": "d207b5fe",
   "metadata": {},
   "outputs": [],
   "source": [
    "in_datas = [0,1,2,3,41,5,6,7,8,9]\n",
    "s=torch.Tensor(in_datas).long()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "id": "d0cea980",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 0,  1,  2,  3, 41,  5,  6,  7,  8,  9])"
      ]
     },
     "execution_count": 88,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "id": "d495fd46",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 0,  1,  2,  3, 41,  5,  6,  7,  8,  9])"
      ]
     },
     "execution_count": 89,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "id": "4e8f3e84",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([41])"
      ]
     },
     "execution_count": 90,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.topk(1)[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 112,
   "id": "ecdc8fc0",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(4)"
      ]
     },
     "execution_count": 112,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.topk(1)[1].squeeze().detach()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 114,
   "id": "a1be7b12",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.9160,  1.0691,  1.5943,  1.1000, -0.5951,  1.4542,  1.5170,  0.8958,\n",
       "        -2.3711,  1.8024, -0.4299,  0.0250,  0.6562, -1.8422, -0.2414,  1.0278,\n",
       "        -0.2118, -0.9212, -0.1065,  0.0863,  1.4954, -1.6414,  0.4083, -1.3420,\n",
       "        -1.9440, -1.5797, -0.9065, -0.0494,  0.6341, -0.3842],\n",
       "       grad_fn=<EmbeddingBackward0>)"
      ]
     },
     "execution_count": 114,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "nn.Embedding(100,30)(s.topk(1)[1].squeeze().detach())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 115,
   "id": "4a06f096",
   "metadata": {},
   "outputs": [],
   "source": [
    "# gradient computation (autograd)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2a44a52b",
   "metadata": {},
   "outputs": [
    {
     "ename": "RuntimeError",
     "evalue": "grad can be implicitly created only for scalar outputs",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[127], line 9\u001b[0m\n\u001b[0;32m      6\u001b[0m v \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mtensor([\u001b[38;5;241m1.0\u001b[39m, \u001b[38;5;241m2.0\u001b[39m])\n\u001b[0;32m      8\u001b[0m \u001b[38;5;66;03m# 计算雅可比乘积 v^T · J\u001b[39;00m\n\u001b[1;32m----> 9\u001b[0m \u001b[43my\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m  \u001b[38;5;66;03m# 等价于 y.backward(torch.tensor([1.0, 2.0]))\u001b[39;00m\n\u001b[0;32m     11\u001b[0m \u001b[38;5;28mprint\u001b[39m(x\u001b[38;5;241m.\u001b[39mgrad)  \u001b[38;5;66;03m# 输出梯度 [∂(v1*y1 + v2*y2)/∂x1, ∂(v1*y1 + v2*y2)/∂x2]\u001b[39;00m\n",
      "File \u001b[1;32md:\\VirtualProject\\Python37Env\\torch_py38\\lib\\site-packages\\torch\\_tensor.py:492\u001b[0m, in \u001b[0;36mTensor.backward\u001b[1;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[0;32m    482\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[0;32m    483\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[0;32m    484\u001b[0m         Tensor\u001b[38;5;241m.\u001b[39mbackward,\n\u001b[0;32m    485\u001b[0m         (\u001b[38;5;28mself\u001b[39m,),\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    490\u001b[0m         inputs\u001b[38;5;241m=\u001b[39minputs,\n\u001b[0;32m    491\u001b[0m     )\n\u001b[1;32m--> 492\u001b[0m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautograd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    493\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs\u001b[49m\n\u001b[0;32m    494\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32md:\\VirtualProject\\Python37Env\\torch_py38\\lib\\site-packages\\torch\\autograd\\__init__.py:244\u001b[0m, in \u001b[0;36mbackward\u001b[1;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[0;32m    235\u001b[0m inputs \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m    236\u001b[0m     (inputs,)\n\u001b[0;32m    237\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(inputs, torch\u001b[38;5;241m.\u001b[39mTensor)\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    240\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mtuple\u001b[39m()\n\u001b[0;32m    241\u001b[0m )\n\u001b[0;32m    243\u001b[0m grad_tensors_ \u001b[38;5;241m=\u001b[39m _tensor_or_tensors_to_tuple(grad_tensors, \u001b[38;5;28mlen\u001b[39m(tensors))\n\u001b[1;32m--> 244\u001b[0m grad_tensors_ \u001b[38;5;241m=\u001b[39m \u001b[43m_make_grads\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtensors\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgrad_tensors_\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mis_grads_batched\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[0;32m    245\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m retain_graph \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m    246\u001b[0m     retain_graph \u001b[38;5;241m=\u001b[39m create_graph\n",
      "File \u001b[1;32md:\\VirtualProject\\Python37Env\\torch_py38\\lib\\site-packages\\torch\\autograd\\__init__.py:117\u001b[0m, in \u001b[0;36m_make_grads\u001b[1;34m(outputs, grads, is_grads_batched)\u001b[0m\n\u001b[0;32m    115\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m out\u001b[38;5;241m.\u001b[39mrequires_grad:\n\u001b[0;32m    116\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m out\u001b[38;5;241m.\u001b[39mnumel() \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m--> 117\u001b[0m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\n\u001b[0;32m    118\u001b[0m             \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgrad can be implicitly created only for scalar outputs\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    119\u001b[0m         )\n\u001b[0;32m    120\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m out\u001b[38;5;241m.\u001b[39mdtype\u001b[38;5;241m.\u001b[39mis_floating_point:\n\u001b[0;32m    121\u001b[0m         msg \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m    122\u001b[0m             \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgrad can be implicitly created only for real scalar outputs\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    123\u001b[0m             \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m but got \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mout\u001b[38;5;241m.\u001b[39mdtype\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    124\u001b[0m         )\n",
      "\u001b[1;31mRuntimeError\u001b[0m: grad can be implicitly created only for scalar outputs"
     ]
    }
   ],
   "source": [
    "# Define the input and a function whose output is a tensor (not a scalar)\n",
    "x = torch.tensor([1.0, 2.0], requires_grad=True)\n",
    "y = torch.stack([x[0]**2, x[0] * x[1]])  # y = [x1^2, x1*x2], output has shape (2,)\n",
    "\n",
    "# Define the vector v (same shape as y)\n",
    "v = torch.tensor([1.0, 2.0])\n",
    "\n",
    "# Compute the vector-Jacobian product v^T · J\n",
    "y.backward(gradient=v)  # equivalent to y.backward(torch.tensor([1.0, 2.0])); NOTE(review): the saved RuntimeError output is stale — it matches an earlier run of y.backward() with no gradient argument, not this code\n",
    "\n",
    "print(x.grad)  # gradient [∂(v1*y1 + v2*y2)/∂x1, ∂(v1*y1 + v2*y2)/∂x2]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "id": "d99b78c5",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([1., 2.], requires_grad=True)"
      ]
     },
     "execution_count": 118,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "id": "04961a0c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([1., 2.], grad_fn=<StackBackward0>)"
      ]
     },
     "execution_count": 119,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 124,
   "id": "ebb91252",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1., 2., 3.],\n",
       "        [1., 2., 3.]])"
      ]
     },
     "execution_count": 124,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.stack([torch.Tensor([1,2,3]),torch.Tensor([1,2,3])],axis=0)  # NOTE(review): torch.stack's documented kwarg is dim, not axis; axis was accepted here (cell ran), but prefer dim=0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 125,
   "id": "d957d019",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(1., grad_fn=<PowBackward0>)"
      ]
     },
     "execution_count": 125,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x[0]**2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 126,
   "id": "a6386ae3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(2., grad_fn=<MulBackward0>)"
      ]
     },
     "execution_count": 126,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x[0] * x[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 130,
   "id": "bdc8dafc",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([1., 2.], requires_grad=True)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "tensor(0.5000, requires_grad=True)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "tensor([3., 4.])"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "w = torch.tensor([1.0, 2.0], requires_grad=True)\n",
    "b = torch.tensor(0.5, requires_grad=True)\n",
    "x = torch.tensor([3.0, 4.0])\n",
    "display(w,b,x)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 131,
   "id": "55ddfa56",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(11., grad_fn=<DotBackward0>)"
      ]
     },
     "execution_count": 131,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.dot(w,x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 163,
   "id": "eb33d25a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.9632,  0.2305, -1.4502,  0.1597],\n",
       "        [ 1.5586, -0.0212,  0.0596,  0.3912],\n",
       "        [ 0.2599,  0.1735,  0.9910, -0.2228]], requires_grad=True)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "tensor([[-0.7639,  0.4722,  1.1458],\n",
       "        [ 0.4799, -0.7716, -0.5915],\n",
       "        [-0.3931, -1.2791, -1.4425],\n",
       "        [ 0.0866, -0.4466, -0.5196]], requires_grad=True)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "tensor([[-0.0060,  0.1243,  0.0751],\n",
       "        [-0.0060,  0.1243,  0.0751],\n",
       "        [-0.0060,  0.1243,  0.0751],\n",
       "        [-0.0060,  0.1243,  0.0751]])"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "w = torch.randn(3,4, requires_grad=True)\n",
    "x = torch.randn(4,3, requires_grad=True)\n",
    "loss = torch.mean(x@w)\n",
    "loss.backward()\n",
    "display(w,x,x.grad)\n",
    "# x.grad\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 166,
   "id": "9a7e6e68",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "-0.006049999999999996"
      ]
     },
     "execution_count": 166,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "1/16*(0.9632+0.2305+-1.4502+0.1597)  # manual check of x.grad[0][0]: sum of row 0 of w divided by numel(x@w) = 4*4 = 16"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 165,
   "id": "ef6fc5be",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.2980,  0.0126,  2.2713, -0.1925],\n",
       "        [-0.8940,  0.0244, -1.3280, -0.0935],\n",
       "        [-2.7472, -0.3137, -0.9357, -0.2419],\n",
       "        [-0.7477, -0.0607, -0.6671, -0.0451]], grad_fn=<MmBackward0>)"
      ]
     },
     "execution_count": 165,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x@w"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 167,
   "id": "07471f5e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.9483, -1.2095,  1.3009, -0.6932])"
      ]
     },
     "execution_count": 167,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.randn(2,3,4)[0,0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c106f6f6",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torch_py38",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.7rc1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
