{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "False"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "torch.cuda.is_available()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "device(type='cpu')"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "device"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 常用的数学运算"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([1, 2, 3]) <class 'torch.Tensor'> torch.Size([3]) torch.int64\n"
     ]
    }
   ],
   "source": [
    "a=torch.tensor([1,2,3])\n",
    "print(a,type(a),a.size(),a.dtype)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(1) <class 'torch.Tensor'> torch.Size([]) torch.int64\n"
     ]
    }
   ],
   "source": [
    "b=torch.tensor(1)\n",
    "print(b,type(b),b.size(),b.dtype)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([2, 3, 4])"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "a.add(b)#加法：标量b广播后与每个元素相加"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0, 1, 2])"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "a.sub(b)#减法：标量b广播后从每个元素中减去"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-1.3053, -0.2761, -0.2393])"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#取绝对值\n",
    "c=torch.randn((3,))\n",
    "c"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([1.3053, 0.2761, 0.2393])"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "c.abs()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-6.7223e-01,  2.1539e+00, -7.2260e-04, -1.4892e-01],\n",
       "        [ 5.7238e-01, -3.8677e-01,  7.9769e-01,  6.9478e-01],\n",
       "        [ 1.6602e+00, -5.9313e-01, -1.8031e-01,  2.0158e+00]])"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#矩阵乘法\n",
    "a=torch.randn([3,4])\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-1.0552,  0.3937, -0.4875, -2.4010, -0.0806],\n",
       "        [-0.9674,  0.4842,  0.6941, -0.7582,  0.1282],\n",
       "        [-0.6709,  0.4705,  1.3088,  0.3559,  0.7508],\n",
       "        [-1.3329, -1.7412, -0.8372, -1.0159, -0.0679]])"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "b=torch.randn([4,5])\n",
    "b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-1.1754,  1.0373,  1.9465,  0.1320,  0.3398],\n",
       "        [-1.6911, -0.7964, -0.0852, -1.5030,  0.4561],\n",
       "        [-3.7440, -3.2284, -3.1448, -5.6486, -0.4820]])"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "a.mm(b)#(3,4)与(4,5)相乘得到的结果就是(3,5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1, 2],\n",
       "        [3, 4]])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "### 数学运算同样适用于广播机制\n",
    "a=torch.tensor([[1,2],[3,4]])\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([1, 2])"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "b=torch.tensor([1,2])\n",
    "b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[2, 4],\n",
       "        [4, 6]])"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a+b#1,2去加1,2变成2,4     3,4也去加1,2变成4,6"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[4],\n",
       "        [7]])"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "c=torch.tensor([[4,],[7,]])\n",
    "c"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 5,  6],\n",
       "        [10, 11]])"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a+c"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 就地改变"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "a add b tensor([[2, 4],\n",
      "        [4, 5]])\n",
      "a的值还是原来的值 tensor([[1, 2],\n",
      "        [3, 3]])\n",
      "a add_ b tensor([[2, 4],\n",
      "        [4, 5]])\n",
      "a的值已经是运算之后的值 tensor([[2, 4],\n",
      "        [4, 5]])\n"
     ]
    }
   ],
   "source": [
    "a=torch.tensor([[1,2],[3,3]])\n",
    "b=torch.tensor([1,2])#对应元素相加\n",
    "print('a add b',a.add(b))\n",
    "print('a的值还是原来的值',a)\n",
    "print('a add_ b',a.add_(b))\n",
    "print('a的值已经是运算之后的值',a)#同理 sub_() abs_()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 统计操作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[-0.0815,  2.6137,  0.4852,  0.4036],\n",
      "        [-0.6397,  0.9183, -0.3107,  0.1165],\n",
      "        [ 0.8375, -0.8075,  0.5392,  1.0801]])\n"
     ]
    }
   ],
   "source": [
    "a=torch.randn(3,4)\n",
    "print(a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "max tensor(2.6137)\n",
      "min tensor(-0.8075)\n",
      "mean tensor(0.4295)\n",
      "median tensor(0.4036)\n",
      "argmax tensor(1)\n",
      "argmax tensor([1, 1, 3])\n"
     ]
    }
   ],
   "source": [
    "print('max',a.max())\n",
    "print('min',a.min())\n",
    "print('mean',a.mean())\n",
    "print('median',a.median())\n",
     "print('argmax',a.argmax())#最大值所在下标（展平后的索引）\n",
     "print('argmax',a.argmax(dim=1))#在维度1上（即每行内）找最大值所在的下标"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 可以发现torch的很多操作和numpy一样"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### pytorch的自动求导"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x: tensor([[1., 1.],\n",
      "        [1., 1.]], requires_grad=True)\n"
     ]
    }
   ],
   "source": [
    "#requires_grad  通过设置成false和true来设定是否求梯度\n",
    "#backward 反向传播\n",
    "\n",
    "x=torch.ones(2,2,requires_grad=True)#这里requires_grad=True 后面的每步的变量 grad_fn的Backward都有\n",
    "print('x:',x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "y: tensor([[3., 3.],\n",
      "        [3., 3.]], grad_fn=<AddBackward0>)\n"
     ]
    }
   ],
   "source": [
    "y=x+2\n",
    "print('y:',y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "z: tensor([[27., 27.],\n",
      "        [27., 27.]], grad_fn=<MulBackward0>)\n"
     ]
    }
   ],
   "source": [
    "z=y*y*3\n",
    "print('z:',z)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out: tensor(27., grad_fn=<MeanBackward0>)\n"
     ]
    }
   ],
   "source": [
    "out=z.mean()\n",
    "print('out:',out)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "a: tensor([[ 1.2898, -0.3608],\n",
      "        [ 1.2219,  2.6635]])\n",
      "a requires_grad: False\n",
      "就地修改a\n",
      "a: tensor([[ 1.2898, -0.3608],\n",
      "        [ 1.2219,  2.6635]], requires_grad=True)\n",
      "a requires_grad: True\n"
     ]
    }
   ],
   "source": [
    "a=torch.randn(2,2)\n",
    "print('a:',a)\n",
    "print('a requires_grad:',a.requires_grad)\n",
    "a.requires_grad_(True)#就地修改\n",
    "print(\"就地修改a\")\n",
    "print('a:',a)\n",
     "print('a requires_grad:',a.requires_grad)#之前a的requires_grad从False变成True了"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "b: tensor(10.3812, grad_fn=<SumBackward0>)\n",
      "b requires_grad: True\n",
      "b的grad_fn： <SumBackward0 object at 0x7f2f3470b650>\n",
      "b的data tensor(10.3812)\n",
      "10.381228 <class 'numpy.ndarray'>\n"
     ]
    }
   ],
   "source": [
    "b=(a*a).sum()\n",
    "print('b:',b)\n",
    "print('b requires_grad:',b.requires_grad)\n",
    "print('b的grad_fn：',b.grad_fn)\n",
    "print('b的data',b.data)\n",
    "print(b.detach().numpy(),type(b.detach().numpy()))#requires_grad=True的时候转换成numpy需要多一步detach()操作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "c: tensor(10.3812)\n",
      "c requires_grad: False\n",
      "c的grad_fn： None\n",
      "c的data tensor(10.3812)\n",
      "10.381228 <class 'numpy.ndarray'>\n"
     ]
    }
   ],
   "source": [
    "with torch.no_grad():#c跟b在数值上是一样的 但是requires_grad和grad_fn已经变了 因为加上了no_grad()\n",
    "    c=(a*a).sum()\n",
    "print('c:',c)\n",
    "print('c requires_grad:',c.requires_grad)\n",
    "print('c的grad_fn：',c.grad_fn)  \n",
    "print('c的data',c.data)\n",
    "print(c.numpy(),type(c.numpy()))#requires_grad=False的时候可以直接转换"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
