{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "# 比较操作 Comparison Ops"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.ge\n",
    "torch.ge(input, other, out=None) → Tensor  \n",
    "逐元素比较 input 和 other，即是否$input \\geqslant other$。  \n",
    "若对应位置满足条件则该位置返回 True ，否则返回 False 。 第二个参数可以为一个数或与第一个参数相同形状和类型的张量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1, 1],\n",
       "        [0, 1]], dtype=torch.uint8)"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.ge(torch.Tensor([[1, 2], [3, 4]]), torch.Tensor([[1, 1], [4, 4]]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.gt\n",
    "torch.gt(input, other, out=None) → Tensor  \n",
    "逐元素比较 input 和 other ，即是否$input > other$。若对应位置满足条件则该位置返回 True ，否则返回 False 。 第二个参数可以为一个数或与第一个参数相同形状和类型的张量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0, 1],\n",
       "        [0, 0]], dtype=torch.uint8)"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.gt(torch.Tensor([[1, 2], [3, 4]]), torch.Tensor([[1, 1], [4, 4]]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.kthvalue\n",
    "torch.kthvalue(input, k, dim=None, out=None) -> (Tensor, LongTensor)  \n",
    "取输入张量 input 指定维上第k 个最小值。如果不指定 dim ，则默认为 input 的最后一维。\n",
    "返回一个元组 (values,indices)，其中 indices 是原始输入张量 input 中沿 dim 维的第 k 个最小值下标。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([1, 2, 3, 4, 5])"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = torch.arange(1, 6)\n",
    "x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.return_types.kthvalue(\n",
       "values=tensor(4),\n",
       "indices=tensor(3))"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 取第k个最小值\n",
    "torch.kthvalue(x, 4)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.le\n",
    "torch.le(input, other, out=None) → Tensor  \n",
    "逐元素比较 input 和 other ，即是否$input \\leqslant other$。第二个参数可以为一个数或与第一个参数相同形状和类型的张量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1, 0],\n",
       "        [1, 1]], dtype=torch.uint8)"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.le(torch.Tensor([[1, 2], [3, 4]]), torch.Tensor([[1, 1], [4, 4]]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.lt\n",
    "torch.lt(input, other, out=None) → Tensor  \n",
    "逐元素比较 input 和 other ， 即是否$input < other$  \n",
    "第二个参数可以为一个数或与第一个参数相同形状和类型的张量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0, 0],\n",
       "        [1, 0]], dtype=torch.uint8)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.lt(torch.Tensor([[1, 2], [3, 4]]), torch.Tensor([[1, 1], [4, 4]]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.max\n",
    "torch.max()  \n",
    "返回输入张量所有元素的最大值。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 1.2990, -0.2886, -0.1625]])"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.randn(1, 3)\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(1.2990)"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.max(a)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "torch.max(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)  \n",
    "返回输入张量给定维度上每行的最大值，并同时返回每个最大值的位置索引。  \n",
    "输出形状中，将 dim 维设定为1，其它与输入形状保持一致。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.0287, -0.3009,  1.1667, -1.0525],\n",
       "        [-0.3748,  0.1419,  1.6414,  0.0937],\n",
       "        [ 0.4776, -0.1963, -1.9010,  0.0935],\n",
       "        [ 0.6471, -0.8917,  0.3899, -0.3949]])"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.randn(4, 4)\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.return_types.max(\n",
       "values=tensor([1.1667, 1.6414, 0.4776, 0.6471]),\n",
       "indices=tensor([2, 2, 0, 0]))"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 按行取最大值\n",
    "torch.max(a, 1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "torch.max(input, other, out=None) → Tensor  \n",
    "input 中逐元素与 other 相应位置的元素对比，返回两者中的最大值到输出张量。即$out_i=max(input_i, other_i)$  \n",
    "两个张量的形状须相同（或可广播为相同形状）。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 0.2908, -0.9157,  0.9614,  0.3274])"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.randn(4)\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.8886, -0.4459,  0.4276, -0.3026])"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "b = torch.randn(4)\n",
    "b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 0.2908, -0.4459,  0.9614,  0.3274])"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.max(a, b)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.min\n",
    "torch.min(input) → Tensor  \n",
    "返回输入张量所有元素的最小值。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-2.1048, -1.0820,  0.3809]])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.randn(1, 3)\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(-2.1048)"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.min(a)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "torch.min(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)  \n",
    "返回输入张量给定维度上每行的最小值，并同时返回每个最小值的位置索引。  \n",
    "输出形状中，将 dim 维设定为1，其它与输入形状保持一致。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-1.3044,  1.5756,  1.4488,  1.6483],\n",
       "        [-2.9373, -0.5015, -1.9494,  1.2411],\n",
       "        [-1.2833, -1.0122, -1.4623, -1.1097],\n",
       "        [-0.4786, -0.4191,  0.8416, -0.3656]])"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.randn(4, 4)\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.return_types.min(\n",
       "values=tensor([-1.3044, -2.9373, -1.4623, -0.4786]),\n",
       "indices=tensor([0, 0, 2, 0]))"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.min(a, 1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "torch.min(input, other, out=None) → Tensor  \n",
    "input 中逐元素与 other 相应位置的元素对比，返回最小值到输出张量。即$out_i=min(input_i, other_i)$  \n",
    "两张量形状不需匹配，但元素数须相同。  \n",
    "注意：当形状不匹配时， input 的形状作为返回张量的形状。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 0.2847, -0.9817, -0.5501, -0.2095])"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.randn(4)\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 1.7007, -0.0122, -0.1574,  0.3346])"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "b = torch.randn(4)\n",
    "b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 0.2847, -0.9817, -0.5501, -0.2095])"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.min(a, b)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.ne\n",
    "torch.ne(input, other, out=None) → Tensor  \n",
    "逐元素比较 input 和 other，即是否$input \\neq other$。第二个参数可以为一个数或与第一个参数相同形状和类型的张量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0, 1],\n",
       "        [1, 0]], dtype=torch.uint8)"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.ne(torch.Tensor([[1, 2], [3, 4]]), torch.Tensor([[1, 1], [4, 4]]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.sort\n",
    "torch.sort(input, dim=None, descending=False, out=None) -> (Tensor, LongTensor)  \n",
    "对输入张量 input 沿着指定维按升序排序。如果不给定 dim ，则默认为输入的最后一维。如果指定参数 descending 为 True ，则按降序排序  \n",
    "返回元组 (sorted_tensor, sorted_indices) ， sorted_indices 为原始输入中的下标。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.0274,  0.3800,  0.3961,  1.3038],\n",
       "        [-0.5760, -0.4788,  0.0229,  0.4699],\n",
       "        [-0.5173, -0.3512, -0.2068,  1.6947]])"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = torch.randn(3, 4)\n",
    "sorted, indices = torch.sort(x)\n",
    "sorted"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1, 0, 3, 2],\n",
       "        [2, 0, 3, 1],\n",
       "        [1, 2, 0, 3]])"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "indices"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.4788, -0.5173, -0.5760,  0.0229],\n",
       "        [-0.2068, -0.0274, -0.3512,  0.3961],\n",
       "        [ 0.3800,  0.4699,  1.3038,  1.6947]])"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sorted, indices = torch.sort(x, 0)\n",
    "sorted"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1, 2, 1, 1],\n",
       "        [2, 0, 2, 0],\n",
       "        [0, 1, 0, 2]])"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "indices"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.topk\n",
    "torch.topk(input, k, dim=None, largest=True, sorted=True, out=None) -> (Tensor, LongTensor)  \n",
    "沿给定 dim 维度返回输入张量 input 中 k 个最大值。 如果不指定 dim ，则默认为 input 的最后一维。如果为 largest 为 False ，则返回最小的 k 个值。  \n",
    "返回一个元组 (values,indices)，其中 indices 是原始输入张量 input 中的元素下标。如果设定布尔值 sorted 为 True ，将会确保返回的 k 个值被排序。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([1, 2, 3, 4, 5])"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = torch.arange(1, 6)\n",
    "x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.return_types.topk(\n",
       "values=tensor([5, 4, 3]),\n",
       "indices=tensor([4, 3, 2]))"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.topk(x, 3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.return_types.topk(\n",
       "values=tensor([1, 2, 3]),\n",
       "indices=tensor([0, 1, 2]))"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.topk(x, 3, largest=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 其它操作 Other Operations"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.cross\n",
    "torch.cross(input, other, dim=-1, out=None) → Tensor  \n",
    "返回沿着维度 dim 上，两个张量 input 和 other 的向量积（叉积）。 input 和 other 必须有相同的形状，且指定的 dim 维上size必须为 3。  \n",
    "如果不指定 dim ，则默认为第一个尺度为 3 的维。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.5804,  0.3432, -0.2196],\n",
       "        [ 0.3225,  0.2474, -1.6775],\n",
       "        [ 1.1070, -1.0603,  2.0992],\n",
       "        [ 0.5278,  1.4472, -0.6604]])"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.randn(4, 3)\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.1671, -0.3354, -1.2149],\n",
       "        [-0.0036, -1.1630,  1.0006],\n",
       "        [-0.5885, -1.3894, -1.4368],\n",
       "        [ 0.2904, -0.2142,  0.4272]])"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "b = torch.randn(4, 3)\n",
    "b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.4906, -0.6685,  0.2520],\n",
       "        [-1.7033, -0.3166, -0.3742],\n",
       "        [ 4.4402,  0.3552, -2.1621],\n",
       "        [ 0.4768, -0.4173, -0.5333]])"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.cross(a, b, dim=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.4906, -0.6685,  0.2520],\n",
       "        [-1.7033, -0.3166, -0.3742],\n",
       "        [ 4.4402,  0.3552, -2.1621],\n",
       "        [ 0.4768, -0.4173, -0.5333]])"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.cross(a, b)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.diag\n",
    "torch.diag(input, diagonal=0, out=None) → Tensor  \n",
    "如果输入是一个向量(1D 张量)，则返回一个以 input 为对角线元素的2D方阵  \n",
    "如果输入是一个矩阵(2D 张量)，则返回一个包含 input 对角线元素的1D张量"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**取得以 input 为对角线的方阵：**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.8659, -0.6496,  1.1525])"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.randn(3)\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.8659,  0.0000,  0.0000],\n",
       "        [ 0.0000, -0.6496,  0.0000],\n",
       "        [ 0.0000,  0.0000,  1.1525]])"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.diag(a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.0000, -0.8659,  0.0000,  0.0000],\n",
       "        [ 0.0000,  0.0000, -0.6496,  0.0000],\n",
       "        [ 0.0000,  0.0000,  0.0000,  1.1525],\n",
       "        [ 0.0000,  0.0000,  0.0000,  0.0000]])"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.diag(a, 1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**取得给定矩阵第 k 个对角线：**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.9067,  0.5443,  0.9428],\n",
       "        [-0.8577,  0.0506, -1.2518],\n",
       "        [-1.2374,  0.0361, -0.3659]])"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.randn(3, 3)\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.9067,  0.0506, -0.3659])"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.diag(a, 0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 0.5443, -1.2518])"
      ]
     },
     "execution_count": 39,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.diag(a, 1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.histc\n",
    "torch.histc(input, bins=100, min=0, max=0, out=None) → Tensor  \n",
    "计算输入张量的直方图。以 min 和 max 为range边界，将其均分成 bins 个直条，然后将排序好的数据划分到各个直条(bins)中。如果 min 和 max 都为0, 则利用数据中的最大最小值作为边界。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0., 2., 1., 0.])"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.histc(torch.FloatTensor([1, 2, 1]), bins=4, min=0, max=3)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.renorm\n",
    "torch.renorm(input, p, dim, maxnorm, out=None) → Tensor  \n",
    "返回一个张量，包含规范化后的各个子张量，使得沿着dim维划分的各子张量的p范数小于maxnorm。  \n",
    "**注意：** 如果p范数的值小于 maxnorm ，则当前子张量不需要修改。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1., 1., 1.],\n",
       "        [2., 2., 2.],\n",
       "        [3., 3., 3.]])"
      ]
     },
     "execution_count": 41,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = torch.ones(3, 3)\n",
    "x[1].fill_(2)\n",
    "x[2].fill_(3)\n",
    "x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.0000, 1.0000, 1.0000],\n",
       "        [1.6667, 1.6667, 1.6667],\n",
       "        [1.6667, 1.6667, 1.6667]])"
      ]
     },
     "execution_count": 42,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.renorm(x, 1, 0, 5)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.trace\n",
    "torch.trace(input) → float  \n",
    "返回输入2维矩阵对角线元素的和(迹)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1, 2, 3],\n",
       "        [4, 5, 6],\n",
       "        [7, 8, 9]])"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = torch.arange(1, 10).view(3, 3)\n",
    "x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(15)"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.trace(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.tril\n",
    "torch.tril(input, diagonal=0, out=None) → Tensor  \n",
    "返回一个张量 out ，包含输入矩阵(2D张量)的下三角部分， out 其余部分被设为 0 。这里所说的下三角部分为矩阵指定对角线 diagonal 及其之下的元素。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2376, -0.2659, -1.0257],\n",
       "        [-0.8349, -0.0022,  0.0860],\n",
       "        [-0.6707, -0.9868,  0.7312]])"
      ]
     },
     "execution_count": 45,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.randn(3, 3)\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2376,  0.0000,  0.0000],\n",
       "        [-0.8349, -0.0022,  0.0000],\n",
       "        [-0.6707, -0.9868,  0.7312]])"
      ]
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.tril(a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2376, -0.2659,  0.0000],\n",
       "        [-0.8349, -0.0022,  0.0860],\n",
       "        [-0.6707, -0.9868,  0.7312]])"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.tril(a, diagonal=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.0000,  0.0000,  0.0000],\n",
       "        [-0.8349,  0.0000,  0.0000],\n",
       "        [-0.6707, -0.9868,  0.0000]])"
      ]
     },
     "execution_count": 48,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.tril(a, diagonal=-1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.triu\n",
    "torch.triu(input, diagonal=0, out=None) → Tensor  \n",
    "返回一个张量，包含输入矩阵(2D张量)的上三角部分，其余部分被设为 0 。这里所说的上三角部分为矩阵指定对角线 diagonal 及其之上的元素。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2944, -2.0081, -1.5021],\n",
       "        [ 0.0852, -0.5450, -0.1627],\n",
       "        [-0.1629,  0.6683,  0.3073]])"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.randn(3, 3)\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2944, -2.0081, -1.5021],\n",
       "        [ 0.0000, -0.5450, -0.1627],\n",
       "        [ 0.0000,  0.0000,  0.3073]])"
      ]
     },
     "execution_count": 50,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.triu(a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.0000, -2.0081, -1.5021],\n",
       "        [ 0.0000,  0.0000, -0.1627],\n",
       "        [ 0.0000,  0.0000,  0.0000]])"
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.triu(a, diagonal=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2944, -2.0081, -1.5021],\n",
       "        [ 0.0852, -0.5450, -0.1627],\n",
       "        [ 0.0000,  0.6683,  0.3073]])"
      ]
     },
     "execution_count": 52,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.triu(a, diagonal=-1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# BLAS and LAPACK Operations"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.addbmm\n",
    "torch.addbmm(beta=1, mat, alpha=1, batch1, batch2, out=None) → Tensor  \n",
    "对两个批 batch1 和 batch2 内存储的矩阵进行批矩阵乘操作，附带reduced add 步骤( 所有矩阵乘结果沿着第一维相加)。矩阵 mat 加到最终结果。 batch1 和 batch2 都为包含相同数量矩阵的3维张量。 如果 batch1 是形为$b \\times n \\times m$的张量， batch2 是形为$b \\times m \\times p$的张量，则out 和 mat 的形状都是$n \\times p$ ，即$res = (beta*M) + (alpha * sum( batch1_i \\mathbin{@} batch2_i,i=0,b))$  \n",
    "对类型为 FloatTensor 或 DoubleTensor 的输入， alpha and beta 必须为实数，否则两个参数\n",
    "须为整数。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ -0.3218,  -0.1129,  10.1543,   4.6413,  -1.3953],\n",
       "        [  0.7677, -10.6244,  -1.6786, -14.7861,   5.1038],\n",
       "        [ -4.9798,  -2.1606,  -4.7059,  -0.2605,   9.6562]])"
      ]
     },
     "execution_count": 53,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "M = torch.randn(3, 5)\n",
    "batch1 = torch.randn(10, 3 ,4)\n",
    "batch2 = torch.randn(10, 4, 5)\n",
    "torch.addbmm(M, batch1, batch2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.addmm\n",
    "torch.addmm(beta=1, mat, alpha=1, mat1, mat2, out=None) → Tensor  \n",
    "对矩阵 mat1 和 mat2 进行矩阵乘操作。矩阵 mat 加到最终结果。如果 mat1 是一个$n \\times m$ 张量，mat2 是一个$m \\times p$张量，那么 out 和 mat 的形状为$n \\times p$ 。 alpha 和 beta 分别是两个矩阵$mat1 \\mathbin{@} mat2$和$mat$的比例因子，即$ out=(beta*M) + (alpha*mat1\\mathbin{@}mat2) $  \n",
    "对类型为 FloatTensor 或 DoubleTensor 的输入， beta and alpha 必须为实数，否则两个参数须为整数。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.8045, -0.8936,  1.1515],\n",
       "        [ 0.7857,  2.1823, -1.0061]])"
      ]
     },
     "execution_count": 54,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "M = torch.randn(2, 3)\n",
    "mat1 = torch.randn(2, 3)\n",
    "mat2 = torch.randn(3, 3)\n",
    "torch.addmm(M, mat1, mat2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.addmv\n",
    "torch.addmv(beta=1, tensor, alpha=1, mat, vec, out=None) → Tensor  \n",
    "对矩阵 mat 和向量 vec 进行矩阵-向量乘操作。向量 tensor 加到最终结果。如果 mat 是一个$n \\times m$维矩阵， vec 是一个$m$维向量，那么 out 为$n$元向量。 可选参数 alpha 和 beta 分别是$mat \\mathbin{@} vec$和$tensor$的比例因子，即$out = (beta ∗ tensor) + (alpha ∗ (mat \\mathbin{@} vec))$  \n",
    "对类型为_FloatTensor_或_DoubleTensor_的输入， alpha and beta 必须为实数，否则两个参数须为整数。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.3892,  3.1394])"
      ]
     },
     "execution_count": 55,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "M = torch.randn(2)\n",
    "mat = torch.randn(2, 3)\n",
    "vec = torch.randn(3)\n",
    "torch.addmv(M, mat, vec)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.addr\n",
    "torch.addr(beta=1, mat, alpha=1, vec1, vec2, out=None) → Tensor  \n",
    "对向量 vec1 和 vec2 进行张量积（外积）操作。矩阵 mat 加到最终结果。如果 vec1 是一个$n$维向量， vec2 是一个$m$维向量，那么矩阵 mat 的形状须为$n \\times m$。可选参数 beta 和 alpha 分别是矩阵$mat$和外积$vec1 \\otimes vec2$的比例因子，即$out=(beta*mat)+(alpha*vec1 \\otimes vec2)$  \n",
    "对类型为_FloatTensor_或_DoubleTensor_的输入， alpha and beta 必须为实数，否则两个参数须为整数。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1., 2.],\n",
       "        [2., 4.],\n",
       "        [3., 6.]])"
      ]
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "vec1 = torch.arange(1., 4.)\n",
    "vec2 = torch.arange(1., 3.)\n",
    "M = torch.zeros(3, 2)\n",
    "torch.addr(M, vec1, vec2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.baddbmm\n",
    "torch.baddbmm(beta=1, mat, alpha=1, batch1, batch2, out=None) → Tensor \n",
    "对两个批 batch1 和 batch2 内存储的矩阵进行批矩阵乘操作，矩阵 mat 加到最终结果。\n",
    "batch1 和 batch2 都为包含相同数量矩阵的3维张量。 如果 batch1 是形为$b \\times n \\times m$ 的张量， batch2 是形为$b \\times m \\times p$的张量，则 out 和 mat 的形状都是$b \\times n \\times p$ ，即$res_i=(beta*M_i)+(alpha*batch1_i \\mathbin{@} batch2_i)$  \n",
    "对类型为_FloatTensor_或_DoubleTensor_的输入， alpha and beta 必须为实数，否则两个参\n",
    "数须为整数。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([10, 3, 5])"
      ]
     },
     "execution_count": 57,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "M = torch.randn(10, 3, 5)\n",
    "batch1 = torch.randn(10, 3, 4)\n",
    "batch2 = torch.randn(10, 4, 5)\n",
    "torch.baddbmm(M, batch1, batch2).size()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.bmm\n",
    "torch.bmm(batch1, batch2, out=None) → Tensor  \n",
    "对存储在两个批 batch1 和 batch2 内的矩阵进行批矩阵乘操作。 batch1 和 batch2 都为包含相\n",
    "同数量矩阵的3维张量。 如果 batch1 是形为$b \\times n \\times m$的张量， batch2 是形为$b \\times m \\times p$的张量，则 out 的形状为$b \\times n \\times p$，即$res_i=batch1_i@batch2_i$  \n",
    "注意：本函数不对两个批的矩阵进行广播，两个输入张量的批大小必须相同。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([10, 3, 5])"
      ]
     },
     "execution_count": 58,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "batch1 = torch.randn(10, 3, 4)\n",
    "batch2 = torch.randn(10, 4, 5)\n",
    "res = torch.bmm(batch1, batch2)\n",
    "res.size()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.lu\n",
    "torch.lu(A, pivot=True, get_infos=False, out=None) → Tensor, IntTensor  \n",
    "返回一个元组，包含LU 分解和 pivots。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 0.0776,  0.0252,  0.9465],\n",
       "         [ 0.5351,  0.9792,  0.6946],\n",
       "         [ 0.1453, -0.2859, -0.3821]],\n",
       "\n",
       "        [[-0.4581,  1.3442,  0.4680],\n",
       "         [ 1.9161,  0.9292, -0.6237],\n",
       "         [-0.2159, -1.0528,  1.3636]]])"
      ]
     },
     "execution_count": 59,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "A = torch.randn(2, 3, 3)\n",
    "A"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 0.5351,  0.9792,  0.6946],\n",
       "         [ 0.2715, -0.5518, -0.5708],\n",
       "         [ 0.1451,  0.2117,  0.9666]],\n",
       "\n",
       "        [[ 1.9161,  0.9292, -0.6237],\n",
       "         [-0.2391,  1.5664,  0.3188],\n",
       "         [-0.1127, -0.6053,  1.4863]]])"
      ]
     },
     "execution_count": 60,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "A_LU, pivots = torch.lu(A)\n",
    "A_LU"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[2, 3, 3],\n",
       "        [2, 2, 3]], dtype=torch.int32)"
      ]
     },
     "execution_count": 61,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pivots"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.lu_solve\n",
    "torch.lu_solve(b, LU_data, LU_pivots, out=None) -> Tensor  \n",
    "返回线性方程组$Ax=b$的LU解。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(4.6290e-07)"
      ]
     },
     "execution_count": 62,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "A = torch.randn(2, 3, 3)\n",
    "b = torch.randn(2, 3)\n",
    "A_LU = torch.lu(A)\n",
    "x = torch.lu_solve(b, *A_LU)\n",
    "torch.norm(torch.bmm(A, x.unsqueeze(2)) - b.unsqueeze(2))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.dot\n",
    "torch.dot(tensor1, tensor2) → float  \n",
    "计算两个张量的点乘(内乘)，两个张量都为1-D 向量。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(7.)"
      ]
     },
     "execution_count": 63,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.dot(torch.Tensor([2, 3]), torch.Tensor([2, 1]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.eig\n",
    "torch.eig(a, eigenvectors=False, out=None) -> (Tensor, Tensor)  \n",
    "计算实方阵 a 的特征值和特征向量"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.gels\n",
    "torch.gels(B, A, out=None) → Tensor  \n",
    "对形如$m \\times n$的满秩矩阵 a 计算其最小二乘和最小范数问题的解。 如果$m \\geqslant n$ , gels 对最小二乘问题进行求解，即：$$\\begin{array}{ll}\n",
    "   \\min_X & \\|AX-B\\|_2.\n",
    "   \\end{array}$$\n",
    "如果$m<n$, gels 求解最小范数问题，即：$$ \\begin{array}{ll}\n",
    "   \\min_X & \\|X\\|_2 & \\text{subject to} & AX = B.\n",
    "   \\end{array}$$\n",
    "返回矩阵X的前n 行包含解。余下的行包含以下残差信息: 相应列从第n 行开始计算的每列的欧式距离。  \n",
    "注意： 返回矩阵总是被转置，无论输入矩阵的原始布局如何，总会被转置；即，总是有stride (1, m) 而不是 (m, 1)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 2.0000,  1.0000],\n",
       "        [ 1.0000,  1.0000],\n",
       "        [ 1.0000,  2.0000],\n",
       "        [10.9635,  4.8501],\n",
       "        [ 8.9332,  5.2418]])"
      ]
     },
     "execution_count": 64,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "A = torch.tensor([[1., 1, 1],\n",
    "                  [2, 3, 4],\n",
    "                  [3, 5, 2],\n",
    "                  [4, 2, 5],\n",
    "                  [5, 4, 3]])\n",
    "B = torch.tensor([[-10., -3],\n",
    "                  [ 12, 14],\n",
    "                  [ 14, 12],\n",
    "                  [ 16, 16],\n",
    "                  [ 18, 16]])\n",
    "X, _ = torch.gels(B, A)\n",
    "X"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.ger\n",
    "torch.ger(vec1, vec2, out=None) → Tensor  \n",
    "计算两向量 vec1 , vec2 的张量积（外积）。如果 vec1 的长度为$n$， vec2 长度为$m$，则输出 out 应为形如$n \\times m$的矩阵。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 1,  2,  3],\n",
       "        [ 2,  4,  6],\n",
       "        [ 3,  6,  9],\n",
       "        [ 4,  8, 12]])"
      ]
     },
     "execution_count": 65,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "v1 = torch.arange(1, 5)\n",
    "v2 = torch.arange(1, 4)\n",
    "torch.ger(v1, v2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.solve\n",
    "torch.solve(B, A, out=None) -> (Tensor, Tensor)  \n",
    "返回线性方程组 AX = B 的解。  \n",
    "LU 包含两个矩阵L，U。A须为非奇异方阵，如果A是一个$m \\times m$ 矩阵，B 是$m \\times k$矩阵，则LU 是 $m \\times m$矩阵， X为$m \\times k$矩阵。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(7.0977e-06)"
      ]
     },
     "execution_count": 66,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "A = torch.tensor([[ 6.80, -2.11,  5.66,  5.97,  8.23],\n",
    "                  [-6.05, -3.30,  5.36, -4.44,  1.08],\n",
    "                  [-0.45,  2.58, -2.70,  0.27,  9.04],\n",
    "                  [ 8.32,  2.71,  4.35, -7.17,  2.14],\n",
    "                  [-9.67, -5.14, -7.26,  6.08, -6.87]]).t()\n",
    "B = torch.tensor([[ 4.02,  6.19, -8.22, -7.57, -3.03],\n",
    "                  [-1.56,  4.00, -8.67,  1.75,  2.86],\n",
    "                  [ 9.81, -4.09, -4.57, -8.61,  8.99]]).t()\n",
    "X, LU = torch.solve(B, A)\n",
    "torch.dist(B, torch.mm(A, X))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(3.2262e-06)"
      ]
     },
     "execution_count": 67,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "A = torch.randn(2, 3, 1, 4, 4)\n",
    "B = torch.randn(2, 3, 1, 4, 6)\n",
    "X, LU = torch.solve(B, A)\n",
    "torch.dist(B, A.matmul(X))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.inverse\n",
    "torch.inverse(input, out=None) → Tensor  \n",
    "对方阵输入 input 取逆。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 1.0000e+00,  2.3842e-07, -5.5879e-08,  1.1921e-07],\n",
       "        [ 0.0000e+00,  1.0000e+00, -5.2154e-08,  1.1921e-07],\n",
       "        [ 4.7684e-07,  2.3842e-07,  1.0000e+00,  0.0000e+00],\n",
       "        [ 4.7684e-07,  2.3842e-07, -1.1176e-07,  1.0000e+00]])"
      ]
     },
     "execution_count": 68,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = torch.rand(4, 4)\n",
    "y = torch.inverse(x)\n",
    "z = torch.mm(x, y)\n",
    "z"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(4.7684e-07)"
      ]
     },
     "execution_count": 69,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.max(torch.abs(z - torch.eye(4)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(2.8610e-06)"
      ]
     },
     "execution_count": 70,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = torch.randn(2, 3, 4, 4)\n",
    "y = torch.inverse(x)\n",
    "z = torch.matmul(x, y)\n",
    "torch.max(torch.abs(z - torch.eye(4).expand_as(x)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.mm\n",
    "torch.mm(mat1, mat2, out=None) → Tensor  \n",
    "对矩阵 mat1 和 mat2 进行相乘。 如果 mat1 是一个$n \\times m$ 张量， mat2 是一个$m \\times p$ 张量，将会输出一个$n \\times p$张量 out 。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 1.8535, -1.2918, -3.9691],\n",
       "        [-2.9728,  0.4596, -5.2783]])"
      ]
     },
     "execution_count": 71,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "mat1 = torch.randn(2, 3)\n",
    "mat2 = torch.randn(3, 3)\n",
    "torch.mm(mat1, mat2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.mv\n",
    "torch.mv(mat, vec, out=None) → Tensor  \n",
    "对矩阵 mat 和向量 vec 进行相乘。 如果 mat 是一个$n \\times m$ 张量， vec 是一个$m$元 1维张量，将会输出一个$n$元 1维张量。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 0.4932, -0.2171])"
      ]
     },
     "execution_count": 72,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "mat = torch.randn(2, 3)\n",
    "vec = torch.randn(3)\n",
    "torch.mv(mat, vec)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.qr\n",
    "torch.qr(input, out=None) -> (Tensor, Tensor)  \n",
    "计算输入矩阵的QR分解：返回两个矩阵$q,r$，使得$x=qr$，其中$q$是一个半正交矩阵，$r$是一个上三角矩阵  \n",
    "本函数返回一个thin(reduced)QR分解。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.8571,  0.3943,  0.3314],\n",
       "        [-0.4286, -0.9029, -0.0343],\n",
       "        [ 0.2857, -0.1714,  0.9429]])"
      ]
     },
     "execution_count": 73,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])\n",
    "q, r = torch.qr(a)\n",
    "q"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ -14.0000,  -21.0000,   14.0000],\n",
       "        [   0.0000, -175.0000,   70.0000],\n",
       "        [   0.0000,    0.0000,  -35.0000]])"
      ]
     },
     "execution_count": 74,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "r"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 12., -51.,   4.],\n",
       "        [  6., 167., -68.],\n",
       "        [ -4.,  24., -41.]])"
      ]
     },
     "execution_count": 75,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.mm(q, r).round()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1., 0., 0.],\n",
       "        [0., 1., -0.],\n",
       "        [0., -0., 1.]])"
      ]
     },
     "execution_count": 76,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.mm(q.t(), q).round()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.svd\n",
    "torch.svd(input, some=True, out=None) -> (Tensor, Tensor, Tensor)  \n",
    "返回对形如$n \\times m$的实矩阵 A 进行奇异值分解的结果，使得$A = U \\operatorname{diag}(S) V^T$。在默认 some=True 时返回瘦（reduced）分解：$U$形状为$n \\times \\min(n,m)$，$S$为长度$\\min(n,m)$的向量，$V$形状为$m \\times \\min(n,m)$。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.5911,  0.2632,  0.3554,  0.3143,  0.2299],\n",
       "        [-0.3976,  0.2438, -0.2224, -0.7535, -0.3636],\n",
       "        [-0.0335, -0.6003, -0.4508,  0.2334, -0.3055],\n",
       "        [-0.4297,  0.2362, -0.6859,  0.3319,  0.1649],\n",
       "        [-0.4697, -0.3509,  0.3874,  0.1587, -0.5183],\n",
       "        [ 0.2934,  0.5763, -0.0209,  0.3791, -0.6526]])"
      ]
     },
     "execution_count": 77,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.tensor([[8.79,  6.11, -9.15,  9.57, -3.49,  9.84],\n",
    "                  [9.93,  6.91, -7.93,  1.64,  4.02,  0.15],\n",
    "                  [9.83,  5.04,  4.86,  8.83,  9.80, -8.99],\n",
    "                  [5.45, -0.27,  4.85,  0.74, 10.00, -6.02],\n",
    "                  [3.16,  7.98,  3.01,  5.80,  4.27, -5.31]]).t()\n",
    "u, s, v = torch.svd(a)\n",
    "u"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([27.4687, 22.6432,  8.5584,  5.9857,  2.0149])"
      ]
     },
     "execution_count": 78,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2514,  0.8148, -0.2606,  0.3967, -0.2180],\n",
       "        [-0.3968,  0.3587,  0.7008, -0.4507,  0.1402],\n",
       "        [-0.6922, -0.2489, -0.2208,  0.2513,  0.5891],\n",
       "        [-0.3662, -0.3686,  0.3859,  0.4342, -0.6265],\n",
       "        [-0.4076, -0.0980, -0.4933, -0.6227, -0.4396]])"
      ]
     },
     "execution_count": 79,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(1.4596e-05)"
      ]
     },
     "execution_count": 80,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### torch.symeig\n",
    "torch.symeig(input, eigenvectors=False, upper=True, out=None) -> (Tensor, Tensor)  \n",
    "返回实对称矩阵 input 的特征值和特征向量。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-11.0656,  -6.2287,   0.8640,   8.8655,  16.0948])"
      ]
     },
     "execution_count": 81,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.tensor([[ 1.96,  0.00,  0.00,  0.00,  0.00],\n",
    "                  [-6.49,  3.80,  0.00,  0.00,  0.00],\n",
    "                  [-0.47, -6.39,  4.17,  0.00,  0.00],\n",
    "                  [-7.20,  1.50, -1.51,  5.70,  0.00],\n",
    "                  [-0.65, -6.34,  2.67,  1.80, -7.10]]).t()\n",
    "e, v = torch.symeig(a, eigenvectors=True)\n",
    "e"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2981, -0.6075,  0.4026, -0.3745,  0.4896],\n",
       "        [-0.5078, -0.2880, -0.4066, -0.3572, -0.6053],\n",
       "        [-0.0816, -0.3843, -0.6600,  0.5008,  0.3991],\n",
       "        [-0.0036, -0.4467,  0.4553,  0.6204, -0.4564],\n",
       "        [-0.8041,  0.4480,  0.1725,  0.3108,  0.1622]])"
      ]
     },
     "execution_count": 82,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "v"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.2"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": true
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
