{
 "metadata": {
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7-final"
  },
  "orig_nbformat": 2,
  "kernelspec": {
   "name": "python37764bitd2lconda94fc7ab78ae34cabbef0e75f5636f253",
   "display_name": "Python 3.7.7 64-bit ('d2l': conda)"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "text": "\u001b[1;31mDocstring:\u001b[0m\ngather(input, dim, index, out=None, sparse_grad=False) -> Tensor\n\nGathers values along an axis specified by `dim`.\n\nFor a 3-D tensor the output is specified by::\n\n    out[i][j][k] = input[index[i][j][k]][j][k]  # if dim == 0\n    out[i][j][k] = input[i][index[i][j][k]][k]  # if dim == 1\n    out[i][j][k] = input[i][j][index[i][j][k]]  # if dim == 2\n\nIf :attr:`input` is an n-dimensional tensor with size\n:math:`(x_0, x_1..., x_{i-1}, x_i, x_{i+1}, ..., x_{n-1})`\nand ``dim = i``, then :attr:`index` must be an :math:`n`-dimensional tensor with\nsize :math:`(x_0, x_1, ..., x_{i-1}, y, x_{i+1}, ..., x_{n-1})` where :math:`y \\geq 1`\nand :attr:`out` will have the same size as :attr:`index`.\n\nArgs:\n    input (Tensor): the source tensor\n    dim (int): the axis along which to index\n    index (LongTensor): the indices of elements to gather\n    out (Tensor, optional): the destination tensor\n    sparse_grad(bool,optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.\n\nExample::\n\n    >>> t = torch.tensor([[1,2],[3,4]])\n    >>> torch.gather(t, 1, torch.tensor([[0,0],[1,0]]))\n    tensor([[ 1,  1],\n            [ 4,  3]])\n\u001b[1;31mType:\u001b[0m      builtin_function_or_method\n"
    }
   ],
   "source": [
    "import torch\n",
    "torch.gather??"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "tensor([[1, 1],\n        [4, 3]])"
     },
     "metadata": {},
     "execution_count": 8
    }
   ],
   "source": [
    "t = torch.tensor([[1,2],\n",
    "                  [3,4]])\n",
    "torch.gather(t, 1, torch.tensor([[0,0],[1,0]]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "gather的作用是这样的，index实际上是索引，具体是行还是列的索引要看前面dim 的指定，比如对于我们的例子，【1,2,3;4,5,6】，\n",
    "- 指定dim=1，也就是横向，那么索引就是列号。index的大小就是输出的大小，\n",
     "- 所以比如index是【1,0;0,0】，那么看index第一行，1列指的是2，0列指的是1；同理，第二行为4，4。这样就输出为【2,1;4,4】"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "tensor([[2., 1.],\n        [4., 4.]])\n"
    }
   ],
   "source": [
    "b = torch.Tensor([[1,2,3],\n",
    "                  [4,5,6]])\n",
    "index_3 = torch.tensor([[1,0],\n",
    "                        [0,0]])\n",
    "print( torch.gather(b, dim=1, index=index_3))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "tensor([[1., 2.],\n        [6., 4.]])\n"
    }
   ],
   "source": [
    "index_1 = torch.tensor([[0,1],\n",
    "                        [2,0]])\n",
    "print( torch.gather(b, dim=1, index=index_1))\n",
    "#dim=1;看列，索引是列号\n",
    "#[0,1],即输出第1行填充的是原矩阵 第1行，第0，1列的数据：1，2\n",
     "#[2,0],即输出第2行填充的是原矩阵 第2行，第2，0列的数据：6，4\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "tensor([[1., 5., 3.],\n        [1., 2., 6.]])\n"
    }
   ],
   "source": [
    "index_2 = torch.tensor([[0,1,0],\n",
    "                        [0,0,1]])\n",
    "print( torch.gather(b, dim=0, index=index_2))\n",
    "#dim=0;看行，索引是行号，竖向看\n",
    "#[[0,0]],即输出第1列，填充的是原矩阵第1列，第0，0行的数据1，1\n",
    "#[[1,0]],即输出第2列，填充的是原矩阵第2列，第1，0行的数据5，2\n",
    "#[[0,1]],即输出第3列，填充的是原矩阵第3列，第0，1行的数据3，6"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ]
}