{
 "metadata": {
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7-final"
  },
  "orig_nbformat": 2,
  "kernelspec": {
   "name": "python37764bitd2lconda94fc7ab78ae34cabbef0e75f5636f253",
   "display_name": "Python 3.7.7 64-bit ('d2l': conda)"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "tensor(0.)\n"
    }
   ],
   "source": [
    "import torch\n",
     "other = torch.tensor(0.0)\n",
    "print(other)"
   ]
  },
  {
   "cell_type": "code",
    "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "X = torch.tensor([[-1, 0, 1], [-2, 0, 2]], dtype=torch.float)"
   ]
  },
  {
   "cell_type": "code",
    "execution_count": 3,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "tensor([[0., 0., 1.],\n        [0., 0., 2.]])\n"
    }
   ],
   "source": [
    "y = torch.max(input=X, other=other)\n",
    "print(y)"
   ]
  },
  {
   "cell_type": "code",
    "execution_count": 4,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "tensor([[0., 0., 1.],\n        [0., 0., 2.]])\n"
    }
   ],
   "source": [
    "y2 = torch.max(X, other)\n",
    "print(y2)"
   ]
  },
  {
   "cell_type": "code",
    "execution_count": 5,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "tensor([[0., 0., 0.],\n        [0., 0., 0.]])\n"
    }
   ],
   "source": [
    "a = torch.zeros_like(X)\n",
    "print(a)"
   ]
  },
  {
   "cell_type": "code",
    "execution_count": 6,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "tensor([[0., 0., 1.],\n        [0., 0., 2.]])\n"
    }
   ],
   "source": [
    "y3 = torch.max(X, a)\n",
    "print(y3)"
   ]
  },
  {
   "cell_type": "code",
    "execution_count": 7,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "Help on built-in function max:\n\nmax(...)\n    max(input) -> Tensor\n    \n    Returns the maximum value of all elements in the ``input`` tensor.\n    \n    .. warning::\n        This function produces deterministic (sub)gradients unlike ``max(dim=0)``\n    \n    Args:\n        input (Tensor): the input tensor.\n    \n    Example::\n    \n        >>> a = torch.randn(1, 3)\n        >>> a\n        tensor([[ 0.6763,  0.7445, -2.2369]])\n        >>> torch.max(a)\n        tensor(0.7445)\n    \n    .. function:: max(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)\n    \n    Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum\n    value of each row of the :attr:`input` tensor in the given dimension\n    :attr:`dim`. And ``indices`` is the index location of each maximum value found\n    (argmax).\n    \n    .. warning::\n        ``indices`` does not necessarily contain the first occurrence of each\n        maximal value found, unless it is unique.\n        The exact implementation details are device-specific.\n        Do not expect the same result when run on CPU and GPU in general.\n        For the same reason do not expect the gradients to be deterministic.\n    \n    If ``keepdim`` is ``True``, the output tensors are of the same size\n    as ``input`` except in the dimension ``dim`` where they are of size 1.\n    Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting\n    in the output tensors having 1 fewer dimension than ``input``.\n    \n    Args:\n        input (Tensor): the input tensor.\n        dim (int): the dimension to reduce.\n        keepdim (bool): whether the output tensor has :attr:`dim` retained or not. 
Default: ``False``.\n        out (tuple, optional): the result tuple of two output tensors (max, max_indices)\n    \n    Example::\n    \n        >>> a = torch.randn(4, 4)\n        >>> a\n        tensor([[-1.2360, -0.2942, -0.1222,  0.8475],\n                [ 1.1949, -1.1127, -2.2379, -0.6702],\n                [ 1.5717, -0.9207,  0.1297, -1.8768],\n                [-0.6172,  1.0036, -0.6060, -0.2432]])\n        >>> torch.max(a, 1)\n        torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))\n    \n    .. function:: max(input, other, out=None) -> Tensor\n    \n    Each element of the tensor ``input`` is compared with the corresponding\n    element of the tensor ``other`` and an element-wise maximum is taken.\n    \n    The shapes of ``input`` and ``other`` don't need to match,\n    but they must be :ref:`broadcastable <broadcasting-semantics>`.\n    \n    .. math::\n        \\text{out}_i = \\max(\\text{tensor}_i, \\text{other}_i)\n    \n    .. note:: When the shapes do not match, the shape of the returned output tensor\n              follows the :ref:`broadcasting rules <broadcasting-semantics>`.\n    \n    Args:\n        input (Tensor): the input tensor.\n        other (Tensor): the second input tensor\n        out (Tensor, optional): the output tensor.\n    \n    Example::\n    \n        >>> a = torch.randn(4)\n        >>> a\n        tensor([ 0.2942, -0.7416,  0.2653, -0.1584])\n        >>> b = torch.randn(4)\n        >>> b\n        tensor([ 0.8722, -1.7421, -0.4141, -0.5055])\n        >>> torch.max(a, b)\n        tensor([ 0.8722, -0.7416,  0.2653, -0.1584])\n\n"
    }
   ],
   "source": [
    "help(torch.max)"
   ]
  }
 ]
}