{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([1., 2., 3., 4., 5.]), tensor([1., 1., 1., 1., 1.]))"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.tensor([1.,2,3,4,5])\n",
    "b = torch.ones(5)\n",
    "a, b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([1., 2., 3., 4., 5.])"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a*b"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "dot中，向量a,b必须数据类型一致，不然会报错"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(15.)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.dot(a, b)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "`torch.ones(5)`和`torch.ones(1,5)`不是一样的：前者是向量，后者是矩阵。在 PyTorch 中矩阵-向量积需要用 `torch.mv(A, b)`。<font color=red>注意括号内参数的顺序：矩阵在前，向量在后</font>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "注意：<font color=red>矩阵 A 的列数必须与向量的维度（长度）相同</font>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([1., 2., 3., 4., 5.]), tensor([[1., 1., 1., 1., 1.]]), tensor([15.]))"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.tensor([1.,2,3,4,5])\n",
    "B = torch.ones(1,5)\n",
    "a, B, torch.mv(B, a)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "哈达玛积和矩阵乘法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[ 0.,  2.,  4.,  6.],\n",
       "         [ 8., 10., 12., 14.],\n",
       "         [16., 18., 20., 22.],\n",
       "         [24., 26., 28., 30.],\n",
       "         [32., 34., 36., 38.]]),\n",
       " tensor([[ 0.,  2.,  4.,  6.],\n",
       "         [ 8., 10., 12., 14.],\n",
       "         [16., 18., 20., 22.],\n",
       "         [24., 26., 28., 30.],\n",
       "         [32., 34., 36., 38.]]))"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "A = torch.arange(20, dtype=torch.float32).reshape(5, 4)\n",
    "B = A.clone()  # 通过分配新内存，将A的一个副本分配给B\n",
    "A + A, A + B"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[  0.,   1.,   4.,   9.],\n",
       "        [ 16.,  25.,  36.,  49.],\n",
       "        [ 64.,  81., 100., 121.],\n",
       "        [144., 169., 196., 225.],\n",
       "        [256., 289., 324., 361.]])"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "A*B"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[  14.,   38.,   62.,   86.,  110.],\n",
       "        [  38.,  126.,  214.,  302.,  390.],\n",
       "        [  62.,  214.,  366.,  518.,  670.],\n",
       "        [  86.,  302.,  518.,  734.,  950.],\n",
       "        [ 110.,  390.,  670.,  950., 1230.]])"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.mm(A,B.T)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "范数：矩阵和向量都可以用 `torch.norm` 计算（向量得到 $L_{2}$ 范数，矩阵默认得到 Frobenius 范数）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(49.6991)"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.norm(A)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 练习"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "2.给出两个矩阵$ \\mathbf{A} $和$ \\mathbf{B}$, 显示转置的和等于和的转置： $\\mathbf{A}^{T}+\\mathbf{B}^{T}=\\left (  \\mathbf{A}+ \\mathbf{B} \\right )^{T}$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[ 0,  1,  2,  3,  4],\n",
       "         [ 5,  6,  7,  8,  9],\n",
       "         [10, 11, 12, 13, 14],\n",
       "         [15, 16, 17, 18, 19]]),\n",
       " tensor([[1., 1., 1., 1., 1.],\n",
       "         [1., 1., 1., 1., 1.],\n",
       "         [1., 1., 1., 1., 1.],\n",
       "         [1., 1., 1., 1., 1.]]))"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "A = torch.arange(20).reshape(4, 5)\n",
    "B = torch.ones(4, 5)\n",
    "A, B"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[True, True, True, True],\n",
       "        [True, True, True, True],\n",
       "        [True, True, True, True],\n",
       "        [True, True, True, True],\n",
       "        [True, True, True, True]])"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "(A.T+B.T) == (A+B).T"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "3.给定任意方矩阵$ \\mathbf{A} $，$\\mathbf{A}+\\mathbf{A}^{T}$总是对称的吗?为什么?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "是，$ \\mathbf{A}+ \\mathbf{A}^{T}$中，$\\mathbf{A}+\\mathbf{A}^{T}=\\begin{bmatrix}\n",
    "a_{11} &a_{12} & ... & a_{1n} \\\\ \n",
    "a_{21} & a_{22} & ... & a_{2n}\\\\ \n",
    "... & ... &  & \\\\ \n",
    "a_{n1} & a_{n2} & ... & a_{nn}\n",
    "\\end{bmatrix}+\\begin{bmatrix}\n",
    "a_{11} &a_{21} & ... & a_{n1} \\\\ \n",
    "a_{12} & a_{22} & ... & a_{n2}\\\\ \n",
    "... & ... &  & \\\\ \n",
    "a_{1n} & a_{2n} & ... & a_{nn}\n",
    "\\end{bmatrix}=\\begin{bmatrix}\n",
    "2a_{11} & a_{12}+a_{21} & ... & a_{1n}+a_{n1} \\\\ \n",
    "a_{21}+a_{12} & 2a_{22} & ... & a_{2n}+a_{n2}\\\\ \n",
    "... & ... &  & \\\\ \n",
    "a_{n1}+a_{1n} & a_{n2}+a_{2n} & ... & 2a_{nn}\n",
    "\\end{bmatrix}$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "4.我们在本节中定义了形状（2, 3, 4）的张量 X。len(X)的输出结果是什么？"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[[ 0.,  1.,  2.,  3.],\n",
       "          [ 4.,  5.,  6.,  7.],\n",
       "          [ 8.,  9., 10., 11.]],\n",
       " \n",
       "         [[12., 13., 14., 15.],\n",
       "          [16., 17., 18., 19.],\n",
       "          [20., 21., 22., 23.]]]),\n",
       " 2)"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X = torch.arange(24,dtype=torch.float32).reshape(2, 3, 4)\n",
    "X, len(X)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "len() 返回对象中项目的数量（对象为字符串时返回字符数）。对张量而言，len(X) 返回第 0 轴的长度，可以简单理解为 X.shape[0]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "5.对于任意形状的张量X, len(X)是否总是对应于X特定轴的长度?这个轴是什么?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "是的，len(X) 总是返回 X 第 0 轴（axis=0）的长度，即 X.shape[0]，与其余轴无关"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "6.运行` A / A.sum(axis=1)`，看看会发生什么。你能分析原因吗？"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "ename": "RuntimeError",
     "evalue": "The size of tensor a (5) must match the size of tensor b (4) at non-singleton dimension 1",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-13-d5a3308fbb05>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mA\u001b[0m \u001b[1;33m/\u001b[0m \u001b[0mA\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msum\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0maxis\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;31mRuntimeError\u001b[0m: The size of tensor a (5) must match the size of tensor b (4) at non-singleton dimension 1"
     ]
    }
   ],
   "source": [
    "A / A.sum(axis=1,dtype=torch.float32)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "结果报错。注意这里 A.sum(axis=1) 没有 keepdims，得到的是长度为 4 的向量；广播时它会与 A 的最后一维（长度 5）对齐，4 ≠ 5 因此报错。使用 keepdims=True 让求和结果保持形状 (4, 1)，即可沿列正常广播，如下示例"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(torch.Size([4, 5]), torch.Size([4, 1]))"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "(A / A.sum(axis=1,keepdims=True)).shape, A.sum(axis=1, keepdims=True).shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "7.当你在曼哈顿的两点之间旅行时，你需要在坐标上走多远，也就是说，就大街和街道而言？你能斜着走吗？"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "答：沿街道行走的距离是 $ \\mathbf{L}_{1} $ 范数（曼哈顿距离），一般情况下不能斜着走；两点之间的直线距离则是 $ \\mathbf{L}_{2} $ 范数（欧氏距离）"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "8.考虑一个具有形状（2, 3, 4）的张量，在轴 0,1,2 上的求和输出是什么形状?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "答：在轴 0 上求和的形状是 (3, 4)，在轴 1 上是 (2, 4)，在轴 2 上是 (2, 3)。即对哪个轴求和，哪个轴就消失"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(torch.Size([3, 4]), torch.Size([2, 4]), torch.Size([2, 3]))"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X.sum(axis=0).shape, X.sum(axis=1).shape, X.sum(axis=2).shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "9.向 linalg.norm 函数提供 3 个或更多轴的张量，并观察其输出。对于任意形状的张量这个函数计算得到什么?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor(65.7571), tensor(65.7571))"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.norm(X), torch.sqrt((X*X).sum())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[[-0.6655, -0.7277, -0.1966,  0.8641,  1.3353],\n",
       "          [ 1.1875, -1.5365,  0.4122,  0.0803,  1.9138],\n",
       "          [-0.6885, -0.8113, -0.4817,  1.0928, -0.3823],\n",
       "          [ 1.1893, -0.1677,  2.3397, -0.5767,  1.5040]],\n",
       "\n",
       "         [[ 1.2172, -0.2745,  0.6453,  1.7866,  1.9853],\n",
       "          [-1.1073, -0.7828, -0.4463,  0.7448,  0.6755],\n",
       "          [ 0.1423,  0.1076, -0.1484, -0.8892,  0.7744],\n",
       "          [-2.6724, -0.8906,  0.0701, -0.7599, -0.0271]],\n",
       "\n",
       "         [[-0.9870,  1.0865, -1.3783, -0.9343, -0.5668],\n",
       "          [ 2.4365,  0.2227, -0.4954,  0.0109, -1.2300],\n",
       "          [ 1.1985, -0.5945,  1.2207, -0.5351,  2.4445],\n",
       "          [-1.5237,  2.8896,  0.2409, -0.4454,  1.1582]]],\n",
       "\n",
       "\n",
       "        [[[ 0.1882,  2.8824,  0.5266,  0.3253, -0.0194],\n",
       "          [ 0.4221,  1.3551, -1.1100, -0.5666, -0.9929],\n",
       "          [ 1.2320, -0.2957, -1.0383, -0.3681,  0.3272],\n",
       "          [ 0.5801, -0.1794,  0.8545, -1.2033,  0.1634]],\n",
       "\n",
       "         [[-0.1640,  2.0294,  0.5090,  0.9106, -0.4326],\n",
       "          [-0.0731, -0.4452,  0.5146, -1.2028,  1.3484],\n",
       "          [ 0.7421, -1.4299,  0.1698, -1.3904, -1.0060],\n",
       "          [-0.4109, -1.0153, -0.5975, -0.1822, -0.1744]],\n",
       "\n",
       "         [[ 0.3435, -1.0287, -0.7543, -1.3132, -0.4028],\n",
       "          [-2.1239, -0.0400, -0.0158,  0.1607, -1.1304],\n",
       "          [ 1.2302, -1.0811, -1.2773, -0.8541,  0.9151],\n",
       "          [ 1.0242, -1.2774, -0.8714, -0.1384,  1.7008]]]])"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "C = torch.randn(2,3,4,5)\n",
    "C"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor(11.7087), tensor(11.7087))"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.norm(C), torch.sqrt(pow(C,2).sum())"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
