{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.autograd import Variable\n",
    "import torch.nn.functional as F"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## nn.Embedding介绍\n",
    "- [https://pytorch.org/docs/stable/nn.html](https://pytorch.org/docs/stable/nn.html)\n",
    "----\n",
    "- pytorch里面实现word embedding是通过一个函数来实现的:nn.Embedding。Embedding的作用就是将词语向量化，通常会将词语表示为一个连续向量。\n",
    "- 官方介绍\n",
    "    - 一个简单的查找表，用于存储固定字典和大小的嵌入。\n",
    "    - 此模块通常用于存储单词嵌入并使用索引检索它们。模块的输入是索引列表，输出是相应的字嵌入。\n",
    "    \n",
    "- CLASS torch.nn.Embedding(num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None)\n",
    "\n",
    "- 参数：\n",
    "    - num_embeddings (int) – 词典的大小，也就是词典有多少个词。比如你有一个30000的词典，这里就是30000.\n",
    "\n",
    "    - embedding_dim (int) – the size of each embedding vector。将词语表示成embedding_dim维的向量。指定向量的维度。\n",
    "\n",
    "    - padding_idx (int, optional) – If given, pads the output with the embedding vector at padding_idx (initialized to zeros) whenever it encounters the index.设置padding_idx后，padding_idx中的嵌入向量将初始化为全零。但是，请注意，之后可以修改该向量，例如，使用定制的初始化方法，从而改变用于填充输出的向量。嵌入中此向量的梯度始终为零。\n",
    "\n",
    "    - max_norm (float, optional) – If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm.\n",
    "    - norm_type (float, optional) – The p of the p-norm to compute for the max_norm option. Default 2.\n",
    "    - scale_grad_by_freq (boolean, optional) – If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default False.\n",
    "    - sparse (bool, optional) – If True, gradient w.r.t. weight matrix will be a sparse tensor. See Notes for more details regarding sparse gradients.\n",
    "- 看个例子吧"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[ 0.3746, -1.0797, -0.6317, -0.6281, -0.4120]],\n",
      "       grad_fn=<EmbeddingBackward>)\n"
     ]
    }
   ],
   "source": [
    "# 定义一个词典：word2idx = {'hello': 0, 'world': 1, '!':2}，每个单词我们需要用一个数字去表示它，这里对于hello这个词，用0来表示它。\n",
    "word2idx = {'hello': 0, 'world': 1, '!':2}\n",
    "\n",
    "# 定义Embedding层，这里的3表示词典共有3个词，5表示5维度，其实也就是一个3x5的矩阵.\n",
    "# 如果你有1000个词，每个词希望是100维，你就可以这样建立一个word embedding，nn.Embedding(1000, 100)。\n",
    "# 这就相当于词语和表示词语的向量建立了一张表，想知道一个词的向量表示可以通过这张表去查。\n",
    "embeds = nn.Embedding(3, 5)\n",
    "\n",
    "# 如何查询hello这个词的向量表示呢？\n",
    "\n",
    "# 通过词语在原来字典中的索引查词向量，hello的索引是0\n",
    "hello_idx = torch.LongTensor([word2idx['hello']])\n",
    "\n",
    "# 特别注意这里需要一个Variable，因为我们需要访问nn.Embedding里面定义的元素，且word embeding算是神经网络里面的参数，所以我们需要定义Variable\n",
    "hello_idx = Variable(hello_idx)\n",
    "# 现在输入Variable格式的索引就可以查看词向量了\n",
    "hello_embed = embeds(hello_idx)\n",
    "\n",
    "# 输出hello这个词的<初始词向量>\n",
    "print(hello_embed)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 注意\n",
    "- 注意这里的词向量的建立只是**初始的词向量**，并没有经过任何修改优化，我们需要建立神经网络通过learning修改word embedding里面的参数使得word embedding每一个词向量能够表示每一个不同的词。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## nn.LSTM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "rnn = nn.LSTM(10, 20)     "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "input_data = torch.randn(5, 3, 10)  # seq_len, batch, input_size"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# h0 = torch.randn(1, 3, 20)   # 初始化\n",
    "# h0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# c0 = torch.randn(1, 3, 20)\n",
    "# c0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "output, (hn, cn) = rnn(input_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([5, 3, 20])"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "output.size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.1023, -0.2195, -0.3629, -0.0977, -0.1240, -0.1357, -0.2260, -0.0529,\n",
       "          0.0173,  0.0663,  0.0418, -0.0313, -0.1414,  0.1390,  0.0395, -0.2833,\n",
       "          0.0786, -0.0190,  0.0141, -0.0871],\n",
       "        [-0.0481, -0.1797, -0.0957, -0.0579,  0.0646, -0.1703,  0.0248, -0.1381,\n",
       "         -0.1028,  0.1341,  0.1645,  0.0634, -0.1689,  0.0230, -0.0375, -0.1674,\n",
       "         -0.0029, -0.0914,  0.0958, -0.0738],\n",
       "        [-0.0141, -0.0795,  0.0149,  0.0237,  0.0830, -0.2079,  0.2409, -0.0418,\n",
       "         -0.0247,  0.2118,  0.1507, -0.0559, -0.1114, -0.0887, -0.2693, -0.0222,\n",
       "          0.0394, -0.0820, -0.0806, -0.0231]], grad_fn=<SelectBackward>)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "output[4]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[-0.1023, -0.2195, -0.3629, -0.0977, -0.1240, -0.1357, -0.2260,\n",
       "          -0.0529,  0.0173,  0.0663,  0.0418, -0.0313, -0.1414,  0.1390,\n",
       "           0.0395, -0.2833,  0.0786, -0.0190,  0.0141, -0.0871],\n",
       "         [-0.0481, -0.1797, -0.0957, -0.0579,  0.0646, -0.1703,  0.0248,\n",
       "          -0.1381, -0.1028,  0.1341,  0.1645,  0.0634, -0.1689,  0.0230,\n",
       "          -0.0375, -0.1674, -0.0029, -0.0914,  0.0958, -0.0738],\n",
       "         [-0.0141, -0.0795,  0.0149,  0.0237,  0.0830, -0.2079,  0.2409,\n",
       "          -0.0418, -0.0247,  0.2118,  0.1507, -0.0559, -0.1114, -0.0887,\n",
       "          -0.2693, -0.0222,  0.0394, -0.0820, -0.0806, -0.0231]]],\n",
       "       grad_fn=<StackBackward>)"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "hn"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 结论\n",
    "- 我们可以看到hn是最后一个时间步的隐藏状态输出，output是所有时间步的输出。因此output的最后一个元素和hn是一样的。\n",
    "- cn是细胞状态tensor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([1, 3, 20])"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "hn.size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([1, 3, 20])"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cn.size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[-0.2094, -0.5293, -0.7267, -0.1446, -0.1959, -0.2274, -0.4356,\n",
       "          -0.1363,  0.0553,  0.1174,  0.1804, -0.1037, -0.3693,  0.3561,\n",
       "           0.0795, -0.5906,  0.2023, -0.0374,  0.0280, -0.2041],\n",
       "         [-0.1044, -0.4315, -0.1768, -0.1072,  0.1064, -0.3795,  0.0522,\n",
       "          -0.3305, -0.2329,  0.2719,  0.3601,  0.1317, -0.4066,  0.0497,\n",
       "          -0.0728, -0.4711, -0.0061, -0.1587,  0.1639, -0.1596],\n",
       "         [-0.0240, -0.2553,  0.0266,  0.0601,  0.1633, -0.5237,  0.4116,\n",
       "          -0.0992, -0.0575,  0.3210,  0.2774, -0.1068, -0.2709, -0.1610,\n",
       "          -0.4528, -0.0570,  0.0676, -0.1944, -0.1193, -0.0365]]],\n",
       "       grad_fn=<StackBackward>)"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "fc = nn.Linear(20, 20)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "out = fc(output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([5, 3, 20])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "out.size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 0.0896, -0.0571, -0.0152, -0.0691, -0.0199, -0.0406,  0.2308, -0.2142,\n",
       "        -0.1767, -0.0639, -0.0081,  0.1343, -0.1078, -0.2064, -0.1733, -0.1182,\n",
       "         0.1361, -0.2758,  0.1620,  0.1076], grad_fn=<SelectBackward>)"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "out[0][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "rnn2 = nn.LSTM(10, 20, batch_first=True) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# batch, seq_len, word_vec_dim\n",
    "input_data2 = torch.randn(32, 5, 10)  # hc层size(layer_num, batch, out_dim)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "out, (hn, cn) = rnn2(input_data2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([32, 5, 20]) torch.Size([1, 32, 20]) torch.Size([1, 32, 20])\n"
     ]
    }
   ],
   "source": [
    "print(out.size(), hn.size(), cn.size())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "a = torch.randn(2).unsqueeze(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.4607, -0.4461]])"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 两个形状相同的tensor相乘"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "a = torch.tensor([[1,2,3], \n",
    "                  [2,3,4]])\n",
    "b = torch.tensor([[2,2,2], \n",
    "                  [2,2,1]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[2, 4, 6],\n",
       "        [4, 6, 4]])"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "c = a*b\n",
    "c"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 1.0323,  0.1239,  1.1077,  0.2164,  0.4767,  0.4025,  0.2399,\n",
       "          -0.1330,  0.5671,  0.1008],\n",
       "         [ 0.0843,  0.7463,  0.6705,  0.1488,  0.6736, -0.2464, -0.5746,\n",
       "           0.6086,  0.9737,  0.0711],\n",
       "         [ 1.8035,  0.8987, -0.6655, -1.0680, -0.5026,  0.5141,  1.4208,\n",
       "           0.2101,  0.4502,  1.3422],\n",
       "         [ 1.0504,  1.3057,  0.2206, -1.7455, -1.2488, -0.4145,  0.8532,\n",
       "           0.8481,  0.6439,  0.8128],\n",
       "         [ 0.7198,  0.6862,  0.3618,  0.1609, -0.0922, -0.3626,  0.5601,\n",
       "          -0.0230, -1.4171, -0.9066]],\n",
       "\n",
       "        [[ 1.2316,  0.4093, -0.0454, -0.0887, -0.0333, -0.4516,  0.8812,\n",
       "           0.6673, -2.0286,  0.5245],\n",
       "         [-0.6719, -1.0660,  1.0303, -2.0076, -0.1587, -0.7049, -0.4192,\n",
       "          -1.6840, -0.0913, -0.8211],\n",
       "         [-0.0361,  1.6295, -0.2098, -0.5681,  0.6524,  0.3682, -1.0326,\n",
       "           0.8690, -0.8374, -0.7785],\n",
       "         [-1.1685, -0.4528,  0.8089, -0.6248, -1.4823,  1.7067,  0.0343,\n",
       "          -0.1233, -0.4694,  0.7405],\n",
       "         [ 1.4695, -0.7846,  1.3884,  1.2469,  0.4381, -0.4211,  1.2470,\n",
       "           1.1484, -1.1954,  1.2651]]])"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.randn(2, 5, 10)\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([2, 5])"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "c = a.sum(2)\n",
    "c.size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[-0.1212, -1.1875, -0.8154, -0.2603,  0.1208],\n",
       "         [-1.7234,  0.9401,  0.5191, -1.1418,  0.0485],\n",
       "         [-0.5145,  0.5186,  0.4932,  0.5364, -0.8625]],\n",
       "\n",
       "        [[-0.2743, -0.4564,  1.8824,  0.5533,  2.1877],\n",
       "         [-0.9605,  1.0729,  0.4791, -0.9585, -0.0132],\n",
       "         [-1.3529, -0.8772,  0.2921, -0.8205, -0.1080]]])"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = torch.randn([2,3,5])\n",
    "x "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[0, 0, 0, 0, 1],\n",
       "         [0, 1, 1, 0, 1],\n",
       "         [0, 1, 1, 1, 0]],\n",
       "\n",
       "        [[0, 0, 1, 1, 1],\n",
       "         [0, 1, 1, 0, 0],\n",
       "         [0, 0, 1, 0, 0]]], dtype=torch.uint8)"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "mask = (x>0)\n",
    "mask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.1208, 0.9401, 0.5191, 0.0485, 0.5186, 0.4932, 0.5364, 1.8824, 0.5533,\n",
       "        2.1877, 1.0729, 0.4791, 0.2921])"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "d = x[mask]    # 压扁成一个列表\n",
    "d"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn.functional as F"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "s = F.logsigmoid(d)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(-0.4262)"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s.mean()   # 得到一个数字"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(-1.1681)"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.log(1 - torch.sigmoid(d)).mean()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## nn.Parameter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "?nn.Parameter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([10])"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "embed_dim = 10\n",
    "data = torch.randn(embed_dim)\n",
    "data.size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "u = nn.Parameter(data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Parameter containing:\n",
       "tensor([-0.6100,  0.1809,  0.7675, -0.5735,  0.1123,  0.4894, -0.8654, -1.0640,\n",
       "        -1.2618, -0.8684], requires_grad=True)"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "u"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 0.5720, -0.4873, -0.1925, -0.1972, -0.0626,  1.5768,  1.0680,\n",
       "           1.2321, -0.4564, -0.0703],\n",
       "         [ 0.5284, -0.7402,  0.4011,  0.0832, -1.9947, -0.9723,  0.9434,\n",
       "           0.4962,  0.3930,  0.5200],\n",
       "         [ 0.2165,  0.9300,  1.4055, -0.9435, -0.1495, -0.5030,  0.3633,\n",
       "           0.9413,  1.4385, -0.7606],\n",
       "         [ 0.2950,  0.9037, -0.9711,  0.0556, -1.5093, -1.8391, -0.6188,\n",
       "          -1.2471, -0.8059, -0.6884],\n",
       "         [ 1.0337, -0.6484,  2.1221,  0.9859,  2.1802, -2.2094, -0.0289,\n",
       "           0.8381,  0.3553,  0.2861],\n",
       "         [-0.1302,  1.4063,  0.4567, -1.1085,  1.9764, -0.1228,  0.9814,\n",
       "           0.9537, -0.6450,  0.7175],\n",
       "         [-0.1446, -0.1117,  0.0594, -1.7827, -0.4155, -0.0715,  0.0213,\n",
       "           0.7226,  0.6909,  1.2786],\n",
       "         [ 0.6332,  0.4564, -0.1636,  0.3665,  0.6061, -0.0314, -1.5227,\n",
       "           0.0369,  1.3917, -0.5979]],\n",
       "\n",
       "        [[-1.3836, -0.8652, -1.3644, -0.8995, -0.2949,  1.2752,  0.1303,\n",
       "          -0.1627, -0.0075,  0.5403],\n",
       "         [-0.6614,  0.1664, -1.5498,  0.5317,  0.2322, -1.9863, -1.3148,\n",
       "           1.4275,  0.2129, -0.4953],\n",
       "         [ 0.8388,  1.4253,  0.0090, -0.3771,  0.9039, -0.7850,  1.3202,\n",
       "          -0.2476, -0.6281, -0.1383],\n",
       "         [ 0.6462, -1.8107, -1.2915, -2.1775, -0.3828,  1.3362, -2.3330,\n",
       "          -0.5165, -1.5412,  0.8136],\n",
       "         [ 0.2981, -1.2633, -0.5012, -0.6578, -1.3874, -0.8003,  0.6939,\n",
       "          -0.1877, -0.8000,  0.0059],\n",
       "         [ 1.6177, -0.0819, -1.4117,  0.1513, -0.7835, -0.4023, -0.5751,\n",
       "           0.6236,  0.5185, -0.3257],\n",
       "         [-0.1068, -0.0104, -0.2874,  1.0380,  0.3855,  0.3236, -1.3724,\n",
       "           1.6858,  0.5404,  0.6867],\n",
       "         [-1.1484,  0.4463, -0.0490,  0.8154, -1.3611, -1.0358, -1.0115,\n",
       "          -1.4956, -2.3987, -0.7667]],\n",
       "\n",
       "        [[-0.7427,  0.3397,  1.4941, -0.3975,  0.6718,  1.2874,  0.5164,\n",
       "           0.7475,  1.2911, -0.7312],\n",
       "         [ 0.5112,  0.2203, -0.3803, -0.5714,  1.2393,  0.8208, -0.9187,\n",
       "          -0.0765,  0.6135, -0.7404],\n",
       "         [ 1.2199,  0.1526,  0.9363,  1.3509, -0.4017,  0.6033,  1.9226,\n",
       "           0.7188,  1.0180, -0.3175],\n",
       "         [-0.2175,  0.4603, -0.3155, -0.1717, -0.4508, -1.5037, -0.1207,\n",
       "           0.4838,  0.7382,  1.0409],\n",
       "         [-0.9812,  0.3711,  0.3704, -0.9340,  0.1421, -1.5631, -1.3793,\n",
       "          -0.6578, -1.3985, -0.4181],\n",
       "         [-0.3646, -1.0626,  0.4232,  0.9070, -2.3748,  0.1342, -0.0346,\n",
       "           0.3152,  0.8232, -1.2286],\n",
       "         [-0.4408, -0.4749, -0.8820, -0.4570,  0.7494, -0.8769, -0.9241,\n",
       "          -0.4344,  0.7485, -0.6219],\n",
       "         [ 1.4999, -0.0245, -0.8926, -0.4930, -0.1407, -1.3070,  1.9463,\n",
       "          -0.8084,  0.0198, -0.4131]],\n",
       "\n",
       "        [[ 1.6712,  0.5194,  0.8257, -1.2828, -1.3533,  0.5400,  1.3997,\n",
       "           2.2241,  0.3703,  0.0891],\n",
       "         [-0.1004,  0.3773,  2.5188,  0.2890,  0.9379,  0.7492,  0.9603,\n",
       "          -1.4914,  0.8701, -0.2691],\n",
       "         [ 0.0435, -0.1540,  0.3223, -1.1497, -0.4520,  0.3094,  0.3129,\n",
       "           0.4398, -0.1206,  0.2182],\n",
       "         [-1.1895, -3.0451,  1.2849, -0.6265, -0.7783,  1.2033, -0.3295,\n",
       "          -1.4985, -1.9538, -0.5790],\n",
       "         [-1.2533, -1.2236, -0.6427, -0.5228, -0.6071, -1.0755, -0.3622,\n",
       "          -1.4241, -1.7820,  0.3037],\n",
       "         [-1.5930, -0.3212, -1.1160, -0.5363, -1.2299, -0.6331, -0.4287,\n",
       "          -0.5349,  0.2315,  0.8747],\n",
       "         [-0.2205, -0.3187, -0.6099,  1.1537, -0.7896,  0.6231,  0.2791,\n",
       "          -1.3267, -1.3005,  0.5643],\n",
       "         [ 1.7372,  0.3188,  0.6492, -0.5857, -0.2604, -0.4235, -1.3535,\n",
       "           1.3899,  1.4772,  0.6812]]])"
      ]
     },
     "execution_count": 41,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "embed = torch.randn(4, 8, 10)  #[batch, seq_len, embed]\n",
    "embed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([4, 8, 10])"
      ]
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = u.repeat(embed.size(0), embed.size(1), 1)\n",
    "x.size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2212, -0.4839, -0.1610,  0.2061, -0.2068, -0.0250, -0.2900, -0.0944],\n",
       "        [ 0.0531, -0.2761, -0.0455,  0.3600, -0.0608, -0.4872, -0.3845,  0.6251],\n",
       "        [ 0.0520,  0.1996, -0.5176, -0.5650,  0.6570, -0.0836,  0.0536, -0.3301],\n",
       "        [-0.3879,  0.2709,  0.0507,  0.6181,  0.4528, -0.0298,  0.2379, -0.4050]],\n",
       "       grad_fn=<DivBackward0>)"
      ]
     },
     "execution_count": 70,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cos = F.cosine_similarity(embed, x, dim=2)\n",
    "cos"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.1154, 0.0888, 0.1226, 0.1769, 0.1171, 0.1404, 0.1077, 0.1310],\n",
       "        [0.1270, 0.0913, 0.1150, 0.1725, 0.1133, 0.0740, 0.0820, 0.2249],\n",
       "        [0.1307, 0.1515, 0.0739, 0.0705, 0.2393, 0.1141, 0.1309, 0.0892],\n",
       "        [0.0724, 0.1398, 0.1122, 0.1979, 0.1677, 0.1035, 0.1353, 0.0711]],\n",
       "       grad_fn=<SoftmaxBackward>)"
      ]
     },
     "execution_count": 77,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "alpha = F.softmax(cos, dim=1)  # 每个序列一行，每行的每个元素代表对应的单词的权重\n",
    "alpha"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 113,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(0.0660, grad_fn=<MulBackward0>) tensor(-0.0562, grad_fn=<MulBackward0>)\n"
     ]
    }
   ],
   "source": [
    "d = alpha.unsqueeze(2)\n",
    "print(embed[0][0][0]*d[0][0][0],embed[0][0][1]*d[0][0][0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 110,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 0.0660, -0.0562, -0.0222, -0.0228, -0.0072,  0.1820,  0.1233,\n",
       "           0.1422, -0.0527, -0.0081],\n",
       "         [ 0.0469, -0.0657,  0.0356,  0.0074, -0.1770, -0.0863,  0.0837,\n",
       "           0.0440,  0.0349,  0.0462],\n",
       "         [ 0.0265,  0.1140,  0.1723, -0.1157, -0.0183, -0.0617,  0.0445,\n",
       "           0.1154,  0.1763, -0.0932],\n",
       "         [ 0.0522,  0.1599, -0.1718,  0.0098, -0.2671, -0.3254, -0.1095,\n",
       "          -0.2207, -0.1426, -0.1218],\n",
       "         [ 0.1210, -0.0759,  0.2485,  0.1154,  0.2553, -0.2587, -0.0034,\n",
       "           0.0981,  0.0416,  0.0335],\n",
       "         [-0.0183,  0.1975,  0.0641, -0.1557,  0.2776, -0.0173,  0.1378,\n",
       "           0.1339, -0.0906,  0.1008],\n",
       "         [-0.0156, -0.0120,  0.0064, -0.1921, -0.0448, -0.0077,  0.0023,\n",
       "           0.0779,  0.0744,  0.1378],\n",
       "         [ 0.0830,  0.0598, -0.0214,  0.0480,  0.0794, -0.0041, -0.1995,\n",
       "           0.0048,  0.1823, -0.0783]],\n",
       "\n",
       "        [[-0.1756, -0.1098, -0.1732, -0.1142, -0.0374,  0.1619,  0.0165,\n",
       "          -0.0207, -0.0010,  0.0686],\n",
       "         [-0.0604,  0.0152, -0.1416,  0.0486,  0.0212, -0.1814, -0.1201,\n",
       "           0.1304,  0.0194, -0.0452],\n",
       "         [ 0.0965,  0.1640,  0.0010, -0.0434,  0.1040, -0.0903,  0.1519,\n",
       "          -0.0285, -0.0723, -0.0159],\n",
       "         [ 0.1115, -0.3124, -0.2228, -0.3757, -0.0661,  0.2306, -0.4025,\n",
       "          -0.0891, -0.2659,  0.1404],\n",
       "         [ 0.0338, -0.1431, -0.0568, -0.0745, -0.1572, -0.0907,  0.0786,\n",
       "          -0.0213, -0.0906,  0.0007],\n",
       "         [ 0.1196, -0.0061, -0.1044,  0.0112, -0.0580, -0.0298, -0.0425,\n",
       "           0.0461,  0.0383, -0.0241],\n",
       "         [-0.0087, -0.0009, -0.0236,  0.0851,  0.0316,  0.0265, -0.1125,\n",
       "           0.1382,  0.0443,  0.0563],\n",
       "         [-0.2583,  0.1004, -0.0110,  0.1834, -0.3062, -0.2330, -0.2275,\n",
       "          -0.3364, -0.5395, -0.1725]],\n",
       "\n",
       "        [[-0.0970,  0.0444,  0.1952, -0.0519,  0.0878,  0.1682,  0.0675,\n",
       "           0.0977,  0.1687, -0.0955],\n",
       "         [ 0.0774,  0.0334, -0.0576, -0.0866,  0.1877,  0.1243, -0.1392,\n",
       "          -0.0116,  0.0929, -0.1121],\n",
       "         [ 0.0902,  0.0113,  0.0692,  0.0999, -0.0297,  0.0446,  0.1421,\n",
       "           0.0531,  0.0753, -0.0235],\n",
       "         [-0.0153,  0.0325, -0.0222, -0.0121, -0.0318, -0.1060, -0.0085,\n",
       "           0.0341,  0.0520,  0.0734],\n",
       "         [-0.2348,  0.0888,  0.0886, -0.2235,  0.0340, -0.3740, -0.3301,\n",
       "          -0.1574, -0.3346, -0.1000],\n",
       "         [-0.0416, -0.1212,  0.0483,  0.1035, -0.2710,  0.0153, -0.0039,\n",
       "           0.0360,  0.0939, -0.1402],\n",
       "         [-0.0577, -0.0622, -0.1154, -0.0598,  0.0981, -0.1148, -0.1209,\n",
       "          -0.0569,  0.0980, -0.0814],\n",
       "         [ 0.1338, -0.0022, -0.0796, -0.0440, -0.0125, -0.1166,  0.1736,\n",
       "          -0.0721,  0.0018, -0.0368]],\n",
       "\n",
       "        [[ 0.1209,  0.0376,  0.0598, -0.0928, -0.0979,  0.0391,  0.1013,\n",
       "           0.1609,  0.0268,  0.0064],\n",
       "         [-0.0140,  0.0528,  0.3522,  0.0404,  0.1312,  0.1048,  0.1343,\n",
       "          -0.2086,  0.1217, -0.0376],\n",
       "         [ 0.0049, -0.0173,  0.0362, -0.1290, -0.0507,  0.0347,  0.0351,\n",
       "           0.0493, -0.0135,  0.0245],\n",
       "         [-0.2354, -0.6026,  0.2543, -0.1240, -0.1540,  0.2381, -0.0652,\n",
       "          -0.2965, -0.3866, -0.1146],\n",
       "         [-0.2102, -0.2052, -0.1078, -0.0877, -0.1018, -0.1804, -0.0608,\n",
       "          -0.2389, -0.2989,  0.0509],\n",
       "         [-0.1649, -0.0332, -0.1155, -0.0555, -0.1273, -0.0655, -0.0444,\n",
       "          -0.0554,  0.0240,  0.0906],\n",
       "         [-0.0298, -0.0431, -0.0825,  0.1561, -0.1068,  0.0843,  0.0378,\n",
       "          -0.1795, -0.1760,  0.0763],\n",
       "         [ 0.1236,  0.0227,  0.0462, -0.0417, -0.0185, -0.0301, -0.0963,\n",
       "           0.0989,  0.1051,  0.0485]]], grad_fn=<MulBackward0>)"
      ]
     },
     "execution_count": 110,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "embed*alpha.unsqueeze(2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.3618,  0.3213,  0.3114, -0.3055,  0.0978, -0.5791,  0.0793,  0.3957,\n",
       "          0.2237,  0.0167],\n",
       "        [-0.1417, -0.2927, -0.7323, -0.2796, -0.4680, -0.2061, -0.6581, -0.1812,\n",
       "         -0.8672,  0.0082],\n",
       "        [-0.1451,  0.0247,  0.1265, -0.2745,  0.0626, -0.3589, -0.2194, -0.0770,\n",
       "          0.2479, -0.5162],\n",
       "        [-0.4050, -0.7885,  0.4427, -0.3342, -0.5260,  0.2249,  0.0418, -0.6697,\n",
       "         -0.5975,  0.1450]], grad_fn=<SumBackward2>)"
      ]
     },
     "execution_count": 90,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y = torch.sum(embed*alpha.unsqueeze(2), dim=1)\n",
    "y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([4, 10])"
      ]
     },
     "execution_count": 91,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y.size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.3618,  0.3213,  0.3114, -0.3055,  0.0978, -0.5791,  0.0793,  0.3957,\n",
       "          0.2237,  0.0167],\n",
       "        [-0.1417, -0.2927, -0.7323, -0.2796, -0.4680, -0.2061, -0.6581, -0.1812,\n",
       "         -0.8672,  0.0082],\n",
       "        [-0.1451,  0.0247,  0.1265, -0.2745,  0.0626, -0.3589, -0.2194, -0.0770,\n",
       "          0.2479, -0.5162],\n",
       "        [-0.4050, -0.7885,  0.4427, -0.3342, -0.5260,  0.2249,  0.0418, -0.6697,\n",
       "         -0.5975,  0.1450]], grad_fn=<SqueezeBackward1>)"
      ]
     },
     "execution_count": 92,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y.squeeze(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.4853],\n",
       "        [0.5154],\n",
       "        [0.4474],\n",
       "        [0.1978]], grad_fn=<AddmmBackward>)"
      ]
     },
     "execution_count": 97,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "fx = nn.Linear(10,1)(y)\n",
    "fx"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.6190],\n",
       "        [0.6261],\n",
       "        [0.6100],\n",
       "        [0.5493]], grad_fn=<SigmoidBackward>)"
      ]
     },
     "execution_count": 101,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "score = torch.sigmoid(fx) # 0-1之间的数\n",
    "score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.],\n",
       "        [1.],\n",
       "        [1.],\n",
       "        [1.]], grad_fn=<RoundBackward>)"
      ]
     },
     "execution_count": 106,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.round(score) #返回相邻最近的整数，四舍五入"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## torch.nn.functional.cosine_similarity计算方法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[ 0.3437,  0.3895,  1.4366],\n",
      "        [-3.1125, -0.4749, -0.6235]]) tensor([[-0.9672,  0.4503, -0.3470],\n",
      "        [ 0.2393,  1.8958, -1.3947]])\n"
     ]
    }
   ],
   "source": [
    "x1 = torch.randn(2,3)\n",
    "x2 = torch.randn(2,3)\n",
    "print(x1, x2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-0.3825, -0.1021])"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "F.cosine_similarity(x1, x2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(-0.3825)"
      ]
     },
     "execution_count": 64,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# cosine_similarity的计算公式\n",
    "sum(x1[0]*x2[0]) / (torch.norm(x1[0])*torch.norm(x2[0])+1e-8)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 截断函数\n",
    "----\n",
    "- torch.ceil(input, out=None)   #返回向正方向取得最小整数\n",
    "- torch.floor(input, out=None)  #返回向负方向取得最大整数\n",
    "\n",
    "- torch.round(input, out=None)  #返回相邻最近的整数，四舍五入\n",
    "\n",
    "- torch.trunc(input, out=None)  #返回整数部分数值\n",
    "- torch.frac(tensor, out=None)  #返回小数部分数值\n",
    "\n",
    "- torch.fmod(input, divisor, out=None)  #返回input/divisor的余数\n",
    "- torch.remainder(input, divisor, out=None)  #同上"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## torch.matmul\n",
    "- mm只能进行矩阵乘法,也就是输入的两个tensor维度只能是(n×m)(n\\times m)(n×m)和(m×p)(m\\times p)(m×p)\n",
    "- bmm是两个三维张量相乘, 两个tensor维度是(b×n×m)和(b×m×p)得到 (b×n×p), 第一维b代表batch_size。\n",
    "- matmul可以进行张量乘法, 输入可以是高维。\n",
    "\n",
    "- scores = torch.matmul(x, x.transpose(-2, -1)) / math.sqrt(d_k)\n",
    "\n",
    "- [torch.transpose](https://pytorch.org/docs/stable/torch.html#torch.transpose) # tonser的转置"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 0.0608, -0.1447, -0.3057,  0.9118, -0.3778,  1.2587, -1.7109,\n",
       "          -1.0704,  0.4352, -2.0029],\n",
       "         [ 0.9735,  1.0512, -1.4398, -0.6600,  0.3955,  0.3646,  0.0474,\n",
       "          -1.5265,  0.0718,  0.4676],\n",
       "         [-0.5715, -1.2914,  0.1073, -0.3753,  0.5765, -0.9402, -1.1904,\n",
       "           0.5456,  0.1140, -0.1685],\n",
       "         [ 0.4498,  0.7682,  0.2298,  0.6669, -1.0050,  1.4804,  0.5312,\n",
       "           1.0791, -0.7552, -0.3538],\n",
       "         [-0.8476, -0.7348, -0.9190,  0.1542,  0.4850, -0.5129,  3.2134,\n",
       "           1.1763,  0.1987,  1.1999]],\n",
       "\n",
       "        [[ 1.1443,  0.6204,  0.6688, -0.7729,  0.1621,  0.0467,  0.8106,\n",
       "           1.8824, -0.2517,  1.4625],\n",
       "         [ 0.7914,  0.0971,  0.2454,  0.2563,  1.5868, -0.2585, -0.5065,\n",
       "          -1.3138, -0.2943,  0.3584],\n",
       "         [ 1.2666,  0.3284,  1.6281,  0.7337, -1.0108,  0.1650, -0.0435,\n",
       "           0.9840, -0.8043,  0.7147],\n",
       "         [-1.0370,  0.2351, -1.2820,  0.9013,  1.2597, -1.7748, -2.1565,\n",
       "           0.6012,  2.0741, -0.4460],\n",
       "         [ 1.0765, -0.1866, -1.2976, -0.2885,  0.0313, -0.0154,  1.1737,\n",
       "          -1.2480,  1.9115,  1.0470]]])"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x1 = torch.randn(2, 5, 10)\n",
    "x1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 0.0608,  0.9735, -0.5715,  0.4498, -0.8476],\n",
       "         [-0.1447,  1.0512, -1.2914,  0.7682, -0.7348],\n",
       "         [-0.3057, -1.4398,  0.1073,  0.2298, -0.9190],\n",
       "         [ 0.9118, -0.6600, -0.3753,  0.6669,  0.1542],\n",
       "         [-0.3778,  0.3955,  0.5765, -1.0050,  0.4850],\n",
       "         [ 1.2587,  0.3646, -0.9402,  1.4804, -0.5129],\n",
       "         [-1.7109,  0.0474, -1.1904,  0.5312,  3.2134],\n",
       "         [-1.0704, -1.5265,  0.5456,  1.0791,  1.1763],\n",
       "         [ 0.4352,  0.0718,  0.1140, -0.7552,  0.1987],\n",
       "         [-2.0029,  0.4676, -0.1685, -0.3538,  1.1999]],\n",
       "\n",
       "        [[ 1.1443,  0.7914,  1.2666, -1.0370,  1.0765],\n",
       "         [ 0.6204,  0.0971,  0.3284,  0.2351, -0.1866],\n",
       "         [ 0.6688,  0.2454,  1.6281, -1.2820, -1.2976],\n",
       "         [-0.7729,  0.2563,  0.7337,  0.9013, -0.2885],\n",
       "         [ 0.1621,  1.5868, -1.0108,  1.2597,  0.0313],\n",
       "         [ 0.0467, -0.2585,  0.1650, -1.7748, -0.0154],\n",
       "         [ 0.8106, -0.5065, -0.0435, -2.1565,  1.1737],\n",
       "         [ 1.8824, -1.3138,  0.9840,  0.6012, -1.2480],\n",
       "         [-0.2517, -0.2943, -0.8043,  2.0741,  1.9115],\n",
       "         [ 1.4625,  0.3584,  0.7147, -0.4460,  1.0470]]])"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x2 = x1.transpose(-2, -1)\n",
    "x2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[10.9509,  0.7026,  0.2158,  1.0130, -9.4263],\n",
       "         [ 0.7026,  7.4071, -2.8955, -1.2250, -1.4394],\n",
       "         [ 0.2158, -2.8955,  5.1194, -3.5163, -1.3244],\n",
       "         [ 1.0130, -1.2250, -3.5163,  6.6340,  0.1013],\n",
       "         [-9.4263, -1.4394, -1.3244,  0.1013, 15.8137]],\n",
       "\n",
       "        [[ 9.1703, -1.1083,  5.0835, -4.2642,  0.1277],\n",
       "         [-1.1083,  5.5442, -0.8025,  1.1083,  1.3530],\n",
       "         [ 5.0835, -0.8025,  8.0778, -5.5296, -3.1245],\n",
       "         [-4.2642,  1.1083, -5.5296, 17.8355,  0.5264],\n",
       "         [ 0.1277,  1.3530, -3.1245,  0.5264, 10.6467]]])"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y = torch.matmul(x1, x2) #2, 5,10 * 2, 10, 5\n",
    "y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([2, 5, 5])"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y.size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 5.4754,  0.3513,  0.1079,  0.5065, -4.7131],\n",
       "         [ 0.3513,  3.7035, -1.4478, -0.6125, -0.7197],\n",
       "         [ 0.1079, -1.4478,  2.5597, -1.7581, -0.6622],\n",
       "         [ 0.5065, -0.6125, -1.7581,  3.3170,  0.0506],\n",
       "         [-4.7131, -0.7197, -0.6622,  0.0506,  7.9068]],\n",
       "\n",
       "        [[ 4.5851, -0.5541,  2.5418, -2.1321,  0.0639],\n",
       "         [-0.5541,  2.7721, -0.4012,  0.5542,  0.6765],\n",
       "         [ 2.5418, -0.4012,  4.0389, -2.7648, -1.5622],\n",
       "         [-2.1321,  0.5542, -2.7648,  8.9177,  0.2632],\n",
       "         [ 0.0639,  0.6765, -1.5622,  0.2632,  5.3234]]])"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y / 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "a = '微信 是否 会 收费 ？ 近日 工信部 和 腾讯 公司 的 不同 回应 让 微信 的 未来 显得 扑朔迷离 。 随着 3G 网络 的 普及 ， 许多 国家 和 地区 都 有 像 “ 微信 ” 这样 能够 实现 即时通讯 、 通话 的 手机 应用 。 “ 微信 ” 在 国外 什么样 ？ 它们 收费 吗 ？ 一张 图带 你 了解 海外 “ 微信 ” 。 ____ ____ '  \n",
    "b = '“ 微信 ” 在 海外'\n",
    "c = '[ 话筒 ] “ 微信 ” 在 国外 什么样 ？ 它们 收费 吗 ？ [ 话筒 ] [ 思考 ] [ 吃惊 ] [ 吃惊 ]'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--------------------------------------------------------------------------------------------------------------\n",
      "原   文:  \u001b[01;34m 微信 是否 会 收费 ？ 近日 工信部 和 腾讯 公司 的 不同 回应 让 微信 的 未来 显得 扑朔迷离 。 随着 3G 网络 的 普及 ， 许多 国家 和 地区 都 有 像 “ 微信 ” 这样 能够 实现 即时通讯 、 通话 的 手机 应用 。 “ 微信 ” 在 国外 什么样 ？ 它们 收费 吗 ？ 一张 图带 你 了解 海外 “ 微信 ” 。 ____ ____  \u001b[0m\n",
      "\n",
      "参考摘要: \u001b[01;35m “ 微信 ” 在 海外 \u001b[0m\n",
      "\n",
      "生成摘要: \u001b[01;36m [ 话筒 ] “ 微信 ” 在 国外 什么样 ？ 它们 收费 吗 ？ [ 话筒 ] [ 思考 ] [ 吃惊 ] [ 吃惊 ] \u001b[0m\n",
      "\n",
      "--------------------------------------------------------------------------------------------------------------\n"
     ]
    }
   ],
   "source": [
    "print('-' * 110)\n",
    "print('{}  \\033[01;34m {} \\033[0m\\n'.format('原   文:', a))\n",
    "print('{} \\033[01;35m {} \\033[0m\\n'.format('参考摘要:', b))\n",
    "print('{} \\033[01;36m {} \\033[0m\\n'.format('生成摘要:', c))\n",
    "print('-' * 110)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "a = '日前 ， 教育部 公布 2012 年度 普通 高等学校 本科专业 设置 备案 或 审批 结果 。 全国 高校 258 个 专业 未 通过 审批 ， 且 很多 是 当下 热门 专业 ， 如 法学 、 会计 、 工商管理 等 ， 有些 专业 已经 被 列入 教育部 的 预警 专业 。 教育界 人士 分析 ， 人才 市场 的 需求 已 开始 出现 “ 供大于求 ” 现象 。'  \n",
    "b = '258 个 专业 被 否决 !!____!! !!____!! 部分 热门 专业 遭 “ 预警 ”'\n",
    "c = '教育部 ： 全国 高校 258 个 专业 未 通过 审批 ， 且 很多 是 当下 热门 专业 ， 如 法学 、 会计 、 工商管理 等 专业 已经 审批 结果'\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--------------------------------------------------------------------------------------------------------------\n",
      "原   文:  \u001b[01;34m 日前 ， 教育部 公布 2012 年度 普通 高等学校 本科专业 设置 备案 或 审批 结果 。 全国 高校 258 个 专业 未 通过 审批 ， 且 很多 是 当下 热门 专业 ， 如 法学 、 会计 、 工商管理 等 ， 有些 专业 已经 被 列入 教育部 的 预警 专业 。 教育界 人士 分析 ， 人才 市场 的 需求 已 开始 出现 “ 供大于求 ” 现象 。 \u001b[0m\n",
      "\n",
      "参考摘要: \u001b[01;35m 258 个 专业 被 否决 !!____!! !!____!! 部分 热门 专业 遭 “ 预警 ” \u001b[0m\n",
      "\n",
      "生成摘要: \u001b[01;36m 教育部 ： 全国 高校 258 个 专业 未 通过 审批 ， 且 很多 是 当下 热门 专业 ， 如 法学 、 会计 、 工商管理 等 专业 已经 审批 结果 \u001b[0m\n",
      "\n",
      "--------------------------------------------------------------------------------------------------------------\n"
     ]
    }
   ],
   "source": [
    "print('-' * 110)\n",
    "print('{}  \\033[01;34m {} \\033[0m\\n'.format('原   文:', a))\n",
    "print('{} \\033[01;35m {} \\033[0m\\n'.format('参考摘要:', b))\n",
    "print('{} \\033[01;36m {} \\033[0m\\n'.format('生成摘要:', c))\n",
    "print('-' * 110)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
