{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2024-12-14T07:18:03.635476Z",
     "start_time": "2024-12-14T07:18:02.693595Z"
    }
   },
   "source": [
    "# pytorch  nn.Embedding详解\n",
    "# https://blog.csdn.net/lsb2002/article/details/132993128\n",
    "# embedding如何处理文本\n",
    "\n",
    "# 一、nn.Embedding词向量转化\n",
    "# 在PyTorch中，nn.Embedding用来实现词与词向量的映射。nn.Embedding具有一个权重（.weight），形状是(num_words, embedding_dim)。例如一共有100个词，每个词用16维向量表征，对应的权重就是一个100×16的矩阵。\n",
    "# \n",
    "# Embedding的输入形状N×W，N是batch size，W是序列的长度，输出的形状是N×W×embedding_dim。\n",
    "# \n",
    "# Embedding输入必须是LongTensor，FloatTensor需通过tensor.long()方法转成LongTensor。\n",
    "# \n",
    "# Embedding的权重是可以训练的，既可以采用随机初始化，也可以采用预训练好的词向量初始化。\n",
    "\n",
    "\n",
    "# 二、embedding如何处理文本\n",
    "# 在NLP任务中，首先要对文本进行处理，将文本进行编码转换，形成向量表达，embedding处理文本的流程如下：\n",
    "# \n",
    "# （1）输入一段文本，中文会先分词（如jieba分词），英文会按照空格提取词\n",
    "# \n",
    "# （2）首先将单词转成字典的形式，由于英语中以空格为词的分割，所以可以直接建立词典索引结构。类似于：word2id = {'i' : 1, 'like' : 2, 'you' : 3, 'want' : 4, 'an' : 5, 'apple' : 6} 这样的形式。如果是中文的话，首先进行分词操作。\n",
    "# \n",
    "# （3）然后再以句子为list，为每个句子建立索引结构，list [ [ sentence1 ] , [ sentence2 ] ] 。以上面字典的索引来说，最终建立的就是 [ [ 1 , 2 , 3 ] , [ 1 , 4 , 5 , 6 ] ] 。这样就得到了长短不一的句子索引列表\n",
    "# \n",
    "# （4）接下来要进行padding的操作。由于tensor结构中都是等长的，所以要对上面那样的句子做padding操作后再利用 nn.Embedding 来进行词的初始化。padding后的可能是这样的结构\n",
    "# \n",
    "# [ [ 1 , 2 , 3, 0 ] , [ 1 , 4 , 5 , 6 ] ] 。其中0作为填充。（注意：由于在NMT任务中肯定存在着填充问题，所以在embedding时一定存在着第三个参数，让某些索引下的值为0，代表无实际意义的填充）\n",
    "\n",
    "# 注意：\n",
    "# 句子中的ID不能大于等于词表大小num_embeddings（上面例子中词表大小为10，所以ID最大只能是9）\n",
    "# embedding的输入必须是维度对齐的，如果长度不够，需要预先做填充\n",
    "\n",
    "\n",
    "import torch\n",
    "from torch import nn\n",
    " \n",
    "# 创建最大词个数为10，每个词用维度为512表示\n",
    "embedding = nn.Embedding(10, 512)\n",
    " \n",
    "# 将第一个句子填充0，与第二个句子长度对齐，batch_size * seq_len （2 * 6），1、2、3...表示一个一个的token\n",
    "in_vector = torch.LongTensor([[1, 2, 3, 4, 0, 0], [1, 2, 5, 6, 5, 7]])\n",
    "out_emb = embedding(in_vector)  # 将输入向量进行词嵌入之后，就得到了嵌入向量 batch_size * seq_len * d_model\n",
    "print(in_vector.shape)\n",
    "print((out_emb.shape))\n",
    "print(out_emb)\n",
    "print(embedding.weight)"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 6])\n",
      "torch.Size([2, 6, 512])\n",
      "tensor([[[ 0.3131,  1.2353,  2.1049,  ..., -0.3827,  0.7093, -0.7142],\n",
      "         [ 1.1214,  0.1395,  2.2871,  ..., -0.8094,  0.7524, -0.5392],\n",
      "         [-0.1618, -1.4677, -0.2643,  ..., -1.4826,  1.0821,  0.1322],\n",
      "         [ 0.7712, -0.4914, -1.1880,  ..., -0.9657,  1.9504,  0.2596],\n",
      "         [-0.2283, -0.3825,  0.7178,  ...,  2.7660,  0.9905,  0.7628],\n",
      "         [-0.2283, -0.3825,  0.7178,  ...,  2.7660,  0.9905,  0.7628]],\n",
      "\n",
      "        [[ 0.3131,  1.2353,  2.1049,  ..., -0.3827,  0.7093, -0.7142],\n",
      "         [ 1.1214,  0.1395,  2.2871,  ..., -0.8094,  0.7524, -0.5392],\n",
      "         [-0.4910, -0.1690,  0.5884,  ...,  1.2395, -0.1635, -1.1895],\n",
      "         [ 0.7960,  1.0380,  0.1840,  ...,  1.1444,  1.1527, -1.2904],\n",
      "         [-0.4910, -0.1690,  0.5884,  ...,  1.2395, -0.1635, -1.1895],\n",
      "         [ 1.0001,  0.1865,  0.1418,  ...,  1.1899, -0.3619,  0.2042]]],\n",
      "       grad_fn=<EmbeddingBackward0>)\n",
      "Parameter containing:\n",
      "tensor([[-0.2283, -0.3825,  0.7178,  ...,  2.7660,  0.9905,  0.7628],\n",
      "        [ 0.3131,  1.2353,  2.1049,  ..., -0.3827,  0.7093, -0.7142],\n",
      "        [ 1.1214,  0.1395,  2.2871,  ..., -0.8094,  0.7524, -0.5392],\n",
      "        ...,\n",
      "        [ 1.0001,  0.1865,  0.1418,  ...,  1.1899, -0.3619,  0.2042],\n",
      "        [ 1.5374, -0.8177, -0.5011,  ...,  1.7644,  0.2689, -0.8946],\n",
      "        [ 1.6863,  0.4084,  0.3138,  ...,  0.3111, -0.5049,  1.4667]],\n",
      "       requires_grad=True)\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-14T07:18:18.239004Z",
     "start_time": "2024-12-14T07:18:16.544558Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 2.3 nn.Embedding的可学习性\n",
    "# nn.Embedding中的参数并不是一成不变的，它也是会参与梯度下降的。也就是更新模型参数也会更新nn.Embedding的参数，或者说nn.Embedding的参数本身也是模型参数的一部分。\n",
    "\n",
    "import torch\n",
    "from torch import nn\n",
    " \n",
    "# 创建最大词个数为10，每个词用维度为4表示\n",
    "embedding = nn.Embedding(10, 4)\n",
    " \n",
    "# 将第一个句子填充0，与第二个句子长度对齐\n",
    "in_vector = torch.LongTensor([[1, 2, 3, 4, 0, 0], [1, 2, 5, 6, 5, 7]])\n",
    " \n",
    "optimizer = torch.optim.SGD(embedding.parameters(), lr=0.01)\n",
    "criteria = nn.MSELoss()\n",
    " \n",
    "for i in range(3000):\n",
    "    outputs = embedding(torch.LongTensor([1, 2, 3, 4]))\n",
    "    loss = criteria(outputs, torch.ones(4, 4))  # 本轮输出为outputs，样本为torch.ones(4,4)\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "# 最后训练完，下标为1、2、3、4的token的权重值都接近1 \n",
    "print(embedding.weight)\n",
    "new_output = embedding(in_vector)\n",
    "print(new_output)"
   ],
   "id": "47e585109deb1b4b",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Parameter containing:\n",
      "tensor([[-0.0594,  1.5142, -1.7659, -0.5419],\n",
      "        [ 1.0002,  0.9743,  1.0132,  1.0119],\n",
      "        [ 0.9895,  0.9978,  0.9978,  0.9531],\n",
      "        [ 0.9697,  0.9745,  0.9965,  0.9647],\n",
      "        [ 1.0354,  1.0213,  0.9466,  1.0002],\n",
      "        [-0.6298,  0.4688,  1.4144,  1.0650],\n",
      "        [-0.6843,  0.5656,  0.4387, -0.4374],\n",
      "        [ 1.7643, -0.5512, -0.3729, -0.2927],\n",
      "        [-0.0634, -0.0638,  1.0794, -1.9515],\n",
      "        [-1.1076,  0.4795,  1.6161, -0.2121]], requires_grad=True)\n",
      "tensor([[[ 1.0002,  0.9743,  1.0132,  1.0119],\n",
      "         [ 0.9895,  0.9978,  0.9978,  0.9531],\n",
      "         [ 0.9697,  0.9745,  0.9965,  0.9647],\n",
      "         [ 1.0354,  1.0213,  0.9466,  1.0002],\n",
      "         [-0.0594,  1.5142, -1.7659, -0.5419],\n",
      "         [-0.0594,  1.5142, -1.7659, -0.5419]],\n",
      "\n",
      "        [[ 1.0002,  0.9743,  1.0132,  1.0119],\n",
      "         [ 0.9895,  0.9978,  0.9978,  0.9531],\n",
      "         [-0.6298,  0.4688,  1.4144,  1.0650],\n",
      "         [-0.6843,  0.5656,  0.4387, -0.4374],\n",
      "         [-0.6298,  0.4688,  1.4144,  1.0650],\n",
      "         [ 1.7643, -0.5512, -0.3729, -0.2927]]], grad_fn=<EmbeddingBackward0>)\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# pytorch nn.Dropout\n",
    "# https://blog.csdn.net/weixin_47050107/article/details/122722516\n",
    "# 一句话总结：Dropout的是为了防止过拟合而设置\n",
    "# 详解部分：\n",
    "# 1.Dropout是为了防止过拟合而设置的\n",
    "# 2.Dropout顾名思义有丢掉的意思\n",
    "# 3.nn.Dropout(p = 0.3) # 表示每个神经元有0.3的可能性不被激活\n",
    "# 4.Dropout只能用在训练部分而不能用在测试部分\n",
    "# 5.Dropout一般用在全连接神经网络映射层之后，如代码的nn.Linear(20, 30)之后\n",
    "\n",
    "# 1.nn.Dropout用法一\n",
    "class Dropout(nn.Module):\n",
    "\tdef __init__(self):\n",
    "\t\tsuper(Dropout, self).__init__()\n",
    "\t\tself.linear = nn.Linear(20, 40)\n",
    "\t\tself.dropout = nn.Dropout(p = 0.3) # p=0.3表示下图（a）中的神经元有p = 0.3的概率不被激活\n",
    "\n",
    "\tdef forward(self, inputs):\n",
    "\t\tout = self.linear(inputs)\n",
    "\t\tout = self.dropout(out)\n",
    "\t\treturn out\n",
    "\n",
    "net = Dropout()\n",
    "# Dropout只能用在train而不能用在test\t\n",
    "\n",
    "\n",
    "\n",
    "# nn.Dropout用法二\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "a = torch.randn(4, 4)\n",
    "print(a)\n",
    "\"\"\"\n",
    "tensor([[ 1.2615, -0.6423, -0.4142,  1.2982],\n",
    "        [ 0.2615,  1.3260, -1.1333, -1.6835],\n",
    "        [ 0.0370, -1.0904,  0.5964, -0.1530],\n",
    "        [ 1.1799, -0.3718,  1.7287, -1.5651]])\n",
    "\"\"\"\n",
    "dropout = nn.Dropout()\n",
    "b = dropout(a)\n",
    "print(b)\n",
    "\"\"\"\n",
    "tensor([[ 2.5230, -0.0000, -0.0000,  2.5964],\n",
    "        [ 0.0000,  0.0000, -0.0000, -0.0000],\n",
    "        [ 0.0000, -0.0000,  1.1928, -0.3060],\n",
    "        [ 0.0000, -0.7436,  0.0000, -3.1303]])\n",
    "\"\"\"\n",
    "# 由以上代码可知Dropout还可以将部分tensor中的值置为0\n",
    "\n"
   ],
   "id": "25e9c76b458a40c8"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-10T08:18:30.308578Z",
     "start_time": "2024-11-10T08:18:30.252435Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# unsqueeze 扩充维度\n",
    "max_len = 10\n",
    "position = torch.arange(0, max_len, dtype=torch.float)\n",
    "print(f'{position}')\n",
    "position = position.unsqueeze(1)  # shape由[10]扩充为[10,1]\n",
    "print(f'{position}')"
   ],
   "id": "34ea9ea7880b08ca",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])\n",
      "tensor([[0.],\n",
      "        [1.],\n",
      "        [2.],\n",
      "        [3.],\n",
      "        [4.],\n",
      "        [5.],\n",
      "        [6.],\n",
      "        [7.],\n",
      "        [8.],\n",
      "        [9.]])\n"
     ]
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-10T09:28:06.328800Z",
     "start_time": "2024-11-10T09:28:06.300708Z"
    }
   },
   "cell_type": "code",
   "source": [
    " import math\n",
    " d_model = 20\n",
    " even = torch.arange(0, d_model, 2).to(torch.float32)\n",
    " print(f'even={even}')  # 偶数序列\n",
    " div_term = torch.exp(even * (-math.log(10000.0) / d_model))\n",
    " div_term2 = 1 / torch.pow(10000.0, (even / d_model))\n",
    " print(f'div_term={div_term}')\n",
    " print(f'div_term2={div_term2}')\n",
    " \n",
    " t = torch.tensor([math.log(123.0)])\n",
    " print(torch.exp(t * 2))\n",
    " print(torch.pow(torch.exp(t), 2))\n",
    " "
   ],
   "id": "28ee4dc5020a122a",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "even=tensor([ 0.,  2.,  4.,  6.,  8., 10., 12., 14., 16., 18.])\n",
      "div_term=tensor([1.0000e+00, 3.9811e-01, 1.5849e-01, 6.3096e-02, 2.5119e-02, 1.0000e-02,\n",
      "        3.9811e-03, 1.5849e-03, 6.3096e-04, 2.5119e-04])\n",
      "div_term2=tensor([1.0000e+00, 3.9811e-01, 1.5849e-01, 6.3096e-02, 2.5119e-02, 1.0000e-02,\n",
      "        3.9811e-03, 1.5849e-03, 6.3096e-04, 2.5119e-04])\n",
      "tensor([15128.9990])\n",
      "tensor([15129.])\n"
     ]
    }
   ],
   "execution_count": 14
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-14T07:18:40.498343Z",
     "start_time": "2024-12-14T07:18:40.491254Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# pytorch的广播机制\n",
    "# PyTorch中的广播机制是指在进行Tensor操作时，小尺寸的Tensor会根据一定的规则自动扩展以匹配较大Tensor的尺寸的机制。这在进行元素间操作时非常有用，例如加法和乘法。\n",
    "# \n",
    "# 广播规则简述如下：\n",
    "# \n",
    "# 当两个Tensor在任何维度上尺寸不一致时，会尝试根据以下规则进行扩展：\n",
    "# \n",
    "# 如果其中一个Tensor的某个维度尺寸是1，那么它可以和另一个Tensor的对应维度大小相匹配。\n",
    "# \n",
    "# 如果两个Tensor在对应的维度上有相同的尺寸，或者其中一个Tensor在该维度上的尺寸是1，则可以进行广播。\n",
    "# \n",
    "# 如果两个Tensor在所有维度上都是相同的尺寸或其中一个是1，则可以进行广播。\n",
    "# \n",
    "# 以下是一个简单的例子，演示了如何在PyTorch中使用广播机制：\n",
    "\n",
    "import torch\n",
    " \n",
    "# 创建两个形状不同的Tensor\n",
    "a = torch.randn(1)\n",
    "b = torch.randn(3)\n",
    " \n",
    "# 由于a的大小是1，可以广播以匹配b在任何维度\n",
    "c = a + b  # 等价于 b + a，因为加法是匹配的\n",
    "print(a)\n",
    "print(b)\n",
    "print(c)"
   ],
   "id": "1b332b1b66b04557",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([-0.9795])\n",
      "tensor([ 1.0306,  1.4925, -2.2282])\n",
      "tensor([ 0.0511,  0.5130, -3.2078])\n"
     ]
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-14T07:18:43.142245Z",
     "start_time": "2024-12-14T07:18:43.130246Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 位置编码 Positional Embedding\n",
    "# 位置编码矩阵元素公式：\n",
    "# 偶数项：PE(pos, 2i) = sin(pos / 10000^(2i/d_model))\n",
    "# 奇数项：PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model))\n",
    "\n",
    "# 位置编码的实现直接对照着公式写就行，上面这个代码只是其中一种实现方式。\n",
    "# 【注意】pos代表的是单词在句子中的绝对索引位置，例如max_len是128，那么索引就是从0,1,2,…,127，假设d_model是512，即用一个512维tensor来编码一个索引位置，那么0<=2i<512，0<=i<=255，那么2i对应取值就是0,2,4…510，即偶数位置；2i+1的取值是1,3,5…511，即奇数位置。\n",
    "\n",
    "class PositionalEncoding(nn.Module):\n",
    "    # max_len即seq_len，d_model嵌入向量维度\n",
    "    def __init__(self, d_model, dropout=0.1, max_len=5000):\n",
    "        super(PositionalEncoding, self).__init__()\n",
    " \n",
    "        self.dropout = nn.Dropout(p=dropout)\n",
    "\t\t# 生成一个形状为[max_len,d_model]的全为0的tensor\n",
    "        pe = torch.zeros(max_len, d_model)\n",
    "        # position:[max_len,1]，即[5000,1]，这里插入一个维度是为了后面能够进行广播机制然后和div_term直接相乘\n",
    "        # 注意，要理解一下这里position的维度。每个pos都需要512个编码。\n",
    "        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n",
    "        # 奇数项和偶数项的分母是相同的 10000^(2i/d_model)\n",
    "        div_term = torch.pow(10000.0, torch.arange(0, d_model, 2).float() / d_model)\n",
    " \n",
    "        # 这里position * div_term有广播机制，因为div_term的形状为[d_model/2],即[256],符合广播条件，广播后两个tensor经过复制，形状都会变成[5000,256]，*表示两个tensor对应位置处的两个元素相乘\n",
    "        # 这里需要注意的是pe[:, 0::2]这个用法，:表示pe整个序列，0::2表示从0开始到最后面，步长为2，其实代表的就是偶数位置赋值给pe\n",
    "        pe[:, 0::2] = torch.sin(position / div_term)\n",
    "        # 同理，这里是奇数位置\n",
    "        pe[:, 1::2] = torch.cos(position / div_term)\n",
    "        # 上面代码获取之后得到的pe:[max_len, d_model]\n",
    "        # 下面这个代码之后，我们得到的pe形状是：[max_len, 1, d_model]\n",
    "        pe = pe.unsqueeze(0).transpose(0, 1)\n",
    "        print(f'pe.shape={pe.shape}')\n",
    "\t\t# 定义一个缓冲区，其实简单理解为这个参数不更新就可以，但是参数仍然作为模型的参数保存\n",
    "        self.register_buffer('pe', pe)  \n",
    " \n",
    "    def forward(self, x):\n",
    "        \"\"\"\n",
    "        x: [seq_len, batch_size, d_model]\n",
    "        \"\"\"\n",
    "        # 这里的self.pe是从缓冲区里拿的\n",
    "        # 切片操作，把pe第一维的前seq_len个tensor和x相加，其他维度不变\n",
    "        # 这里其实也有广播机制，pe:[max_len,1,d_model]，第二维大小为1，会自动扩张到batch_size大小。\n",
    "        # 实现词嵌入和位置编码的线性相加\n",
    "        # x：[seq_len, batch_size, d_model]，pe：[max_len, 1, d_model]，x和pe相加时，pe的第二维（大小为1）会广播为batch_size\n",
    "        x = x + self.pe[:x.size(0), :]\n",
    "        return self.dropout(x)\n",
    "\n",
    "# 测试\n",
    "d_model = 512\n",
    "tgt_vocab_size = 100  # 整个词表大小\n",
    "batch_size = 3\n",
    "seq_len = 20  # seq_len\n",
    "tgt_emb = nn.Embedding(tgt_vocab_size, d_model)\n",
    "pos_emb = PositionalEncoding(d_model=d_model, max_len=seq_len)\n",
    "dec_inputs = torch.rand([batch_size, seq_len]).long()  # [batch_size, seq_len]\n",
    "print(f'dec_inputs.shape={dec_inputs.shape}')\n",
    "target_embedding_outputs = tgt_emb(dec_inputs)  # [batch_size, target_len, d_model]\n",
    "print(f'target_embedding_outputs.shape={target_embedding_outputs.shape}')\n",
    "dec_outputs = pos_emb(target_embedding_outputs.transpose(0, 1)).transpose(0, 1) # [batch_size, tgt_len, d_model]\n",
    "print(f'dec_outputs.shape={dec_outputs.shape}')\n"
   ],
   "id": "f798bb406958ac91",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "pe.shape=torch.Size([20, 1, 512])\n",
      "dec_inputs.shape=torch.Size([3, 20])\n",
      "target_embedding_outputs.shape=torch.Size([3, 20, 512])\n",
      "dec_outputs.shape=torch.Size([3, 20, 512])\n"
     ]
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-14T07:18:47.363764Z",
     "start_time": "2024-12-14T07:18:47.359894Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Scaled DotProduct Attention：缩放点积注意力机制\n",
    "# 缩放点积注意力机制主要原理就是通过 Q 、K 计算出 scores，然后将 scores 和 V 进行matmul操作，即矩阵相乘，这样得到每个单词的 context vector。\n",
    "\n",
    "# 首先将 Q 和 K 的转置相乘，相乘之后得到的 scores 还不能立刻进行 softmax，需要和 attn_mask 相加，把一些需要屏蔽的信息屏蔽掉，attn_mask 是一个仅由 True 和 False 组成的 tensor，并且一定会保证 attn_mask 和 scores 的维度四个值相同（不然无法做对应位置相加）\n",
    "# \n",
    "# mask 完了之后，就可以对 scores 进行 softmax 了。然后再与 V 相乘，得到 context。\n",
    "\n",
    "\n",
    "class ScaledDotProductAttention(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(ScaledDotProductAttention, self).__init__()\n",
    " \n",
    "    def forward(self, Q, K, V, attn_mask):\n",
    "        # 输入进来的维度分别是Q:[batch_size x n_heads x len_q x d_k]  K:[batch_size x n_heads x len_k x d_k]  V:[batch_size x n_heads x len_k x d_v]\n",
    "        # matmul操作即矩阵相乘\n",
    "        # [batch_size x n_heads x len_q x d_k] matmul [batch_size x n_heads x d_k x len_k] -> [batch_size x n_heads x len_q x len_k]\n",
    "        # NOTE(review): np（numpy）和 d_k 在本notebook中均未导入/未定义，单独运行此cell前需先 import numpy as np 并定义 d_k\n",
    "        scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k)\n",
    " \n",
    "        # masked_fill_(mask,value)这个函数，用value填充源向量中与mask中值为1位置相对应的元素，\n",
    "        # 要求mask和要填充的源向量形状需一致\n",
    "        # 把被mask的地方置为无穷小，softmax之后会趋近于0，Q会忽视这部分的权重\n",
    "        scores.masked_fill_(attn_mask, -1e9) # Fills elements of self tensor with value where mask is one.\n",
    "        attn = nn.Softmax(dim=-1)(scores)\n",
    "        context = torch.matmul(attn, V)\n",
    "        # context:[batch_size,n_heads,len_q,d_k]\n",
    "        # attn:[batch_size,n_heads,len_q,len_k]\n",
    "        return context, attn"
   ],
   "id": "137743331ae90148",
   "outputs": [],
   "execution_count": 6
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
