{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# transformer代码讲解\n",
    "\n",
    "文字，全流程详细讲解，无代码 https://blog.csdn.net/benzhujie1245com/article/details/117173090\n",
    "\n",
    "视频，原因讲解 https://www.bilibili.com/video/BV1dt4y1J7ov/\n",
    "\n",
    "视频，讲解详细 https://www.bilibili.com/video/BV1v3411r78R/\n",
    "\n",
    "文字，简单讲解，有代码 https://blog.csdn.net/qq_52785473/article/details/124537101\n",
    "\n",
    "文字，简单讲解，有代码 https://blog.csdn.net/Datawhale/article/details/120320116\n",
    "\n",
    "讲解更详细  https://wmathor.com/index.php/archives/1438/\n",
    "\n",
    "\n",
    "从零实现Transformer的简易版与强大版：从300多行到3000多行：\n",
    "\n",
    "讲解更完整 https://blog.csdn.net/v_JULY_v/article/details/130090649\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"29.png\" width=\"500\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from IPython.display import Image\n",
    "Image(url= \"29.png\",width=500)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "\n",
    "import math\n",
    "from torch.autograd import Variable\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import copy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## transformer 主要类\n",
    "\n",
    "### 词嵌入（Embeddings）\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Token embedding layer\n",
     "class Embeddings(nn.Module):\n",
     "    \"\"\"Token embedding scaled by sqrt(d_model), as in 'Attention Is All You Need'.\n",
     "\n",
     "    d_model: embedding dimension\n",
     "    vocab: vocabulary size\n",
     "    \"\"\"\n",
     "    def __init__(self, d_model, vocab):\n",
     "        super(Embeddings, self).__init__()\n",
     "        self.lut = nn.Embedding(vocab, d_model)  # lookup table: token id -> d_model vector\n",
     "        self.d_model = d_model\n",
     "    def forward(self, x):\n",
     "        # x: LongTensor of token ids, shape (batch, seq_len) in the demo below\n",
     "        return self.lut(x) * math.sqrt(self.d_model)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 4, 512])\n"
     ]
    }
   ],
   "source": [
    "d_model = 512  # embedding_size\n",
    "vocab = 1000  # 词典大小\n",
    "x=torch.tensor([[100, 2, 421, 508], [491, 998, 1, 221]], dtype=torch.long)\n",
    "emb = Embeddings(d_model, vocab)\n",
    "embr = emb(x)\n",
    "print(embr.shape)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[  4.4606,  34.4301, -24.5994,  ...,  17.1663,  28.1237,  16.9237],\n",
       "         [  5.1524,   9.8639,  31.0702,  ...,   3.0191,   9.8241,  26.0486],\n",
       "         [ -6.2616,   7.7158,  23.8920,  ...,  -9.6690,  17.4992,  22.7776],\n",
       "         [ 14.7686,  -5.9697,  17.2761,  ..., -19.2139, -36.0296, -19.4070]],\n",
       "\n",
       "        [[-15.1041,  16.8019,  11.4768,  ...,  17.9029,  27.7799,   1.4143],\n",
       "         [-17.6204,   1.9429,  16.7623,  ..., -51.7329, -12.3380,   1.6418],\n",
       "         [ 13.7802,  -3.5118,   5.1110,  ...,   9.2191, -26.6751,  28.3472],\n",
       "         [-21.7961, -42.0103, -23.3687,  ...,   6.7964, -30.1595,  46.5827]]],\n",
       "       grad_fn=<MulBackward0>)"
      ]
     },
     "execution_count": 42,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "embr"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 位置编码（PositionalEncoding）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "#位置编码\n",
    "class PositionalEncoding(nn.Module):\n",
    "    def __init__(self, d_model, dropout, max_len=5000):\n",
    "    # d_model:词嵌入维度\n",
    "    # dropout:置零比率\n",
    "    # max_len:每个句子最大的长度\n",
    "        super(PositionalEncoding, self).__init__()\n",
    "        self.dropout = nn.Dropout(p=dropout)\n",
    "        pe = torch.zeros(max_len, d_model)\n",
    "        position = torch.arange(0,  max_len).unsqueeze(1)\n",
    "        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(1000.0) / d_model))\n",
    "        pe[:, 0::2] = torch.sin(position * div_term)\n",
    "        pe[:, 1::2] = torch.cos(position * div_term)\n",
    "        pe = pe.unsqueeze(0)\n",
    "        self.register_buffer(\"pe\", pe)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)\n",
    "        return self.dropout(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 4, 512])\n"
     ]
    }
   ],
   "source": [
    "dropout = 0.1\n",
    "max_len = 60\n",
    "pe = PositionalEncoding(d_model, dropout, max_len)\n",
    "pe_result = pe(embr)\n",
    "print(pe_result.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[  4.9562,  39.3668, -27.3327,  ...,  20.1848,  31.2486,  19.9152],\n",
       "         [  6.6598,  11.5602,  35.4411,  ...,   4.4657,  10.9168,   0.0000],\n",
       "         [ -5.9470,   8.1107,  27.5802,  ...,  -9.6322,   0.0000,  26.4196],\n",
       "         [ 16.5664,  -7.7330,  19.4397,  ..., -20.2377, -40.0294,  -0.0000]],\n",
       "\n",
       "        [[-16.7823,  19.7799,  12.7519,  ...,  21.0032,   0.0000,   0.0000],\n",
       "         [ -0.0000,   2.7591,  19.5435,  ...,  -0.0000, -13.7077,   2.9353],\n",
       "         [ 16.3216,  -4.3644,   6.7124,  ...,  11.3546, -29.6367,  32.6080],\n",
       "         [-24.0611, -47.7781, -25.7211,  ...,   8.6627, -33.5071,  52.8697]]],\n",
       "       grad_fn=<MulBackward0>)"
      ]
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pe_result"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 多头自注意力机制（MultiHeadedAttention-attention）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [],
   "source": [
    "#mask == 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
     "def attention(query, key, value, mask=None, dropout=None):\n",
     "    \"\"\"Scaled dot-product attention.\n",
     "\n",
     "    Returns (weighted values, attention weights). Positions where mask == 0\n",
     "    are filled with -1e9 before the softmax so they get ~zero weight.\n",
     "    \"\"\"\n",
     "    d_k = query.size(-1)\n",
     "    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)\n",
     "    if mask is not None:\n",
     "        scores = scores.masked_fill(mask == 0, -1e9)\n",
     "    p_attn = F.softmax(scores, dim = -1)\n",
     "\n",
     "    if dropout is not None:\n",
     "        p_attn = dropout(p_attn)\n",
     "    \n",
     "    return torch.matmul(p_attn, value), p_attn\n",
     "\n",
     "# Produce N independent deep copies of a module, wrapped in a ModuleList\n",
     "def clones(module, N):\n",
     "    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n",
     "\n",
     "class MultiHeadedAttention(nn.Module):\n",
     "    def __init__(self, head, embedding_dim, dropout=0.1):\n",
     "        # head: number of attention heads\n",
     "        # embedding_dim: embedding dimension\n",
     "        # dropout: dropout probability\n",
     "        super(MultiHeadedAttention, self).__init__()\n",
     "\n",
     "        # embedding_dim must divide evenly across the heads\n",
     "        assert embedding_dim % head == 0\n",
     "        self.head = head\n",
     "        self.d_k = embedding_dim // head\n",
     "        # Four linear layers: Q, K, V projections plus the final output projection\n",
     "        self.linears = clones(nn.Linear(embedding_dim, embedding_dim), 4)\n",
     "        self.attn = None\n",
     "        self.dropout = nn.Dropout(p=dropout)\n",
     "\n",
     "    def forward(self, query, key, value, mask=None):\n",
     "        if mask is not None:\n",
     "            # NOTE(review): unsqueeze(0) broadcasts the mask over the batch dim;\n",
     "            # the reference implementation unsqueezes dim 1 (over heads). The\n",
     "            # demo cells here pass a (heads, seq, seq) mask, which matches dim 0 —\n",
     "            # confirm the intended mask shape before reusing this elsewhere.\n",
     "            mask = mask.unsqueeze(0)\n",
     "        \n",
     "        batch_size = query.size(0)\n",
     "\n",
     "        # Project, then split into `head` heads: (batch, head, seq_len, d_k)\n",
     "        query, key, value = [model(x).view(batch_size, -1, self.head, self.d_k).transpose(1, 2) for model, x in zip(self.linears, (query, key, value))]\n",
     "        # Run scaled dot-product attention on all heads in parallel\n",
     "        \n",
     "        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)\n",
     "        # Merge the heads back: (batch, seq_len, head * d_k)\n",
     "        \n",
     "        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.head * self.d_k)\n",
     "        # Final output projection\n",
     "        return self.linears[-1](x)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(1, 2)\n",
      "(2, 2)\n",
      "(3, 2)\n"
     ]
    }
   ],
   "source": [
    "for i in zip([1,2,3,4],[2,2,2]):\n",
    "    print(i)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "#mask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "64.0"
      ]
     },
     "execution_count": 53,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "512/8"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 4, 512])\n",
      "4\n",
      "torch.Size([2, 8, 4, 64])\n",
      "torch.Size([2, 8, 4, 64])\n",
      "tensor([[[ 3.1447,  9.3652, -0.8106,  ..., -1.9810,  7.4806, -0.8098],\n",
      "         [ 4.6991,  9.4880, -0.0394,  ..., -1.9257,  5.2841, -2.6955],\n",
      "         [ 3.0011,  9.2706,  2.6056,  ..., -6.5442,  4.7360,  2.8949],\n",
      "         [ 4.4883,  8.8427,  0.3545,  ..., -6.7603,  7.7433,  0.1657]],\n",
      "\n",
      "        [[-2.8519, -5.7107, -1.0561,  ...,  7.5895, -1.4115, -0.8601],\n",
      "         [-2.4921, -3.4672, -4.0760,  ...,  2.5844,  1.5122, -3.2994],\n",
      "         [ 0.7264, -6.5059, -3.1966,  ...,  9.5092, -1.2292, -1.1438],\n",
      "         [-0.9436, -8.2308, -1.7429,  ...,  7.9383, -0.5190,  0.6261]]],\n",
      "       grad_fn=<ViewBackward0>)\n",
      "torch.Size([2, 4, 512])\n"
     ]
    }
   ],
   "source": [
    "head = 8\n",
    "embedding_dim = 512\n",
    "dropout = 0.2\n",
    "query = key = value = pe_result\n",
    "mask = Variable(torch.zeros(8, 4, 4))\n",
    "mha = MultiHeadedAttention(head, embedding_dim, dropout)\n",
    "mha_result = mha(query, key, value, mask)\n",
    "print(mha_result)\n",
    "print(mha_result.shape)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "query = key = value = pe_result\n",
    "mask = Variable(torch.zeros(2, 4, 4))\n",
    "attn, p_attn = attention(query, key, value,mask=mask)\n",
    "# print(attn)\n",
    "# print(attn.shape)\n",
    "# print(p_attn)\n",
    "# print(p_attn.shape)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 前馈全连接层（PositionwiseFeedForward）\n",
    "\n",
    "考虑注意力机制可能对复杂的情况拟合程度不够，因此增加两层网络来增强模型的能力。\n",
    "\n",
    "前馈全连接层就是两次线性层+Relu激活"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
     "class PositionwiseFeedForward(nn.Module):\n",
     "    \"\"\"Position-wise feed-forward network: w2(dropout(relu(w1(x)))).\n",
     "\n",
     "    d_model: input/output dimension\n",
     "    d_ff: hidden dimension\n",
     "    dropout: dropout probability applied after the ReLU\n",
     "    \"\"\"\n",
     "    def __init__(self, d_model, d_ff, dropout=0.1):\n",
     "        super(PositionwiseFeedForward, self).__init__()\n",
     "        self.w1 = nn.Linear(d_model, d_ff)\n",
     "        self.w2 = nn.Linear(d_ff, d_model)\n",
     "        self.dropout = nn.Dropout(dropout)\n",
     "    \n",
     "    def forward(self, x):\n",
     "        # Applied independently at every position of x\n",
     "        return self.w2(self.dropout(F.relu(self.w1(x))))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[-0.6185,  0.0881,  1.4086,  ...,  1.2355, -1.5647,  1.1489],\n",
      "         [-2.3992, -0.9414, -0.4582,  ...,  0.4506, -0.5344,  1.8814],\n",
      "         [-0.5688,  0.0891,  1.4188,  ...,  1.1826, -0.4016,  0.9184],\n",
      "         [-0.6219, -1.2090,  1.4474,  ...,  0.3424, -0.5402,  1.8289]],\n",
      "\n",
      "        [[-1.1074,  2.0183,  0.5329,  ..., -1.3109,  0.8419,  0.2912],\n",
      "         [-2.1584,  3.1729,  0.0619,  ..., -1.6472,  1.3028,  0.3951],\n",
      "         [-1.6058,  2.4022,  0.3588,  ..., -0.4772,  0.4304, -0.1591],\n",
      "         [-0.6955,  1.6537,  0.6962,  ..., -0.1930,  0.5402,  0.2190]]],\n",
      "       grad_fn=<ViewBackward0>)\n",
      "torch.Size([2, 4, 512])\n"
     ]
    }
   ],
   "source": [
    "d_model = 512\n",
    "d_ff = 64\n",
    "dropout = 0.2\n",
    "x = mha_result\n",
    "ff = PositionwiseFeedForward(d_model, d_ff, dropout=dropout)\n",
    "ff_result = ff(x)\n",
    "print(ff_result)\n",
    "print(ff_result.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 规范化层（LayerNorm）\n",
    "\n",
    "BatchNorm简单来说就是对一批样本按照每个特征维度进行归一化\n",
    "\n",
    "Layer Norm是对每个单词的Embedding做归一化\n",
    "\n",
    "https://blog.csdn.net/qq_43827595/article/details/121877901\n",
    "\n",
    "https://liumin.blog.csdn.net/article/details/85075706"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"44.png\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"44.png\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"35.png\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 61,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"35.png\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[-8.2004,  0.5804,  1.3639,  ...,  1.7793, -2.9470, -1.8913],\n",
       "         [-6.3798,  2.6990,  3.7775,  ...,  1.6971, -2.1978, -0.7955],\n",
       "         [-6.0106,  2.9720,  4.2135,  ..., -2.0315, -5.0306, -4.9978],\n",
       "         [-6.6885,  0.0595,  1.7017,  ...,  2.5037, -2.7890,  2.4970]],\n",
       "\n",
       "        [[-2.7299, -2.4048,  3.6923,  ..., -7.7733,  1.3931,  1.7657],\n",
       "         [ 3.8099, -1.5517,  0.7698,  ..., -3.5534,  0.2886,  2.9241],\n",
       "         [-2.1036, -3.9115, -4.4982,  ..., -4.2154,  2.1434,  2.2444],\n",
       "         [ 1.0188, -1.2110,  0.7608,  ..., -3.6656,  6.5101,  4.7269]]],\n",
       "       grad_fn=<ViewBackward0>)"
      ]
     },
     "execution_count": 64,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
     "class LayerNorm(nn.Module):\n",
     "    \"\"\"Layer normalization over the last (feature) dimension.\n",
     "\n",
     "    a2/b2 are the learnable gain and bias. Note: eps is added to the std\n",
     "    (not the variance) and torch's .std() uses the unbiased estimator, so\n",
     "    results differ very slightly from nn.LayerNorm — the comparison cell\n",
     "    below shows the small discrepancy.\n",
     "    \"\"\"\n",
     "    def __init__(self, features, eps=1e-6):\n",
     "        super(LayerNorm, self).__init__()\n",
     "        self.a2 = nn.Parameter(torch.ones(features))\n",
     "        self.b2 = nn.Parameter(torch.zeros(features))\n",
     "        self.eps = eps\n",
     "    \n",
     "    def forward(self, x):\n",
     "        mean = x.mean(-1, keepdim = True)\n",
     "        std = x.std(-1, keepdim = True)\n",
     "        return self.a2 * (x - mean) / (std + self.eps) + self.b2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[-1.9218,  0.0708,  0.2487,  ...,  0.3429, -0.7296, -0.4901],\n",
      "         [-1.5170,  0.5434,  0.7881,  ...,  0.3160, -0.5679, -0.2497],\n",
      "         [-1.5732,  0.7361,  1.0552,  ..., -0.5502, -1.3212, -1.3128],\n",
      "         [-1.5733, -0.0602,  0.3080,  ...,  0.4879, -0.6989,  0.4864]],\n",
      "\n",
      "        [[-0.6454, -0.5691,  0.8614,  ..., -1.8287,  0.3220,  0.4094],\n",
      "         [ 0.8695, -0.3313,  0.1886,  ..., -0.7796,  0.0808,  0.6711],\n",
      "         [-0.5172, -0.9272, -1.0602,  ..., -0.9961,  0.4457,  0.4686],\n",
      "         [ 0.1812, -0.2806,  0.1277,  ..., -0.7889,  1.3183,  0.9490]]],\n",
      "       grad_fn=<AddBackward0>)\n",
      "tensor([[[-1.9237,  0.0709,  0.2489,  ...,  0.3433, -0.7304, -0.4906],\n",
      "         [-1.5185,  0.5439,  0.7889,  ...,  0.3163, -0.5685, -0.2499],\n",
      "         [-1.5747,  0.7368,  1.0563,  ..., -0.5508, -1.3225, -1.3141],\n",
      "         [-1.5748, -0.0603,  0.3083,  ...,  0.4883, -0.6996,  0.4868]],\n",
      "\n",
      "        [[-0.6460, -0.5697,  0.8623,  ..., -1.8305,  0.3223,  0.4098],\n",
      "         [ 0.8704, -0.3317,  0.1888,  ..., -0.7804,  0.0809,  0.6718],\n",
      "         [-0.5178, -0.9281, -1.0612,  ..., -0.9970,  0.4461,  0.4690],\n",
      "         [ 0.1814, -0.2809,  0.1279,  ..., -0.7896,  1.3196,  0.9500]]],\n",
      "       grad_fn=<NativeLayerNormBackward0>)\n"
     ]
    }
   ],
   "source": [
    "ln = LayerNorm(512)\n",
    "lnn = nn.LayerNorm(512)\n",
    "ln_result = ln(x)\n",
    "lnn_result = lnn(x)\n",
    "print(ln_result)\n",
    "print(lnn_result)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 子层连接结构(SublayerConnection)\n",
    "\n",
    "Add&Norm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"38.png\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"38.png\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
     "class SublayerConnection(nn.Module):\n",
     "    \"\"\"Residual connection around a sublayer: x + dropout(sublayer(norm(x))).\n",
     "\n",
     "    Note this is the pre-norm form (norm applied before the sublayer),\n",
     "    not the post-norm 'Add & Norm' drawn in the original paper's figure.\n",
     "    \"\"\"\n",
     "    def __init__(self, size, dropout=0.1):\n",
     "        super(SublayerConnection, self).__init__()\n",
     "        self.norm = LayerNorm(size)\n",
     "        self.dropout = nn.Dropout(p=dropout) \n",
     "        self.size = size\n",
     "    \n",
     "    def forward(self, x, sublayer):\n",
     "        # sublayer: a callable taking and returning a tensor of the same shape as x\n",
     "        return x + self.dropout(sublayer(self.norm(x)))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 4, 512])\n",
      "4\n",
      "torch.Size([2, 8, 4, 64])\n",
      "torch.Size([2, 8, 4, 64])\n",
      "tensor([[[-7.6168e+00, -2.2202e-01, -3.4862e+01,  ..., -1.2145e+01,\n",
      "           2.2603e+01, -1.2610e+01],\n",
      "         [ 5.2091e+01, -8.4343e+00, -3.5232e+01,  ...,  2.0129e+01,\n",
      "           1.1804e+01,  8.5235e+00],\n",
      "         [ 1.3720e+01, -5.2680e+00, -7.8298e+00,  ..., -2.2894e+01,\n",
      "           7.0936e+00,  2.1066e+01],\n",
      "         [-3.3744e+01, -3.4042e+01, -2.4341e+01,  ...,  3.5039e+00,\n",
      "          -9.1709e+00,  1.7512e-02]],\n",
      "\n",
      "        [[ 5.8780e+01, -2.4038e+01, -4.8911e-02,  ..., -6.9139e+00,\n",
      "          -4.7905e+01, -1.3536e+01],\n",
      "         [ 4.7775e+00,  1.3581e+01,  3.4360e+00,  ..., -1.1574e+01,\n",
      "          -9.6948e-02,  3.6499e+01],\n",
      "         [ 2.0096e+01, -2.7572e+01,  6.4582e+00,  ..., -3.8138e+01,\n",
      "          -2.2617e+00, -7.6446e+01],\n",
      "         [-4.3623e+01, -5.4308e-01,  1.4093e+00,  ...,  2.2701e+01,\n",
      "           8.3426e+00, -1.8410e-01]]], grad_fn=<AddBackward0>)\n",
      "torch.Size([2, 4, 512])\n"
     ]
    }
   ],
   "source": [
    "size = 512\n",
    "dropout = 0.2\n",
    "head = 8\n",
    "d_model = 512\n",
    "x = pe_result\n",
    "mask = Variable(torch.zeros(8, 4, 4))\n",
    "self_attn = MultiHeadedAttention(head, d_model)\n",
    "sublayer = lambda x: self_attn(x, x, x, mask)\n",
    "sc = SublayerConnection(size, dropout)\n",
    "sc_result = sc(x, sublayer)\n",
    "print(sc_result)\n",
    "print(sc_result.shape)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"48.png\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"48.png\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## transformer的二级部件\n",
    "\n",
    "* Encoder-Decoder架构\n",
    "\n",
     "编码器-解码器（Encoder-Decoder）是深度学习模型的抽象概念。许多序列到序列（seq2seq）模型都基于这一架构，比如基于RNN/LSTM的seq2seq模型以及Transformer等。\n",
    "\n",
    "编码器（Encoder）：负责将输入（Input）转化为特征（Feature）\n",
    "\n",
    "解码器（Decoder）：负责将特征（Feature）转化为目标（Target）\n",
    "\n",
    "https://blog.csdn.net/deer2019530/article/details/129675690\n",
    "\n",
    "### 1.编码器层（EncoderLayer）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"38.png\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"38.png\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
     "class EncoderLayer(nn.Module):\n",
     "    def __init__(self, size, self_attn, feed_forward, dropout):\n",
     "        # size: embedding dimension\n",
     "        # self_attn: multi-headed self-attention instance\n",
     "        # feed_forward: position-wise feed-forward instance\n",
     "        # dropout: dropout probability\n",
     "        super(EncoderLayer, self).__init__()\n",
     "        self.self_attn = self_attn\n",
     "        self.feed_forward = feed_forward\n",
     "        # Two residual sublayers: one for self-attention, one for feed-forward\n",
     "        self.sublayer = clones(SublayerConnection(size, dropout), 2)\n",
     "        self.size = size\n",
     "    def forward(self, x, mask):\n",
     "        # Self-attention sublayer first, then the feed-forward sublayer\n",
     "        x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n",
     "        return self.sublayer[1](x, self.feed_forward)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 4, 512])\n",
      "4\n",
      "torch.Size([2, 8, 4, 64])\n",
      "torch.Size([2, 8, 4, 64])\n",
      "tensor([[[-7.5962e+00,  5.7254e-01, -3.5566e+01,  ..., -1.2890e+01,\n",
      "           2.2956e+01, -1.2507e+01],\n",
      "         [ 5.2098e+01, -8.1339e+00, -3.4996e+01,  ...,  1.9243e+01,\n",
      "           1.2127e+01,  7.4026e+00],\n",
      "         [ 1.3874e+01, -5.0351e+00, -8.0736e+00,  ..., -2.4052e+01,\n",
      "           6.9527e+00,  2.0552e+01],\n",
      "         [-3.3589e+01, -3.3747e+01, -2.4778e+01,  ...,  3.1240e+00,\n",
      "          -9.1623e+00, -5.1010e-01]],\n",
      "\n",
      "        [[ 5.8880e+01, -2.3874e+01,  4.2584e-02,  ..., -6.7574e+00,\n",
      "          -4.7951e+01, -1.3427e+01],\n",
      "         [ 4.5244e+00,  1.3642e+01,  3.0564e+00,  ..., -1.1101e+01,\n",
      "           1.3809e-01,  3.6584e+01],\n",
      "         [ 1.9917e+01, -2.7515e+01,  6.9420e+00,  ..., -3.7486e+01,\n",
      "          -1.3902e+00, -7.6213e+01],\n",
      "         [-4.3678e+01, -3.0355e-01,  1.4843e+00,  ...,  2.3051e+01,\n",
      "           8.2299e+00,  8.0651e-02]]], grad_fn=<AddBackward0>)\n",
      "torch.Size([2, 4, 512])\n"
     ]
    }
   ],
   "source": [
    "size = 512\n",
    "head = 8\n",
    "d_model = 512\n",
    "d_ff = 64\n",
    "x = pe_result\n",
    "dropout = 0.2\n",
    "self_attn = MultiHeadedAttention(head, d_model)\n",
    "ff = PositionwiseFeedForward(d_model, d_ff, dropout)\n",
    "mask = Variable(torch.zeros(8, 4, 4))\n",
    "el = EncoderLayer(size, self_attn, ff, dropout)\n",
    "el_result = el(x, mask)\n",
    "print(el_result)\n",
    "print(el_result.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2.编码器（Encoder）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"39.jpg\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"39.jpg\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Encoder: a stack of N identical encoder layers plus a final LayerNorm\n",
     "class Encoder(nn.Module):\n",
     "    def __init__(self, layer, N):\n",
     "        # layer: an EncoderLayer instance, deep-copied N times by clones()\n",
     "        super(Encoder, self).__init__()\n",
     "        self.layers = clones(layer, N)\n",
     "        self.norm = LayerNorm(layer.size)\n",
     "\n",
     "    def forward(self, x, mask):\n",
     "        # Pass x through each layer in turn, then normalize the final output\n",
     "        for layer in self.layers:\n",
     "            x = layer(x, mask)\n",
     "        return self.norm(x)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[-0.4271, -0.1136, -1.5222,  ..., -0.6932,  0.9212, -0.5832],\n",
      "         [ 2.0813, -0.3794, -1.6242,  ...,  0.6505,  0.5845,  0.3873],\n",
      "         [ 0.2288, -0.2102, -0.4917,  ..., -1.0011,  0.3007,  0.7645],\n",
      "         [-1.6307, -1.4349, -1.0386,  ...,  0.0529, -0.3985,  0.0364]],\n",
      "\n",
      "        [[ 2.2717, -1.0465,  0.0164,  ..., -0.4038, -2.1660, -0.6705],\n",
      "         [ 0.1267,  0.6160,  0.2929,  ..., -0.5215, -0.0237,  1.5602],\n",
      "         [ 0.7160, -1.1489,  0.2235,  ..., -1.6382, -0.2661, -3.0683],\n",
      "         [-1.8700, -0.0800,  0.2219,  ...,  1.0834,  0.4636,  0.0974]]],\n",
      "       grad_fn=<AddBackward0>)\n",
      "torch.Size([2, 4, 512])\n"
     ]
    }
   ],
   "source": [
    "size = 512\n",
    "head = 8\n",
    "d_model = 512\n",
    "d_ff = 64\n",
    "c = copy.deepcopy\n",
    "dropout = 0.2\n",
    "\n",
    "attn = MultiHeadedAttention(head, d_model)\n",
    "ff = PositionwiseFeedForward(d_model, d_ff, dropout)\n",
    "mask = Variable(torch.zeros(8, 4, 4))\n",
    "layer = EncoderLayer(size, c(attn), c(ff), dropout)\n",
    "N = 8\n",
    "mask = Variable(torch.zeros(8, 4, 4))\n",
    "en = Encoder(layer, N)\n",
    "en_result = en(x, mask)\n",
    "print(en_result)\n",
    "print(en_result.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3.解码器层（DecoderLayer）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"40.png\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"40.png\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"46.gif\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"46.gif\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
     "class DecoderLayer(nn.Module):\n",
     "    def __init__(self, size, self_attn, src_attn, feed_forward, dropout):\n",
     "        # size: embedding dimension\n",
     "        # self_attn: self-attention over the target sequence (uses target_mask)\n",
     "        # src_attn: encoder-decoder (cross) attention (uses source_mask)\n",
     "        # feed_forward: position-wise feed-forward instance\n",
     "        super(DecoderLayer, self).__init__()\n",
     "        self.size = size\n",
     "        self.self_attn = self_attn\n",
     "        self.src_attn = src_attn\n",
     "        self.feed_forward = feed_forward\n",
     "        # Three residual sublayers: self-attn, cross-attn, feed-forward\n",
     "        self.sublayer = clones(SublayerConnection(size, dropout), 3)\n",
     "\n",
     "    def forward(self, x, memory, source_mask, target_mask):\n",
     "        # memory: encoder output; cross-attention queries come from the decoder,\n",
     "        # keys/values from memory\n",
     "        m = memory\n",
     "        x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, target_mask))\n",
     "        x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, source_mask))\n",
     "        return self.sublayer[2](x, self.feed_forward)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[-7.9105e+00, -2.2174e-01, -3.5165e+01,  ..., -1.2805e+01,\n",
      "           2.3126e+01, -1.2007e+01],\n",
      "         [ 5.1976e+01, -7.9442e+00, -3.5792e+01,  ...,  2.0362e+01,\n",
      "           1.2573e+01,  7.9624e+00],\n",
      "         [ 1.4152e+01, -5.5714e+00, -7.6872e+00,  ..., -2.2731e+01,\n",
      "           7.5578e+00,  2.0932e+01],\n",
      "         [-3.3293e+01, -3.3924e+01, -2.4315e+01,  ...,  2.9771e+00,\n",
      "          -9.0354e+00,  8.2886e-02]],\n",
      "\n",
      "        [[ 5.8963e+01, -2.3734e+01,  1.4181e-01,  ..., -6.5148e+00,\n",
      "          -4.7695e+01, -1.3804e+01],\n",
      "         [ 4.4570e+00,  1.3936e+01,  3.1438e+00,  ..., -1.0971e+01,\n",
      "          -5.5008e-02,  3.6598e+01],\n",
      "         [ 2.0076e+01, -2.7106e+01,  6.5314e+00,  ..., -3.7407e+01,\n",
      "          -2.0241e+00, -7.6543e+01],\n",
      "         [-4.3527e+01, -3.7362e-02,  1.7245e+00,  ...,  2.3904e+01,\n",
      "           8.9378e+00, -3.7832e-01]]], grad_fn=<AddBackward0>)\n",
      "torch.Size([2, 4, 512])\n"
     ]
    }
   ],
   "source": [
    "head = 8\n",
    "size = d_model = 512\n",
    "d_ff = 64\n",
    "dropout = 0.2\n",
    "self_attn =  src_attn = MultiHeadedAttention(head, d_model, dropout)\n",
    "ff = PositionwiseFeedForward(d_model, d_ff, dropout)\n",
    "x = pe_result\n",
    "memory = en_result\n",
    "mask = Variable(torch.zeros(8, 4, 4))\n",
    "source_mask = target_mask = mask # 这里为了方便演示就直接设置一样了\n",
    "dl = DecoderLayer(size, self_attn, src_attn, ff, dropout)\n",
    "dl_result = dl(x, memory, source_mask, target_mask)\n",
    "print(dl_result)\n",
    "print(dl_result.shape)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 4.解码器（Decoder）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
     "class Decoder(nn.Module):\n",
     "    def __init__(self, layer, N):\n",
     "        # layer: a DecoderLayer instance, deep-copied N times by clones()\n",
     "        super(Decoder, self).__init__()\n",
     "        self.layers = clones(layer, N)\n",
     "        self.norm = LayerNorm(layer.size)\n",
     "    def forward(self, x, memory, source_mask, target_mask):\n",
     "        # x: target-side embeddings\n",
     "        # memory: encoder output tensor\n",
     "        # source_mask: mask for the source sequence\n",
     "        # target_mask: mask for the target sequence\n",
     "        for layer in self.layers:\n",
     "            x = layer(x, memory, source_mask, target_mask)\n",
     "        return self.norm(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[-0.1914,  0.0316, -1.4516,  ..., -0.5109,  0.8120, -0.6192],\n",
      "         [ 2.4436, -0.1856, -1.5101,  ...,  1.0533,  0.3636,  0.4281],\n",
      "         [ 0.7144, -0.0735, -0.3538,  ..., -0.8280,  0.1390,  0.9766],\n",
      "         [-1.3158, -1.3548, -1.1717,  ...,  0.2616, -0.5626,  0.0988]],\n",
      "\n",
      "        [[ 2.2384, -1.1134, -0.1828,  ..., -0.3058, -1.9598, -0.8514],\n",
      "         [ 0.2237,  0.6105,  0.0993,  ..., -0.4678, -0.0176,  1.4192],\n",
      "         [ 0.9140, -1.1424,  0.1541,  ..., -1.2749, -0.0882, -3.3717],\n",
      "         [-1.8234,  0.0832, -0.0134,  ...,  1.0313,  0.4380, -0.2029]]],\n",
      "       grad_fn=<AddBackward0>)\n",
      "torch.Size([2, 4, 512])\n"
     ]
    }
   ],
   "source": [
    "size = 512\n",
    "head = 8\n",
    "d_model = 512\n",
    "d_ff = 64\n",
    "c = copy.deepcopy\n",
    "dropout = 0.2\n",
    "attn = MultiHeadedAttention(head, d_model)\n",
    "ff = PositionwiseFeedForward(d_model, d_ff, dropout)\n",
    "layer = DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout)\n",
    "memory = en_result\n",
    "mask = Variable(torch.zeros(8, 4, 4))\n",
    "source_mask = target_mask = mask \n",
    "N = 8\n",
    "x = pe_result\n",
    "\n",
    "de = Decoder(layer, N)\n",
    "de_result = de(x, memory, source_mask, target_mask)\n",
    "print(de_result)\n",
    "print(de_result.shape)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 5.输出层（Generator）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"41.png\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"41.png\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Generator(nn.Module):\n",
    "    def __init__(self, d_model, vocab_size):\n",
    "        super(Generator, self).__init__()\n",
    "        self.project = nn.Linear(d_model, vocab_size)\n",
    "    def forward(self, x):\n",
    "        return F.softmax(self.project(x), dim=-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[0.0031, 0.0003, 0.0004,  ..., 0.0008, 0.0005, 0.0014],\n",
      "         [0.0010, 0.0012, 0.0005,  ..., 0.0005, 0.0009, 0.0004],\n",
      "         [0.0014, 0.0027, 0.0004,  ..., 0.0013, 0.0024, 0.0006],\n",
      "         [0.0015, 0.0016, 0.0011,  ..., 0.0011, 0.0007, 0.0014]],\n",
      "\n",
      "        [[0.0008, 0.0003, 0.0004,  ..., 0.0006, 0.0015, 0.0006],\n",
      "         [0.0016, 0.0005, 0.0002,  ..., 0.0003, 0.0013, 0.0008],\n",
      "         [0.0012, 0.0005, 0.0033,  ..., 0.0003, 0.0006, 0.0018],\n",
      "         [0.0014, 0.0004, 0.0018,  ..., 0.0004, 0.0009, 0.0004]]],\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "torch.Size([2, 4, 1000])\n"
     ]
    }
   ],
   "source": [
    "vocab_size = 1000\n",
    "gen = Generator(d_model, vocab_size)\n",
    "x = de_result\n",
    "gen_result = gen(x)\n",
    "print(gen_result)\n",
    "print(gen_result.shape)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 6.输入层"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## transformer的一级部件\n",
    "\n",
    "### 1.编码器-解码器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"39.jpg\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"39.jpg\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "class EncoderDecoder(nn.Module):\n",
    "    def __init__(self, encoder, decoder, source_embed, target_embed, generator):\n",
    "        # encoder:编码器对象\n",
    "        # decoder:解码器对象\n",
    "        # source_embed:原数据词嵌入\n",
    "        # target_embed:目标数据词嵌入\n",
    "        # generator：输出部分类别生成器\n",
    "        super(EncoderDecoder, self).__init__()\n",
    "        self.encoder = encoder\n",
    "        self.decoder = decoder\n",
    "        self.src_embed = source_embed\n",
    "        self.tgt_embed = target_embed\n",
    "        self.generator = generator\n",
    "    def forward(self, source, target, source_mask, target_mask):\n",
    "        return self.decode(self.encode(source, source_mask), source_mask, target, target_mask)\n",
    "    \n",
    "    def encode(self, source, source_mask):\n",
    "        return self.encoder(self.src_embed(source), source_mask)\n",
    "    \n",
    "    def decode(self, memory,  source_mask, target, target_mask):\n",
    "        return self.decoder(self.tgt_embed(target), memory, source_mask, target_mask)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[-0.1339,  0.2593,  0.4469,  ...,  0.0245,  0.2096, -0.2820],\n",
      "         [-0.0375, -0.2829,  0.6640,  ..., -1.2519, -0.4895, -0.2747],\n",
      "         [ 0.7554, -1.0355,  0.7525,  ..., -0.2328,  0.6143, -0.7466],\n",
      "         [-0.3477, -0.0398,  0.6659,  ..., -0.0829, -0.1654, -0.8066]],\n",
      "\n",
      "        [[ 0.6578,  0.8913,  0.9278,  ...,  1.4095, -0.3353, -1.8564],\n",
      "         [-0.3979,  1.1808,  0.5054,  ...,  1.4843, -0.7904, -2.4899],\n",
      "         [ 0.2102,  0.9135,  0.5119,  ...,  1.8622, -0.2029, -1.5313],\n",
      "         [ 0.3884,  1.8151,  1.0496,  ...,  1.0963, -1.1170, -2.5009]]],\n",
      "       grad_fn=<AddBackward0>)\n",
      "torch.Size([2, 4, 512])\n"
     ]
    }
   ],
   "source": [
    "vocab_size = 1000\n",
    "d_model = 512\n",
    "encoder = en\n",
    "decoder = de\n",
    "source_embed = nn.Embedding(vocab_size, d_model)\n",
    "target_embed = nn.Embedding(vocab_size, d_model)\n",
    "generator = gen\n",
    "source = target = Variable(torch.LongTensor([[100, 2, 421, 508], [491, 998, 1, 221]]))\n",
    "source_mask = target_mask = Variable(torch.zeros(8, 4, 4))\n",
    "ed = EncoderDecoder(encoder, decoder, source_embed, target_embed, generator)\n",
    "ed_result = ed(source, target, source_mask, target_mask)\n",
    "print(ed_result)\n",
    "print(ed_result.shape)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### tramsformer中的掩码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"42.png\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"42.png\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"43.png\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"43.png\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### transformer的实现"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "def make_transformer_model(source_vocab, target_vocab, N=6, d_model=512, d_ff=64, head=8, dropout=0.1):\n",
    "    c = copy.deepcopy\n",
    "    attn = MultiHeadedAttention(head, d_model)\n",
    "    ff = PositionwiseFeedForward(d_model, d_ff, dropout)\n",
    "    position = PositionalEncoding(d_model, dropout)\n",
    "    model = EncoderDecoder(\n",
    "    Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N), \n",
    "    Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N), \n",
    "    nn.Sequential(Embeddings(d_model, source_vocab), c(position)),\n",
    "    nn.Sequential(Embeddings(d_model, target_vocab), c(position)),\n",
    "    Generator(d_model, target_vocab))\n",
    "\n",
    "    for p in model.parameters():\n",
    "        if p.dim()>1:\n",
    "            nn.init.xavier_uniform(p)\n",
    "    return model\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "EncoderDecoder(\n",
      "  (encoder): Encoder(\n",
      "    (layers): ModuleList(\n",
      "      (0-5): 6 x EncoderLayer(\n",
      "        (self_attn): MultiHeadedAttention(\n",
      "          (linears): ModuleList(\n",
      "            (0-3): 4 x Linear(in_features=512, out_features=512, bias=True)\n",
      "          )\n",
      "          (dropout): Dropout(p=0.1, inplace=False)\n",
      "        )\n",
      "        (feed_forward): PositionwiseFeedForward(\n",
      "          (w1): Linear(in_features=512, out_features=64, bias=True)\n",
      "          (w2): Linear(in_features=64, out_features=512, bias=True)\n",
      "          (dropout): Dropout(p=0.1, inplace=False)\n",
      "        )\n",
      "        (sublayer): ModuleList(\n",
      "          (0-1): 2 x SublayerConnection(\n",
      "            (norm): LayerNorm()\n",
      "            (dropout): Dropout(p=0.1, inplace=False)\n",
      "          )\n",
      "        )\n",
      "      )\n",
      "    )\n",
      "    (norm): LayerNorm()\n",
      "  )\n",
      "  (decoder): Decoder(\n",
      "    (layers): ModuleList(\n",
      "      (0-5): 6 x DecoderLayer(\n",
      "        (self_attn): MultiHeadedAttention(\n",
      "          (linears): ModuleList(\n",
      "            (0-3): 4 x Linear(in_features=512, out_features=512, bias=True)\n",
      "          )\n",
      "          (dropout): Dropout(p=0.1, inplace=False)\n",
      "        )\n",
      "        (src_attn): MultiHeadedAttention(\n",
      "          (linears): ModuleList(\n",
      "            (0-3): 4 x Linear(in_features=512, out_features=512, bias=True)\n",
      "          )\n",
      "          (dropout): Dropout(p=0.1, inplace=False)\n",
      "        )\n",
      "        (feed_forward): PositionwiseFeedForward(\n",
      "          (w1): Linear(in_features=512, out_features=64, bias=True)\n",
      "          (w2): Linear(in_features=64, out_features=512, bias=True)\n",
      "          (dropout): Dropout(p=0.1, inplace=False)\n",
      "        )\n",
      "        (sublayer): ModuleList(\n",
      "          (0-2): 3 x SublayerConnection(\n",
      "            (norm): LayerNorm()\n",
      "            (dropout): Dropout(p=0.1, inplace=False)\n",
      "          )\n",
      "        )\n",
      "      )\n",
      "    )\n",
      "    (norm): LayerNorm()\n",
      "  )\n",
      "  (src_embed): Sequential(\n",
      "    (0): Embeddings(\n",
      "      (lut): Embedding(11, 512)\n",
      "    )\n",
      "    (1): PositionalEncoding(\n",
      "      (dropout): Dropout(p=0.1, inplace=False)\n",
      "    )\n",
      "  )\n",
      "  (tgt_embed): Sequential(\n",
      "    (0): Embeddings(\n",
      "      (lut): Embedding(11, 512)\n",
      "    )\n",
      "    (1): PositionalEncoding(\n",
      "      (dropout): Dropout(p=0.1, inplace=False)\n",
      "    )\n",
      "  )\n",
      "  (generator): Generator(\n",
      "    (project): Linear(in_features=512, out_features=11, bias=True)\n",
      "  )\n",
      ")\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_5348\\1771242440.py:15: UserWarning: nn.init.xavier_uniform is now deprecated in favor of nn.init.xavier_uniform_.\n",
      "  nn.init.xavier_uniform(p)\n"
     ]
    }
   ],
   "source": [
    "source_vocab = 11\n",
    "target_vocab = 11\n",
    "N = 6\n",
    "res = make_transformer_model(source_vocab, target_vocab, N)\n",
    "print(res)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 繁荣的transformer 家族\n",
    "\n",
    "https://arxiv.org/pdf/2304.13712.pdf\n",
    "\n",
    "\n",
    "* 深入浅出Prompt Learning要旨及常用方法\n",
    "* llm lora 大模型微调"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<img src=\"47.png\"/>"
      ],
      "text/plain": [
       "<IPython.core.display.Image object>"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Image(url= \"47.png\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
