{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "initial_id",
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [],
   "id": "4017e78d18723b73"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [],
   "id": "fec4acd90384ab71"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-17T12:10:57.988407Z",
     "start_time": "2024-11-17T12:10:53.799495Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import math\n",
    "\n",
    "import numpy as np\n",
    "import torch\n",
    "from torch import nn, optim\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# device='cuda'\n",
    "# epochs=100\n",
    "#\n",
    "# #Transformer Paramters\n",
    "# d_model=512 #Embedding Size Token embedding和position编码维度\n",
    "# d_ff=2048   #FeedForward dimension  两次线形层中的隐藏层 512->2048->512 线形层用来做特征提取\n",
    "# d_k=d_v=64  #dimension of K=Q,V Q,K维度需要相同  QK需要做点积\n",
    "# n_layers=6  #number of Encoder of Decoder Layer\n",
    "# n_heads=8   #number of heads in Multi-Head Attention\n",
    "#\n",
    "# #S:Symbol that shows starting of decoding input\n",
    "# #E:Symbol that shows starting of decoding output\n",
    "# #P:Symbol that will fill in blank sequence if current batch data size is short than time steps\n",
    "# sentences=[\n",
    "#     #enc_input                 #dec_input          #dec_output\n",
    "#     ['ich mochte ein bier P','S i want a beer  .','i want a beer  . E'],\n",
    "#     ['ich mochte ein cola P','S i want a coke  .','i want a coke  . E']\n",
    "# ]\n",
    "#\n",
    "# #德语和英语的单词要分开建立单词库\n",
    "# #padding should be zero\n",
    "# src_vocab={'P':0,'ich':1,'mochte':2,'ein':3,'bier':4,'cola':5}\n",
    "# src_idx2word={i: w for i,w in enumerate(src_vocab)}\n",
    "# src_vocab_size = len(src_vocab)\n",
    "#\n",
    "# tgt_vocab={'P':0,'i':1,'want':2,'a':3,'beer':4,'coke':5,'S':6,'E':7,'.':8}\n",
    "# idx2word={i: w for i,w in enumerate(tgt_vocab)}\n",
    "# tgt_vocab_size = len(tgt_vocab)\n",
    "#\n",
    "# src_len=5 #enc_input max sequence length\n",
    "# tgt_leb=6 #dec_input max sequence length\n",
    "\n",
    "class Embeddings(nn.Module):\n",
    "    '''\n",
    "    d_model: 词向量维度\n",
    "    vocab:当前语言词表大小\n",
    "    '''\n",
    "    def __init__(self,d_model,vocab):\n",
    "        super(Embeddings,self).__init__()\n",
    "        #调用nn.Embedding预定义层 获得实例化词嵌入对象self.lut\n",
    "        self.lut=nn.Embedding(vocab,d_model)\n",
    "        self.d_model=d_model  #表示词向量维度\n",
    "\n",
    "    def forward(self,x):\n",
    "        '''\n",
    "        :param x: 输入给模型的单词文本通过此表映射后的one-hot向量\n",
    "        x传给self.lut 得到形状（batch_size sequencr_length d_model)张量与self.d_model相乘\n",
    "        以保持不同维度间方差一致性 及在训练过程中稳定梯度\n",
    "        :return:\n",
    "        '''\n",
    "        return self.lut(x)*math.sqrt(self.d_model)\n",
    "\n",
    "\n",
    "class PositionalEncoding(nn.Module):\n",
    "    '''实现Positional Encoding功能'''\n",
    "    def __init__(self,d_model,dropout=0.1,max_len=5000):\n",
    "        '''\n",
    "        位置编码器的初始化函数\n",
    "        :param d_model: 词向量的维度 与输入序列特征维度相同 512\n",
    "        :param dropout:  置零比率\n",
    "        :param max_len: 句子最大长度 5000\n",
    "        '''\n",
    "        super(PositionalEncoding,self).__init__()\n",
    "        #初始化一个nn.Dropout层\n",
    "        self.dropout = nn.Dropout(p=dropout)\n",
    "\n",
    "        #初始化一个位置编码矩阵\n",
    "        #（5000，512）矩阵 保持每个位置的位置编码 一共5000个位置 每个位置用一个512维向量表示其位置\n",
    "        pe=torch.zeros(max_len,d_model)\n",
    "\n",
    "        #偶数和奇数在公式上有一个共同的部分 使用log函数把次方拿下来 方便计算\n",
    "        #position表示的是在字词在句中的索引\n",
    "        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n",
    "        #计算用于控制正余弦的系数 确保不同频率成分在d_model维空间内均匀分布\n",
    "        div_term = torch.exp(torch.arange(0, d_model,2).float() * (-math.log(1000.0)/d_model))\n",
    "        #根据位置和div_term计算正余弦值 分布赋值给pe偶数列和奇数列\n",
    "        pe[:,0::2] = torch.sin(position*div_term)#从0开始至末尾 步长为2\n",
    "        pe[:,1::2] = torch.cos(position*div_term)#从1开始至末尾 步长为2\n",
    "        #pe[max_len*d_model]\n",
    "        pe=pe.unsqueeze(0)\n",
    "        #增加一维 pe[1*max_len*d_model] 为适应batch_size （5000，512）->(1,5000,512)\n",
    "        #将计算好的位置编码矩阵注册为模块缓冲区buffer 意味着塔将成为模块一部分并随模型保存与加载 并不会被视为参数参与反向传播\n",
    "        self.register_buffer('pe',pe)\n",
    "\n",
    "    def forward(self,x):\n",
    "        '''\n",
    "        :param x: [seq_len,batch_size,d_model] 经过词向量输入\n",
    "        '''\n",
    "        x=x+self.pe[:,:x.size(1)].clone().detach() #经过词向量的输入与位置编码相加\n",
    "        #Dropout层会按照随机比例置零一部分位置编码与词向量相加后的元素 以此达到正则化 防止过拟合\n",
    "        return self.dropout(x)\n",
    "\n",
    "class ScaledDotProductAttention(nn.Module):\n",
    "    def __init__(self,scale_factor,dropout=0.0):\n",
    "        super().__init__()\n",
    "        self.scale_factor=scale_factor\n",
    "        #dropout用于防止过拟合\n",
    "        self.dropout=nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self,q,k,v,mask=None):\n",
    "        '''\n",
    "        batch_size:批量大小 len_q,len_v,len_k:序列长度 此处皆相等  n_head:多头注意力\n",
    "        d_k,d_v:k,v的dim默认为64\n",
    "        q,k,v shape为(batch_size,n_head,len_q,d_q/k/v)(batch_size,8,len_q/k/v,64)\n",
    "        q先除以self,scale_factor 再乘以k的转置(交换最后两个维度)\n",
    "        attn的shape为(batch_size,n_head,len_q,len_k)\n",
    "        '''\n",
    "        attn=torch.matmul(q/self.scale_factor,k.transpose(2,3))  # 最后两个维度做矩阵乘\n",
    "\n",
    "        if mask is not None:\n",
    "            '''\n",
    "            用-1e9代替0  其经过softmax后接近0\n",
    "            去除掉padding在训练过程中的影响 将输入进行遮盖 避免decoder看到后面要预测的东西\n",
    "            '''\n",
    "            attn=attn.masked_fill(mask==0,-1e9)\n",
    "\n",
    "        #先将attn最后一个维度做softmax再dropout得到注意力分数\n",
    "        attn=self.dropout(torch.softmax(attn,dim=-1))\n",
    "        #最后与attn与v相乘 attn shape为（batch_size,8,len_q,64)\n",
    "        output=torch.matmul(attn,v)\n",
    "        return output,attn\n",
    "\n",
     "class MultiHeadAttention(nn.Module):\n",
     "    # Multi-head attention sub-layer.\n",
     "    # NOTE(review): instead of taking constructor parameters (see the\n",
     "    # commented signature below), __init__ reads the module-level globals\n",
     "    # n_head, d_model, d_k, d_v (8, 512, 64, 64) defined in the __main__\n",
     "    # section, so those globals must be assigned before instantiation.\n",
     "    #def __init__(self,n_head,d_model,d_k,d_v,dropout=0.1):\n",
     "    def __init__(self):\n",
     "        # q, k, v each pass through their own linear projection, then\n",
     "        # scaled dot-product attention, then a final linear layer.\n",
     "        super().__init__()\n",
     "\n",
     "        self.n_head=n_head\n",
     "        self.d_k=d_k\n",
     "        self.d_v=d_v\n",
     "        dropout=0.1\n",
     "        \n",
     "        # Q @ K^T: the last dims of Q and K must match; both are d_k here.\n",
     "        self.w_qs=nn.Linear(d_model,n_head*d_k,bias=False)  # emb * wq = Q; only the last two dims multiply: [batch_size, seq_len, d_model] x [d_model, d_model] -> [batch_size, seq_len, d_model]\n",
     "        self.w_ks=nn.Linear(d_model,n_head*d_k,bias=False)\n",
     "        self.w_vs=nn.Linear(d_model,n_head*d_v,bias=False)\n",
     "        self.fc=nn.Linear(n_head*d_v,d_model,bias=False)\n",
     "\n",
     "        self.attention=ScaledDotProductAttention(scale_factor=d_k**0.5)\n",
     "        self.dropout=nn.Dropout(dropout)\n",
     "        # Normalises over the last dimension by default.\n",
     "        # NOTE(review): this single LayerNorm instance is applied to q, k, v\n",
     "        # before attention AND again after the residual add in forward() --\n",
     "        # an unusual mix of pre- and post-norm; confirm this is intended.\n",
     "        self.layer_norm=nn.LayerNorm(d_model,eps=1e-6)\n",
     "\n",
     "    def forward(self,q,k,v,mask=None):\n",
     "        # For the first block q, k, v are the position-encoded embeddings;\n",
     "        # for the stacked blocks above it they are the previous block's output.\n",
     "        # q,k,v: batch_size x seq_len x d_model\n",
     "        d_k,d_v,n_head=self.d_k,self.d_v,self.n_head\n",
     "        # len_q/len_k/len_v are the input sequence lengths.\n",
     "        batch_size,len_q,len_k,len_v=q.size(0),q.size(1),k.size(1),v.size(1)\n",
     "        # Kept for the residual connection (note: the PRE-norm q is kept).\n",
     "        residual=q\n",
     "        # Pre-normalise q, k and v with the shared LayerNorm.\n",
     "        q=self.layer_norm(q)\n",
     "        k=self.layer_norm(k)\n",
     "        v=self.layer_norm(v)\n",
     "\n",
     "        # Project and split into heads (d_model = n_head*d_k):\n",
     "        # (batch_size, len, n_head*d_k) -> (batch_size, len, n_head, d_k)\n",
     "        q=self.w_qs(q).view(batch_size,len_q,n_head,d_k)\n",
     "        k=self.w_ks(k).view(batch_size,len_k,n_head,d_k)\n",
     "        v=self.w_vs(v).view(batch_size,len_v,n_head,d_v)\n",
     "\n",
     "        # Move the head axis forward for attention:\n",
     "        # (batch_size, len, n_head, d_k) -> (batch_size, n_head, len, d_k)\n",
     "        q,k,v=q.transpose(1,2),k.transpose(1,2),v.transpose(1,2)\n",
     "\n",
     "        if mask is not None:\n",
     "            # Insert a singleton head dimension so the mask broadcasts.\n",
     "            mask=mask.unsqueeze(1)\n",
     "        q,attn=self.attention(q,k,v,mask=mask)\n",
     "        # Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)\n",
     "        # (batch_size, 8, len_k, 64) => (batch_size, len_k, 8, 64) => (batch_size, len_k, 512)\n",
     "        q = q.transpose(1, 2).contiguous().view(batch_size, len_q, -1)\n",
     "        # Final linear projection followed by dropout.\n",
     "        q = self.dropout(self.fc(q))\n",
     "        # Residual connection: the Add of Add & Norm.\n",
     "        q += residual\n",
     "        # The Norm of Add & Norm.\n",
     "        q = self.layer_norm(q)\n",
     "        # q:    (batch_size, len_q, 512)\n",
     "        # attn: (batch_size, n_head, len_q, len_k)\n",
     "        return q, attn\n",
    "\n",
    "#Add&Layer normalization\n",
    "class LayerNorm(nn.Module):\n",
    "    def __init__(self,d_model,eps=1e-12):\n",
    "        super().__init__()\n",
    "        #初始化尺度参数gamma\n",
    "        self.gamma=nn.Parameter(torch.ones(d_model))\n",
    "        #初始化偏参数beta\n",
    "        self.beta=nn.Parameter(torch.zeros(d_model))\n",
    "        #设置小常数 防止除0\n",
    "        self.eps=eps\n",
    "\n",
    "    def forward(self,x):\n",
    "        #计算均值\n",
    "        mean=x.mean(-1,keepdim=True)\n",
    "        #计算方差 unbiased=False时 方差计算使用n而不是n-1做分母\n",
    "        var=x.var(-1,unbiased=False,keepdim=True)\n",
    "\n",
    "        #归一化计算\n",
    "        out=(x-mean)/torch.sqrt(var+self.eps)\n",
    "        out=self.gamma*out+self.beta\n",
    "\n",
    "#Feed Forward前馈神经网络\n",
    "# Transformer前馈神经网络： 在Transformer的编码器和解码器中，自注意力层之后紧跟着的是前馈神经网络（FFNN）。FFNN的主要作用是接收自注意力层的输出，并对其进行进一步的非线性变换，以捕获更复杂的特征和表示。\n",
    "# Transformer前馈神经网络两层结构： 包括两个线性变换，并在它们之间使用ReLU激活函数。 两个线性层的差异主要体现在它们的作用和维度变化上。\n",
    "\n",
    "# 第一层线性变换负责将输入映射到更高维度的空间，并引入非线性；而第二层线性变换则负责将输出映射回与输入相同的维度（或兼容的维度），通常不引入额外的非线性。\n",
    "# \n",
    "# 第一层线性变换：这是一个全连接层，它接收自注意力层的输出作为输入，并将其映射到一个更高维度的空间。这个步骤有助于模型学习更复杂的特征表示。\n",
    "# \n",
    "# 激活函数：在第一层全连接层之后，通常会应用一个非线性激活函数，如ReLU（Rectified Linear Unit）。ReLU函数帮助模型捕获非线性关系，提高模型的表达能力。\n",
    "# \n",
    "# 第二层线性变换：这也是一个全连接层，它将前一层的输出映射回与输入相同的维度（或与模型其他部分兼容的维度）。这一层通常没有非线性激活函数。\n",
    "class PoswiseFeedForwardNet(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(PoswiseFeedForwardNet, self).__init__()\n",
    "        self.fc = nn.Sequential(\n",
    "            nn.Linear(d_model, d_ff, bias=False),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(d_ff, d_model, bias=False))\n",
    "\n",
    "    def forward(self, inputs):                             # inputs: [batch_size, seq_len, d_model]\n",
    "        residual = inputs\n",
    "        output = self.fc(inputs)\n",
    "        return nn.LayerNorm(d_model)(output + residual)   # [batch_size, seq_len, d_model]\n",
    "\n",
    "# seq_q: [batch_size, seq_len] ,seq_k: [batch_size, seq_len]\n",
    "def get_attn_pad_mask(seq_q, seq_k):\n",
    "    batch_size, len_q = seq_q.size()\n",
    "    batch_size, len_k = seq_k.size()\n",
    "    # eq(zero) is PAD token\n",
    "    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)  # batch_size x 1 x len_k(=len_q), one is masking\n",
    "    # 扩展成多维度\n",
    "    return pad_attn_mask.expand(batch_size, len_q, len_k)  # batch_size x len_q x len_k\n",
    "\n",
    "class EncoderLayer(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(EncoderLayer, self).__init__()\n",
    "        self.enc_self_attn = MultiHeadAttention()                                     # 多头注意力机制\n",
    "        self.pos_ffn = PoswiseFeedForwardNet()                                        # 前馈神经网络\n",
    "\n",
    "    def forward(self, enc_inputs, enc_self_attn_mask):                                # enc_inputs: [batch_size, src_len, d_model]\n",
    "        #输入3个enc_inputs分别与W_q、W_k、W_v相乘得到Q、K、V                          # enc_self_attn_mask: [batch_size, src_len, src_len]\n",
    "        enc_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs,  enc_self_attn_mask)      # enc_outputs: [batch_size, src_len, d_model],\n",
    "                                                               # attn: [batch_size, n_heads, src_len, src_len]\n",
    "        enc_outputs = self.pos_ffn(enc_outputs)                                       # enc_outputs: [batch_size, src_len, d_model]\n",
    "        return enc_outputs, attn\n",
    "\n",
    "\"\"\"\n",
    "编码器\n",
    "\"\"\"\n",
    "class Encoder(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Encoder, self).__init__()\n",
    "\n",
    "        self.src_emb = nn.Embedding(src_vocab_size, d_model)                     # 把字转换为向量\n",
    "        self.pos_emb = PositionalEncoding(d_model)                               # 加入位置信息\n",
    "        self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])\n",
    "\n",
    "    def forward(self, enc_inputs):                                               # enc_inputs: [batch_size, src_len]\n",
    "        # enc_inputs.shape=torch.Size([1, 5])\n",
    "        # src_emb.shape=torch.Size([1, 5, 512])\n",
    "        # enc_outputs.shape=torch.Size([1, 5, 512])\n",
    "        # enc_self_attn_mask.shape=torch.Size([1, 5, 5])\n",
    "        print(f'enc_inputs.shape={enc_inputs.shape}')\n",
    "        # 1. 中文字索引进行Embedding，转换成512维度的字向量\n",
    "        enc_outputs = self.src_emb(enc_inputs)                                   # enc_outputs: [batch_size, src_len, d_model]\n",
    "        print(f'src_emb.shape={enc_outputs.shape}')\n",
    "        # 2. 在字向量上面加上位置信息\n",
    "        enc_outputs = self.pos_emb(enc_outputs)                                  # enc_outputs: [batch_size, src_len, d_model]\n",
    "        print(f'enc_outputs.shape={enc_outputs.shape}')\n",
    "        # 3. Mask掉句子中的占位符号\n",
    "        enc_self_attn_mask = get_attn_pad_mask(enc_inputs, enc_inputs)           # enc_self_attn_mask: [batch_size, src_len, src_len]\n",
    "        print(f'enc_self_attn_mask.shape={enc_self_attn_mask.shape}')\n",
    "        enc_self_attns = []\n",
    "        # 4. 通过6层的encoder（上一层的输出作为下一层的输入）\n",
    "        for layer in self.layers:\n",
    "            enc_outputs, enc_self_attn = layer(enc_outputs, enc_self_attn_mask)  # enc_outputs :   [batch_size, src_len, d_model],\n",
    "                                                                                 # enc_self_attn : [batch_size, n_heads, src_len, src_len]\n",
    "            enc_self_attns.append(enc_self_attn)\n",
    "        return enc_outputs, enc_self_attns\n",
    "\n",
    "def get_attn_subsequence_mask(seq):                               # seq: [batch_size, tgt_len]\n",
    "    attn_shape = [seq.size(0), seq.size(1), seq.size(1)]\n",
    "    subsequence_mask = np.triu(np.ones(attn_shape), k=1)          # 生成上三角矩阵,[batch_size, tgt_len, tgt_len]\n",
    "    subsequence_mask = torch.from_numpy(subsequence_mask).byte()  #  [batch_size, tgt_len, tgt_len]\n",
    "    return subsequence_mask\n",
    "\n",
    "class DecoderLayer(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(DecoderLayer, self).__init__()\n",
    "        self.dec_self_attn = MultiHeadAttention()\n",
    "        self.dec_enc_attn = MultiHeadAttention()\n",
    "        self.pos_ffn = PoswiseFeedForwardNet()\n",
    "\n",
    "    def forward(self, dec_inputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask): # dec_inputs: [batch_size, tgt_len, d_model]\n",
    "                                                                                       # enc_outputs: [batch_size, src_len, d_model]\n",
    "                                                                                       # dec_self_attn_mask: [batch_size, tgt_len, tgt_len]\n",
    "                                                                                       # dec_enc_attn_mask: [batch_size, tgt_len, src_len]\n",
    "        dec_outputs, dec_self_attn = self.dec_self_attn(dec_inputs, dec_inputs,\n",
    "                                                 dec_inputs, dec_self_attn_mask)   # dec_outputs: [batch_size, tgt_len, d_model]\n",
    "                                                                                   # dec_self_attn: [batch_size, n_heads, tgt_len, tgt_len]\n",
    "        dec_outputs, dec_enc_attn = self.dec_enc_attn(dec_outputs, enc_outputs,\n",
    "                                                enc_outputs, dec_enc_attn_mask)    # dec_outputs: [batch_size, tgt_len, d_model]\n",
    "                                                                                   # dec_enc_attn: [batch_size, h_heads, tgt_len, src_len]\n",
    "        dec_outputs = self.pos_ffn(dec_outputs)                                    # dec_outputs: [batch_size, tgt_len, d_model]\n",
    "        return dec_outputs, dec_self_attn, dec_enc_attn\n",
    "\n",
    "class Decoder(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Decoder, self).__init__()\n",
    "        self.tgt_emb = nn.Embedding(tgt_vocab_size, d_model)\n",
    "        # self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(tgt_len+1, d_model),freeze=True)\n",
    "        self.pos_emb =PositionalEncoding(d_model)\n",
    "        self.layers = nn.ModuleList([DecoderLayer() for _ in range(n_layers)])\n",
    "\n",
    "    def forward(self, dec_inputs, enc_inputs, enc_outputs): # dec_inputs : [batch_size x target_len]\n",
    "        # 1. 英文字索引进行Embedding，转换成512维度的字向量，并在字向量上加上位置信息\n",
    "        dec_outputs = self.tgt_emb(dec_inputs)# + self.pos_emb(torch.LongTensor([[5,1,2,3,4]]))\n",
    "        dec_outputs = self.pos_emb(enc_outputs)\n",
    "        # 2. Mask掉句子中的占位符号\n",
    "        dec_self_attn_pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs)\n",
    "        dec_self_attn_subsequent_mask = get_attn_subsequence_mask(dec_inputs)\n",
    "        dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask + dec_self_attn_subsequent_mask), 0)\n",
    "\n",
    "        dec_enc_attn_mask = get_attn_pad_mask(dec_inputs, enc_inputs)\n",
    "\n",
    "        dec_self_attns, dec_enc_attns = [], []\n",
    "        # 3. 通过6层的decoder（上一层的输出作为下一层的输入）\n",
    "        for layer in self.layers:\n",
    "            dec_outputs, dec_self_attn, dec_enc_attn = layer(dec_outputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask)\n",
    "            dec_self_attns.append(dec_self_attn)\n",
    "            dec_enc_attns.append(dec_enc_attn)\n",
    "        return dec_outputs, dec_self_attns, dec_enc_attns\n",
    "\n",
    "class Transformer(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Transformer, self).__init__()\n",
    "        # 编码器\n",
    "        self.encoder = Encoder()\n",
    "        # 解码器\n",
    "        self.decoder = Decoder()\n",
    "        # 解码器最后的分类器，分类器的输入d_model是解码层每个token的输出维度大小，需要将其转为词表大小，再计算softmax；计算哪个词出现的概率最大\n",
    "        self.projection = nn.Linear(d_model, tgt_vocab_size, bias=False)\n",
    "\n",
    "    def forward(self, enc_inputs, dec_inputs):\n",
    "        #  Transformer的两个输入，一个是编码器的输入（源序列），一个是解码器的输入（目标序列）\n",
    "        # 其中，enc_inputs的大小应该是 [batch_size, src_len] ;  dec_inputs的大小应该是 [batch_size, dec_inputs]\n",
    "\n",
    "        \"\"\"\n",
    "        源数据输入到encoder之后得到 enc_outputs, enc_self_attns；\n",
    "        enc_outputs是需要传给decoder的矩阵，表示源数据的表示特征\n",
    "        enc_self_attns表示单词之间的相关性矩阵\n",
    "        \"\"\"\n",
    "        enc_outputs, enc_self_attns = self.encoder(enc_inputs)\n",
    "\n",
    "        \"\"\"\n",
    "        decoder的输入数据包括三部分：\n",
    "        1. encoder得到的表示特征enc_outputs、\n",
    "        2. 解码器的输入dec_inputs（目标序列）、\n",
    "        3. 以及enc_inputs\n",
    "        \"\"\"\n",
    "        dec_outputs, dec_self_attns, dec_enc_attns = self.decoder(dec_inputs, enc_inputs, enc_outputs)\n",
    "\n",
    "        \"\"\"\n",
    "        将decoder的输出映射到词表大小，最后进行softmax输出即可\n",
    "        \"\"\"\n",
    "        dec_logits = self.projection(dec_outputs) # dec_logits : [batch_size x src_vocab_size x tgt_vocab_size]\n",
    "        return dec_logits.view(-1, dec_logits.size(-1)), enc_self_attns, dec_self_attns, dec_enc_attns\n",
    "\n",
    "\n",
    "def showgraph(attn):\n",
    "    attn = attn[-1].squeeze(0)[0]\n",
    "    attn = attn.squeeze(0).data.numpy()\n",
    "    fig = plt.figure(figsize=(n_head, n_head))  # [n_heads, n_heads]\n",
    "    ax = fig.add_subplot(1, 1, 1)\n",
    "    ax.matshow(attn, cmap='viridis')\n",
    "    ax.set_xticklabels([''] + sentences[0].split(), fontdict={'fontsize': 14}, rotation=90)\n",
    "    ax.set_yticklabels([''] + sentences[2].split(), fontdict={'fontsize': 14})\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "def make_batch(sentences):\n",
    "    # 分词，得到一个一个的token：sentences[0].split()\n",
    "    # 查找词表，得到token对应的token_index：[src_vocab[n]\n",
    "    \n",
    "    # input_batch=[[1, 2, 3, 4, 0]]   # 即 enc_inputs\n",
    "    # output_batch=[[5, 1, 2, 3, 4]]\n",
    "    # target_batch=[[1, 2, 3, 4, 6]]  # 即 dec_inputs\n",
    "    # \n",
    "    # enc_inputs=tensor([[1, 2, 3, 4, 0]])\n",
    "    # dec_inputs=tensor([[5, 1, 2, 3, 4]])\n",
    "    input_batch = [[src_vocab[word] for word in sentences[0].split()]]\n",
    "    print(f'input_batch={input_batch}')\n",
    "    output_batch = [[tgt_vocab[word] for word in sentences[1].split()]]\n",
    "    print(f'output_batch={output_batch}')\n",
    "    target_batch = [[tgt_vocab[word] for word in sentences[2].split()]]\n",
    "    print(f'target_batch={target_batch}')\n",
    "    return torch.LongTensor(input_batch), torch.LongTensor(output_batch), torch.LongTensor(target_batch)\n",
    "\n",
    "\n",
    "\n",
     "if __name__ == '__main__':\n",
     "    # S: start-of-decoding symbol, E: end-of-decoding symbol,\n",
     "    # P: padding for sequences shorter than the fixed length.\n",
     "    sentences = ['ich mochte ein bier P', 'S i want a beer', 'i want a beer E']\n",
     "\n",
     "    # Transformer Parameters\n",
     "    # Padding Should be Zero\n",
     "    src_vocab = {'P': 0, 'ich': 1, 'mochte': 2, 'ein': 3, 'bier': 4}\n",
     "    src_vocab_size = len(src_vocab)\n",
     "\n",
     "    tgt_vocab = {'P': 0, 'i': 1, 'want': 2, 'a': 3, 'beer': 4, 'S': 5, 'E': 6}\n",
     "    # Reverse lookup (index -> word), used to print the prediction below.\n",
     "    number_dict = {i: w for i, w in enumerate(tgt_vocab)}\n",
     "    tgt_vocab_size = len(tgt_vocab)\n",
     "\n",
     "    src_len = 5  # length of source\n",
     "    tgt_len = 5  # length of target\n",
     "\n",
     "    # NOTE(review): these globals are read by the class definitions above\n",
     "    # (MultiHeadAttention, PoswiseFeedForwardNet, Encoder, Decoder, ...),\n",
     "    # so they must be assigned before Transformer() is constructed.\n",
     "    d_model = 512  # Embedding Size\n",
     "    d_ff = 2048  # FeedForward dimension\n",
     "    d_k = d_v = 64  # dimension of K(=Q), V\n",
     "    n_layers = 6  # number of Encoder of Decoder Layer\n",
     "    n_head = 8  # number of heads in Multi-Head Attention\n",
     "\n",
     "    model = Transformer()\n",
     "\n",
     "    # ignore_index=0 keeps padding tokens out of the loss.\n",
     "    criterion = nn.CrossEntropyLoss(ignore_index=0)\n",
     "    optimizer=optim.SGD(model.parameters(),lr=1e-3,momentum=0.99)\n",
     "\n",
     "    enc_inputs, dec_inputs, target_batch = make_batch(sentences)\n",
     "\n",
     "    # NOTE(review): only ONE epoch is run, so the model stays essentially\n",
     "    # untrained -- hence the all-'i' prediction in the saved output below.\n",
     "    for epoch in range(1):\n",
     "        optimizer.zero_grad()\n",
     "        outputs, enc_self_attns, dec_self_attns, dec_enc_attns = model(enc_inputs, dec_inputs)\n",
     "        loss = criterion(outputs, target_batch.contiguous().view(-1))\n",
     "        # print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))\n",
     "        loss.backward()\n",
     "        optimizer.step()\n",
     "\n",
     "    # Test: note this feeds the teacher-forced dec_inputs back into the\n",
     "    # model rather than decoding autoregressively, so it is not true inference.\n",
     "    # enc_inputs=tensor([[1, 2, 3, 4, 0]])\n",
     "    # dec_inputs=tensor([[5, 1, 2, 3, 4]])\n",
     "    print(f'enc_inputs={enc_inputs}')\n",
     "    print(f'dec_inputs={dec_inputs}')\n",
     "    predict, _, _, _ = model(enc_inputs, dec_inputs)\n",
     "    # Greedy choice: argmax over the vocabulary at each target position.\n",
     "    predict = predict.data.max(1, keepdim=True)[1]\n",
     "    print(sentences[0], '->', [number_dict[n.item()] for n in predict.squeeze()])\n",
     "\n",
     "    # print('first head of last state enc_self_attns')\n",
     "    # showgraph(enc_self_attns)\n",
     "    # \n",
     "    # print('first head of last state dec_self_attns')\n",
     "    # showgraph(dec_self_attns)\n",
     "    # \n",
     "    # print('first head of last state dec_enc_attns')\n",
     "    # showgraph(dec_enc_attns)\n",
    "\n",
    "\n"
   ],
   "id": "b633a74660da1e6c",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input_batch=[[1, 2, 3, 4, 0]]\n",
      "output_batch=[[5, 1, 2, 3, 4]]\n",
      "target_batch=[[1, 2, 3, 4, 6]]\n",
      "enc_inputs.shape=torch.Size([1, 5])\n",
      "src_emb.shape=torch.Size([1, 5, 512])\n",
      "enc_outputs.shape=torch.Size([1, 5, 512])\n",
      "enc_self_attn_mask.shape=torch.Size([1, 5, 5])\n",
      "enc_inputs=tensor([[1, 2, 3, 4, 0]])\n",
      "dec_inputs=tensor([[5, 1, 2, 3, 4]])\n",
      "enc_inputs.shape=torch.Size([1, 5])\n",
      "src_emb.shape=torch.Size([1, 5, 512])\n",
      "enc_outputs.shape=torch.Size([1, 5, 512])\n",
      "enc_self_attn_mask.shape=torch.Size([1, 5, 5])\n",
      "ich mochte ein bier P -> ['i', 'i', 'i', 'i', 'i']\n"
     ]
    }
   ],
   "execution_count": 5
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
