{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math, re, random, torch, time, os, sys\n",
    "import numpy as np\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Select the GPU when available, otherwise fall back to the CPU.\n",
    "# (Was: ['cuda:0' if ... else 'cpu'][0] — a needless one-element-list wrapper.)\n",
    "device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n",
    "# BERT Parameters\n",
    "max_len = 30 # maximum sentence-pair length (this corpus needs at most 9+9+1+2=21 tokens)\n",
    "batch_size = 10 # sentence pairs per batch; only 5 positive pairs exist, so batch_size <= 10; must be even or make_batch loops forever\n",
    "max_words_pred = 5  # maximum number of masked tokens predicted per sample (21*0.15=3.15)\n",
    "n_layers = 12 # number of stacked EncoderLayers\n",
    "n_heads = 12 # number of heads in Multi-Head Attention\n",
    "d_model = 768 # Embedding Size\n",
    "d_ff = 4*d_model   # FeedForward dimension (4*d_model, as in the paper)\n",
    "d_k = d_v = 64  # dimension of K(=Q) and V per head\n",
    "n_segments = 2 # two segments: sentence A and sentence B"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''1. Data pre-processing'''\n",
    "def pre_process(text):\n",
    "    \"\"\"Tokenize the raw corpus and build deterministic vocabulary structures.\n",
    "\n",
    "    Args:\n",
    "        text: newline-separated sentences.\n",
    "\n",
    "    Returns:\n",
    "        (sentences, word_list, word_dict, number_dict, vocab_size, sentences_list)\n",
    "    \"\"\"\n",
    "    # Strip punctuation, lowercase, and split the corpus into sentences.\n",
    "    sentences = re.sub(\"[.,!?\\\\-]\", '', text.lower()).split('\\n')  # filter '.', ',', '?', '!'\n",
    "    word_sequence = \" \".join(sentences).split()\n",
    "    word_list = []\n",
    "    '''\n",
    "    Deduplicating with list(set(word_sequence)) would yield a randomly ordered\n",
    "    list (sets are unordered), so the vocabulary would differ between runs and a\n",
    "    checkpoint saved by a previous run would likely be unusable (e.g. last run\n",
    "    mapped i:0, love:1 -> you:2, but this run puts 'you' at index 3). Building\n",
    "    the list in first-seen order keeps the mapping deterministic.\n",
    "    '''\n",
    "    for word in word_sequence:\n",
    "        if word not in word_list:\n",
    "            word_list.append(word)\n",
    "    # The first 4 vocabulary entries are the special tokens.\n",
    "    word_dict = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[MASK]': 3}\n",
    "    for i, w in enumerate(word_list):\n",
    "        word_dict[w] = i + 4\n",
    "    number_dict = {i:w for i,w in enumerate(word_dict)}\n",
    "    vocab_size = len(word_dict)\n",
    "\n",
    "    # 2-D list: each sentence encoded as a list of token ids.\n",
    "    sentences_list = []\n",
    "    for sentence in sentences:\n",
    "        arr = [word_dict[s] for s in sentence.split()]\n",
    "        sentences_list.append(arr)\n",
    "    # (Removed the stray debug print(word_dict): the caller already prints it.)\n",
    "    return sentences, word_list, word_dict, number_dict, vocab_size, sentences_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[MASK]': 3, 'hello': 4, 'how': 5, 'are': 6, 'you': 7, 'i': 8, 'am': 9, 'romeo': 10, 'my': 11, 'name': 12, 'is': 13, 'juliet': 14, 'nice': 15, 'to': 16, 'meet': 17, 'too': 18, 'today': 19, 'great': 20, 'baseball': 21, 'team': 22, 'won': 23, 'the': 24, 'competition': 25, 'oh': 26, 'congratulations': 27, 'thanks': 28}\n",
      "sentences ['hello how are you i am romeo', 'hello romeo my name is juliet nice to meet you', 'nice meet you too how are you today', 'great my baseball team won the competition', 'oh congratulations juliet', 'thanks you romeo']\n",
      "word_list ['hello', 'how', 'are', 'you', 'i', 'am', 'romeo', 'my', 'name', 'is', 'juliet', 'nice', 'to', 'meet', 'too', 'today', 'great', 'baseball', 'team', 'won', 'the', 'competition', 'oh', 'congratulations', 'thanks']\n",
      "word_dict {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[MASK]': 3, 'hello': 4, 'how': 5, 'are': 6, 'you': 7, 'i': 8, 'am': 9, 'romeo': 10, 'my': 11, 'name': 12, 'is': 13, 'juliet': 14, 'nice': 15, 'to': 16, 'meet': 17, 'too': 18, 'today': 19, 'great': 20, 'baseball': 21, 'team': 22, 'won': 23, 'the': 24, 'competition': 25, 'oh': 26, 'congratulations': 27, 'thanks': 28}\n",
      "number_dict {0: '[PAD]', 1: '[CLS]', 2: '[SEP]', 3: '[MASK]', 4: 'hello', 5: 'how', 6: 'are', 7: 'you', 8: 'i', 9: 'am', 10: 'romeo', 11: 'my', 12: 'name', 13: 'is', 14: 'juliet', 15: 'nice', 16: 'to', 17: 'meet', 18: 'too', 19: 'today', 20: 'great', 21: 'baseball', 22: 'team', 23: 'won', 24: 'the', 25: 'competition', 26: 'oh', 27: 'congratulations', 28: 'thanks'}\n",
      "vocab_size 29\n",
      "sentences_list [[4, 5, 6, 7, 8, 9, 10], [4, 10, 11, 12, 13, 14, 15, 16, 17, 7], [15, 17, 7, 18, 5, 6, 7, 19], [20, 11, 21, 22, 23, 24, 25], [26, 27, 14], [28, 7, 10]]\n"
     ]
    }
   ],
   "source": [
    "text = (\n",
    "    'Hello, how are you? I am Romeo.\\n'\n",
    "    'Hello, Romeo My name is Juliet. Nice to meet you.\\n'\n",
    "    'Nice meet you too. How are you today?\\n'\n",
    "    'Great. My baseball team won the competition.\\n'\n",
    "    'Oh Congratulations, Juliet\\n'\n",
    "    'Thanks you Romeo'\n",
    ")\n",
    "sentences, word_list, word_dict, number_dict, vocab_size, sentences_list = pre_process(text)\n",
    "print(\"sentences\", sentences)\n",
    "print(\"word_list\", word_list)\n",
    "print(\"word_dict\", word_dict)\n",
    "print(\"number_dict\", number_dict)\n",
    "print(\"vocab_size\", vocab_size)\n",
    "print(\"sentences_list\", sentences_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''Build batch inputs (token ids, segment ids, MLM targets, NSP label) from the corpus.'''\n",
    "def make_batch():\n",
    "    \"\"\"Sample sentence pairs until batch_size/2 IsNext and batch_size/2 NotNext\n",
    "    examples are collected, or until the pool of distinct pairs is exhausted\n",
    "    (in which case the batch may end up smaller and unbalanced).\n",
    "\n",
    "    Returns:\n",
    "        list of [input_ids, segment_ids, masked_tokens, masked_pos, isNext]\n",
    "    \"\"\"\n",
    "    batch = []\n",
    "    positive = negative = 0\n",
    "    a_b = []  # (a_index, b_index) pairs already drawn, to avoid duplicates\n",
    "    '''Pairs are drawn at random, so a previously saved checkpoint may score differently.'''\n",
    "    while positive != batch_size/2 or negative != batch_size/2:\n",
    "        # random.randrange(n) returns an int in [0, n): pick two sentences at random.\n",
    "        sentence_a_index, sentence_b_index = random.randrange(len(sentences)), random.randrange(len(sentences))\n",
    "        # sentences_list stores each sentence as a list of token ids.\n",
    "        sentence_a, sentence_b = sentences_list[sentence_a_index], sentences_list[sentence_b_index]\n",
    "\n",
    "        if (sentence_a_index, sentence_b_index) not in a_b: # skip pairs already used\n",
    "            a_b.append((sentence_a_index, sentence_b_index))\n",
    "        elif len(a_b) < batch_size: # pool not exhausted yet: draw again\n",
    "            continue\n",
    "        else:\n",
    "            break # every pair seen: give up on a strict 50/50 split\n",
    "        # Add the classification and separator tokens.\n",
    "        input_ids = [word_dict['[CLS]']] + sentence_a + [word_dict['[SEP]']] + sentence_b + [word_dict['[SEP]']]\n",
    "        # segment_ids marks sentence a with 0 and sentence b with 1.\n",
    "        segment_ids = [0] * (1 + len(sentence_a) + 1) + [1] * (len(sentence_b) + 1)\n",
    "\n",
    "        # MASK LM: choose 15% of the tokens for prediction.\n",
    "        n_pred = int(0.15 * len(input_ids))\n",
    "        n_pred = min(max_words_pred, max(1, n_pred)) # at least 1, at most max_words_pred(=5)\n",
    "        # Candidate positions exclude [CLS] and [SEP], so a plain\n",
    "        # range over all positions would not do.\n",
    "        candidate_mask_tokens = [i for i, token in enumerate(input_ids) if token != word_dict['[CLS]']\n",
    "                               and token != word_dict['[SEP]']]\n",
    "        # Shuffle, then take the first n_pred candidate positions.\n",
    "        random.shuffle(candidate_mask_tokens)\n",
    "\n",
    "        masked_tokens, masked_pos = [], []\n",
    "        for pos in candidate_mask_tokens[:n_pred]:\n",
    "            # Whether the token is masked, replaced or kept, record the original\n",
    "            # token and its position: the loss compares the model output to it.\n",
    "            masked_pos.append(pos)\n",
    "            masked_tokens.append(input_ids[pos])\n",
    "            if random.random() < 0.8:  # 80%  -> [MASK]\n",
    "                input_ids[pos] = word_dict['[MASK]'] # mask\n",
    "            elif random.random() < 0.5:  # 10% of the total -> random word\n",
    "                # Fix: draw real words only (ids >= 4); the original could insert\n",
    "                # [PAD]/[CLS]/[SEP]/[MASK] via randint(0, vocab_size - 1).\n",
    "                random_index = random.randint(4, vocab_size - 1)\n",
    "                input_ids[pos] = random_index # the id IS the vocabulary index\n",
    "            # remaining ~10%: token left unchanged\n",
    "\n",
    "        '''Pad every sequence to the fixed maximum length for uniform batching.'''\n",
    "        # Zero Paddings\n",
    "        n_pad = max_len - len(input_ids) # max_len: maximum allowed sentence length\n",
    "        input_ids.extend([0] * n_pad) # word_dict['[PAD]'] = 0\n",
    "        segment_ids.extend([0] * n_pad)\n",
    "\n",
    "        # Zero-pad the prediction slots up to max_words_pred.\n",
    "        if max_words_pred > n_pred:\n",
    "            n_pad = max_words_pred - n_pred\n",
    "            masked_tokens.extend([0] * n_pad)\n",
    "            masked_pos.extend([0] * n_pad)\n",
    "\n",
    "        # Next-sentence label: positive when b immediately follows a in the corpus.\n",
    "        if sentence_a_index + 1 == sentence_b_index and positive < batch_size/2: # paper wants a strict 50/50 split\n",
    "            batch.append([input_ids, segment_ids, masked_tokens, masked_pos, True]) # IsNext\n",
    "            positive += 1\n",
    "        elif sentence_a_index + 1 != sentence_b_index and negative < batch_size/2:\n",
    "            batch.append([input_ids, segment_ids, masked_tokens, masked_pos, False]) # NotNext\n",
    "            negative += 1\n",
    "    return batch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[[1, 26, 3, 14, 2, 20, 11, 21, 22, 23, 24, 25, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [27, 0, 0, 0, 0], [2, 0, 0, 0, 0], False], [[1, 4, 3, 6, 7, 8, 9, 10, 2, 26, 27, 14, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5, 0, 0, 0, 0], [2, 0, 0, 0, 0], False], [[1, 26, 27, 3, 2, 4, 5, 6, 7, 8, 9, 10, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [14, 0, 0, 0, 0], [3, 0, 0, 0, 0], False], [[1, 4, 10, 11, 12, 13, 14, 15, 16, 17, 7, 2, 20, 23, 21, 22, 3, 4, 25, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [23, 11, 24, 0, 0], [16, 13, 17, 0, 0], False], [[1, 3, 10, 3, 12, 13, 14, 15, 16, 17, 7, 2, 3, 5, 6, 7, 8, 9, 10, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 11, 0, 0], [1, 12, 3, 0, 0], False], [[1, 15, 17, 7, 3, 5, 6, 7, 19, 2, 20, 11, 21, 22, 23, 24, 25, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [18, 7, 0, 0, 0], [4, 3, 0, 0, 0], True], [[1, 4, 5, 6, 7, 8, 9, 10, 2, 4, 3, 11, 3, 13, 14, 15, 16, 17, 7, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [10, 7, 12, 0, 0], [10, 4, 12, 0, 0], True]]\n"
     ]
    }
   ],
   "source": [
    "batch = make_batch()\n",
    "print(batch)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''Activation used in the paper; outperforms ReLU in BERT.'''\n",
    "def gelu(x):\n",
    "    \"\"\"Gaussian Error Linear Unit, exact (erf-based) Hugging Face formulation:\n",
    "    gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))\n",
    "    \"\"\"\n",
    "    # torch.erf evaluates the Gauss error function elementwise.\n",
    "    return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_attn_pad_mask(seq_q, seq_k): \n",
    "    '''Build a padding mask shaped (batch_size, len_q, len_k) so it can be\n",
    "    applied directly to the torch.matmul(Q, K^T) attention scores.'''\n",
    "    # seq_q: [batch_size, len_q]; seq_k: [batch_size, len_k]\n",
    "    batch_size, len_q = seq_q.size()\n",
    "    batch_size, len_k = seq_k.size()\n",
    "    # seq_k.eq(0) is True wherever seq_k holds the [PAD] token (id 0).\n",
    "    # (Was seq_k.data.eq(0); Tensor.data is deprecated and unnecessary here.)\n",
    "    pad_attn_mask = seq_k.eq(0).unsqueeze(1) # [batch_size, len_k] -> [batch_size, 1, len_k]\n",
    "    # Broadcast along the query axis: [batch_size, 1, len_k] -> [batch_size, len_q, len_k]\n",
    "    return pad_attn_mask.expand(batch_size, len_q, len_k) # batch_size, len_q, len_k\n",
    "\n",
    "'''Attention = Softmax(Q * K^T / sqrt(d_k)) * V'''\n",
    "def Scaled_Dot_Product_Attention(Q, K, V, attn_mask): \n",
    "    \"\"\"Scaled dot-product attention.\n",
    "\n",
    "    Args:\n",
    "        Q: [batch_size, n_heads, len_q, d_k]\n",
    "        K: [batch_size, n_heads, len_k, d_k]\n",
    "        V: [batch_size, n_heads, len_k, d_v]\n",
    "        attn_mask: [batch_size, n_heads, len_q, len_k] boolean; True = masked\n",
    "\n",
    "    Returns:\n",
    "        context: [batch_size, n_heads, len_q, d_v]\n",
    "        attn: attention weights [batch_size, n_heads, len_q, len_k]\n",
    "    \"\"\"\n",
    "    # torch.matmul batches the matrix product over the leading dimensions.\n",
    "    # scores: [batch_size, n_heads, len_q, len_k]\n",
    "    scores = torch.matmul(Q, K.transpose(2,3)) / math.sqrt(d_k) # scale; math.sqrt avoids the numpy detour\n",
    "    # In-place fill: masked (padding) positions get -1e9, so their softmax\n",
    "    # weight is effectively 0 and [PAD] tokens receive no attention.\n",
    "    scores.masked_fill_(attn_mask, -1e9)\n",
    "    # Row-wise softmax over the key dimension (replaces constructing a fresh\n",
    "    # nn.Softmax module on every call).\n",
    "    attn = scores.softmax(dim=-1)\n",
    "    context = torch.matmul(attn, V)\n",
    "    return context, attn\n",
    "\n",
    "class MultiHeadAttention(nn.Module):\n",
    "    '''Multi-head attention with a residual connection and LayerNorm.\n",
    "    Called as attn(Q_in, K_in, V_in, attn_mask); for encoder self-attention\n",
    "    all three inputs are the same tensor.'''\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        # Project d_model into n_heads independent subspaces of width d_k / d_v.\n",
    "        self.W_Q = nn.Linear(d_model, d_k*n_heads) # (768, 64*12) # d_q must equal d_k\n",
    "        self.W_K = nn.Linear(d_model, d_k*n_heads) # (768, 64*12)\n",
    "        self.W_V = nn.Linear(d_model, d_v*n_heads) # (768, 64*12)\n",
    "        self.linear = nn.Linear(n_heads*d_v, d_model)\n",
    "        self.layer_norm = nn.LayerNorm(d_model)\n",
    "\n",
    "    def forward(self, Q, K, V, attn_mask):\n",
    "        # Q/K/V: [batch_size, len_q/k/v, d_model]; attn_mask: [batch_size, len_q, len_k]\n",
    "        residual, batch_size = Q, len(Q)\n",
    "        '''Splitting d_model(=768) into n_heads(=12)*d_k(=64) keeps the compute\n",
    "        cost roughly unchanged while letting each head learn different features\n",
    "        independently.'''\n",
    "        # Q_s: [batch_size, len_q, n_heads, d_k] -> [batch_size, n_heads, len_q, d_k]\n",
    "        Q_s = self.W_Q(Q).view(batch_size, -1, n_heads, d_k).transpose(1,2)  \n",
    "        # K_s: [batch_size, n_heads, len_k, d_k]\n",
    "        K_s = self.W_K(K).view(batch_size, -1, n_heads, d_k).transpose(1,2)  \n",
    "        # V_s: [batch_size, n_heads, len_k, d_v]\n",
    "        V_s = self.W_V(V).view(batch_size, -1, n_heads, d_v).transpose(1,2)  \n",
    "\n",
    "        # Replicate the mask per head:\n",
    "        # [batch_size, len_q, len_k] -> [batch_size, n_heads, len_q, len_k]\n",
    "        attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1) \n",
    "\n",
    "        # context: [batch_size, n_heads, len_q, d_v]\n",
    "        # attn: [batch_size, n_heads, len_q, len_k]\n",
    "        context, attn = Scaled_Dot_Product_Attention(Q_s, K_s, V_s, attn_mask)\n",
    "        \"\"\"contiguous():\n",
    "        .view() only works on contiguous tensors; after transpose/permute the\n",
    "        tensor is no longer contiguous, so .contiguous() makes a contiguous copy.\n",
    "        \"\"\"\n",
    "        # Concatenate the heads: [batch, n_heads, len_q, d_v] -> [batch, len_q, n_heads*d_v]\n",
    "        context = context.transpose(1, 2).contiguous().view(batch_size, -1, n_heads * d_v)\n",
    "        output = self.linear(context)\n",
    "        \"\"\"nn.LayerNorm normalizes each sample over its feature dimension,\n",
    "        unlike BatchNorm which normalizes one feature across the whole batch.\n",
    "        \"\"\"\n",
    "        output = self.layer_norm(output + residual)\n",
    "        return output, attn \n",
    "\n",
    "class Position_wise_Feed_Forward_Networks(nn.Module):\n",
    "    '''Position-wise FFN: d_model -> d_ff -> d_model via kernel-size-1 Conv1d\n",
    "    layers (equivalent to per-position fully connected layers), followed by a\n",
    "    residual connection and LayerNorm.'''\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        # For nn.Conv1d, in_channels is the embedding dimension and\n",
    "        # out_channels is the number of kernels.\n",
    "        # 768 -> 3072\n",
    "        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n",
    "        # 3072 -> 768\n",
    "        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n",
    "        # Hoisted out of forward(): ReLU is stateless and parameter-free, so the\n",
    "        # saved state_dict layout (model_param.pt) is unchanged.\n",
    "        self.relu = nn.ReLU()\n",
    "        self.layer_norm = nn.LayerNorm(d_model)\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        # inputs: [batch_size, seq_len, d_model]\n",
    "        residual = inputs\n",
    "        # Conv1d expects [batch, channels, length], hence the transposes.\n",
    "        output = self.relu(self.conv1(inputs.transpose(1, 2))) # d_model -> d_ff\n",
    "        output = self.conv2(output).transpose(1, 2) # d_ff -> d_model\n",
    "        return self.layer_norm(output + residual)\n",
    "    \n",
    "class EncoderLayer(nn.Module):\n",
    "    '''One Transformer encoder block: self-attention + position-wise FFN.'''\n",
    "    def __init__(self):\n",
    "        super().__init__() # argument-free form, consistent with the other modules here\n",
    "        self.enc_attn = MultiHeadAttention()\n",
    "        self.pos_ffn = Position_wise_Feed_Forward_Networks()\n",
    "\n",
    "    def forward(self, enc_outputs, enc_attn_mask):\n",
    "        # enc_outputs: [batch_size, seq_len, d_model]; it serves as Q, K and V.\n",
    "        # enc_attn_mask: [batch_size, seq_len, seq_len]\n",
    "        enc_outputs, attn = self.enc_attn(enc_outputs, enc_outputs, enc_outputs, enc_attn_mask)\n",
    "        enc_outputs = self.pos_ffn(enc_outputs)\n",
    "        return enc_outputs, attn\n",
    "    \n",
    "'''Input embedding for the encoder, as designed in Figure 2 of the BERT paper.'''\n",
    "class Embedding(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        # vocab_size: 29  max_len: 30  n_segments: 2\n",
    "        self.tok_embed = nn.Embedding(vocab_size, d_model) # token embedding\n",
    "        self.pos_embed = nn.Embedding(max_len, d_model) # position embedding (learned, not sinusoidal)\n",
    "        self.seg_embed = nn.Embedding(n_segments, d_model) # segment (token type) embedding\n",
    "        self.norm = nn.LayerNorm(d_model) # per-sample feature normalization\n",
    "    \n",
    "    def forward(self, x, seg): # embedding(input_ids, segment_ids), both [batch, max_len]\n",
    "        seq_len = x.size(1) # 30\n",
    "        # torch.arange(n): the 1-D int64 tensor [0, 1, ..., n-1].\n",
    "        # Learned position ids replace the Transformer's fixed positional encoding.\n",
    "        pos = torch.arange(seq_len, dtype=torch.long).to(device) # [0,1,2,3...seq-1]\n",
    "        '''\n",
    "        Rows produced by expand/expand_as share storage and change together;\n",
    "        only repeat() copies, allowing a single row to be modified independently.\n",
    "        '''\n",
    "        pos = pos.unsqueeze(0).expand_as(x)  # (seq_len,) -> (batch_size, seq_len)\n",
    "        '''\n",
    "        tok_embed is (29, 768): the first nn.Embedding argument counts token\n",
    "        *categories*, so although sequences have length 30 > 29, every token id\n",
    "        is < 29 and the lookup is valid. Same reasoning applies to seg_embed.\n",
    "        '''\n",
    "        '''Figure 2 of the paper: input = token + position + segment embeddings.'''\n",
    "        embedding = self.tok_embed(x) + self.pos_embed(pos) + self.seg_embed(seg)\n",
    "        return self.norm(embedding)\n",
    "    \n",
    "'''2. Model definition'''\n",
    "class BERT(nn.Module):\n",
    "    # BERT with the two pre-training heads: masked LM (Task 1) and\n",
    "    # next-sentence prediction (Task 2).\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.embedding = Embedding()\n",
    "        self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])\n",
    "        self.fc = nn.Linear(d_model, d_model) # NOTE(review): shared by both heads in forward()\n",
    "        self.norm = nn.LayerNorm(d_model)\n",
    "        self.classifier = nn.Linear(d_model, 2) # binary IsNext/NotNext classifier\n",
    "        self.activate_1 = gelu\n",
    "        self.activate_2 = nn.Tanh()\n",
    "\n",
    "        # The masked-LM decoder shares its weight with the token embedding (weight tying).\n",
    "        token_embed_weight = self.embedding.tok_embed.weight\n",
    "        vocab_size, n_dim = token_embed_weight.size() # 29, 768\n",
    "        self.decoder = nn.Linear(n_dim, vocab_size, bias=False)\n",
    "        self.decoder.weight = token_embed_weight\n",
    "        self.decoder_bias = nn.Parameter(torch.zeros(vocab_size))\n",
    "\n",
    "    def forward(self, input_ids, segment_ids, masked_pos):\n",
    "        \n",
    "        # Figure 2 of the paper: input embedding = token + position + segment.\n",
    "        output = self.embedding(input_ids, segment_ids).to(device)\n",
    "        # Padding mask so [PAD] positions get zero weight after softmax.\n",
    "        enc_self_attn_mask = get_attn_pad_mask(input_ids, input_ids)\n",
    "\n",
    "        for layer in self.layers:\n",
    "            # output: [batch_size, max_len, d_model] e.g. [6,30,768]\n",
    "            # enc_self_attn: [batch_size, n_heads, max_len, max_len]\n",
    "            output, enc_self_attn = layer(output, enc_self_attn_mask)\n",
    "\n",
    "        '''Task 1: masked language model'''\n",
    "        # [:, :, None] adds a trailing dimension; expand(-1) keeps a dim unchanged:\n",
    "        # masked_pos: [batch, max_words_pred] -> [batch, max_words_pred, d_model] e.g. [6,5]->[6,5,768]\n",
    "        masked_pos = masked_pos[:, :, None].expand(-1, -1, output.size(-1)) # expand(-1) means unchanged\n",
    "        # torch.gather(input, dim, index): pick values at the given positions along dim.\n",
    "        # Extract the hidden states of the masked positions from the final output:\n",
    "        # output [6,30,768] -> predict_masked [6,5,768]\n",
    "        predict_masked = torch.gather(output, 1, masked_pos) \n",
    "        predict_masked = self.norm(self.activate_1(self.fc(predict_masked))) # activate_1: gelu\n",
    "        # decoder: nn.Linear(768, 29) tied to the embedding; decoder_bias: nn.Parameter(zeros(29))\n",
    "        # logits_lm: [batch_size, max_words_pred, vocab_size] e.g. [6,5,29]\n",
    "        '''The LM head is just a vocab_size-way projection through the tied embedding matrix.'''\n",
    "        logits_lm = self.decoder(predict_masked) + self.decoder_bias\n",
    "\n",
    "        '''Task 2: next-sentence prediction'''\n",
    "        '''NOTE(review): only the first position feeds this head — its input token is\n",
    "        [CLS], but after the encoder the output there is a contextual summary.'''\n",
    "        # output[:, 0] takes the first ([CLS]) position: [6,30,768] -> [6,768]\n",
    "        h_pooled = self.activate_2(self.fc(output[:, 0])) # activate_2: Tanh\n",
    "        # Binary classification head (768 -> 2).\n",
    "        logits_clsf = self.classifier(h_pooled) # [batch_size, 2]\n",
    "        return logits_lm, logits_clsf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''2. Build the model, loss and optimizer'''\n",
    "model = BERT()\n",
    "model.to(device)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "'''With lr=0.001 training quickly gets stuck in a poor local minimum!'''\n",
    "optimizer = optim.AdamW(model.parameters(), lr=0.0001) # AdamW: the Adam variant used in the paper\n",
    "\n",
    "if os.path.exists('model_param.pt'): # idiomatic truth test (was '== True')\n",
    "    # Load previously saved parameters into the freshly built model.\n",
    "    model.load_state_dict(torch.load('model_param.pt', map_location=device))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input_ids tensor([[ 1, 26,  3, 14,  2, 20, 11, 21, 22, 23, 24, 25,  2,  0,  0,  0,  0,  0,\n",
      "          0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0],\n",
      "        [ 1,  4,  3,  6,  7,  8,  9, 10,  2, 26, 27, 14,  2,  0,  0,  0,  0,  0,\n",
      "          0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0],\n",
      "        [ 1, 26, 27,  3,  2,  4,  5,  6,  7,  8,  9, 10,  2,  0,  0,  0,  0,  0,\n",
      "          0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0],\n",
      "        [ 1,  4, 10, 11, 12, 13, 14, 15, 16, 17,  7,  2, 20, 23, 21, 22,  3,  4,\n",
      "         25,  2,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0],\n",
      "        [ 1,  3, 10,  3, 12, 13, 14, 15, 16, 17,  7,  2,  3,  5,  6,  7,  8,  9,\n",
      "         10,  2,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0],\n",
      "        [ 1, 15, 17,  7,  3,  5,  6,  7, 19,  2, 20, 11, 21, 22, 23, 24, 25,  2,\n",
      "          0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0],\n",
      "        [ 1,  4,  5,  6,  7,  8,  9, 10,  2,  4,  3, 11,  3, 13, 14, 15, 16, 17,\n",
      "          7,  2,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0]])\n",
      "segment_ids tensor([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "         0, 0, 0, 0, 0, 0],\n",
      "        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "         0, 0, 0, 0, 0, 0],\n",
      "        [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "         0, 0, 0, 0, 0, 0],\n",
      "        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,\n",
      "         0, 0, 0, 0, 0, 0],\n",
      "        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,\n",
      "         0, 0, 0, 0, 0, 0],\n",
      "        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,\n",
      "         0, 0, 0, 0, 0, 0],\n",
      "        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,\n",
      "         0, 0, 0, 0, 0, 0]])\n",
      "masked_tokens tensor([[27,  0,  0,  0,  0],\n",
      "        [ 5,  0,  0,  0,  0],\n",
      "        [14,  0,  0,  0,  0],\n",
      "        [23, 11, 24,  0,  0],\n",
      "        [ 4,  4, 11,  0,  0],\n",
      "        [18,  7,  0,  0,  0],\n",
      "        [10,  7, 12,  0,  0]])\n",
      "masked_pos tensor([[ 2,  0,  0,  0,  0],\n",
      "        [ 2,  0,  0,  0,  0],\n",
      "        [ 3,  0,  0,  0,  0],\n",
      "        [16, 13, 17,  0,  0],\n",
      "        [ 1, 12,  3,  0,  0],\n",
      "        [ 4,  3,  0,  0,  0],\n",
      "        [10,  4, 12,  0,  0]])\n",
      "isNext tensor([0, 0, 0, 0, 0, 1, 1])\n"
     ]
    }
   ],
   "source": [
    "# Transpose the batch: one LongTensor per field.\n",
    "batch_tensors = [torch.LongTensor(field) for field in zip(*batch)]\n",
    "input_ids, segment_ids, masked_tokens, masked_pos, isNext = batch_tensors\n",
    "for name, tensor in zip(['input_ids', 'segment_ids', 'masked_tokens', 'masked_pos', 'isNext'], batch_tensors):\n",
    "    print(name, tensor)\n",
    "# Move every field to the selected device.\n",
    "input_ids, segment_ids, masked_tokens, masked_pos, isNext = \\\n",
    "    (t.to(device) for t in batch_tensors)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "******************************\n",
      "Train\n",
      "******************************\n",
      "Epoch: 0010 Loss = 9.208880\n",
      "Epoch: 0020 Loss = 4.755416\n",
      "Epoch: 0030 Loss = 2.422399\n",
      "Epoch: 0040 Loss = 1.290673\n",
      "Epoch: 0050 Loss = 0.714696\n",
      "Epoch: 0060 Loss = 0.529201\n",
      "Epoch: 0070 Loss = 0.370062\n",
      "Epoch: 0080 Loss = 0.092607\n",
      "Epoch: 0090 Loss = 1.020612\n",
      "Epoch: 0100 Loss = 0.653910\n",
      "Epoch: 0110 Loss = 0.573958\n",
      "Epoch: 0120 Loss = 0.537888\n",
      "Epoch: 0130 Loss = 0.455684\n",
      "Epoch: 0140 Loss = 0.280406\n",
      "Epoch: 0150 Loss = 0.051323\n",
      "Epoch: 0160 Loss = 0.684589\n",
      "Epoch: 0170 Loss = 0.610207\n",
      "Epoch: 0180 Loss = 0.266591\n",
      "Epoch: 0190 Loss = 0.100869\n",
      "Epoch: 0200 Loss = 0.024835\n",
      "Epoch: 0210 Loss = 0.010223\n",
      "Epoch: 0220 Loss = 0.006277\n",
      "Epoch: 0230 Loss = 0.004083\n"
     ]
    }
   ],
   "source": [
    "'''3. Training'''\n",
    "print('{}\\nTrain\\n{}'.format('*'*30, '*'*30))\n",
    "loss_record = []  # consecutive sub-threshold losses, for early stopping\n",
    "for epoch in range(1000):\n",
    "    optimizer.zero_grad()\n",
    "    logits_lm, logits_clsf = model(input_ids, segment_ids, masked_pos)\n",
    "    # Task 1: masked-LM loss.\n",
    "    # nn.CrossEntropyLoss expects the class dimension second, hence the transpose:\n",
    "    # logits_lm [6,5,29] -> [6,29,5]; masked_tokens: [6,5]\n",
    "    loss_lm = criterion(logits_lm.transpose(1, 2), masked_tokens)\n",
    "    # Task 2: next-sentence classification loss. logits_clsf: [6,2], isNext: [6]\n",
    "    loss_clsf = criterion(logits_clsf, isNext)\n",
    "    loss = loss_lm + loss_clsf\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    loss_val = loss.item()  # plain float once: avoids tensor/float comparisons and tensor formatting\n",
    "    if loss_val >= 0.01: # early-stop once loss < 0.01 for 30 consecutive epochs\n",
    "        loss_record = []\n",
    "    else:\n",
    "        loss_record.append(loss_val)\n",
    "        if len(loss_record) == 30:\n",
    "            torch.save(model.state_dict(), 'model_param.pt')\n",
    "            break\n",
    "\n",
    "    if (epoch + 1) % 10 == 0:\n",
    "        print('Epoch:', '%04d' % (epoch + 1), 'Loss = {:.6f}'.format(loss_val))\n",
    "    if (epoch + 1) % 100 == 0: # periodic checkpoint\n",
    "        torch.save(model.state_dict(), 'model_param.pt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "******************************\n",
      "Test\n",
      "******************************\n",
      "text:\n",
      "Hello, how are you? I am Romeo.\n",
      "Hello, Romeo My name is Juliet. Nice to meet you.\n",
      "Nice meet you too. How are you today?\n",
      "Great. My baseball team won the competition.\n",
      "Oh Congratulations, Juliet\n",
      "Thanks you Romeo\n",
      "******************************\n",
      "input_ids : ['[CLS]', '[MASK]', 'you', 'romeo', '[SEP]', 'oh', 'congratulations', 'juliet', '[SEP]']\n",
      "real masked tokens list : [(28, 'thanks')]\n",
      "predict masked tokens list : [(28, 'thanks')]\n",
      "Real isNext : False\n",
      "predict isNext : False\n",
      "******************************\n",
      "input_ids : ['[CLS]', 'thanks', 'you', 'romeo', '[SEP]', 'hello', '[MASK]', 'my', 'name', 'is', 'juliet', 'nice', 'to', '[MASK]', 'you', '[SEP]']\n",
      "real masked tokens list : [(17, 'meet'), (10, 'romeo')]\n",
      "predict masked tokens list : [(17, 'meet'), (10, 'romeo')]\n",
      "Real isNext : False\n",
      "predict isNext : False\n",
      "******************************\n",
      "input_ids : ['[CLS]', '[MASK]', 'how', 'are', 'you', 'i', 'am', 'romeo', '[SEP]', 'hello', '[MASK]', 'my', 'name', 'is', 'juliet', 'nice', '[MASK]', 'meet', 'you', '[SEP]']\n",
      "real masked tokens list : [(4, 'hello'), (16, 'to'), (10, 'romeo')]\n",
      "predict masked tokens list : [(4, 'hello'), (16, 'to'), (10, 'romeo')]\n",
      "Real isNext : True\n",
      "predict isNext : True\n",
      "******************************\n",
      "input_ids : ['[CLS]', 'oh', 'congratulations', 'juliet', '[SEP]', 'oh', 'congratulations', '[MASK]', '[SEP]']\n",
      "real masked tokens list : [(14, 'juliet')]\n",
      "predict masked tokens list : [(14, 'juliet')]\n",
      "Real isNext : False\n",
      "predict isNext : False\n",
      "******************************\n",
      "input_ids : ['[CLS]', 'nice', 'meet', 'you', 'am', 'how', 'are', 'you', 'today', '[SEP]', 'great', 'my', '[MASK]', 'team', 'won', 'the', 'competition', '[SEP]']\n",
      "real masked tokens list : [(21, 'baseball'), (18, 'too')]\n",
      "predict masked tokens list : [(21, 'baseball'), (18, 'too')]\n",
      "Real isNext : True\n",
      "predict isNext : True\n",
      "******************************\n",
      "input_ids : ['[CLS]', 'thanks', 'you', 'romeo', '[SEP]', 'nice', 'oh', 'you', 'too', 'how', '[MASK]', 'you', 'today', '[SEP]']\n",
      "real masked tokens list : [(6, 'are'), (17, 'meet')]\n",
      "predict masked tokens list : [(6, 'are'), (17, 'meet')]\n",
      "Real isNext : False\n",
      "predict isNext : False\n",
      "******************************\n",
      "input_ids : ['[CLS]', 'hello', 'romeo', '[MASK]', 'name', 'is', 'juliet', 'nice', 'to', 'meet', 'you', '[SEP]', '[MASK]', 'my', 'baseball', 'team', 'won', 'the', 'competition', '[SEP]']\n",
      "real masked tokens list : [(11, 'my'), (7, 'you'), (20, 'great')]\n",
      "predict masked tokens list : [(11, 'my'), (7, 'you'), (20, 'great')]\n",
      "Real isNext : False\n",
      "predict isNext : False\n"
     ]
    }
   ],
   "source": [
    "'''4.测试'''\n",
    "# --- 4. Testing: feed each sentence pair through the trained model and print\n",
    "# the predicted masked tokens / isNext label next to the ground truth. ---\n",
    "print('{}\\nTest\\n{}'.format('*'*30, '*'*30))\n",
    "print('text:\\n%s'%text)\n",
    "for i in range(len(batch)): # iterate over every sentence pair\n",
    "    print('*'*30)\n",
    "    # Predict masked tokens and isNext\n",
    "    # zip(batch[i]) wraps each of the five fields in a 1-tuple, so every\n",
    "    # resulting LongTensor gets a leading batch dimension of size 1\n",
    "    input_ids, segment_ids, masked_tokens, masked_pos, isNext = map(torch.LongTensor, zip(batch[i]))\n",
    "    input_ids, segment_ids, masked_tokens, masked_pos, isNext = \\\n",
    "            input_ids.to(device), segment_ids.to(device), masked_tokens.to(device), masked_pos.to(device), isNext.to(device)\n",
    "\n",
    "    # Show the readable input sequence   # w.item() != 0 skips '[PAD]' tokens\n",
    "    print('input_ids :', [number_dict[w.item()] for w in input_ids[0] if w.item() != 0]) \n",
    "\n",
    "    logits_lm, logits_clsf = model(input_ids, segment_ids, masked_pos)\n",
    "    logits_lm, logits_clsf = logits_lm.to('cpu'), logits_clsf.to('cpu')\n",
    "\n",
    "    # Task 1: masked-token prediction\n",
    "    # argmax over the vocabulary axis (dim 2) -> predicted token id per masked slot\n",
    "    logits_lm = logits_lm.data.max(2)[1][0].data.numpy()\n",
    "    print('real masked tokens list :', \\\n",
    "        [(pos.item(), number_dict[pos.item()]) for pos in masked_tokens[0] if pos.item() != 0]) # skip '[PAD]' fillers\n",
    "    print('predict masked tokens list :', \\\n",
    "        [(pos, number_dict[pos]) for pos in logits_lm if pos != 0]) # skip '[PAD]' predictions\n",
    "\n",
    "    # Task 2: next-sentence classification (argmax over the two classes)\n",
    "    logits_clsf = logits_clsf.data.max(1)[1].data.numpy()[0]\n",
    "    print('Real isNext :', bool(isNext))\n",
    "    print('predict isNext :', bool(logits_clsf))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
