{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "a167bfab",
   "metadata": {},
   "source": [
    "## 读取词表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "4238443f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(            token\n",
       " word             \n",
       " <PAD>           0\n",
       " <SOS>           1\n",
       " <EOS>           2\n",
       " <NUM>           3\n",
       " <UNK>           4\n",
       " ...           ...\n",
       " eastbound   14784\n",
       " clouds      14785\n",
       " repave      14786\n",
       " complained  14787\n",
       " dominate    14788\n",
       " \n",
       " [14789 rows x 1 columns],\n",
       "              word\n",
       " token            \n",
       " 0           <PAD>\n",
       " 1           <SOS>\n",
       " 2           <EOS>\n",
       " 3           <NUM>\n",
       " 4           <UNK>\n",
       " ...           ...\n",
       " 14784   eastbound\n",
       " 14785      clouds\n",
       " 14786      repave\n",
       " 14787  complained\n",
       " 14788    dominate\n",
       " \n",
       " [14789 rows x 1 columns])"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Chapter 14: load the vocabulary\n",
     "import pandas as pd\n",
     "\n",
     "# word -> token id lookup (index is the word)\n",
     "vocab = pd.read_csv('data/msr_paraphrase_vocab.csv', index_col='word')\n",
     "# token id -> word lookup (reverse vocabulary, index is the token id)\n",
     "vocab_r = pd.read_csv('data/msr_paraphrase_vocab.csv', index_col='token')\n",
     "\n",
     "vocab, vocab_r"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ffb14114",
   "metadata": {},
   "source": [
    "## 读取数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "66af0842",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(5801,\n",
       " same                                                        1\n",
       " s1_lens                                                    16\n",
       " s2_lens                                                    17\n",
       " pad_lens                                                   39\n",
       " sent        1,11,12,13,14,15,16,17,18,19,20,21,22,13,23,2,...\n",
       " Name: 0, dtype: object)"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Chapter 14: define the dataset\n",
     "import torch\n",
     "\n",
     "class MsrDataset(torch.utils.data.Dataset):\n",
     "    \"\"\"Wrap the MSR paraphrase CSV as a torch Dataset of rows.\"\"\"\n",
     "\n",
     "    def __init__(self):\n",
     "        # each row carries: same, s1_lens, s2_lens, pad_lens, sent\n",
     "        data = pd.read_csv('data/msr_paraphrase_data.csv')\n",
     "        self.data = data\n",
     "\n",
     "    def __len__(self):\n",
     "        return len(self.data)\n",
     "\n",
     "    def __getitem__(self, i):\n",
     "        # returns the i-th row as a pandas Series\n",
     "        return self.data.iloc[i]\n",
     "\n",
     "dataset = MsrDataset()\n",
     "\n",
     "len(dataset), dataset[0]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "25f0f10e",
   "metadata": {},
   "source": [
    "## 数据处理函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "84198c6a",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/var/folders/0q/2y7xgmvn00b1090yx3d_n1140000gn/T/ipykernel_98896/1260793463.py:20: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  sent = [np.array(i.split(','), dtype=np.int) for i in sent]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(tensor([1, 0]),\n",
       " tensor([[ 1, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 13, 23,  2, 24, 25,\n",
       "          26, 27, 28, 18, 19, 11, 12, 13, 14, 20, 21, 22, 13, 23,  2,  0,  0,  0,\n",
       "           0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,\n",
       "           0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0],\n",
       "         [ 1, 29, 30, 31, 32, 33, 34, 18, 35, 25, 36, 37,  3, 38,  3,  3, 39,  2,\n",
       "          29, 40, 31, 32, 37,  3, 38,  3, 41, 42, 43, 44, 25, 36, 38,  3,  3, 39,\n",
       "          37,  3,  2,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,\n",
       "           0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0]]),\n",
       " tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,\n",
       "          2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
       "          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
       "         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,\n",
       "          2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
       "          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#第14章/定义数据整理函数\n",
    "import numpy as np\n",
    "\n",
    "\n",
    "def collate_fn(data):\n",
    "    #取出数据\n",
    "    same = [i['same'] for i in data]\n",
    "    sent = [i['sent'] for i in data]\n",
    "    s1_lens = [i['s1_lens'] for i in data]\n",
    "    s2_lens = [i['s2_lens'] for i in data]\n",
    "    pad_lens = [i['pad_lens'] for i in data]\n",
    "\n",
    "    seg = []\n",
    "    for i in range(len(sent)):\n",
    "        #seg的形状和sent一样,但是内容不一样\n",
    "        #补PAD的位置是0,s1的位置是1,s2的位置是2\n",
    "        seg.append([1] * s1_lens[i] + [2] * s2_lens[i] + [0] * pad_lens[i])\n",
    "\n",
    "    #sent由字符型转换为list\n",
    "    sent = [np.array(i.split(','), dtype=np.int) for i in sent]\n",
    "\n",
    "    same = torch.LongTensor(same)\n",
    "    sent = torch.LongTensor(sent)\n",
    "    seg = torch.LongTensor(seg)\n",
    "\n",
    "    return same, sent, seg\n",
    "\n",
    "\n",
    "collate_fn([dataset[0], dataset[1]])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7585524e",
   "metadata": {},
   "source": [
    "## 数据加载器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "cc355fc4",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "181"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Chapter 14: define the data loader\n",
     "# drop_last=True keeps every batch a full 32 samples\n",
     "loader = torch.utils.data.DataLoader(dataset=dataset,\n",
     "                                    batch_size=32,\n",
     "                                    shuffle=True,\n",
     "                                    drop_last=True,\n",
     "                                    collate_fn=collate_fn)\n",
     "\n",
     "len(loader)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "804db849",
   "metadata": {},
   "source": [
    "## 查看数据样例"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "45f2de87",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/var/folders/0q/2y7xgmvn00b1090yx3d_n1140000gn/T/ipykernel_98896/1260793463.py:20: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  sent = [np.array(i.split(','), dtype=np.int) for i in sent]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(tensor([0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
       "         1, 1, 0, 1, 1, 0, 1, 0]),\n",
       " torch.Size([32, 72]),\n",
       " torch.Size([32, 72]),\n",
       " tensor([   1,   18, 6362, 2741,   18,  604,   20, 1030,   42, 3651, 1300,  632,\n",
       "          936,  937,   25, 8542,  119,   18, 9609, 1531,  398, 2513, 9610,   42,\n",
       "          237,  398, 1432, 7704,    2,  183,   18, 4100,  374, 2741,   18,  604,\n",
       "           20, 1030,   42, 3651, 1300,   25, 8542,  119,   18, 9611, 1531,  398,\n",
       "         2513, 9610,    2,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "            0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0]),\n",
       " tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
       "         1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n",
       "         2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Chapter 14: inspect one batch of data\n",
     "# grab the first batch, then break out of the loop\n",
     "for i, (same, sent, seg) in enumerate(loader):\n",
     "    break\n",
     "\n",
     "same, sent.shape, seg.shape, sent[0], seg[0]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "59c6404f",
   "metadata": {},
   "source": [
    "## 辅助函数"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bd5f2a34",
   "metadata": {},
   "source": [
    "### 随机替换函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "21ad1849",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([    5,     5,     5, 14455,     5,     5,     5,  3318,     5,     5,\n",
       "            5,     5,     5,     5,     5,     5,     5,     5,     5,     5,\n",
       "         3273, 13763,     5,     5,     5,     5,     5,     5,     5,     5,\n",
       "            5,     5,     5,     5,     5,     5,     5,     5,     5,     5,\n",
       "         4398,     5,     5,     5,     5,     5,  6569,    25,     5,   186,\n",
       "            5,    44,     5,     5,     5,     5,     5,     5,     5,     5,\n",
       "            5,  2416,     5,     5,     5,     5,  1211,     5,     5,     5,\n",
       "            5,     5,     5,     5,     5,     5, 14311,     5,    27,     5,\n",
       "         8277,     5,     5,     5,  9279,   970,     5,     5,     5,     5,\n",
       "            5,     5,     5, 10254, 11122,   740,     5,     5,     5,     5,\n",
       "            5,  1528,     5,     5,     5,     5,     5,     5,     5,     5,\n",
       "            5,     5,     5,    42,     5,    18,     5,     5,     5,     5,\n",
       "            5,     5,     5,     5,    37,     5,     5,     5,     5,     5,\n",
       "            5,  2402,     5,   568,     5,     5,     5,     5,     5,     5,\n",
       "         9695,     5,     5,     5,    25,     5,     5,  8170,     5,     5,\n",
       "            5,     5,    20,     5,     5,     5,     5,     5,  2886,  9809,\n",
       "            5,     5,     5,   474,     5,     5,     5,     5,     5,     5,\n",
       "            5,     5,     5,     5,     5,  7132, 10593,     5,     5,     5,\n",
       "            5,  3641,     5,     5,  4272,  4031,     5,     5,    25,     5])"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Chapter 14: random token replacement for the MLM objective\n",
     "import random\n",
     "\n",
     "\n",
     "def random_replace(sent):\n",
     "    \"\"\"BERT-style masking: pick ~15% of word positions; of those,\n",
     "    replace 80% with <MASK>, keep 10% unchanged and swap 10% for a\n",
     "    random word. Returns (new_sent, replace) where replace marks the\n",
     "    operated-on positions.\n",
     "    \"\"\"\n",
     "    #sent = [b, 72]\n",
     "    #clone so the caller's tensor is left untouched\n",
     "    sent = sent.clone()\n",
     "\n",
     "    #replacement matrix, same shape as sent; True where a position\n",
     "    #was operated on, False elsewhere (starts as all False)\n",
     "    replace = sent == -1\n",
     "\n",
     "    #visit every token\n",
     "    for i in range(len(sent)):\n",
     "        for j in range(len(sent[i])):\n",
     "            #ids <= 10 are special symbols; only real words are touched\n",
     "            if sent[i, j] <= 10:\n",
     "                continue\n",
     "\n",
     "            #operate with probability 0.15\n",
     "            if random.random() > 0.15:\n",
     "                continue\n",
     "\n",
     "            #mark the position as operated on (this also covers the\n",
     "            #keep-unchanged branch below)\n",
     "            replace[i, j] = True\n",
     "\n",
     "            #choose among the sub-operations\n",
     "            p = random.random()\n",
     "\n",
     "            #probability 0.8: replace with the <MASK> token\n",
     "            if p < 0.8:\n",
     "                sent[i, j] = vocab.loc['<MASK>'].token\n",
     "\n",
     "            #probability 0.1: keep the original token\n",
     "            elif p < 0.9:\n",
     "                pass\n",
     "\n",
     "            #probability 0.1: replace with a random word\n",
     "            else:\n",
     "                #draw until we get a non-symbol id (> 10)\n",
     "                rand_word = 0\n",
     "                while rand_word <= 10:\n",
     "                    rand_word = random.randint(0, len(vocab) - 1)\n",
     "                sent[i, j] = rand_word\n",
     "\n",
     "    return sent, replace\n",
     "\n",
     "\n",
     "replace_sent, replace = random_replace(sent)\n",
     "\n",
     "replace_sent[replace]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "20cb6c40",
   "metadata": {},
   "source": [
    "### 获取MASK函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "6d23d512",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(torch.Size([32, 72]),\n",
       " torch.Size([72, 72]),\n",
       " tensor([False, False, False, False, False, False, False, False, False, False,\n",
       "         False, False, False, False, False, False, False, False, False, False,\n",
       "         False, False, False, False, False, False, False, False, False, False,\n",
       "         False, False, False, False, False, False, False, False, False, False,\n",
       "         False, False, False, False, False, False, False, False, False, False,\n",
       "         False,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
       "          True,  True,  True,  True,  True,  True,  True,  True,  True,  True,\n",
       "          True,  True]),\n",
       " tensor([[False, False, False,  ..., False, False, False],\n",
       "         [False, False, False,  ..., False, False, False],\n",
       "         [False, False, False,  ..., False, False, False],\n",
       "         ...,\n",
       "         [False, False, False,  ..., False, False, False],\n",
       "         [False, False, False,  ..., False, False, False],\n",
       "         [False, False, False,  ..., False, False, False]]))"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#第14章/定义获取mask函数\n",
    "def get_mask(seg):\n",
    "    #key_padding_mask的定义方式为句子中是PAD的位置为True，否则是False\n",
    "    key_padding_mask = seg == 0\n",
    "\n",
    "    #在encode阶段不需要定义encode_attn_mask\n",
    "    #定义为None或者全False都可以\n",
    "    encode_attn_mask = torch.ones(72, 72) == -1\n",
    "\n",
    "    return key_padding_mask, encode_attn_mask\n",
    "\n",
    "\n",
    "key_padding_mask, encode_attn_mask = get_mask(seg)\n",
    "\n",
    "key_padding_mask.shape, encode_attn_mask.shape, key_padding_mask[\n",
    "    0], encode_attn_mask"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "767b5b3c",
   "metadata": {},
   "source": [
    "## BERT模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "569f02c5",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(torch.Size([32, 2]), torch.Size([32, 72, 14789]))"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Chapter 14: define the model\n",
     "class BERTModel(torch.nn.Module):\n",
     "    \"\"\"A small BERT: token/segment/position embeddings, a 3-layer\n",
     "    transformer encoder and two heads ('same' classification and\n",
     "    masked-token prediction).\"\"\"\n",
     "\n",
     "    def __init__(self):\n",
     "        super().__init__()\n",
     "\n",
     "        #token embedding layer\n",
     "        self.sent_embed = torch.nn.Embedding(num_embeddings=len(vocab),\n",
     "                                            embedding_dim=256)\n",
     "\n",
     "        #segment embedding layer: 3 ids (0=PAD, 1=sentence1, 2=sentence2)\n",
     "        self.seg_embed = torch.nn.Embedding(num_embeddings=3,\n",
     "                                            embedding_dim=256)\n",
     "\n",
     "        #learned position embedding, one 256-d vector per position;\n",
     "        #divided by 10 to start with small values\n",
     "        self.position_embed = torch.nn.Parameter(torch.randn(72, 256) / 10)\n",
     "\n",
     "        #configuration of a single encoder layer\n",
     "        encoder_layer = torch.nn.TransformerEncoderLayer(d_model=256,\n",
     "                                                        nhead=4,\n",
     "                                                        dim_feedforward=256,\n",
     "                                                        dropout=0.2,\n",
     "                                                        activation='relu',\n",
     "                                                        batch_first=True,\n",
     "                                                        norm_first=True)\n",
     "\n",
     "        #final layer normalization applied by the encoder\n",
     "        norm = torch.nn.LayerNorm(normalized_shape=256,\n",
     "                                    elementwise_affine=True)\n",
     "\n",
     "        #stack 3 encoder layers into the encoder\n",
     "        self.encoder = torch.nn.TransformerEncoder(encoder_layer=encoder_layer,\n",
     "                                                    num_layers=3,\n",
     "                                                    norm=norm)\n",
     "\n",
     "        #output head for the 'same' (paraphrase) classification\n",
     "        self.fc_same = torch.nn.Linear(in_features=256, out_features=2)\n",
     "\n",
     "        #output head predicting the original token at each position\n",
     "        self.fc_sent = torch.nn.Linear(in_features=256,\n",
     "                                        out_features=len(vocab))\n",
     "\n",
     "    def forward(self, sent, seg):\n",
     "        #sent -> [b, 72]\n",
     "        #seg -> [b, 72]\n",
     "\n",
     "        #build the masks\n",
     "        #[b, 72] -> [b, 72],[72, 72]\n",
     "        key_padding_mask, encode_attn_mask = get_mask(seg)\n",
     "\n",
     "        #embed tokens and segments, add position information\n",
     "        #[b, 72] -> [b, 72, 256]\n",
     "        embed = self.sent_embed(sent) + self.seg_embed(\n",
     "            seg) + self.position_embed\n",
     "\n",
     "        #run the encoder\n",
     "        #[b, 72, 256] -> [b, 72, 256]\n",
     "        memory = self.encoder(src=embed,\n",
     "                                mask=encode_attn_mask,\n",
     "                                src_key_padding_mask=key_padding_mask)\n",
     "\n",
     "        #compute outputs; 'same' is predicted from position 0 (<SOS>)\n",
     "        #[b, 256] -> [b, 2]\n",
     "        same = self.fc_same(memory[:, 0])\n",
     "        #[b, 72, 256] -> [b, 72, V]\n",
     "        sent = self.fc_sent(memory)\n",
     "\n",
     "        return same, sent\n",
     "\n",
     "\n",
     "model = BERTModel()\n",
     "\n",
     "pred_same, pred_sent = model(sent, seg)\n",
     "\n",
     "pred_same.shape, pred_sent.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cddc031d",
   "metadata": {},
   "source": [
    "## 训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "2b205cfe",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/var/folders/0q/2y7xgmvn00b1090yx3d_n1140000gn/T/ipykernel_98896/1260793463.py:20: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  sent = [np.array(i.split(','), dtype=np.int) for i in sent]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 180 7.569920063018799 0.71875 0.04597701149425287\n",
      "5 180 7.676056385040283 0.78125 0.05945945945945946\n"
     ]
    }
   ],
   "source": [
     "# Chapter 14: training\n",
     "def train():\n",
     "    \"\"\"Train for 10 epochs on the joint 'same' + masked-token loss.\"\"\"\n",
     "    loss_func = torch.nn.CrossEntropyLoss()\n",
     "    optim = torch.optim.Adam(model.parameters(), lr=1e-4)\n",
     "    for epoch in range(10):\n",
     "        for i, (same, sent, seg) in enumerate(loader):\n",
     "            #same = [b]\n",
     "            #sent = [b, 72]\n",
     "            #seg = [b, 72]\n",
     "\n",
     "            #randomly replace some tokens; replace marks the operated\n",
     "            #positions (including the kept-unchanged ones)\n",
     "            #replace_sent = [b, 72]\n",
     "            #replace = [b, 72]\n",
     "            replace_sent, replace = random_replace(sent)\n",
     "\n",
     "            #model forward pass\n",
     "            #[b, 72],[b, 72] -> [b, 2],[b, 72, V]\n",
     "            pred_same, pred_sent = model(replace_sent, seg)\n",
     "\n",
     "            #pred_sent = pred_sent.flatten(end_dim=1)\n",
     "            #sent = sent.flatten()\n",
     "\n",
     "            #keep only the predictions at the operated positions\n",
     "            #[b, 72, V] -> [replace, V]\n",
     "            pred_sent = pred_sent[replace]\n",
     "\n",
     "            #the original (pre-replacement) tokens at those positions\n",
     "            #[b, 72] -> [replace]\n",
     "            sent = sent[replace]\n",
     "\n",
     "            #two losses, combined with a weighted sum\n",
     "            loss_same = loss_func(pred_same, same)\n",
     "            loss_sent = loss_func(pred_sent, sent)\n",
     "            loss = loss_same * 0.1 + loss_sent\n",
     "\n",
     "            loss.backward()\n",
     "            optim.step()\n",
     "            optim.zero_grad()\n",
     "\n",
     "        if epoch % 5 == 0:\n",
     "            #accuracy of the 'same' prediction (last batch only)\n",
     "            pred_same = pred_same.argmax(dim=1)\n",
     "            acc_same = (same == pred_same).sum().item() / len(same)\n",
     "\n",
     "            #accuracy of the replaced-token prediction (last batch only)\n",
     "            pred_sent = pred_sent.argmax(dim=1)\n",
     "            acc_sent = (sent == pred_sent).sum().item() / len(sent)\n",
     "\n",
     "            print(epoch, i, loss.item(), acc_same, acc_sent)\n",
     "\n",
     "\n",
     "train()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "22e32e81",
   "metadata": {},
   "source": [
    "## 工具函数"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "87597bb8",
   "metadata": {},
   "source": [
    "### tensor转字符串函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "46c86c75",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'<SOS> the provision requires the secretary of health and human services news web sites to certify that the importations can be done safely and will be cost effective <EOS> but the measure also requires the secretary of health and human services to certify that the reimportation can be done safely <EOS>'"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Chapter 14: utility — convert a token-id tensor to a string\n",
     "def tensor_to_str(tensor):\n",
     "    \"\"\"Map a 1-D tensor of token ids to a space-joined word string,\n",
     "    dropping PAD tokens.\"\"\"\n",
     "    #to a plain Python list\n",
     "    tensor = tensor.tolist()\n",
     "    #drop PAD ids\n",
     "    tensor = [i for i in tensor if i != vocab.loc['<PAD>'].token]\n",
     "    #ids -> words via the reverse vocabulary\n",
     "    tensor = [vocab_r.loc[i].word for i in tensor]\n",
     "    #join into a single string\n",
     "    return ' '.join(tensor)\n",
     "\n",
     "tensor_to_str(sent[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f9166045",
   "metadata": {},
   "source": [
    "### 打印预测结果函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "2b589fc3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "same= 0 pred_same= 1\n",
      "\n",
      "replace_sent= <SOS> the provision requires the secretary <MASK> health and human <MASK> news web sites to certify that the <MASK> can be done refinery and will <MASK> cost effective <EOS> but <MASK> measure also requires the secretary of health and <MASK> services to certify that the percentage can be done safely <EOS>\n",
      "\n",
      "sent= of services importations safely be the human reimportation\n",
      "\n",
      "pred_sent= <Symbol7> referring percent billion set up jumped in\n",
      "\n",
      "-------------------------------------\n"
     ]
    }
   ],
   "source": [
     "# Chapter 14: utility — print prediction results\n",
     "def print_predict(same, pred_same, replace_sent, sent, pred_sent, replace):\n",
     "    \"\"\"Print, for the first sample of the batch, the 'same' label vs.\n",
     "    its prediction and the replaced tokens vs. their predictions.\"\"\"\n",
     "    #show the 'same' prediction for sample 0\n",
     "    same = same[0].item()\n",
     "    pred_same = pred_same.argmax(dim=1)[0].item()\n",
     "    print('same=', same, 'pred_same=', pred_same)\n",
     "    print()\n",
     "\n",
     "    #show the replaced-word predictions for sample 0\n",
     "    replace_sent = tensor_to_str(replace_sent[0])\n",
     "    sent = tensor_to_str(sent[0][replace[0]])\n",
     "    pred_sent = tensor_to_str(pred_sent.argmax(dim=2)[0][replace[0]])\n",
     "    print('replace_sent=', replace_sent)\n",
     "    print()\n",
     "    print('sent=', sent)\n",
     "    print()\n",
     "    print('pred_sent=', pred_sent)\n",
     "    print()\n",
     "    print('-------------------------------------')\n",
     "\n",
     "\n",
     "print_predict(same, torch.randn(32, 2), replace_sent, sent,\n",
     "              torch.randn(32, 72, 100), replace)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3bdfaf4b",
   "metadata": {},
   "source": [
    "## 测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "1ff8b9ba",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/var/folders/0q/2y7xgmvn00b1090yx3d_n1140000gn/T/ipykernel_98896/1260793463.py:20: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  sent = [np.array(i.split(','), dtype=np.int) for i in sent]\n",
      "/Users/macbook/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/transformer.py:562: UserWarning: Converting mask without torch.bool dtype to bool; this will negatively affect performance. Prefer to use a boolean mask directly. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/transformers/attention.cpp:152.)\n",
      "  return torch._transformer_encoder_layer_fwd(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "same= 0 pred_same= 1\n",
      "\n",
      "replace_sent= <SOS> overall control will be repugnant by a national security council headed by mr arafat <EOS> the other six security agencies <MASK> <MASK> to a national security council headed by arafat <EOS>\n",
      "\n",
      "sent= wielded will report\n",
      "\n",
      "pred_sent= the a a\n",
      "\n",
      "-------------------------------------\n",
      "same= 1 pred_same= 1\n",
      "\n",
      "replace_sent= <SOS> microsoft is preparing to alter its internet <MASK> <MASK> following a patent <MASK> that went against the company microsoft said friday <EOS> microsoft <MASK> <MASK> preparing <MASK> to its internet explorer ie browser <MASK> of <MASK> patent verdict against it the company said <MASK> <EOS>\n",
      "\n",
      "sent= explorer browser verdict corp is changes internet because a friday\n",
      "\n",
      "pred_sent= the the the the the the the the the the\n",
      "\n",
      "-------------------------------------\n",
      "same= 0 pred_same= 0\n",
      "\n",
      "replace_sent= <SOS> harry potter <MASK> <MASK> order of the phoenix by j k rowling <MASK> <NUM> pages <NUM> <NUM> <EOS> in the order of the phoenix harry is <NUM> and fully a teenager at last <EOS>\n",
      "\n",
      "sent= and the scholastic\n",
      "\n",
      "pred_sent= the the the\n",
      "\n",
      "-------------------------------------\n",
      "same= 0 pred_same= 1\n",
      "\n",
      "replace_sent= <SOS> the <MASK> estimates california lost <NUM> <NUM> billion the most of any state to tax shelters in <NUM> <EOS> the commission estimated california lost <NUM> million to corporate tax shelters in <NUM> <EOS>\n",
      "\n",
      "sent= commission\n",
      "\n",
      "pred_sent= the\n",
      "\n",
      "-------------------------------------\n",
      "same= 0 pred_same= 0\n",
      "\n",
      "replace_sent= <SOS> the <MASK> jones industrial average dji jumped <NUM> <NUM> percent while the standard <MASK> s <NUM> index spx leapt <NUM> <NUM> percent <EOS> the broad standard poor s <NUM> index spx gained <NUM> <NUM> points or <NUM> <NUM> percent at <NUM> <NUM> <NUM> <EOS>\n",
      "\n",
      "sent= dow poor\n",
      "\n",
      "pred_sent= percent percent\n",
      "\n",
      "-------------------------------------\n",
      "0.7125\n",
      "0.08239277652370203\n"
     ]
    }
   ],
   "source": [
     "# Chapter 14: evaluation\n",
     "def test():\n",
     "    \"\"\"Evaluate 'same' and replaced-token accuracy on 5 batches.\"\"\"\n",
     "    model.eval()\n",
     "    correct_same = 0\n",
     "    total_same = 0\n",
     "    correct_sent = 0\n",
     "    total_sent = 0\n",
     "    for i, (same, sent, seg) in enumerate(loader):\n",
     "        #evaluate on 5 batches only\n",
     "        if i == 5:\n",
     "            break\n",
     "        #same = [b]\n",
     "        #sent = [b, 72]\n",
     "        #seg = [b, 72]\n",
     "\n",
     "        #randomly replace some tokens; replace marks the operated\n",
     "        #positions (including the kept-unchanged ones)\n",
     "        #replace_sent = [b, 72]\n",
     "        #replace = [b, 72]\n",
     "        replace_sent, replace = random_replace(sent)\n",
     "\n",
     "        #model forward pass, no gradients needed at eval time\n",
     "        #[b, 72],[b, 72] -> [b, 2],[b, 72, V]\n",
     "        with torch.no_grad():\n",
     "            pred_same, pred_sent = model(replace_sent, seg)\n",
     "\n",
     "        #print the predictions for inspection\n",
     "        print_predict(same, pred_same, replace_sent, sent, pred_sent, replace)\n",
     "\n",
     "        #keep only the predictions at the operated positions\n",
     "        #[b, 72, V] -> [replace, V]\n",
     "        pred_sent = pred_sent[replace]\n",
     "\n",
     "        #the original (pre-replacement) tokens at those positions\n",
     "        #[b, 72] -> [replace]\n",
     "        sent = sent[replace]\n",
     "\n",
     "        #accumulate 'same' prediction accuracy\n",
     "        pred_same = pred_same.argmax(dim=1)\n",
     "        correct_same += (same == pred_same).sum().item()\n",
     "        total_same += len(same)\n",
     "\n",
     "        #accumulate replaced-token prediction accuracy\n",
     "        pred_sent = pred_sent.argmax(dim=1)\n",
     "        correct_sent += (sent == pred_sent).sum().item()\n",
     "        total_sent += len(sent)\n",
     "\n",
     "    print(correct_same / total_same)\n",
     "    print(correct_sent / total_sent)\n",
     "\n",
     "\n",
     "test()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
