{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "9d9b9065",
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "import torchtext\n",
    "\n",
    "from torchtext.data.utils import get_tokenizer\n",
    "\n",
    "from pyitcast.transformer import TransformerModel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "aff9b261",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "downloading wikitext-2-v1.zip\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "H:\\Workplace\\Learn\\NLP_01\\.data\\wikitext-2\\wikitext-2-v1.zip: 100%|████████████████| 4.48M/4.48M [00:05<00:00, 861kB/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "extracting\n"
     ]
    }
   ],
   "source": [
    "# Define the text field: basic_english tokenizer, <sos>/<eos> markers, lowercased tokens.\n",
    "TEXT = torchtext.legacy.data.Field(tokenize=get_tokenizer('basic_english'), init_token='<sos>', eos_token='<eos>', lower=True)\n",
    "\n",
    "# Download (if needed) and split WikiText2 into train / validation / test datasets.\n",
    "train_text, val_text, test_text = torchtext.legacy.datasets.WikiText2.splits(TEXT)\n",
    "\n",
    "# Build the vocabulary from the training split only.\n",
    "TEXT.build_vocab(train_text)\n",
    "\n",
    "# Run on GPU when available, otherwise fall back to CPU.\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "7f85045d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['<eos>',\n",
       " '=',\n",
       " 'robert',\n",
       " '<unk>',\n",
       " '=',\n",
       " '<eos>',\n",
       " '<eos>',\n",
       " 'robert',\n",
       " '<unk>',\n",
       " 'is']"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: inspect the first ten tokens of the test split.\n",
    "test_text.examples[0].text[:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "87a120ff",
   "metadata": {},
   "outputs": [],
   "source": [
    "def batchify(data, bsz):\n",
    "    \"\"\"Map a torchtext dataset onto vocabulary indices and arrange it in bsz columns.\n",
    "\n",
    "    Args:\n",
    "        data: one of the WikiText2 splits (train_text / val_text / test_text).\n",
    "        bsz:  batch size - the number of parallel token streams (columns).\n",
    "\n",
    "    Returns:\n",
    "        LongTensor of shape [nbatch, bsz] placed on the global `device`.\n",
    "    \"\"\"\n",
    "    # Replace every token with its integer index in TEXT's vocabulary;\n",
    "    # the result is a column vector, e.g.\n",
    "    # tensor([[   3],\n",
    "    #         [  12],\n",
    "    #         [3852],\n",
    "    #         ...,\n",
    "    #         [   3]])\n",
    "    data = TEXT.numericalize([data.examples[0].text])\n",
    "\n",
    "    # Number of complete rows of bsz tokens that fit into the corpus.\n",
    "    nbatch = data.size(0) // bsz\n",
    "\n",
    "    # narrow(dim, start, length) keeps a slice along dim (0 = rows here),\n",
    "    # similar to slicing; this drops the trailing tokens that cannot fill\n",
    "    # a complete group of bsz.\n",
    "    data = data.narrow(0, 0, nbatch * bsz)\n",
    "\n",
    "    # Reshape into bsz rows and transpose, giving a [nbatch, bsz] matrix\n",
    "    # (e.g. [104335, 20] for the training split - check data.shape), i.e.\n",
    "    # tensor([[    3,    25,  1849,  ...,     5,    65,    30],\n",
    "    #         [   12,    66,    13,  ...,    35,  2438,  4064],\n",
    "    #         ...,\n",
    "    #         [    6,    57,   385,  ...,  3168,   737,    36]])\n",
    "    # so the number of columns equals bsz.\n",
    "    data = data.view(bsz, -1).t().contiguous()\n",
    "    # Finally move the tensor to the configured device.\n",
    "    return data.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "7704eb91",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Batch size used for training\n",
    "batch_size = 20\n",
    "\n",
    "# Batch size used for evaluation (validation and test)\n",
    "eval_batch_size = 10\n",
    "\n",
    "# Build the batched index tensors for each split\n",
    "train_data = batchify(train_text, batch_size)\n",
    "val_data = batchify(val_text, eval_batch_size)\n",
    "test_data = batchify(test_text, eval_batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "553d2541",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Maximum subsequence length per batch\n",
    "bptt = 35\n",
    "\n",
    "def get_batch(source, i):\n",
    "    \"\"\"Slice one (input, target) pair out of a batchified tensor.\n",
    "\n",
    "    source: tensor produced by batchify (train_data / val_data / test_data).\n",
    "    i:      starting row of the slice.\n",
    "\n",
    "    Returns (data, target) where target is the input shifted one token\n",
    "    ahead, flattened to 1-D for the cross-entropy loss.\n",
    "    \"\"\"\n",
    "    # Every full batch spans bptt rows; the final batch may be shorter,\n",
    "    # so the slice is capped at the last usable row.\n",
    "    end = i + min(bptt, len(source) - 1 - i)\n",
    "    # Input rows [i, end) and their one-step-ahead labels [i+1, end+1).\n",
    "    inputs = source[i:end]\n",
    "    labels = source[i + 1:end + 1].view(-1)\n",
    "    return inputs, labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "b22596de",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Example arguments: the test split as source and batch offset i = 1\n",
    "source = test_data\n",
    "i = 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "c9937569",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Vocabulary size: number of distinct tokens, via the string-to-index map\n",
    "ntokens = len(TEXT.vocab.stoi)\n",
    "\n",
    "# Word-embedding dimension\n",
    "emsize = 200\n",
    "\n",
    "# Hidden size of the feed-forward sublayers\n",
    "nhid = 200\n",
    "\n",
    "# Number of encoder layers\n",
    "nlayers = 2\n",
    "\n",
    "# Number of attention heads\n",
    "nhead = 2\n",
    "\n",
    "# Dropout probability\n",
    "dropout = 0.2\n",
    "\n",
    "# Build the Transformer language model and move it to the target device\n",
    "model = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout).to(device)\n",
    "\n",
    "# With the model in place, choose the loss function and the optimizer.\n",
    "\n",
    "# Token-level cross-entropy loss\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "\n",
    "# Initial learning rate\n",
    "lr = 5.0\n",
    "\n",
    "# Plain SGD optimizer over all model parameters\n",
    "optimizer = torch.optim.SGD(model.parameters(), lr=lr)\n",
    "\n",
    "# Step scheduler: multiply the learning rate by 0.95 after every epoch\n",
    "scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "b3384e11",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standard-library timer for throughput logging\n",
    "import time\n",
    "\n",
    "# Number of batches between two log lines (loop-invariant, so defined once)\n",
    "log_interval = 200\n",
    "\n",
    "def train():\n",
    "    \"\"\"Run one epoch of training over train_data, logging loss/ppl periodically.\"\"\"\n",
    "    # Switch the model to training mode (enables dropout).\n",
    "    model.train()\n",
    "    # Running loss over the current logging interval.\n",
    "    total_loss = 0.\n",
    "    start_time = time.time()\n",
    "    # Walk over the batched data in steps of bptt rows.\n",
    "    for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):\n",
    "        data, targets = get_batch(train_data, i)\n",
    "        # Clear gradients accumulated by the previous step.\n",
    "        optimizer.zero_grad()\n",
    "        output = model(data)\n",
    "        # Flatten the logits to [tokens, vocab] to match the flat targets.\n",
    "        loss = criterion(output.view(-1, ntokens), targets)\n",
    "        loss.backward()\n",
    "        # Clip gradients to stabilise SGD with a large learning rate.\n",
    "        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)\n",
    "        optimizer.step()\n",
    "        total_loss += loss.item()\n",
    "        if batch % log_interval == 0 and batch > 0:\n",
    "            # Average loss over the interval; perplexity is exp(avg loss),\n",
    "            # the standard quality metric for language models.\n",
    "            cur_loss = total_loss / log_interval\n",
    "            elapsed = time.time() - start_time\n",
    "            # get_last_lr() replaces the deprecated get_lr(), which emitted\n",
    "            # a UserWarning and may report a transient value mid-schedule.\n",
    "            print('| epoch {:3d} | {:5d}/{:5d} batches | '\n",
    "                  'lr {:02.2f} | ms/batch {:5.2f} | '\n",
    "                  'loss {:5.2f} | ppl {:8.2f}'.format(\n",
    "                    epoch, batch, len(train_data) // bptt, scheduler.get_last_lr()[0],\n",
    "                    elapsed * 1000 / log_interval,\n",
    "                    cur_loss, math.exp(cur_loss)))\n",
    "\n",
    "            # Reset the running loss and the timer for the next interval.\n",
    "            total_loss = 0\n",
    "            start_time = time.time()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "4b85c43c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluate(eval_model, data_source):\n",
    "    \"\"\"Compute the average per-token cross-entropy of eval_model on data_source.\n",
    "\n",
    "    eval_model:  the model to score (e.g. the snapshot from the current epoch).\n",
    "    data_source: a batchified tensor (val_data or test_data).\n",
    "    Returns the length-weighted mean loss over all batches.\n",
    "    \"\"\"\n",
    "    # Evaluation mode disables dropout.\n",
    "    eval_model.eval()\n",
    "    total_loss = 0.\n",
    "    # No parameters change during scoring, so gradients are not needed;\n",
    "    # skipping them saves time and memory.\n",
    "    with torch.no_grad():\n",
    "        # Same traversal as training, without logging.\n",
    "        for i in range(0, data_source.size(0) - 1, bptt):\n",
    "            data, targets = get_batch(data_source, i)\n",
    "            output = eval_model(data)\n",
    "            # Flatten logits to [tokens, vocab] for the loss.\n",
    "            output_flat = output.view(-1, ntokens)\n",
    "            # Weight each batch's mean loss by its length so the shorter\n",
    "            # final batch does not distort the average (the previous code\n",
    "            # divided a sum of per-batch means by an approximate batch count,\n",
    "            # and would raise NameError on an empty data_source).\n",
    "            total_loss += len(data) * criterion(output_flat, targets).item()\n",
    "\n",
    "    # Average over all scored rows.\n",
    "    return total_loss / (len(data_source) - 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "f595c2e2",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "G:\\miniconda3\\envs\\torchX\\lib\\site-packages\\torch\\optim\\lr_scheduler.py:369: UserWarning: To get the last learning rate computed by the scheduler, please use `get_last_lr()`.\n",
      "  warnings.warn(\"To get the last learning rate computed by the scheduler, \"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "| epoch   1 |   200/ 2981 batches | lr 5.00 | ms/batch 19.36 | loss  8.04 | ppl  3116.44\n",
      "| epoch   1 |   400/ 2981 batches | lr 5.00 | ms/batch 16.45 | loss  6.82 | ppl   915.35\n",
      "| epoch   1 |   600/ 2981 batches | lr 5.00 | ms/batch 16.44 | loss  6.38 | ppl   592.04\n",
      "| epoch   1 |   800/ 2981 batches | lr 5.00 | ms/batch 16.52 | loss  6.24 | ppl   510.65\n",
      "| epoch   1 |  1000/ 2981 batches | lr 5.00 | ms/batch 16.47 | loss  6.12 | ppl   456.68\n",
      "| epoch   1 |  1200/ 2981 batches | lr 5.00 | ms/batch 16.73 | loss  6.09 | ppl   443.08\n",
      "| epoch   1 |  1400/ 2981 batches | lr 5.00 | ms/batch 16.37 | loss  6.05 | ppl   422.57\n",
      "| epoch   1 |  1600/ 2981 batches | lr 5.00 | ms/batch 16.39 | loss  6.05 | ppl   425.59\n",
      "| epoch   1 |  1800/ 2981 batches | lr 5.00 | ms/batch 16.49 | loss  5.96 | ppl   386.75\n",
      "| epoch   1 |  2000/ 2981 batches | lr 5.00 | ms/batch 16.52 | loss  5.96 | ppl   389.17\n",
      "| epoch   1 |  2200/ 2981 batches | lr 5.00 | ms/batch 16.56 | loss  5.84 | ppl   345.09\n",
      "| epoch   1 |  2400/ 2981 batches | lr 5.00 | ms/batch 16.75 | loss  5.89 | ppl   361.74\n",
      "| epoch   1 |  2600/ 2981 batches | lr 5.00 | ms/batch 16.60 | loss  5.91 | ppl   368.06\n",
      "| epoch   1 |  2800/ 2981 batches | lr 5.00 | ms/batch 16.65 | loss  5.81 | ppl   332.95\n",
      "-----------------------------------------------------------------------------------------\n",
      "| end of epoch   1 | time: 52.01s | valid loss  5.74 | valid ppl   311.18\n",
      "-----------------------------------------------------------------------------------------\n",
      "| epoch   2 |   200/ 2981 batches | lr 4.51 | ms/batch 16.81 | loss  5.80 | ppl   330.48\n",
      "| epoch   2 |   400/ 2981 batches | lr 4.51 | ms/batch 16.74 | loss  5.78 | ppl   324.05\n",
      "| epoch   2 |   600/ 2981 batches | lr 4.51 | ms/batch 16.70 | loss  5.60 | ppl   270.97\n",
      "| epoch   2 |   800/ 2981 batches | lr 4.51 | ms/batch 16.71 | loss  5.64 | ppl   281.66\n",
      "| epoch   2 |  1000/ 2981 batches | lr 4.51 | ms/batch 16.70 | loss  5.59 | ppl   267.31\n",
      "| epoch   2 |  1200/ 2981 batches | lr 4.51 | ms/batch 16.75 | loss  5.61 | ppl   273.69\n",
      "| epoch   2 |  1400/ 2981 batches | lr 4.51 | ms/batch 16.74 | loss  5.63 | ppl   277.96\n",
      "| epoch   2 |  1600/ 2981 batches | lr 4.51 | ms/batch 16.75 | loss  5.66 | ppl   287.74\n",
      "| epoch   2 |  1800/ 2981 batches | lr 4.51 | ms/batch 16.76 | loss  5.58 | ppl   265.44\n",
      "| epoch   2 |  2000/ 2981 batches | lr 4.51 | ms/batch 16.81 | loss  5.62 | ppl   275.55\n",
      "| epoch   2 |  2200/ 2981 batches | lr 4.51 | ms/batch 16.80 | loss  5.51 | ppl   246.31\n",
      "| epoch   2 |  2400/ 2981 batches | lr 4.51 | ms/batch 16.77 | loss  5.58 | ppl   264.26\n",
      "| epoch   2 |  2600/ 2981 batches | lr 4.51 | ms/batch 16.79 | loss  5.59 | ppl   266.53\n",
      "| epoch   2 |  2800/ 2981 batches | lr 4.51 | ms/batch 16.81 | loss  5.52 | ppl   248.74\n",
      "-----------------------------------------------------------------------------------------\n",
      "| end of epoch   2 | time: 52.12s | valid loss  5.57 | valid ppl   263.37\n",
      "-----------------------------------------------------------------------------------------\n",
      "| epoch   3 |   200/ 2981 batches | lr 4.29 | ms/batch 17.30 | loss  5.54 | ppl   254.93\n",
      "| epoch   3 |   400/ 2981 batches | lr 4.29 | ms/batch 17.47 | loss  5.55 | ppl   258.10\n",
      "| epoch   3 |   600/ 2981 batches | lr 4.29 | ms/batch 17.58 | loss  5.37 | ppl   214.54\n",
      "| epoch   3 |   800/ 2981 batches | lr 4.29 | ms/batch 17.40 | loss  5.42 | ppl   226.19\n",
      "| epoch   3 |  1000/ 2981 batches | lr 4.29 | ms/batch 17.26 | loss  5.38 | ppl   216.06\n",
      "| epoch   3 |  1200/ 2981 batches | lr 4.29 | ms/batch 17.24 | loss  5.42 | ppl   225.38\n",
      "| epoch   3 |  1400/ 2981 batches | lr 4.29 | ms/batch 17.19 | loss  5.44 | ppl   229.42\n",
      "| epoch   3 |  1600/ 2981 batches | lr 4.29 | ms/batch 17.16 | loss  5.48 | ppl   238.95\n",
      "| epoch   3 |  1800/ 2981 batches | lr 4.29 | ms/batch 17.12 | loss  5.40 | ppl   222.47\n",
      "| epoch   3 |  2000/ 2981 batches | lr 4.29 | ms/batch 17.18 | loss  5.44 | ppl   229.31\n",
      "| epoch   3 |  2200/ 2981 batches | lr 4.29 | ms/batch 17.20 | loss  5.32 | ppl   204.10\n",
      "| epoch   3 |  2400/ 2981 batches | lr 4.29 | ms/batch 17.23 | loss  5.39 | ppl   219.72\n",
      "| epoch   3 |  2600/ 2981 batches | lr 4.29 | ms/batch 17.21 | loss  5.41 | ppl   224.49\n",
      "| epoch   3 |  2800/ 2981 batches | lr 4.29 | ms/batch 17.67 | loss  5.34 | ppl   207.51\n",
      "-----------------------------------------------------------------------------------------\n",
      "| end of epoch   3 | time: 53.78s | valid loss  5.52 | valid ppl   250.71\n",
      "-----------------------------------------------------------------------------------------\n"
     ]
    }
   ],
   "source": [
    "# Standard-library deep copy, used to snapshot the best model\n",
    "import copy\n",
    "# Best validation loss seen so far; start at infinity\n",
    "best_val_loss = float(\"inf\")\n",
    "\n",
    "# Number of training epochs\n",
    "epochs = 3\n",
    "\n",
    "# Best model snapshot, filled in once validation improves\n",
    "best_model = None\n",
    "\n",
    "# Train for the configured number of epochs\n",
    "for epoch in range(1, epochs + 1):\n",
    "    # Wall-clock start of this epoch, for the log line below\n",
    "    epoch_start_time = time.time()\n",
    "    # One full pass over the training data (updates model parameters).\n",
    "    train()\n",
    "    # Score the updated model on the validation split.\n",
    "    val_loss = evaluate(model, val_data)\n",
    "    # Log epoch number, wall time, validation loss and perplexity.\n",
    "    print('-' * 89)\n",
    "    print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '\n",
    "          'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),\n",
    "                                     val_loss, math.exp(val_loss)))\n",
    "    print('-' * 89)\n",
    "    # Track the best validation loss and keep a deep copy of the model\n",
    "    # that achieved it.\n",
    "    if val_loss < best_val_loss:\n",
    "        best_val_loss = val_loss\n",
    "        best_model = copy.deepcopy(model)\n",
    "    # Decay the learning rate once per epoch (StepLR, gamma=0.95).\n",
    "    scheduler.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "0d09b7ee",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=========================================================================================\n",
      "| End of training | test loss  5.43 | test ppl   228.82\n",
      "=========================================================================================\n"
     ]
    }
   ],
   "source": [
    "# Score the best snapshot (by validation loss) on the held-out test split.\n",
    "test_loss = evaluate(best_model, test_data)\n",
    "\n",
    "# Report test loss and perplexity.\n",
    "print('=' * 89)\n",
    "print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(\n",
    "    test_loss, math.exp(test_loss)))\n",
    "print('=' * 89)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "28dc922a",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torchX",
   "language": "python",
   "name": "torchx"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
