{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "此模型原型来自序列模型[Convolutional Sequence to Sequence Learning](https://arxiv.org/pdf/1705.03122.pdf)中的encoder部分。\n",
    "\n",
    "原模型是用于机器翻译，这里我将稍加修改用来做问答中的slot filling和intent detection联合建模。\n",
    "\n",
     "我在原模型的基础上进行了改进，整体结构如下图所示："
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![model](img/model5.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    本项目在此基础上进行了改进，改进点如下：\n",
    "        1.加入了多个size的卷积，获取更多的特征，最后将这多个size的卷积进行连接。\n",
    "        2.在embedding层后使用了一个多头注意力self-attention。\n",
    "        3.最后将卷积后的特征和self-attention后的特征进行连接。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from torchtext import data, datasets\n",
    "import pandas as pd\n",
    "import pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Project root: the parent directory of the notebook's working directory.\n",
     "base_dir = os.path.abspath(os.path.join(os.getcwd(), \"..\"))\n",
     "# Directory that holds the ATIS csv files (atis.train.csv / atis.test.csv).\n",
     "atis_data = os.path.join(base_dir, 'atis')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "'''\n",
     "Build the train and validation datasets and iterators with torchtext.\n",
     "'''\n",
     "    \n",
     "tokenize = lambda s:s.split()\n",
     "\n",
     "SOURCE = data.Field(sequential=True, tokenize=tokenize,\n",
     "                    lower=True, use_vocab=True,\n",
     "                    init_token='<sos>', eos_token='<eos>',\n",
     "                    pad_token='<pad>', unk_token='<unk>',\n",
     "                    batch_first=True, fix_length=50,\n",
     "                    include_lengths=True) # include_lengths=True eases later use of torch's pack_padded_sequence\n",
     "\n",
     "TARGET = data.Field(sequential=True, tokenize=tokenize,\n",
     "                    lower=True, use_vocab=True,\n",
     "                    init_token='<sos>', eos_token='<eos>',\n",
     "                    pad_token='<pad>', unk_token='<unk>',\n",
     "                    batch_first=True, fix_length=50,\n",
     "                    include_lengths=True) # include_lengths=True eases later use of torch's pack_padded_sequence\n",
     "LABEL = data.Field(\n",
     "                sequential=False,\n",
     "                use_vocab=True)\n",
     "\n",
     "train, val = data.TabularDataset.splits(\n",
     "                                        path=atis_data,\n",
     "                                        skip_header=True,\n",
     "                                        train='atis.train.csv',\n",
     "                                        validation='atis.test.csv',\n",
     "                                        format='csv',\n",
     "                                        fields=[('index', None), ('intent', LABEL), ('source', SOURCE), ('target', TARGET)])\n",
     "\n",
     "SOURCE.build_vocab(train, val)\n",
     "TARGET.build_vocab(train, val)\n",
     "LABEL.build_vocab(train, val)\n",
     "\n",
     "train_iter, val_iter = data.Iterator.splits(\n",
     "                                            (train, val),\n",
     "                                            batch_sizes=(64, len(val)), # train batches of 64; the whole validation set forms a single batch\n",
     "                                            shuffle=True,\n",
     "                                            sort_within_batch=True, # sort the examples inside each batch by sort_key (descending)\n",
     "                                            sort_key=lambda x: len(x.source)) # sort by source length, for the later pack/pad operations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# save source words\n",
    "source_words_path = os.path.join(os.getcwd(), 'source_words.pkl')\n",
    "with open(source_words_path, 'wb') as f_source_words:\n",
    "    pickle.dump(SOURCE.vocab, f_source_words)\n",
    "\n",
    "# save target words\n",
    "target_words_path = os.path.join(os.getcwd(), 'target_words.pkl')\n",
    "with open(target_words_path, 'wb') as f_target_words:\n",
    "    pickle.dump(TARGET.vocab, f_target_words)\n",
    "    \n",
    "# save label words\n",
    "label_words_path = os.path.join(os.getcwd(), 'label_words.pkl')\n",
    "with open(label_words_path, 'wb') as f_label_words:\n",
    "    pickle.dump(LABEL.vocab, f_label_words)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.optim import lr_scheduler\n",
    "import math\n",
    "from apex import amp\n",
    "import time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Run on GPU when available, otherwise fall back to CPU.\n",
     "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
     "# Fix all random seeds for reproducibility.\n",
     "SEED = 1234\n",
     "random.seed(SEED)\n",
     "torch.manual_seed(SEED)\n",
     "torch.cuda.manual_seed(SEED)\n",
     "# Deterministic cuDNN kernels (slower, but reproducible runs).\n",
     "torch.backends.cudnn.deterministic=True\n",
     "torch.backends.cudnn.benchmark=False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "这里将卷积后的特征与经过多头注意力后的特征进行融合\n",
    "'''\n",
    "class CNNAttention(nn.Module):\n",
    "    def __init__(self, input_dim, intent_out, slot_out, hid_dim, n_layers, kernel_size, dropout, src_pad_idx, n_heads, max_length=50):\n",
    "        super(CNNAttention, self).__init__()\n",
    "        for kernel in kernel_size:\n",
    "            assert kernel % 2 == 1,'kernel size must be odd!' # 卷积核size为奇数，方便序列两边pad处理\n",
    "        \n",
    "        self.src_pad_idx = src_pad_idx\n",
    "        \n",
    "        self.scale = torch.sqrt(torch.FloatTensor([0.5])).to(device) # 确保整个网络的方差不会发生显著变化\n",
    "        \n",
    "        self.tok_embedding = nn.Embedding(input_dim, hid_dim) # token编码\n",
    "        self.pos_embedding = nn.Embedding(max_length, hid_dim) # token的位置编码\n",
    "        \n",
    "        self.hid2hid = nn.Linear(hid_dim * 2, hid_dim) # 线性层，从2 * hid_dim转为hid_dim\n",
    "        \n",
    "        # 不同的kernel_size\n",
    "        '''\n",
    "        self.conv_module = list()\n",
    "        for k in kernel_size:\n",
    "            conv = nn.ModuleList([nn.Conv1d(in_channels=hid_dim,\n",
    "                                                  out_channels=2*hid_dim, # 卷积后输出的维度，这里2*hid_dim是为了后面的glu激活函数\n",
    "                                                  kernel_size=k,\n",
    "                                                  padding=(k - 1)//2) # 序列两边补0个数，保持维度不变\n",
    "                                                  for _ in range(n_layers)])\n",
    "            self.conv_module.append(conv)\n",
    "        '''\n",
    "        \n",
    "        self.conv_1 = nn.ModuleList([nn.Conv1d(in_channels=hid_dim,\n",
    "                                                  out_channels=2*hid_dim, # 卷积后输出的维度，这里2*hid_dim是为了后面的glu激活函数\n",
    "                                                  kernel_size=kernel_size[0],\n",
    "                                                  padding=(kernel_size[0] - 1)//2) # 序列两边补0个数，保持维度不变\n",
    "                                                  for _ in range(n_layers)])\n",
    "        self.conv_2 = nn.ModuleList([nn.Conv1d(in_channels=hid_dim,\n",
    "                                                  out_channels=2*hid_dim, # 卷积后输出的维度，这里2*hid_dim是为了后面的glu激活函数\n",
    "                                                  kernel_size=kernel_size[1],\n",
    "                                                  padding=(kernel_size[1] - 1)//2) # 序列两边补0个数，保持维度不变\n",
    "                                                  for _ in range(n_layers)])\n",
    "        self.conv_3 = nn.ModuleList([nn.Conv1d(in_channels=hid_dim,\n",
    "                                                  out_channels=2*hid_dim, # 卷积后输出的维度，这里2*hid_dim是为了后面的glu激活函数\n",
    "                                                  kernel_size=kernel_size[2],\n",
    "                                                  padding=(kernel_size[2] - 1)//2) # 序列两边补0个数，保持维度不变\n",
    "                                                  for _ in range(n_layers)])\n",
    "        \n",
    "        # 几个卷积模块转换维度\n",
    "        self.convhid2hid = nn.Linear(len(kernel_size) * hid_dim, hid_dim)\n",
    "        \n",
    "        # 多头注意力模块\n",
    "        self.self_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout)\n",
    "        \n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        \n",
    "        # intent detection 意图识别\n",
    "        self.intent_output = nn.Linear(hid_dim, intent_out)\n",
    "        \n",
    "         # slot filling，槽填充\n",
    "        self.slot_out = nn.Linear(hid_dim, slot_out)\n",
    "    \n",
    "    def make_src_mask(self, src):\n",
    "        # src: [batch_size, src_len]\n",
    "        src_mask = (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2) # [batch_size, 1, 1, src_len]\n",
    "        \n",
    "        return src_mask\n",
    "        \n",
    "    def forward(self, src):\n",
    "        # src: [batch_size, src_len]\n",
    "        # src_mask: [batch_size, src_len]\n",
    "        batch_size = src.shape[0]\n",
    "        src_len = src.shape[1]\n",
    "        \n",
    "        src_mask = self.make_src_mask(src) # [batch_size, 1, 1, src_len]\n",
    "        \n",
    "        # 创建token位置信息\n",
    "        pos = torch.arange(src_len).unsqueeze(0).repeat(batch_size, 1).to(device) # [batch_size, src_len]\n",
    "        \n",
    "        # 对token与其位置进行编码\n",
    "        tok_embedded = self.tok_embedding(src) # [batch_size, src_len, hid_dim]\n",
    "        pos_embedded = self.pos_embedding(pos.long()) # [batch_size, src_len, hid_dim]\n",
    "        \n",
    "        # 对token embedded和pos_embedded逐元素加和\n",
    "        embedded = self.dropout(tok_embedded + pos_embedded) # [batch_size, src_len, hid_dim]\n",
    "        \n",
    "        # 转变维度，卷积在输入数据的最后一维进行\n",
    "        conv_input = embedded.permute(0, 2, 1) # [batch_size, hid_dim, src_len]\n",
    "        \n",
    "        '''\n",
    "        combine_conv_module_list = []\n",
    "        for conv_module in self.conv_module:\n",
    "            conved_input = conv_input\n",
    "            # 以下进行卷积块\n",
    "            for i, conv in enumerate(conv_module):\n",
    "                \n",
    "                # 进行卷积\n",
    "                conved = conv(self.dropout(conved_input)) # [batch_size, 2*hid_dim, src_len]\n",
    "\n",
    "                # 进行激活glu\n",
    "                conved = F.glu(conved, dim=1) # [batch_size, hid_dim, src_len]\n",
    "\n",
    "                # 进行残差连接\n",
    "                conved = (conved + conved_input) * self.scale # [batch_size, hid_dim, src_len]\n",
    "\n",
    "                # 作为下一个卷积块的输入\n",
    "                conved_input = conved\n",
    "                \n",
    "            combine_conv_module_list.append(conved)\n",
    "            \n",
    "        # 拼接几个卷积块特征: [batch_size, len(kernel_size) * hid_dim, src_len]\n",
    "        combine_conv_module = combine_conv_module_list[0]\n",
    "        for i in range(1, len(combine_conv_module_list)):\n",
    "            combine_conv_module = torch.cat([combine_conv_module, combine_conv_module_list[i]], dim = 1)\n",
    "        '''\n",
    "        \n",
    "        # 第一个kernel_size\n",
    "        conved_input = conv_input\n",
    "        for i, conv in enumerate(self.conv_1):\n",
    "            # 进行卷积\n",
    "            conved1 = conv(self.dropout(conved_input)) # [batch_size, 2*hid_dim, src_len]\n",
    "\n",
    "            # 进行激活glu\n",
    "            conved1 = F.glu(conved1, dim=1) # [batch_size, hid_dim, src_len]\n",
    "\n",
    "            # 进行残差连接\n",
    "            conved1 = (conved1 + conved_input) * self.scale # [batch_size, hid_dim, src_len]\n",
    "\n",
    "            # 作为下一个卷积块的输入\n",
    "            conved_input = conved1\n",
    "        \n",
    "        combine_conv_module = conved1\n",
    "        \n",
    "        # 第二个kernel_size\n",
    "        conved_input = conv_input\n",
    "        for i, conv in enumerate(self.conv_2):\n",
    "            # 进行卷积\n",
    "            conved2 = conv(self.dropout(conved_input)) # [batch_size, 2*hid_dim, src_len]\n",
    "\n",
    "            # 进行激活glu\n",
    "            conved2 = F.glu(conved2, dim=1) # [batch_size, hid_dim, src_len]\n",
    "\n",
    "            # 进行残差连接\n",
    "            conved2 = (conved2 + conved_input) * self.scale # [batch_size, hid_dim, src_len]\n",
    "\n",
    "            # 作为下一个卷积块的输入\n",
    "            conved_input = conved2\n",
    "            \n",
    "        combine_conv_module = torch.cat([combine_conv_module, conved2], dim = 1)\n",
    "        \n",
    "        # 第三个kernel_size\n",
    "        conved_input = conv_input\n",
    "        for i, conv in enumerate(self.conv_3):\n",
    "            # 进行卷积\n",
    "            conved3 = conv(self.dropout(conved_input)) # [batch_size, 2*hid_dim, src_len]\n",
    "\n",
    "            # 进行激活glu\n",
    "            conved3 = F.glu(conved3, dim=1) # [batch_size, hid_dim, src_len]\n",
    "\n",
    "            # 进行残差连接\n",
    "            conved3 = (conved3 + conved_input) * self.scale # [batch_size, hid_dim, src_len]\n",
    "\n",
    "            # 作为下一个卷积块的输入\n",
    "            conved_input = conved3\n",
    "            \n",
    "        combine_conv_module = torch.cat([combine_conv_module, conved3], dim = 1)\n",
    "        \n",
    "        \n",
    "        \n",
    "        conved = self.convhid2hid(combine_conv_module.permute(0, 2, 1)) # [batch_size, src_len, hid_dim]\n",
    "        \n",
    "        # 这里在所有卷积之后增加了一个多头自注意力层，它的输入是\n",
    "        self_attention, _ = self.self_attention(embedded, embedded, embedded, src_mask) # [batch_size, query_len, hid_dim]\n",
    "        \n",
    "        # 拼接卷积后的特征与多头注意力后的特征\n",
    "        combined_conv_attention = torch.cat([conved, self_attention], dim=2) # [batch_size, query_len, 2*hid_dim]\n",
    "        \n",
    "        # 经过一线性层，将2*hid_dim转为hid_dim，作为输出的特征\n",
    "        conved = self.hid2hid(combined_conv_attention) # [batch_size, query_len, hid_dim]\n",
    "        \n",
    "        # 又是一个残差连接，逐元素加和输出，作为encoder的联合输出特征\n",
    "        combined = (conved + embedded) * self.scale # [batch_size, src_len, hid_dim]\n",
    "        \n",
    "        # 意图识别,加一个平均池化,池化后的维度是：[batch_size, hid_dim]\n",
    "        intent_output = self.intent_output(self.dropout(F.max_pool1d(combined.permute(0, 2, 1), combined.shape[1]).squeeze())) # [batch_size, intent_dim]\n",
    "    \n",
    "        # 槽填充\n",
    "        slot_output = self.slot_out(self.dropout(combined)) # [batch_size, trg_len, output_dim]\n",
    "        \n",
    "        return intent_output, slot_output\n",
    " \n",
    "'''\n",
    "多头注意力multi-head attention\n",
    "'''\n",
    "class MultiHeadAttentionLayer(nn.Module):\n",
    "    def __init__(self, hid_dim, n_heads, dropout):\n",
    "        super(MultiHeadAttentionLayer, self).__init__()\n",
    "        \n",
    "        assert hid_dim % n_heads == 0\n",
    "        \n",
    "        self.hid_dim = hid_dim\n",
    "        self.n_heads = n_heads\n",
    "        self.head_dim = hid_dim // n_heads\n",
    "        \n",
    "        self.fc_q = nn.Linear(hid_dim, hid_dim)\n",
    "        self.fc_k = nn.Linear(hid_dim, hid_dim)\n",
    "        self.fc_v = nn.Linear(hid_dim, hid_dim)\n",
    "        \n",
    "        self.fc_o = nn.Linear(hid_dim, hid_dim)\n",
    "        \n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        \n",
    "        self.scale = torch.sqrt(torch.FloatTensor([self.hid_dim])).to(device) # 缩放因子\n",
    "        \n",
    "    def forward(self, query, key, value, mask=None):\n",
    "        '''\n",
    "        query: [batch_size, query_len, hid_dim]\n",
    "        key: [batch_size, key_len, hid_dim]\n",
    "        value: [batch_size, value_len, hid_dim]\n",
    "        '''\n",
    "        batch_size = query.shape[0]\n",
    "        \n",
    "        Q = self.fc_q(query) # [batch_size, query_len, hid_dim]\n",
    "        K = self.fc_k(key) # [batch_size, key_len, hid_dim]\n",
    "        V = self.fc_v(value) # [batch_size, value_len, hid_dim]\n",
    "        \n",
    "        Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3) # [batch_size, n_heads, query_len, head_dim]\n",
    "        K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3) # [batch_size, n_heads, key_len, head_dim]\n",
    "        V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3) # [batch_size, n_heads, value_len, head_dim]\n",
    "        \n",
    "        # [batch_size, n_heads, query_len, head_dim] * [batch_size, n_heads, head_dim, key_len]\n",
    "        energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale # [batch_size, n_heads, query_len, key_len]\n",
    "        \n",
    "        if mask != None:\n",
    "            energy = energy.masked_fill(mask == 0, -1e10)\n",
    "        \n",
    "        attention = torch.softmax(energy, dim=-1) # [batch_size, n_heads, query_len, key_len]\n",
    "        \n",
    "        # [batch_size, n_heads, query_len, key_len] * [batch_size, n_heads, value_len, head_dim]\n",
    "        x = torch.matmul(self.dropout(attention), V) # [batch_size, n_heads, query_len, head_dim]\n",
    "        \n",
    "        x = x.permute(0, 2, 1, 3).contiguous() # [batch_size, query_len, n_heads, head_dim]\n",
    "        \n",
    "        x = x.view(batch_size, -1, self.hid_dim) # [batch_size, query_len, hid_dim]\n",
    "        \n",
    "        x = self.fc_o(x) # [batch_size, query_len, hid_dim]\n",
    "        \n",
    "        return x, attention\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "定义seq2seq model\n",
    "'''\n",
    "input_dim = len(SOURCE.vocab)\n",
    "slot_out = len(TARGET.vocab) # slot size\n",
    "intent_out = len(LABEL.vocab) # intent size\n",
    "\n",
    "hid_dim = 64\n",
    "conv_layers = 8\n",
    "kernel_size = (1,3,5) # 卷积核size\n",
    "dropout = 0.5\n",
    "n_heads = 8\n",
    "\n",
    "src_pad_idx = SOURCE.vocab.stoi[SOURCE.pad_token]\n",
    "\n",
    "model = CNNAttention(input_dim, intent_out, slot_out, hid_dim, conv_layers, kernel_size, dropout, src_pad_idx, n_heads)\n",
    "\n",
    "model = model.to(device)\n",
    "\n",
    "# 优化函数\n",
    "optimizer = optim.Adam(model.parameters())\n",
    "\n",
    "# 损失函数(slot)\n",
    "loss_slot = nn.CrossEntropyLoss(ignore_index=src_pad_idx)\n",
    "\n",
    "# 定义损失函数(意图识别)\n",
    "loss_intent = nn.CrossEntropyLoss()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Training: one full pass over the iterator; returns the mean joint loss.\n",
     "def train(model, iterator, optimizer, loss_slot, loss_intent, clip):\n",
     "    \n",
     "    model.train()\n",
     "    epoch_loss = 0\n",
     "    \n",
     "    for i, batch in enumerate(iterator):\n",
     "        \n",
     "        src, _ = batch.source  # src=[batch_size, seq_len]; batch.source is (tensor, lengths) because the Field was built with include_lengths=True\n",
     "        trg, _ = batch.target  # trg=[batch_size, seq_len]\n",
     "        label = batch.intent # [batch_size]\n",
     "        src = src.to(device)\n",
     "        trg = trg.to(device)\n",
     "        label = label.to(device)\n",
     "        \n",
     "        optimizer.zero_grad()\n",
     "        \n",
     "        intent_output, slot_output = model(src) # [batch_size, intent_dim]; [batch_size, trg_len, slot_output_dim]\n",
     "        \n",
     "        # 1. slot loss: drop the <sos> position and flatten for CrossEntropyLoss\n",
     "        slot_output_dim = slot_output.shape[-1]\n",
     "        \n",
     "        slot_output = slot_output[:, 1:, :].reshape(-1, slot_output_dim) # [batch_size * (trg_len-1), slot_output_dim]\n",
     "        \n",
     "        trg = trg[:,1:].contiguous().view(-1) # [batch_size * (trg_len-1)]\n",
     "        \n",
     "        loss1 = loss_slot(slot_output, trg)\n",
     "        \n",
     "        # 2. intent loss\n",
     "        loss2 = loss_intent(intent_output, label)\n",
     "        \n",
     "        # 3. joint loss = slot loss + intent loss\n",
     "        loss = loss1 + loss2\n",
     "        \n",
     "        loss.backward()\n",
     "        \n",
     "        # clip gradients to stabilise training\n",
     "        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n",
     "        \n",
     "        optimizer.step()\n",
     "        \n",
     "        epoch_loss += loss.item()\n",
     "    \n",
     "    return epoch_loss / len(iterator)\n",
     "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Validation: mean joint loss over the iterator, no gradient updates.\n",
     "def evaluate(model, iterator, loss_slot, loss_intent):\n",
     "    model.eval()\n",
     "    \n",
     "    epoch_loss = 0\n",
     "    \n",
     "    with torch.no_grad():\n",
     "        for i, batch in enumerate(iterator):\n",
     "            src, _ = batch.source  # src=[batch_size, seq_len]\n",
     "            trg, _ = batch.target  # trg=[batch_size, seq_len]\n",
     "            label = batch.intent\n",
     "            src = src.to(device)\n",
     "            trg = trg.to(device)\n",
     "            label = label.to(device)\n",
     "            \n",
     "            intent_output, slot_output = model(src) # [batch_size, intent_dim]; [batch_size, trg_len-1, slot_output_dim]\n",
     "            \n",
     "            # 1. slot loss: drop the <sos> position and flatten for CrossEntropyLoss\n",
     "            slot_output_dim = slot_output.shape[-1]\n",
     "\n",
     "            slot_output = slot_output[:, 1:, :].reshape(-1, slot_output_dim) # [batch_size * (trg_len-1), slot_output_dim]\n",
     "\n",
     "            trg = trg[:,1:].contiguous().view(-1) # [batch_size * (trg_len-1)]\n",
     "\n",
     "            loss1 = loss_slot(slot_output, trg)\n",
     "\n",
     "            # 2. intent loss\n",
     "            loss2 = loss_intent(intent_output, label)\n",
     "\n",
     "            # joint loss = slot loss + intent loss\n",
     "            loss = loss1 + loss2\n",
     "        \n",
     "            \n",
     "            epoch_loss += loss.item()\n",
     "            \n",
     "    return epoch_loss / len(iterator)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "def epoch_time(start_time, end_time):\n",
    "    elapsed_time = end_time - start_time\n",
    "    elapsed_mins = int(elapsed_time / 60)\n",
    "    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n",
    "    return elapsed_mins, elapsed_secs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 01 | Time: 0m 3s\n",
      "\tTrain Loss: 4.468 | Train PPL:  87.153\n",
      "\t Val. Loss: 2.789 |  Val. PPL:  16.264\n",
      "Epoch: 02 | Time: 0m 4s\n",
      "\tTrain Loss: 2.428 | Train PPL:  11.334\n",
      "\t Val. Loss: 2.031 |  Val. PPL:   7.619\n",
      "Epoch: 03 | Time: 0m 3s\n",
      "\tTrain Loss: 1.953 | Train PPL:   7.053\n",
      "\t Val. Loss: 1.748 |  Val. PPL:   5.743\n",
      "Epoch: 04 | Time: 0m 3s\n",
      "\tTrain Loss: 1.726 | Train PPL:   5.617\n",
      "\t Val. Loss: 1.565 |  Val. PPL:   4.781\n",
      "Epoch: 05 | Time: 0m 3s\n",
      "\tTrain Loss: 1.558 | Train PPL:   4.750\n",
      "\t Val. Loss: 1.461 |  Val. PPL:   4.311\n",
      "Epoch: 06 | Time: 0m 3s\n",
      "\tTrain Loss: 1.443 | Train PPL:   4.235\n",
      "\t Val. Loss: 1.324 |  Val. PPL:   3.760\n",
      "Epoch: 07 | Time: 0m 4s\n",
      "\tTrain Loss: 1.339 | Train PPL:   3.814\n",
      "\t Val. Loss: 1.261 |  Val. PPL:   3.527\n",
      "Epoch: 08 | Time: 0m 3s\n",
      "\tTrain Loss: 1.247 | Train PPL:   3.481\n",
      "\t Val. Loss: 1.243 |  Val. PPL:   3.465\n",
      "Epoch: 09 | Time: 0m 3s\n",
      "\tTrain Loss: 1.226 | Train PPL:   3.407\n",
      "\t Val. Loss: 1.105 |  Val. PPL:   3.020\n",
      "Epoch: 10 | Time: 0m 3s\n",
      "\tTrain Loss: 1.136 | Train PPL:   3.113\n",
      "\t Val. Loss: 1.063 |  Val. PPL:   2.896\n",
      "Epoch: 11 | Time: 0m 3s\n",
      "\tTrain Loss: 1.073 | Train PPL:   2.923\n",
      "\t Val. Loss: 1.064 |  Val. PPL:   2.898\n",
      "Epoch: 12 | Time: 0m 3s\n",
      "\tTrain Loss: 1.047 | Train PPL:   2.850\n",
      "\t Val. Loss: 0.985 |  Val. PPL:   2.678\n",
      "Epoch: 13 | Time: 0m 4s\n",
      "\tTrain Loss: 1.014 | Train PPL:   2.757\n",
      "\t Val. Loss: 0.981 |  Val. PPL:   2.667\n",
      "Epoch: 14 | Time: 0m 3s\n",
      "\tTrain Loss: 0.956 | Train PPL:   2.601\n",
      "\t Val. Loss: 0.904 |  Val. PPL:   2.469\n",
      "Epoch: 15 | Time: 0m 3s\n",
      "\tTrain Loss: 0.905 | Train PPL:   2.473\n",
      "\t Val. Loss: 0.898 |  Val. PPL:   2.454\n",
      "Epoch: 16 | Time: 0m 3s\n",
      "\tTrain Loss: 0.891 | Train PPL:   2.437\n",
      "\t Val. Loss: 0.874 |  Val. PPL:   2.397\n",
      "Epoch: 17 | Time: 0m 4s\n",
      "\tTrain Loss: 0.868 | Train PPL:   2.381\n",
      "\t Val. Loss: 0.890 |  Val. PPL:   2.436\n",
      "Epoch: 18 | Time: 0m 3s\n",
      "\tTrain Loss: 0.838 | Train PPL:   2.311\n",
      "\t Val. Loss: 0.876 |  Val. PPL:   2.402\n",
      "Epoch: 19 | Time: 0m 3s\n",
      "\tTrain Loss: 0.803 | Train PPL:   2.233\n",
      "\t Val. Loss: 0.802 |  Val. PPL:   2.230\n",
      "Epoch: 20 | Time: 0m 3s\n",
      "\tTrain Loss: 0.770 | Train PPL:   2.160\n",
      "\t Val. Loss: 0.840 |  Val. PPL:   2.316\n",
      "Epoch: 21 | Time: 0m 3s\n",
      "\tTrain Loss: 0.749 | Train PPL:   2.116\n",
      "\t Val. Loss: 0.830 |  Val. PPL:   2.293\n",
      "Epoch: 22 | Time: 0m 4s\n",
      "\tTrain Loss: 0.717 | Train PPL:   2.048\n",
      "\t Val. Loss: 0.781 |  Val. PPL:   2.184\n",
      "Epoch: 23 | Time: 0m 3s\n",
      "\tTrain Loss: 0.720 | Train PPL:   2.055\n",
      "\t Val. Loss: 0.763 |  Val. PPL:   2.145\n",
      "Epoch: 24 | Time: 0m 3s\n",
      "\tTrain Loss: 0.699 | Train PPL:   2.012\n",
      "\t Val. Loss: 0.762 |  Val. PPL:   2.142\n",
      "Epoch: 25 | Time: 0m 3s\n",
      "\tTrain Loss: 0.687 | Train PPL:   1.988\n",
      "\t Val. Loss: 0.783 |  Val. PPL:   2.189\n",
      "Epoch: 26 | Time: 0m 4s\n",
      "\tTrain Loss: 0.650 | Train PPL:   1.916\n",
      "\t Val. Loss: 0.714 |  Val. PPL:   2.043\n",
      "Epoch: 27 | Time: 0m 4s\n",
      "\tTrain Loss: 0.631 | Train PPL:   1.880\n",
      "\t Val. Loss: 0.715 |  Val. PPL:   2.045\n",
      "Epoch: 28 | Time: 0m 4s\n",
      "\tTrain Loss: 0.626 | Train PPL:   1.870\n",
      "\t Val. Loss: 0.727 |  Val. PPL:   2.069\n",
      "Epoch: 29 | Time: 0m 4s\n",
      "\tTrain Loss: 0.602 | Train PPL:   1.825\n",
      "\t Val. Loss: 0.722 |  Val. PPL:   2.058\n",
      "Epoch: 30 | Time: 0m 4s\n",
      "\tTrain Loss: 0.595 | Train PPL:   1.813\n",
      "\t Val. Loss: 0.696 |  Val. PPL:   2.005\n",
      "Epoch: 31 | Time: 0m 3s\n",
      "\tTrain Loss: 0.581 | Train PPL:   1.788\n",
      "\t Val. Loss: 0.703 |  Val. PPL:   2.020\n",
      "Epoch: 32 | Time: 0m 3s\n",
      "\tTrain Loss: 0.576 | Train PPL:   1.779\n",
      "\t Val. Loss: 0.722 |  Val. PPL:   2.059\n",
      "Epoch: 33 | Time: 0m 3s\n",
      "\tTrain Loss: 0.561 | Train PPL:   1.752\n",
      "\t Val. Loss: 0.692 |  Val. PPL:   1.998\n",
      "Epoch: 34 | Time: 0m 3s\n",
      "\tTrain Loss: 0.552 | Train PPL:   1.736\n",
      "\t Val. Loss: 0.650 |  Val. PPL:   1.915\n",
      "Epoch: 35 | Time: 0m 3s\n",
      "\tTrain Loss: 0.546 | Train PPL:   1.727\n",
      "\t Val. Loss: 0.643 |  Val. PPL:   1.903\n",
      "Epoch: 36 | Time: 0m 3s\n",
      "\tTrain Loss: 0.517 | Train PPL:   1.677\n",
      "\t Val. Loss: 0.641 |  Val. PPL:   1.899\n",
      "Epoch: 37 | Time: 0m 3s\n",
      "\tTrain Loss: 0.519 | Train PPL:   1.680\n",
      "\t Val. Loss: 0.664 |  Val. PPL:   1.943\n",
      "Epoch: 38 | Time: 0m 3s\n",
      "\tTrain Loss: 0.493 | Train PPL:   1.638\n",
      "\t Val. Loss: 0.661 |  Val. PPL:   1.937\n",
      "Epoch: 39 | Time: 0m 3s\n",
      "\tTrain Loss: 0.506 | Train PPL:   1.658\n",
      "\t Val. Loss: 0.673 |  Val. PPL:   1.960\n",
      "Epoch: 40 | Time: 0m 3s\n",
      "\tTrain Loss: 0.495 | Train PPL:   1.641\n",
      "\t Val. Loss: 0.640 |  Val. PPL:   1.897\n",
      "Epoch: 41 | Time: 0m 3s\n",
      "\tTrain Loss: 0.479 | Train PPL:   1.615\n",
      "\t Val. Loss: 0.626 |  Val. PPL:   1.871\n",
      "Epoch: 42 | Time: 0m 3s\n",
      "\tTrain Loss: 0.472 | Train PPL:   1.603\n",
      "\t Val. Loss: 0.668 |  Val. PPL:   1.950\n",
      "Epoch: 43 | Time: 0m 3s\n",
      "\tTrain Loss: 0.481 | Train PPL:   1.618\n",
      "\t Val. Loss: 0.675 |  Val. PPL:   1.965\n",
      "Epoch: 44 | Time: 0m 4s\n",
      "\tTrain Loss: 0.454 | Train PPL:   1.574\n",
      "\t Val. Loss: 0.654 |  Val. PPL:   1.923\n",
      "Epoch: 45 | Time: 0m 4s\n",
      "\tTrain Loss: 0.441 | Train PPL:   1.554\n",
      "\t Val. Loss: 0.611 |  Val. PPL:   1.843\n",
      "Epoch: 46 | Time: 0m 4s\n",
      "\tTrain Loss: 0.439 | Train PPL:   1.551\n",
      "\t Val. Loss: 0.667 |  Val. PPL:   1.948\n",
      "Epoch: 47 | Time: 0m 3s\n",
      "\tTrain Loss: 0.426 | Train PPL:   1.532\n",
      "\t Val. Loss: 0.642 |  Val. PPL:   1.900\n",
      "Epoch: 48 | Time: 0m 3s\n",
      "\tTrain Loss: 0.435 | Train PPL:   1.545\n",
      "\t Val. Loss: 0.635 |  Val. PPL:   1.887\n",
      "Epoch: 49 | Time: 0m 3s\n",
      "\tTrain Loss: 0.439 | Train PPL:   1.551\n",
      "\t Val. Loss: 0.629 |  Val. PPL:   1.875\n",
      "Epoch: 50 | Time: 0m 4s\n",
      "\tTrain Loss: 0.433 | Train PPL:   1.542\n",
      "\t Val. Loss: 0.618 |  Val. PPL:   1.855\n",
      "Epoch: 51 | Time: 0m 4s\n",
      "\tTrain Loss: 0.411 | Train PPL:   1.508\n",
      "\t Val. Loss: 0.630 |  Val. PPL:   1.877\n",
      "Epoch: 52 | Time: 0m 4s\n",
      "\tTrain Loss: 0.418 | Train PPL:   1.519\n",
      "\t Val. Loss: 0.602 |  Val. PPL:   1.826\n",
      "Epoch: 53 | Time: 0m 3s\n",
      "\tTrain Loss: 0.400 | Train PPL:   1.492\n",
      "\t Val. Loss: 0.645 |  Val. PPL:   1.905\n",
      "Epoch: 54 | Time: 0m 3s\n",
      "\tTrain Loss: 0.415 | Train PPL:   1.515\n",
      "\t Val. Loss: 0.624 |  Val. PPL:   1.867\n",
      "Epoch: 55 | Time: 0m 3s\n",
      "\tTrain Loss: 0.399 | Train PPL:   1.491\n",
      "\t Val. Loss: 0.647 |  Val. PPL:   1.909\n",
      "Epoch: 56 | Time: 0m 4s\n",
      "\tTrain Loss: 0.401 | Train PPL:   1.494\n",
      "\t Val. Loss: 0.611 |  Val. PPL:   1.843\n",
      "Epoch: 57 | Time: 0m 3s\n",
      "\tTrain Loss: 0.376 | Train PPL:   1.457\n",
      "\t Val. Loss: 0.644 |  Val. PPL:   1.904\n",
      "Epoch: 58 | Time: 0m 3s\n",
      "\tTrain Loss: 0.379 | Train PPL:   1.461\n",
      "\t Val. Loss: 0.612 |  Val. PPL:   1.845\n",
      "Epoch: 59 | Time: 0m 3s\n",
      "\tTrain Loss: 0.368 | Train PPL:   1.445\n",
      "\t Val. Loss: 0.662 |  Val. PPL:   1.938\n",
      "Epoch: 60 | Time: 0m 3s\n",
      "\tTrain Loss: 0.380 | Train PPL:   1.462\n",
      "\t Val. Loss: 0.620 |  Val. PPL:   1.859\n",
      "Epoch: 61 | Time: 0m 3s\n",
      "\tTrain Loss: 0.368 | Train PPL:   1.445\n",
      "\t Val. Loss: 0.678 |  Val. PPL:   1.969\n",
      "Epoch: 62 | Time: 0m 3s\n",
      "\tTrain Loss: 0.368 | Train PPL:   1.445\n",
      "\t Val. Loss: 0.630 |  Val. PPL:   1.878\n",
      "Epoch: 63 | Time: 0m 3s\n",
      "\tTrain Loss: 0.356 | Train PPL:   1.427\n",
      "\t Val. Loss: 0.568 |  Val. PPL:   1.764\n",
      "Epoch: 64 | Time: 0m 3s\n",
      "\tTrain Loss: 0.362 | Train PPL:   1.437\n",
      "\t Val. Loss: 0.604 |  Val. PPL:   1.829\n",
      "Epoch: 65 | Time: 0m 3s\n",
      "\tTrain Loss: 0.346 | Train PPL:   1.413\n",
      "\t Val. Loss: 0.570 |  Val. PPL:   1.769\n",
      "Epoch: 66 | Time: 0m 3s\n",
      "\tTrain Loss: 0.347 | Train PPL:   1.415\n",
      "\t Val. Loss: 0.624 |  Val. PPL:   1.866\n",
      "Epoch: 67 | Time: 0m 3s\n",
      "\tTrain Loss: 0.344 | Train PPL:   1.411\n",
      "\t Val. Loss: 0.614 |  Val. PPL:   1.848\n",
      "Epoch: 68 | Time: 0m 3s\n",
      "\tTrain Loss: 0.358 | Train PPL:   1.431\n",
      "\t Val. Loss: 0.612 |  Val. PPL:   1.845\n",
      "Epoch: 69 | Time: 0m 3s\n",
      "\tTrain Loss: 0.342 | Train PPL:   1.408\n",
      "\t Val. Loss: 0.610 |  Val. PPL:   1.841\n",
      "Epoch: 70 | Time: 0m 3s\n",
      "\tTrain Loss: 0.346 | Train PPL:   1.413\n",
      "\t Val. Loss: 0.624 |  Val. PPL:   1.866\n",
      "Epoch: 71 | Time: 0m 3s\n",
      "\tTrain Loss: 0.338 | Train PPL:   1.403\n",
      "\t Val. Loss: 0.638 |  Val. PPL:   1.892\n",
      "Epoch: 72 | Time: 0m 3s\n",
      "\tTrain Loss: 0.338 | Train PPL:   1.402\n",
      "\t Val. Loss: 0.583 |  Val. PPL:   1.791\n",
      "Epoch: 73 | Time: 0m 3s\n",
      "\tTrain Loss: 0.324 | Train PPL:   1.383\n",
      "\t Val. Loss: 0.589 |  Val. PPL:   1.803\n"
     ]
    }
   ],
   "source": [
     "n_epochs = 100 # number of training epochs\n",
     "clip = 0.1 # gradient-clipping threshold\n",
     "\n",
     "# NOTE(review): despite the .h5 extension this file holds a torch state_dict, not HDF5\n",
     "model_path = os.path.join(os.getcwd(), \"model.h5\")\n",
     "\n",
     "best_valid_loss = float('inf')\n",
     "\n",
     "for epoch in range(n_epochs):\n",
     "    \n",
     "    start_time = time.time()\n",
     "    \n",
     "    train_loss = train(model, train_iter, optimizer, loss_slot, loss_intent, clip)\n",
     "    valid_loss = evaluate(model, val_iter, loss_slot, loss_intent)\n",
     "    \n",
     "    end_time = time.time()\n",
     "    \n",
     "    epoch_mins, epoch_secs = epoch_time(start_time, end_time) # wall-clock time of this epoch\n",
     "    \n",
     "    # keep only the checkpoint with the best validation loss\n",
     "    if valid_loss < best_valid_loss:\n",
     "        best_valid_loss = valid_loss\n",
     "        torch.save(model.state_dict(), model_path)\n",
     "        \n",
     "    print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')\n",
     "    print(f'\\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')\n",
     "    print(f'\\t Val. Loss: {valid_loss:.3f} |  Val. PPL: {math.exp(valid_loss):7.3f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
