{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![seq2seq_attention_intent_slot模型](img/img1.png)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "Joint model for intent detection and slot filling in dialogue.\n",
    "\n",
    "Implements model (c) in the figure above, from the paper\n",
    "\"Attention-Based Recurrent Neural Network Models for Joint Intent Detection and Slot Filling\".\n",
    "The model is a seq2seq network with attention:\n",
    "1. Intent detection uses the encoder's final-time-step bidirectional hidden state plus the\n",
    "   encoder attention context, followed by a fully-connected classification layer.\n",
    "2. Slot filling is done as sequence labeling with the usual attention-based method,\n",
    "   also ending in a fully-connected classification layer.\n",
    "3. Total loss = intent detection loss + slot filling loss.\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from torchtext import data, datasets\n",
    "import pandas as pd\n",
    "import pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Project base directory: the parent of the notebook's working directory\n",
    "base_dir = os.path.abspath(os.path.join(os.getcwd(), \"..\"))\n",
    "# Directory containing the ATIS train/test CSV files\n",
    "atis_data = os.path.join(base_dir, 'atis')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "build train and val dataset\n",
    "'''\n",
    "    \n",
    "# Whitespace tokenizer; ATIS utterances are already space-separated\n",
    "tokenize = lambda s:s.split()\n",
    "\n",
    "SOURCE = data.Field(sequential=True, tokenize=tokenize,\n",
    "                    lower=True, use_vocab=True,\n",
    "                    init_token='<sos>', eos_token='<eos>',\n",
    "                    pad_token='<pad>', unk_token='<unk>',\n",
    "                    batch_first=True, fix_length=50,\n",
    "                    include_lengths=True) #include_lengths=True returns sequence lengths, needed later for torch's pack_padded_sequence\n",
    "\n",
    "TARGET = data.Field(sequential=True, tokenize=tokenize,\n",
    "                    lower=True, use_vocab=True,\n",
    "                    init_token='<sos>', eos_token='<eos>',\n",
    "                    pad_token='<pad>', unk_token='<unk>',\n",
    "                    batch_first=True, fix_length=50,\n",
    "                    include_lengths=True) #include_lengths=True returns sequence lengths, needed later for torch's pack_padded_sequence\n",
    "LABEL = data.Field(\n",
    "                sequential=False,\n",
    "                use_vocab=True)\n",
    "\n",
    "# CSV columns: index (dropped), intent label, source utterance, slot-tag target sequence\n",
    "train, val = data.TabularDataset.splits(\n",
    "                                        path=atis_data,\n",
    "                                        skip_header=True,\n",
    "                                        train='atis.train.csv',\n",
    "                                        validation='atis.test.csv',\n",
    "                                        format='csv',\n",
    "                                        fields=[('index', None), ('intent', LABEL), ('source', SOURCE), ('target', TARGET)])\n",
    "\n",
    "# NOTE(review): vocabularies are built over train AND val, which leaks validation tokens\n",
    "# into the vocab -- confirm this is intended before reporting results\n",
    "SOURCE.build_vocab(train, val)\n",
    "TARGET.build_vocab(train, val)\n",
    "LABEL.build_vocab(train, val)\n",
    "\n",
    "train_iter, val_iter = data.Iterator.splits(\n",
    "                                            (train, val),\n",
    "                                            batch_sizes=(128, len(val)), # batch size 128 for training; the whole validation set as a single batch\n",
    "                                            shuffle=True,\n",
    "                                            sort_within_batch=True, #if True, examples within each batch are sorted in descending order by sort_key\n",
    "                                            sort_key=lambda x: len(x.source)) #sort by source length, required for the pack/pad operations later\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the built vocabularies with pickle so they can be reloaded at inference time\n",
    "# save source words\n",
    "source_words_path = os.path.join(os.getcwd(), 'source_words.pkl')\n",
    "with open(source_words_path, 'wb') as f_source_words:\n",
    "    pickle.dump(SOURCE.vocab, f_source_words)\n",
    "\n",
    "# save target words\n",
    "target_words_path = os.path.join(os.getcwd(), 'target_words.pkl')\n",
    "with open(target_words_path, 'wb') as f_target_words:\n",
    "    pickle.dump(TARGET.vocab, f_target_words)\n",
    "    \n",
    "# save label words\n",
    "label_words_path = os.path.join(os.getcwd(), 'label_words.pkl')\n",
    "with open(label_words_path, 'wb') as f_label_words:\n",
    "    pickle.dump(LABEL.vocab, f_label_words)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.optim import lr_scheduler\n",
    "import math\n",
    "from apex import amp\n",
    "import time\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# build model\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "'''\n",
    "Original note: with encoder and decoder both using n_layers=2, the last layer of the\n",
    "previous hidden state used in the attention computation is hidden[-1,:,:].\n",
    "NOTE(review): the training cell below actually sets n_layers=1, and the decoder's\n",
    "hidden.unsqueeze(0) assumes a single-layer decoder GRU -- confirm before raising n_layers.\n",
    "'''\n"
    "# Encoder: bidirectional GRU over the source utterance\n",
    "class Encoder(nn.Module):\n",
    "    '''\n",
    "    Bidirectional GRU encoder.\n",
    "\n",
    "    Args:\n",
    "        input_dim: source vocabulary size\n",
    "        emb_dim: embedding dimension\n",
    "        hidden_dim: GRU hidden size per direction\n",
    "        n_layers: number of stacked GRU layers\n",
    "        dropout: dropout probability (embeddings and between GRU layers)\n",
    "        pad_index: index of <pad> in the source vocab\n",
    "    '''\n",
    "    def __init__(self, input_dim, emb_dim, hidden_dim, n_layers, dropout, pad_index):\n",
    "        super(Encoder, self).__init__()\n",
    "        self.pad_index = pad_index\n",
    "        self.hidden_dim = hidden_dim\n",
    "        self.n_layers = n_layers\n",
    "        \n",
    "        self.embedding = nn.Embedding(input_dim, emb_dim, padding_idx=pad_index)\n",
    "        self.gru = nn.GRU(emb_dim, hidden_dim, n_layers, dropout=dropout, bidirectional=True, batch_first=True) #bidirectional GRU\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        # Projects the concatenated forward/backward final states back down to hidden_dim\n",
    "        self.fc = nn.Linear(hidden_dim * 2, hidden_dim)\n",
    "    def forward(self, src, src_len):\n",
    "        '''\n",
    "        src: [batch_size, seq_len] padded token ids (batch sorted by descending length)\n",
    "        src_len: [batch_size] true sequence lengths\n",
    "        Returns:\n",
    "            output: [batch_size, seq_len, hidden_dim*2] per-step bidirectional states\n",
    "            hidden: [batch_size, hidden_dim] fused final forward+backward state\n",
    "        '''\n",
    "        # (kept from original) optional explicit state initialization:\n",
    "        # h0 = torch.zeros(self.n_layers, src.size(1), self.hidden_dim).to(device)\n",
    "        # c0 = torch.zeros(self.n_layers, src.size(1), self.hidden_dim).to(device)\n",
    "        # nn.init.kaiming_normal_(h0)\n",
    "        # nn.init.kaiming_normal_(c0)\n",
    "        # src=[batch_size, seq_len]\n",
    "        embedded = self.dropout(self.embedding(src))\n",
    "        # embedd=[batch_size,seq_len,embdim]\n",
    "        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, src_len, batch_first=True, enforce_sorted=True) #enforce_sorted=True requires the batch sorted by length (the iterator's sort_key guarantees this)\n",
    "        output, hidden = self.gru(packed)\n",
    "        # output=[batch_size, seq_len, hidden_size*2]\n",
    "        # hidden=[n_layers*2, batch_size, hidden_size]\n",
    "        \n",
    "        output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=True, padding_value=self.pad_index, total_length=len(src[0])) #returns the re-padded output and the lengths\n",
    "        \n",
    "        '''\n",
    "        hidden[-2,:,:] is the GRU's final forward-direction state\n",
    "        hidden[-1,:,:] is the GRU's final backward-direction state\n",
    "        Their fusion serves as the decoder's initial hidden state.\n",
    "        hidden: [batch_size, hidden_dim]\n",
    "        '''\n",
    "        hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1)))\n",
    "        return output, hidden\n",
    "\n",
    "# Attention: additive (concat) scoring between decoder state and encoder outputs\n",
    "class Attention(nn.Module):\n",
    "    '''Computes softmax-normalized attention weights over encoder time steps.'''\n",
    "    def __init__(self, hidden_dim):\n",
    "        super(Attention, self).__init__()\n",
    "        # Scores the concatenation of a bidirectional encoder output (hidden_dim*2)\n",
    "        # and the decoder hidden state (hidden_dim)\n",
    "        self.attn = nn.Linear((hidden_dim * 2) + hidden_dim, hidden_dim)\n",
    "        self.v = nn.Linear(hidden_dim, 1, bias=False)\n",
    "\n",
    "    def concat_score(self, hidden, encoder_output):\n",
    "        '''Unnormalized attention energies, one per encoder time step.'''\n",
    "        seq_len = encoder_output.shape[1]\n",
    "        hidden = hidden.unsqueeze(1).repeat(1, seq_len, 1) # [batch_size, seq_len, hidden_size]\n",
    "        energy = torch.tanh(self.attn(torch.cat((hidden, encoder_output),dim=2))) # [batch_size, seq_len, hidden_dim]\n",
    "        attention = self.v(energy).squeeze(2) #[batch_size, seq_len]\n",
    "        return attention #[batch_size, seq_len]\n",
    "\n",
    "    def forward(self, hidden, encoder_output):\n",
    "        # hidden = [batch_size, hidden_size]\n",
    "        # #encoder_output=[batch_size, seq_len, hidden_dim*2]\n",
    "        \n",
    "        attn_energies = self.concat_score(hidden, encoder_output)\n",
    "\n",
    "        return F.softmax(attn_energies, dim=1).unsqueeze(1) #softmax-normalized weights, [batch_size, 1, seq_len]\n",
    "\n",
    "# Decoder: single-step attention decoder for slot tagging\n",
    "class Decoder(nn.Module):\n",
    "    '''\n",
    "    One decoding step: embeds the previous token, attends over encoder outputs,\n",
    "    runs the GRU, and predicts the slot-label distribution for this step.\n",
    "    '''\n",
    "    def __init__(self, output_dim, emb_dim, hidden_dim, n_layers, dropout):\n",
    "        super(Decoder, self).__init__()\n",
    "        self.output_dim = output_dim\n",
    "        self.hidden_dim = hidden_dim\n",
    "        self.n_layers = n_layers\n",
    "\n",
    "        self.embedding = nn.Embedding(output_dim, emb_dim)\n",
    "        self.gru = nn.GRU((hidden_dim * 2) + emb_dim, hidden_dim, n_layers, dropout=dropout, batch_first=True)\n",
    "        # slot filling output layer (decoder state + attention context -> slot logits)\n",
    "        self.slot_out = nn.Linear(hidden_dim * 2 + hidden_dim, output_dim)\n",
    "        self.attention = Attention(hidden_dim)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self, input, hidden, encoder_output):\n",
    "        input = input.unsqueeze(1)\n",
    "        # input=[batch_size, 1]\n",
    "        # hidden=[batch_size, hidden_size], initialized from the encoder's fused final state\n",
    "        # encoder_output=[batch_size, seq_len, hidden_dim*2]\n",
    "        \n",
    "        # embedded=[batch_sze, 1, emb_dim]\n",
    "        embedded = self.dropout(self.embedding(input))\n",
    "\n",
    "        # Compute attention weights from the previous hidden state and encoder_output\n",
    "        # attention_weights=[batch_size, 1, seq_len]\n",
    "        attention_weights = self.attention(hidden, encoder_output)\n",
    "\n",
    "        '''\n",
    "        Context computation: the attention weight distribution produces a weighted\n",
    "        average of the encoder hidden states; the result is the context vector.\n",
    "        '''\n",
    "        context = attention_weights.bmm(encoder_output) # [batch_size, 1, seq_len]*[batch_size,seq_len,hidden_dim*2]=[batch_size, 1, hidden_dim*2]\n",
    "        \n",
    "        # Concatenate attention context and embedding as the GRU input\n",
    "        # [batch_size, 1, hidden_dim*2+emb_dim]\n",
    "        gru_input = torch.cat([context, embedded], 2)\n",
    "        \n",
    "        # Feed the context, current embedding and previous hidden state into the GRU.\n",
    "        # The decoder GRU here is unidirectional with sequence length 1 and one layer, so\n",
    "        # decoder_output=[batch_size, 1, hidden_size] and hidden=[1, batch_size, hidden_size]\n",
    "        decoder_output, hidden = self.gru(gru_input, hidden.unsqueeze(0))\n",
    "        \n",
    "\n",
    "        decoder_output_context = torch.cat([decoder_output, context], 2) # concat context with the decoder output along hidden_dim = [batch_size, 1, 2 * hidden_dim + hidden_dim]\n",
    "        prediction = self.slot_out(decoder_output_context.squeeze(1))\n",
    "        # prediction=[batch_size, output_dim]: scores over all slot labels (softmax is applied inside the loss)\n",
    "        return prediction, hidden.squeeze(0), attention_weights.squeeze(1), context.squeeze(1)\n",
    "\n",
    "# Seq2Seq: ties Encoder and Decoder together and adds the intent classification head\n",
    "class Seq2Seq(nn.Module):\n",
    "    '''\n",
    "    Receives the source sentence,\n",
    "    builds context vectors with the encoder,\n",
    "    and generates the predicted target (slot-tag) sentence with the decoder.\n",
    "\n",
    "    Each iteration:\n",
    "    feed the input plus the previous hidden/cell state to the decoder,\n",
    "    receive a prediction plus the next hidden/cell state,\n",
    "    store the prediction as part of the predicted sentence,\n",
    "    then decide whether to use \"teacher forcing\":\n",
    "        if used: the decoder's next input is the ground-truth token\n",
    "        if not: the decoder's next input is the predicted token (argmax of the output tensor)\n",
    "    '''\n",
    "\n",
    "    def __init__(self, predict_flag, encoder, decoder, intent_size):\n",
    "        super(Seq2Seq, self).__init__()\n",
    "        self.encoder = encoder\n",
    "        self.decoder = decoder\n",
    "        # predict_flag=True -> single-sentence inference; False -> batched training\n",
    "        self.predict_flag = predict_flag\n",
    "        # intent classification head: encoder final state + attention context -> intent logits\n",
    "        self.intent_out = nn.Linear((encoder.hidden_dim * 2) + encoder.hidden_dim, intent_size)\n",
    "        assert encoder.hidden_dim == decoder.hidden_dim, 'encoder与decoder的隐藏状态维度必须相等！'\n",
    "        assert encoder.n_layers == decoder.n_layers, 'encoder与decoder的层数必须相等！'\n",
    "        \n",
    "    def forward(self, src, src_lens, trg, teacher_forcing_ration=1.0):\n",
    "        '''\n",
    "        src=[batch_size, seq_len]\n",
    "        src_len=[batch_size]\n",
    "        trg=[batch_size, trg_len]\n",
    "        \n",
    "        '''\n",
    "        # Inference: one sentence at a time\n",
    "        if self.predict_flag:\n",
    "            assert len(src) == 1, '预测时一次输入一句话'\n",
    "            src_len = len(src[0])\n",
    "            output_tokens = []\n",
    "            encoder_output, encoder_hidden = self.encoder(src, src_lens)\n",
    "            hidden = encoder_hidden\n",
    "            input = torch.tensor(2).unsqueeze(0)  # first decoder input is <sos>; NOTE(review): index 2 is hardcoded and the tensor is not moved to `device` -- confirm both\n",
    "            for s in range(1, src_len):\n",
    "                if s == 1:\n",
    "                    # context = [batch_size, hidden_dim*2]; captured once for the intent head below\n",
    "                    output, hidden, _, context = self.decoder(input, hidden, encoder_output)\n",
    "                else:\n",
    "                    output, hidden, _, _ = self.decoder(input, hidden, encoder_output)\n",
    "                    \n",
    "                input = output.argmax(1)\n",
    "                output_token = input.squeeze().detach().item()\n",
    "               \n",
    "                output_tokens.append(output_token)\n",
    "            concated = torch.cat((encoder_hidden, context), 1)\n",
    "            intent_outputs = self.intent_out(concated)\n",
    "            intent_outputs = intent_outputs.squeeze()\n",
    "            intent_outputs = intent_outputs.argmax()\n",
    "            return output_tokens, intent_outputs\n",
    "\n",
    "        # Training\n",
    "        else:\n",
    "            '''\n",
    "            src=[batch_size, seq_len]\n",
    "            trg=[batch_size, trg_len]\n",
    "            teacher_forcing_ration is the probability of using teacher forcing; e.g. with\n",
    "            teacher_forcing_ration=0.8, 80% of the time steps are fed the ground-truth token.\n",
    "            '''\n",
    "            batch_size = trg.shape[0]\n",
    "            trg_len = trg.shape[1]\n",
    "            trg_vocab_size = self.decoder.output_dim\n",
    "            # buffer for the decoder outputs\n",
    "            slot_outputs = torch.zeros(batch_size, trg_len, trg_vocab_size).to(device)\n",
    "            # The encoder's fused final hidden state (forward+backward) initializes the decoder.\n",
    "            # encoder_output=[batch_size, seq_len, hidden_size*2], hidden=[batch_size, hidden_size]\n",
    "            encoder_output, encoder_hidden = self.encoder(\n",
    "                src, src_lens)  \n",
    "            hidden = encoder_hidden\n",
    "            \n",
    "            # The first decoder input is <sos>\n",
    "            input = trg[:, 0] # [batch_size]\n",
    "            \n",
    "            for t in range(1, trg_len):\n",
    "                '''\n",
    "                The decoder's initial hidden state is the encoder's final hidden state;\n",
    "                receive the predictions and the new hidden state.\n",
    "                '''\n",
    "                if t == 1:\n",
    "                    # context = [batch_size, hidden_dim*2]; captured once for the intent head below\n",
    "                    output, hidden, _, context = self.decoder(input, hidden, encoder_output)\n",
    "                else:\n",
    "                    output, hidden, _, _ = self.decoder(input, hidden, encoder_output)\n",
    "                # store this step's prediction\n",
    "                slot_outputs[:, t, :] = output\n",
    "                # decide whether to use teacher forcing for the next step\n",
    "                teacher_force = random.random() < teacher_forcing_ration\n",
    "                # token with the highest predicted score\n",
    "                predict_max = output.argmax(1)\n",
    "                '''\n",
    "                With teacher forcing, the next decoder input is the ground-truth token;\n",
    "                otherwise the decoder's own prediction is used.\n",
    "                '''\n",
    "                input = trg[:, t] if teacher_force else predict_max\n",
    "            # concated = [batch_size, hidden_dim * 2 + hidden_dim]\n",
    "            concated = torch.cat((encoder_hidden, context), 1)\n",
    "            intent_outputs = self.intent_out(concated)\n",
    "            # slot_outputs=[batch_size, trg_len, trg_vocab_size], intetn_outputs=[batch_size, intent_size]\n",
    "            return slot_outputs, intent_outputs\n",
    "\n",
    "\n",
    "# Build the model, optimizer, loss functions and learning-rate scheduler\n",
    "def build_model(source, target, label, encoder_embedding_dim, decoder_embedding_dim, hidden_dim, n_layers, encoder_dropout,\n",
    "                decoder_dropout, lr, gamma, weight_decay):\n",
    "    '''\n",
    "    Assemble the joint seq2seq model for training.\n",
    "    Input and output dimensions are the vocabulary sizes.\n",
    "    Encoder and decoder may use different embedding sizes and dropout rates,\n",
    "    but the number of layers and the hidden-state size must match.\n",
    "    Returns (model, optimizer, scheduler, loss_slot, loss_intent).\n",
    "    '''\n",
    "    input_dim = len(source.vocab) # source vocabulary size (number of words)\n",
    "    output_dim = len(target.vocab) # target vocabulary size (number of slot-tag types)\n",
    "    label_dim = len(label.vocab) # label vocabulary size (number of intent classes)\n",
    "    \n",
    "    encoder = Encoder(input_dim, encoder_embedding_dim, hidden_dim, n_layers, encoder_dropout, source.vocab.stoi[source.pad_token])\n",
    "    decoder = Decoder(output_dim, decoder_embedding_dim, hidden_dim, n_layers, decoder_dropout)\n",
    "\n",
    "    model = Seq2Seq(False, encoder, decoder, label_dim).to(device)\n",
    "\n",
    "    model.apply(init_weights)\n",
    "\n",
    "    # optimizer\n",
    "    # optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n",
    "    optimizer = torch.optim.SGD(model.parameters(), lr=lr)\n",
    "    # learning-rate decay\n",
    "    scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=gamma)\n",
    "    # Ignore <pad> positions in the slot loss. Fix: look the pad index up with the TARGET\n",
    "    # field's own pad_token (the original used source.pad_token, which only worked because\n",
    "    # both fields happen to share the '<pad>' string).\n",
    "    target_pad_index = target.vocab.stoi[target.pad_token]\n",
    "    # slot-filling (sequence labeling) loss\n",
    "    loss_slot = nn.CrossEntropyLoss(ignore_index=target_pad_index)\n",
    "    # intent classification loss\n",
    "    loss_intent = nn.CrossEntropyLoss()\n",
    "    \n",
    "    return model, optimizer, scheduler, loss_slot, loss_intent\n",
    "\n",
    "\n",
    "# one training epoch\n",
    "def train(model, iterator, optimizer, loss_slot, loss_intent, clip):\n",
    "    '''\n",
    "    Training loop:\n",
    "        1. get the source and target sentences\n",
    "        2. zero the gradients left over from the previous batch\n",
    "        3. feed source and target to the model and get the output\n",
    "        4. since the loss only accepts 2-D inputs with 1-D targets, flatten with reshape\n",
    "           (the first column, <sos>, is excluded from both output and target)\n",
    "        5. backpropagate with loss.backward()\n",
    "        6. clip gradients to prevent explosion\n",
    "        7. update the model parameters\n",
    "        8. accumulate the loss (returns the mean loss over all batches)\n",
    "    '''\n",
    "    model.train()\n",
    "    epoch_loss = 0\n",
    "\n",
    "    for i, batch in enumerate(iterator):\n",
    "        src, src_lens = batch.source  # src=[batch_size, seq_len]; batch.source is (tensor, lengths) because the Field was built with include_lengths=True\n",
    "        trg, _ = batch.target  # trg=[batch_size, seq_len]\n",
    "        label = batch.intent # [batch_size]\n",
    "        src = src.to(device)\n",
    "        trg = trg.to(device)\n",
    "        label = label.to(device)\n",
    "        \n",
    "        #slot_outputs=[batch_size, trg_len, trg_vocab_size], intetn_outputs=[batch_size, intent_size]\n",
    "        slot_outputs, intent_outputs = model(src, src_lens, trg, teacher_forcing_ration=1.0)\n",
    "        \n",
    "        # Exclude the leading <sos> position from the loss, then flatten\n",
    "        output_dim = slot_outputs.shape[-1]\n",
    "        slot_outputs = slot_outputs[:, 1:, :].reshape(-1, output_dim)  # output=[batch_size * (seq_len - 1), output_dim]\n",
    "        trg = trg[:, 1:].reshape(-1)  # trg=[batch_size * (seq_len - 1)]\n",
    "        loss1 = loss_slot(slot_outputs, trg)\n",
    "        loss2 = loss_intent(intent_outputs, label)\n",
    "        loss = loss1 + loss2\n",
    "        \n",
    "        # apex mixed-precision backward pass (loss scaling)\n",
    "        with amp.scale_loss(loss, optimizer) as scaled_loss:\n",
    "            scaled_loss.backward()\n",
    "        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n",
    "        optimizer.step()\n",
    "        optimizer.zero_grad()\n",
    "        epoch_loss += float(loss.item())\n",
    "        # print('epoch_loss:{}'.format(float(loss.item())))\n",
    "    return epoch_loss / len(iterator)\n",
    "\n",
    "'''\n",
    "Evaluation\n",
    "'''\n",
    "def evaluate(model, iterator, loss_slot, loss_intent):\n",
    "    model.eval()  # eval mode: disables dropout and freezes batchnorm statistics\n",
    "    epoch_loss = 0\n",
    "    with torch.no_grad():  # no gradient tracking\n",
    "        for i, batch in enumerate(iterator):\n",
    "            src, src_len = batch.source  # src=[batch_size, seq_len]\n",
    "            trg, _ = batch.target  # trg=[batch_size, seq_len]\n",
    "            label = batch.intent\n",
    "            src = src.to(device)\n",
    "            trg = trg.to(device)\n",
    "            label = label.to(device)\n",
    "            # output=[batch_size, seq_len, output_dim]\n",
    "            slot_outputs, intent_outputs = model(src, src_len, trg, teacher_forcing_ration=0)  # no teacher forcing during evaluation: each step consumes the previous prediction\n",
    "\n",
    "            # Exclude the leading <sos> position from the loss, then flatten\n",
    "            output_dim = slot_outputs.shape[-1]\n",
    "            slot_outputs = slot_outputs[:, 1:, :].reshape(-1, output_dim)  # output=[batch_size * (seq_len - 1), output_dim]\n",
    "            trg = trg[:, 1:].reshape(-1)  # trg=[batch_size * (seq_len - 1)]\n",
    "            loss1 = loss_slot(slot_outputs, trg)\n",
    "            loss2 = loss_intent(intent_outputs, label)\n",
    "            loss = loss1 + loss2\n",
    "            epoch_loss += float(loss.item())\n",
    "    return epoch_loss / len(iterator)\n",
    "\n",
    "\n",
    "def train_model(model, train_iterator, val_iterator, optimizer, scheduler, loss_slot, loss_intent, n_epochs, clip, model_path, writer):\n",
    "    '''\n",
    "    Full training procedure:\n",
    "    1. after each epoch, check whether the model reached the best validation loss so far;\n",
    "       if so, update the best loss and save the model parameters\n",
    "    2. print each epoch's losses and perplexities\n",
    "    '''\n",
    "    best_valid_loss = float('inf')\n",
    "    for epoch in range(n_epochs):\n",
    "        start_time = time.time()\n",
    "        train_loss = train(model, train_iterator, optimizer, loss_slot, loss_intent, clip)\n",
    "        writer.add_scalar('loss',train_loss,global_step=epoch+1)\n",
    "        \n",
    "        valid_loss = evaluate(model, val_iterator, loss_slot, loss_intent)\n",
    "        end_time = time.time()\n",
    "        epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n",
    "\n",
    "        # checkpoint only when validation improves\n",
    "        if valid_loss < best_valid_loss:\n",
    "            best_valid_loss = valid_loss\n",
    "            torch.save(model.state_dict(), model_path)\n",
    "        # scheduler.step()\n",
    "        print('epoch:{},time-mins:{},time-secs:{}'.format(epoch + 1, epoch_mins, epoch_secs))\n",
    "        print('train loss:{},train perplexity:{}'.format(train_loss, math.exp(train_loss)))\n",
    "        print('val loss:{}, val perplexity:{}'.format(valid_loss, math.exp(valid_loss)))\n",
    "    writer.flush()\n",
    "    writer.close()\n",
    "\n",
    "    #每个epoch所花时间\n",
    "def epoch_time(start_time, end_time):\n",
    "    run_tim = end_time - start_time\n",
    "    run_mins = int(run_tim / 60)\n",
    "    run_secs = int(run_tim-(run_mins * 60))\n",
    "    return run_mins,run_secs\n",
    "\n",
    "#对所有模块和子模块进行权重初始化\n",
    "def init_weights(model):\n",
    "    for name,param in model.named_parameters():\n",
    "        nn.init.uniform_(param.data, -0.08, 0.08)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Selected optimization level O1:  Insert automatic casts around Pytorch functions and Tensor methods.\n",
      "\n",
      "Defaults for this optimization level are:\n",
      "enabled                : True\n",
      "opt_level              : O1\n",
      "cast_model_type        : None\n",
      "patch_torch_functions  : True\n",
      "keep_batchnorm_fp32    : None\n",
      "master_weights         : None\n",
      "loss_scale             : dynamic\n",
      "Processing user overrides (additional kwargs that are not None)...\n",
      "After processing overrides, optimization options are:\n",
      "enabled                : True\n",
      "opt_level              : O1\n",
      "cast_model_type        : None\n",
      "patch_torch_functions  : True\n",
      "keep_batchnorm_fp32    : None\n",
      "master_weights         : None\n",
      "loss_scale             : dynamic\n",
      "epoch:1,time-mins:0,time-secs:15\n",
      "train loss:3.855553217423268,train perplexity:47.254751863524106\n",
      "val loss:3.5935122966766357, val perplexity:36.36156450873523\n",
      "epoch:2,time-mins:0,time-secs:15\n",
      "train loss:3.158393994355813,train perplexity:23.532771799981425\n",
      "val loss:3.6182515621185303, val perplexity:37.272342441950684\n",
      "epoch:3,time-mins:0,time-secs:15\n",
      "train loss:3.0986371407142053,train perplexity:22.167719203125817\n",
      "val loss:3.4812960624694824, val perplexity:32.50181917776026\n",
      "epoch:4,time-mins:0,time-secs:15\n",
      "train loss:3.0738250414530435,train perplexity:21.62445913599795\n",
      "val loss:3.6066794395446777, val perplexity:36.84350837237021\n",
      "epoch:5,time-mins:0,time-secs:15\n",
      "train loss:3.0592019924750695,train perplexity:21.310544399982117\n",
      "val loss:3.430021286010742, val perplexity:30.877299997221357\n",
      "epoch:6,time-mins:0,time-secs:15\n",
      "train loss:3.042655932597625,train perplexity:20.960839949944265\n",
      "val loss:3.501282215118408, val perplexity:33.15794032563873\n",
      "epoch:7,time-mins:0,time-secs:15\n",
      "train loss:3.0404105186462402,train perplexity:20.91382698901992\n",
      "val loss:3.404585123062134, val perplexity:30.10180458844302\n",
      "epoch:8,time-mins:0,time-secs:15\n",
      "train loss:3.034534827256814,train perplexity:20.791304101693207\n",
      "val loss:3.477362632751465, val perplexity:32.37422665892737\n",
      "epoch:9,time-mins:0,time-secs:15\n",
      "train loss:3.0289994447659225,train perplexity:20.67653422164814\n",
      "val loss:3.4190428256988525, val perplexity:30.540168761707783\n",
      "epoch:10,time-mins:0,time-secs:15\n",
      "train loss:3.021002317086244,train perplexity:20.511840752588032\n",
      "val loss:3.519343376159668, val perplexity:33.762252084254754\n"
     ]
    }
   ],
   "source": [
    "from torch.utils.tensorboard import SummaryWriter\n",
    "writer = SummaryWriter(os.getcwd()+'/log', comment='intent_slot')\n",
    "\n",
    "\n",
    "# Hyperparameters\n",
    "encoder_embedding_dim = 128\n",
    "decoder_embedding_dim = 128\n",
    "hidden_dim = 258  # NOTE(review): 258 is unusual; 256 was presumably intended -- confirm\n",
    "n_layers = 1\n",
    "encoder_dropout = 0.1\n",
    "decoder_dropout = 0.1\n",
    "lr = 0.01\n",
    "gamma = 0.1\n",
    "weight_decay = 0.1  # NOTE(review): unused by the active SGD optimizer (only the commented-out Adam used it)\n",
    "n_epochs = 10\n",
    "clip = 1.0\n",
    "model_path = os.path.join(os.getcwd(), \"model.h5\")\n",
    "\n",
    "model, optimizer, scheduler, loss_slot, loss_intent = build_model(SOURCE,\n",
    "                                                  TARGET,\n",
    "                                                  LABEL,\n",
    "                                                  encoder_embedding_dim,\n",
    "                                                  decoder_embedding_dim,\n",
    "                                                  hidden_dim,\n",
    "                                                  n_layers,\n",
    "                                                  encoder_dropout,\n",
    "                                                  decoder_dropout,\n",
    "                                                  lr,\n",
    "                                                  gamma,\n",
    "                                                  weight_decay)\n",
    "\n",
    "# wrap model and optimizer for apex mixed-precision training (O1 = automatic casting)\n",
    "model, optimizer = amp.initialize(model, optimizer, opt_level='O1')\n",
    "\n",
    "train_model(model,\n",
    "            train_iter,\n",
    "            val_iter,\n",
    "            optimizer,\n",
    "            scheduler,\n",
    "            loss_slot, \n",
    "            loss_intent,\n",
    "            n_epochs,\n",
    "            clip,\n",
    "            model_path,\n",
    "            writer)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
