{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import pickle\n",
    "import numpy as np\n",
    "import os\n",
    "import json\n",
    "import random\n",
    "    \n",
    "# load source words\n",
    "source_words_path = os.path.join(os.getcwd(), 'source_words.pkl')\n",
    "with open(source_words_path, 'rb') as f_source_words:\n",
    "    source_words = pickle.load(f_source_words)\n",
    "    \n",
    "# load target words\n",
    "target_words_path = os.path.join(os.getcwd(), 'target_words.pkl')\n",
    "with open(target_words_path, 'rb') as f_target_words:\n",
    "    target_words = pickle.load(f_target_words)\n",
    "    \n",
    "# load label words\n",
    "label_words_path = os.path.join(os.getcwd(), 'label_words.pkl')\n",
    "with open(label_words_path, 'rb') as f_label_words:\n",
    "    label_words = pickle.load(f_label_words)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "945\n",
      "133\n",
      "27\n",
      "1\n"
     ]
    }
   ],
   "source": [
    "# sanity-check the vocabularies loaded above\n",
    "print(len(source_words))  # source vocabulary size\n",
    "print(len(target_words))  # slot-tag vocabulary size\n",
    "print(len(label_words))   # intent-label vocabulary size\n",
    "print(source_words['<pad>'])  # index of the <pad> token (1, per the recorded output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "Original author's note (translated): encoder and decoder both use n_layers=2,\n",
    "so when computing attention the last layer of the previous hidden state is\n",
    "hidden[-1,:,:].\n",
    "NOTE(review): the instantiation later in this notebook uses n_layers=1 --\n",
    "confirm which configuration is actually intended.\n",
    "'''\n",
    "# Build the encoder\n",
    "class Encoder(nn.Module):\n",
    "    def __init__(self, input_dim, emb_dim, hidden_dim, n_layers, dropout, pad_index):\n",
    "        # input_dim: source vocabulary size; emb_dim: embedding size;\n",
    "        # hidden_dim: GRU hidden size; pad_index: index of <pad> in the vocab.\n",
    "        super(Encoder, self).__init__()\n",
    "        self.pad_index = pad_index\n",
    "        self.hidden_dim = hidden_dim\n",
    "        self.n_layers = n_layers\n",
    "        \n",
    "        self.embedding = nn.Embedding(input_dim, emb_dim, padding_idx=pad_index)\n",
    "        self.gru = nn.GRU(emb_dim, hidden_dim, n_layers, dropout=dropout, bidirectional=True, batch_first=True) # bidirectional GRU\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        # projects the concatenated final forward/backward states down to hidden_dim\n",
    "        self.fc = nn.Linear(hidden_dim * 2, hidden_dim)\n",
    "    def forward(self, src, src_len):\n",
    "        '''\n",
    "        Encode a padded batch.\n",
    "        src: [batch_size, seq_len] token ids; src_len: per-example lengths.\n",
    "        Returns (output, hidden):\n",
    "          output: [batch_size, seq_len, hidden_dim*2]\n",
    "          hidden: [batch_size, hidden_dim] -- used as the decoder's initial state.\n",
    "        '''\n",
    "        # Manual initialization (unused -- leftover from an LSTM variant):\n",
    "        # h0 = torch.zeros(self.n_layers, src.size(1), self.hidden_dim).to(device)\n",
    "        # c0 = torch.zeros(self.n_layers, src.size(1), self.hidden_dim).to(device)\n",
    "        # nn.init.kaiming_normal_(h0)\n",
    "        # nn.init.kaiming_normal_(c0)\n",
    "        # src=[batch_size, seq_len]\n",
    "        embedded = self.dropout(self.embedding(src))\n",
    "        # embedded=[batch_size, seq_len, emb_dim]\n",
    "        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, src_len, batch_first=True, enforce_sorted=True) # enforce_sorted=True requires the batch to be sorted by length (descending)\n",
    "        output, hidden = self.gru(packed)\n",
    "        # output=[batch_size, seq_len, hidden_size*2]\n",
    "        # hidden=[n_layers*2, batch_size, hidden_size]\n",
    "        \n",
    "        output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=True, padding_value=self.pad_index, total_length=len(src[0])) # returns the padded output plus the unpacked lengths\n",
    "        \n",
    "        '''\n",
    "        hidden[-2,:,:] is the forward state of the GRU's final step\n",
    "        hidden[-1,:,:] is the backward state of the GRU's final step\n",
    "        The final forward and backward hidden states are combined to form the\n",
    "        decoder's initial state.\n",
    "        hidden: [batch_size, hidden_dim]\n",
    "        '''\n",
    "        hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1)))\n",
    "        return output, hidden\n",
    "\n",
    "# Attention module: concat (additive) attention scoring\n",
    "class Attention(nn.Module):\n",
    "    '''Computes normalized attention weights over the encoder outputs.'''\n",
    "    def __init__(self, hidden_dim):\n",
    "        super(Attention, self).__init__()\n",
    "        # score = v . tanh(W [hidden ; encoder_output])\n",
    "        self.attn = nn.Linear((hidden_dim * 2) + hidden_dim, hidden_dim)\n",
    "        self.v = nn.Linear(hidden_dim, 1, bias=False)\n",
    "\n",
    "    def concat_score(self, hidden, encoder_output):\n",
    "        # hidden: [batch_size, hidden_dim]; encoder_output: [batch_size, seq_len, hidden_dim*2]\n",
    "        seq_len = encoder_output.shape[1]\n",
    "        # broadcast the decoder state across every source position (view, no copy)\n",
    "        expanded = hidden.unsqueeze(1).expand(-1, seq_len, -1)  # [batch_size, seq_len, hidden_dim]\n",
    "        combined = torch.cat((expanded, encoder_output), dim=2)\n",
    "        energy = torch.tanh(self.attn(combined))  # [batch_size, seq_len, hidden_dim]\n",
    "        return self.v(energy).squeeze(2)  # [batch_size, seq_len]\n",
    "\n",
    "    def forward(self, hidden, encoder_output):\n",
    "        # hidden = [batch_size, hidden_size]\n",
    "        # encoder_output = [batch_size, seq_len, hidden_dim*2]\n",
    "        scores = self.concat_score(hidden, encoder_output)\n",
    "        # softmax-normalize over source positions -> [batch_size, 1, seq_len]\n",
    "        return F.softmax(scores, dim=1).unsqueeze(1)\n",
    "\n",
    "# Build the decoder (decodes one timestep per call)\n",
    "class Decoder(nn.Module):\n",
    "    def __init__(self, output_dim, emb_dim, hidden_dim, n_layers, dropout):\n",
    "        # output_dim: target (slot-tag) vocabulary size\n",
    "        super(Decoder, self).__init__()\n",
    "        self.output_dim = output_dim\n",
    "        self.hidden_dim = hidden_dim\n",
    "        self.n_layers = n_layers\n",
    "\n",
    "        self.embedding = nn.Embedding(output_dim, emb_dim)\n",
    "        # GRU input is [attention context (hidden_dim*2) ; embedded token (emb_dim)]\n",
    "        self.gru = nn.GRU((hidden_dim * 2) + emb_dim, hidden_dim, n_layers, dropout=dropout, batch_first=True)\n",
    "        # slot-filling output head\n",
    "        self.slot_out = nn.Linear(hidden_dim * 2 + hidden_dim, output_dim)\n",
    "        self.attention = Attention(hidden_dim)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self, input, hidden, encoder_output):\n",
    "        '''\n",
    "        Decode a single timestep.\n",
    "        input: [batch_size] previous token ids\n",
    "        hidden: [batch_size, hidden_size], initialized from the encoder's final state\n",
    "        encoder_output: [batch_size, seq_len, hidden_dim*2]\n",
    "        Returns (prediction, hidden, attention_weights, context).\n",
    "        '''\n",
    "        input = input.unsqueeze(1)\n",
    "        # input=[batch_size, 1]\n",
    "        # hidden=[batch_size, hidden_size], initialized from the encoder's final layer\n",
    "        # encoder_output=[batch_size, seq_len, hidden_dim*2]\n",
    "        \n",
    "        # embedded=[batch_size, 1, emb_dim]\n",
    "        embedded = self.dropout(self.embedding(input))\n",
    "\n",
    "        # compute attention weights from the previous hidden state and encoder_output\n",
    "        # attention_weights=[batch_size, 1, seq_len]\n",
    "        attention_weights = self.attention(hidden, encoder_output)\n",
    "\n",
    "        '''\n",
    "        Context computation: the attention weights form a weighted sum\n",
    "        (weighted average) of the encoder hidden states; the resulting vector\n",
    "        is the context vector.\n",
    "        '''\n",
    "        context = attention_weights.bmm(encoder_output) # [batch_size, 1, seq_len]*[batch_size,seq_len,hidden_dim*2]=[batch_size, 1, hidden_dim*2]\n",
    "        \n",
    "        # concatenate the attention context and the embedding as the GRU input\n",
    "        # [batch_size, 1, hidden_dim*2+emb_dim]\n",
    "        gru_input = torch.cat([context, embedded], 2)\n",
    "        \n",
    "        # feed the context vector, current embedding and previous hidden into the GRU;\n",
    "        # the decoder GRU is unidirectional with sequence length 1, so\n",
    "        # decoder_output=[batch_size, 1, hidden_size], hidden=[1, batch_size, hidden_size]\n",
    "        # NOTE(review): hidden.unsqueeze(0) assumes n_layers == 1; with n_layers > 1\n",
    "        # the initial-hidden shape would not match -- confirm intended configuration.\n",
    "        decoder_output, hidden = self.gru(gru_input, hidden.unsqueeze(0))\n",
    "        \n",
    "\n",
    "        decoder_output_context = torch.cat([decoder_output, context], 2) # concat context with decoder output -> [batch_size, 1, 2 * hidden_dim + hidden_dim]\n",
    "        prediction = self.slot_out(decoder_output_context.squeeze(1))\n",
    "        # prediction=[batch_size, output_dim]: unnormalized scores over the tag vocabulary (softmax could be applied here)\n",
    "        return prediction, hidden.squeeze(0), attention_weights.squeeze(1), context.squeeze(1)\n",
    "\n",
    "# Build the seq2seq model from the Encoder and Decoder\n",
    "class Seq2Seq(nn.Module):\n",
    "    '''\n",
    "    Receives a source sentence,\n",
    "    uses the encoder to produce context vectors,\n",
    "    and uses the decoder to generate the predicted target sequence.\n",
    "\n",
    "    Each iteration:\n",
    "    feeds the input plus the previous hidden/cell state to the decoder,\n",
    "    receives a prediction and the next hidden/cell state from the decoder,\n",
    "    stores the prediction as part of the predicted sequence,\n",
    "    and decides whether to use \"teacher forcing\":\n",
    "        if yes: the decoder's next input is the ground-truth token\n",
    "        if no:  the decoder's next input is the predicted token (argmax of the output tensor)\n",
    "    '''\n",
    "\n",
    "    def __init__(self, predict_flag, encoder, decoder, intent_size):\n",
    "        # predict_flag: True for single-sentence inference, False for training\n",
    "        super(Seq2Seq, self).__init__()\n",
    "        self.encoder = encoder\n",
    "        self.decoder = decoder\n",
    "        self.predict_flag = predict_flag\n",
    "        # intent classification head over [encoder_hidden ; first-step context]\n",
    "        self.intent_out = nn.Linear((encoder.hidden_dim * 2) + encoder.hidden_dim, intent_size)\n",
    "        assert encoder.hidden_dim == decoder.hidden_dim, 'encoder与decoder的隐藏状态维度必须相等！'\n",
    "        assert encoder.n_layers == decoder.n_layers, 'encoder与decoder的层数必须相等！'\n",
    "        \n",
    "    def forward(self, src, src_lens, trg, teacher_forcing_ration=1.0):\n",
    "        '''\n",
    "        src=[batch_size, seq_len]\n",
    "        src_lens=[batch_size]\n",
    "        trg=[batch_size, trg_len] (ignored when predict_flag is True)\n",
    "        NOTE: the misspelled parameter name \"teacher_forcing_ration\" is kept\n",
    "        for backward compatibility with existing callers.\n",
    "        '''\n",
    "        # prediction: one sentence per call\n",
    "        if self.predict_flag:\n",
    "            assert len(src) == 1, '预测时一次输入一句话'\n",
    "            src_len = len(src[0])\n",
    "            output_tokens = []\n",
    "            encoder_output, encoder_hidden = self.encoder(src, src_lens)\n",
    "            hidden = encoder_hidden\n",
    "            # first decoder input is the <sos> token (hard-coded index 2);\n",
    "            # created on src's device so inference also works on GPU\n",
    "            input = torch.tensor(2, device=src.device).unsqueeze(0)\n",
    "            for s in range(1, src_len):\n",
    "                if s == 1:\n",
    "                    # keep the first step's context for intent classification\n",
    "                    # context = [batch_size, hidden_dim*2]\n",
    "                    output, hidden, _, context = self.decoder(input, hidden, encoder_output)\n",
    "                else:\n",
    "                    output, hidden, _, _ = self.decoder(input, hidden, encoder_output)\n",
    "                    \n",
    "                input = output.argmax(1)\n",
    "                output_token = input.squeeze().detach().item()\n",
    "                output_tokens.append(output_token)\n",
    "            # NOTE(review): if src_len <= 1 the loop never runs and `context` is\n",
    "            # unbound; inputs always end in <eos> here, so src_len >= 2 in practice.\n",
    "            concated = torch.cat((encoder_hidden, context), 1)\n",
    "            intent_outputs = self.intent_out(concated)\n",
    "            intent_outputs = intent_outputs.squeeze()\n",
    "            intent_outputs = intent_outputs.argmax()\n",
    "            return output_tokens, intent_outputs\n",
    "\n",
    "        # training\n",
    "        else:\n",
    "            '''\n",
    "            src=[batch_size, seq_len]\n",
    "            trg=[batch_size, trg_len]\n",
    "            teacher_forcing_ration is the probability of using teacher forcing;\n",
    "            e.g. 0.8 means 80% of the timesteps are fed the ground-truth token.\n",
    "            '''\n",
    "            batch_size = trg.shape[0]\n",
    "            trg_len = trg.shape[1]\n",
    "            trg_vocab_size = self.decoder.output_dim\n",
    "            # store decoder outputs.\n",
    "            # BUG FIX: `device` was a free, undefined name in this notebook and\n",
    "            # raised NameError on any training run; allocate on src's device instead.\n",
    "            slot_outputs = torch.zeros(batch_size, trg_len, trg_vocab_size, device=src.device)\n",
    "            # the encoder's final-layer hidden state (forward + backward) becomes the\n",
    "            # decoder's initial hidden state; hidden=[batch_size, hidden_size]\n",
    "            encoder_output, encoder_hidden = self.encoder(\n",
    "                src, src_lens)  \n",
    "            hidden = encoder_hidden\n",
    "            \n",
    "            # the first decoder input is <sos>\n",
    "            input = trg[:, 0] # [batch_size]\n",
    "            \n",
    "            for t in range(1, trg_len):\n",
    "                '''\n",
    "                The decoder's initial hidden state is the encoder's final hidden\n",
    "                state; each step returns predictions and a new hidden state.\n",
    "                '''\n",
    "                if t == 1:\n",
    "                    # context = [batch_size, hidden_dim*2]\n",
    "                    output, hidden, _, context = self.decoder(input, hidden, encoder_output)\n",
    "                else:\n",
    "                    output, hidden, _, _ = self.decoder(input, hidden, encoder_output)\n",
    "                # store this timestep's prediction\n",
    "                slot_outputs[:, t, :] = output\n",
    "                # decide whether to use teacher forcing for the next input\n",
    "                teacher_force = random.random() < teacher_forcing_ration\n",
    "                # token with the highest predicted score\n",
    "                predict_max = output.argmax(1)\n",
    "                # teacher forcing: feed the ground-truth token next;\n",
    "                # otherwise feed the decoder's own prediction\n",
    "                input = trg[:, t] if teacher_force else predict_max\n",
    "            # concated = [batch_size, hidden_dim * 2 + hidden_dim]\n",
    "            concated = torch.cat((encoder_hidden, context), 1)\n",
    "            intent_outputs = self.intent_out(concated)\n",
    "            # slot_outputs=[batch_size, trg_len, trg_vocab_size], intent_outputs=[batch_size, intent_size]\n",
    "            return slot_outputs, intent_outputs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "slot_prediciton:o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o o\n",
      "intent_prediction:abbreviation\n"
     ]
    }
   ],
   "source": [
    "encoder_embedding_dim = 128\n",
    "decoder_embedding_dim = 128\n",
    "hidden_dim = 256\n",
    "n_layers = 1\n",
    "encoder_dropout = 0.5\n",
    "decoder_dropout = 0.5\n",
    "\n",
    "model_path = os.path.join(os.getcwd(), \"model.h5\")\n",
    "\n",
    "input_dim = len(source_words) # source 词典大小（即词数量）\n",
    "output_dim = len(target_words) # target 词典大小（即实体类型数量）\n",
    "label_dim = len(label_words) # label 词典大小（即意图类别数量）\n",
    "\n",
    "encoder = Encoder(input_dim, encoder_embedding_dim, hidden_dim, n_layers, encoder_dropout, source_words['<pad>'])\n",
    "decoder = Decoder(output_dim, decoder_embedding_dim, hidden_dim, n_layers, decoder_dropout)\n",
    "\n",
    "model = Seq2Seq(True, encoder, decoder, label_dim)\n",
    "\n",
    "model.load_state_dict(torch.load(model_path))\n",
    "model.eval()\n",
    "\n",
    "sentence = 'i would like to find a flight from charlotte to las vegas that makes a stop in st. louis'\n",
    "with torch.no_grad():\n",
    "    tokenized = list(sentence)  # tokenize the sentence\n",
    "    tokenized.append('<eos>')\n",
    "    indexed = [source_words[t] for t in tokenized]  # convert to integer sequence\n",
    "    tensor = torch.LongTensor(indexed)  # convert to tensor\n",
    "    tensor = tensor.unsqueeze(0)  # reshape in form of batch,no. of words\n",
    "\n",
    "    slot_outputs, intent_outputs = model(tensor, [len(tensor)], None)  # prediction\n",
    "    intent = intent_outputs.detach().item()\n",
    "    slot_prediction = [target_words.itos[t] for t in slot_outputs]\n",
    "\n",
    "    print('slot_prediciton:{}'.format(' '.join(slot_prediction)))\n",
    "    print('intent_prediction:{}'.format(label_words.itos[intent]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
