{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## PyTorch Tutorial\n",
    "MILA, November 2017\n",
    "\n",
    "By Sandeep Subramanian"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Neural Machine Translation (Seq2Seq)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import time\n",
    "import numpy as np\n",
    "from __future__ import print_function"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torch.nn.init as init\n",
    "import torch.nn.functional as F\n",
    "from torch.autograd import Variable\n",
    "from torch.nn.utils.rnn import pack_padded_sequence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import codecs\n",
    "import nltk"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Read training, validation & test data\n",
    "\n",
    "Training data was obtained from http://www.manythings.org/anki/ and partitioned randomly into train, dev and test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "train_lines = [line.strip().split('\\t') for line in codecs.open('data/jpn-train.txt', 'r', encoding='utf-8')]\n",
    "dev_lines = [line.strip().split('\\t') for line in codecs.open('data/jpn-dev.txt', 'r', encoding='utf-8')]\n",
    "test_lines = [line.strip().split('\\t') for line in codecs.open('data/jpn-test.txt', 'r', encoding='utf-8')]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Compute source and target vocabularies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Number of unique Japanese words : 2367 \n",
      "Number of unique English words : 16065 \n"
     ]
    }
   ],
   "source": [
    "src_vocab = set()\n",
    "trg_vocab = set()\n",
    "for line in train_lines:\n",
    "    for word in line[1]:\n",
    "        if word not in src_vocab:\n",
    "            src_vocab.add(word)\n",
    "    for word in line[0].split():\n",
    "        if word not in trg_vocab:\n",
    "            trg_vocab.add(word)\n",
    "\n",
    "# Add special tokens to the source and target vocabularies\n",
    "src_vocab.add('<s>')\n",
    "src_vocab.add('</s>')\n",
    "src_vocab.add('<unk>')\n",
    "src_vocab.add('<pad>')\n",
    "\n",
    "trg_vocab.add('<s>')\n",
    "trg_vocab.add('</s>')\n",
    "trg_vocab.add('<unk>')\n",
    "trg_vocab.add('<pad>')\n",
    "\n",
    "src_word2id = {word: idx for idx, word in enumerate(src_vocab)}\n",
    "src_id2word = {idx: word for idx, word in enumerate(src_vocab)}\n",
    "\n",
    "trg_word2id = {word: idx for idx, word in enumerate(trg_vocab)}\n",
    "trg_id2word = {idx: word for idx, word in enumerate(trg_vocab)}\n",
    "\n",
    "print('Number of unique Japanese words : %d ' % (len(src_vocab)))\n",
    "print('Number of unique English words : %d ' % (len(trg_vocab)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Create Seq2Seq model with GRUs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "class Seq2Seq(nn.Module):\n",
    "    \"\"\"A Vanilla Sequence to Sequence (Seq2Seq) model with LSTMs.\n",
    "    Ref: Sequence to Sequence Learning with Neural Nets\n",
    "    https://arxiv.org/abs/1409.3215\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self, src_emb_dim, trg_emb_dim, src_vocab_size,\n",
    "        trg_vocab_size, src_hidden_dim, trg_hidden_dim,\n",
    "        pad_token_src, pad_token_trg, bidirectional=False,\n",
    "        nlayers_src=1, nlayers_trg=1\n",
    "    ):\n",
    "        \"\"\"Initialize Seq2Seq Model.\"\"\"\n",
    "        super(Seq2Seq, self).__init__()\n",
    "        self.src_vocab_size = src_vocab_size\n",
    "        self.trg_vocab_size = trg_vocab_size\n",
    "        self.src_emb_dim = src_emb_dim\n",
    "        self.trg_emb_dim = trg_emb_dim\n",
    "        self.src_hidden_dim = src_hidden_dim\n",
    "        self.trg_hidden_dim = trg_hidden_dim\n",
    "        self.bidirectional = bidirectional\n",
    "        self.nlayers_src = nlayers_src\n",
    "        self.nlayers_trg = nlayers_trg\n",
    "        self.pad_token_src = pad_token_src\n",
    "        self.pad_token_trg = pad_token_trg\n",
    "        \n",
    "        # Word Embedding look-up table for the soruce language\n",
    "        self.src_embedding = nn.Embedding(\n",
    "            self.src_vocab_size,\n",
    "            self.src_emb_dim,\n",
    "            self.pad_token_src,\n",
    "        )\n",
    "\n",
    "        # Word Embedding look-up table for the target language\n",
    "        self.trg_embedding = nn.Embedding(\n",
    "            self.trg_vocab_size,\n",
    "            self.trg_emb_dim,\n",
    "            self.pad_token_trg,\n",
    "        )\n",
    "\n",
    "        # Encoder GRU\n",
    "        self.encoder = nn.GRU(\n",
    "            self.src_emb_dim // 2 if self.bidirectional else self.src_emb_dim,\n",
    "            self.src_hidden_dim,\n",
    "            self.nlayers_src,\n",
    "            bidirectional=bidirectional,\n",
    "            batch_first=True,\n",
    "        )\n",
    "\n",
    "        # Decoder GRU\n",
    "        self.decoder = nn.GRU(\n",
    "            self.trg_emb_dim,\n",
    "            self.trg_hidden_dim,\n",
    "            self.nlayers_trg,\n",
    "            batch_first=True\n",
    "        )\n",
    "        \n",
    "        # Projection layer from decoder hidden states to target language vocabulary\n",
    "        self.decoder2vocab = nn.Linear(trg_hidden_dim, trg_vocab_size)\n",
    "\n",
    "    def forward(self, input_src, input_trg, src_lengths):\n",
    "        # Lookup word embeddings in source and target minibatch\n",
    "        src_emb = self.src_embedding(input_src)\n",
    "        trg_emb = self.trg_embedding(input_trg)\n",
    "        \n",
    "        # Pack padded sequence for length masking in encoder RNN (This requires sorting input sequence by length)\n",
    "        src_emb = pack_padded_sequence(src_emb, src_lengths, batch_first=True)\n",
    "        \n",
    "        # Run sequence of embeddings through the encoder GRU\n",
    "        _, src_h_t = self.encoder(src_emb)\n",
    "        \n",
    "        # Extract the last hidden state of the GRU\n",
    "        h_t = torch.cat((src_h_t[-1], src_h_t[-2]), 1) if self.bidirectional else src_h_t[-1]\n",
    "\n",
    "        # Initialize the decoder GRU with the last hidden state of the encoder and \n",
    "        # run target inputs through the decoder.\n",
    "        trg_h, _ = self.decoder(trg_emb, h_t.unsqueeze(0).expand(self.nlayers_trg, h_t.size(0), h_t.size(1)))\n",
    "        \n",
    "        # Merge batch and time dimensions to pass to a linear layer\n",
    "        trg_h_reshape = trg_h.contiguous().view(\n",
    "            trg_h.size(0) * trg_h.size(1), trg_h.size(2)\n",
    "        )\n",
    "        \n",
    "        # Affine transformation of all decoder hidden states\n",
    "        decoder2vocab = self.decoder2vocab(trg_h_reshape)\n",
    "        \n",
    "        # Reshape\n",
    "        decoder2vocab = decoder2vocab.view(\n",
    "            trg_h.size(0), trg_h.size(1), decoder2vocab.size(1)\n",
    "        )\n",
    "\n",
    "        return decoder2vocab\n",
    "    \n",
    "    def decode(self, decoder2vocab):\n",
    "        # Turn decoder output into a probabiltiy distribution over vocabulary\n",
    "        decoder2vocab_reshape = decoder2vocab.view(-1, decoder2vocab.size(2))\n",
    "        word_probs = F.softmax(decoder2vocab_reshape)\n",
    "        word_probs = word_probs.view(\n",
    "            decoder2vocab.size(0), decoder2vocab.size(1), decoder2vocab.size(2)\n",
    "        )\n",
    "\n",
    "        return word_probs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "def get_parallel_minibatch(lines, src_word2id, trg_word2id, index, batch_size, volatile=False):\n",
    "        \n",
    "        # Get source sentences for this minibatch\n",
    "        src_lines = [\n",
    "            ['<s>'] + list(line[1]) + ['</s>']\n",
    "            for line in lines[index: index + batch_size]\n",
    "        ]\n",
    "\n",
    "        # Get target sentences for this minibatch\n",
    "        trg_lines = [\n",
    "            ['<s>'] + line[0].split() + ['</s>']\n",
    "            for line in lines[index: index + batch_size]\n",
    "        ]\n",
    "        \n",
    "        # Sort source sentences by length for length masking in RNNs\n",
    "        src_lens = [len(line) for line in src_lines]\n",
    "        sorted_indices = np.argsort(src_lens)[::-1]\n",
    "        \n",
    "        # Reorder sentences based on source lengths\n",
    "        sorted_src_lines = [src_lines[idx] for idx in sorted_indices]\n",
    "        sorted_trg_lines = [trg_lines[idx] for idx in sorted_indices]\n",
    "        \n",
    "        # Compute new sentence lengths\n",
    "        sorted_src_lens = [len(line) for line in sorted_src_lines]\n",
    "        sorted_trg_lens = [len(line) for line in sorted_trg_lines]\n",
    "        \n",
    "        # Get max source and target lengths to pad input and output sequences\n",
    "        max_src_len = max(sorted_src_lens)\n",
    "        max_trg_len = max(sorted_trg_lens)\n",
    "        \n",
    "        # Construct padded source input sequence\n",
    "        input_lines_src = [\n",
    "            [src_word2id[w] if w in src_word2id else src_word2id['<unk>'] for w in line] +\n",
    "            [src_word2id['<pad>']] * (max_src_len - len(line))\n",
    "            for line in sorted_src_lines\n",
    "        ]\n",
    "\n",
    "        # Construct padded target input sequence\n",
    "        input_lines_trg = [\n",
    "            [trg_word2id[w] if w in trg_word2id else trg_word2id['<unk>'] for w in line[:-1]] +\n",
    "            [trg_word2id['<pad>']] * (max_trg_len - len(line))\n",
    "            for line in sorted_trg_lines\n",
    "        ]\n",
    "\n",
    "        # Construct padded target output sequence (Note: Output sequence is just the input shifted by 1 position)\n",
    "        # This is for teacher-forcing\n",
    "        output_lines_trg = [\n",
    "            [trg_word2id[w] if w in trg_word2id else trg_word2id['<unk>'] for w in line[1:]] +\n",
    "            [trg_word2id['<pad>']] * (max_trg_len - len(line))\n",
    "            for line in sorted_trg_lines\n",
    "        ]\n",
    "\n",
    "        input_lines_src = Variable(torch.LongTensor(input_lines_src), volatile=volatile)\n",
    "        input_lines_trg = Variable(torch.LongTensor(input_lines_trg), volatile=volatile)\n",
    "        output_lines_trg = Variable(torch.LongTensor(output_lines_trg), volatile=volatile)\n",
    "\n",
    "        return {\n",
    "            'input_src': input_lines_src,\n",
    "            'input_trg': input_lines_trg,\n",
    "            'output_trg': output_lines_trg,\n",
    "            'src_lens': sorted_src_lens\n",
    "        }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# True when a CUDA GPU is present; used below to move the model and\n",
     "# each minibatch onto the GPU with .cuda().\n",
     "cuda_available = torch.cuda.is_available()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
     "# Build the model: 128-d word embeddings and 512-d GRU hidden states on\n",
     "# both encoder and decoder; <pad> ids are passed so padding embeddings\n",
     "# are treated as padding_idx by nn.Embedding.\n",
     "seq2seq = Seq2Seq(\n",
     "    src_emb_dim=128, trg_emb_dim=128,\n",
     "    src_vocab_size=len(src_word2id), trg_vocab_size=len(trg_word2id),\n",
     "    src_hidden_dim=512, trg_hidden_dim=512,\n",
     "    pad_token_src=src_word2id['<pad>'],\n",
     "    pad_token_trg=trg_word2id['<pad>'],\n",
     ")\n",
     "\n",
     "if cuda_available:\n",
     "    seq2seq = seq2seq.cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
     "optimizer = optim.Adam(seq2seq.parameters(), lr=4e-4)\n",
     "# Per-class weight vector for the loss: the <pad> class gets weight 0 so\n",
     "# padding positions contribute nothing to the cross-entropy.\n",
     "weight_mask = torch.ones(len(trg_word2id))\n",
     "if cuda_available:\n",
     "    weight_mask = weight_mask.cuda()\n",
     "weight_mask[trg_word2id['<pad>']] = 0\n",
     "loss_criterion = nn.CrossEntropyLoss(weight=weight_mask)\n",
     "batch_size = 64"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch : 0 Training Loss : 5.507\n",
      "Epoch : 0 Dev Loss : 4.752\n",
      "Epoch : 0 Test Loss : 4.679\n",
      "-------------------------------------------------------------\n",
      "Epoch : 1 Training Loss : 4.257\n",
      "Epoch : 1 Dev Loss : 4.223\n",
      "Epoch : 1 Test Loss : 4.122\n",
      "-------------------------------------------------------------\n",
      "Epoch : 2 Training Loss : 3.636\n",
      "Epoch : 2 Dev Loss : 3.913\n",
      "Epoch : 2 Test Loss : 3.787\n",
      "-------------------------------------------------------------\n",
      "Epoch : 3 Training Loss : 3.156\n",
      "Epoch : 3 Dev Loss : 3.702\n",
      "Epoch : 3 Test Loss : 3.555\n",
      "-------------------------------------------------------------\n",
      "Epoch : 4 Training Loss : 2.757\n",
      "Epoch : 4 Dev Loss : 3.554\n",
      "Epoch : 4 Test Loss : 3.392\n",
      "-------------------------------------------------------------\n",
      "Epoch : 5 Training Loss : 2.418\n",
      "Epoch : 5 Dev Loss : 3.450\n",
      "Epoch : 5 Test Loss : 3.276\n",
      "-------------------------------------------------------------\n",
      "Epoch : 6 Training Loss : 2.127\n",
      "Epoch : 6 Dev Loss : 3.381\n",
      "Epoch : 6 Test Loss : 3.191\n",
      "-------------------------------------------------------------\n",
      "Epoch : 7 Training Loss : 1.875\n",
      "Epoch : 7 Dev Loss : 3.343\n",
      "Epoch : 7 Test Loss : 3.141\n",
      "-------------------------------------------------------------\n",
      "Epoch : 8 Training Loss : 1.660\n",
      "Epoch : 8 Dev Loss : 3.322\n",
      "Epoch : 8 Test Loss : 3.115\n",
      "-------------------------------------------------------------\n",
      "Epoch : 9 Training Loss : 1.468\n",
      "Epoch : 9 Dev Loss : 3.297\n",
      "Epoch : 9 Test Loss : 3.086\n",
      "-------------------------------------------------------------\n",
      "Epoch : 10 Training Loss : 1.293\n",
      "Epoch : 10 Dev Loss : 3.286\n",
      "Epoch : 10 Test Loss : 3.066\n",
      "-------------------------------------------------------------\n",
      "Epoch : 11 Training Loss : 1.138\n",
      "Epoch : 11 Dev Loss : 3.311\n",
      "Epoch : 11 Test Loss : 3.085\n",
      "-------------------------------------------------------------\n",
      "Epoch : 12 Training Loss : 1.005\n",
      "Epoch : 12 Dev Loss : 3.322\n",
      "Epoch : 12 Test Loss : 3.090\n",
      "-------------------------------------------------------------\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-12-1f52b6a010e6>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     25\u001b[0m         \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     26\u001b[0m         \u001b[0;31m# Gradient clipping to avoid exploding gradients\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 27\u001b[0;31m         \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclip_grad_norm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mseq2seq\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m5.\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     28\u001b[0m         \u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     29\u001b[0m         \u001b[0mlosses\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/sandeep/anaconda2/lib/python2.7/site-packages/torch/nn/utils/clip_grad.pyc\u001b[0m in \u001b[0;36mclip_grad_norm\u001b[0;34m(parameters, max_norm, norm_type)\u001b[0m\n\u001b[1;32m     24\u001b[0m         \u001b[0mtotal_norm\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     25\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mp\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 26\u001b[0;31m             \u001b[0mparam_norm\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnorm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnorm_type\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     27\u001b[0m             \u001b[0mtotal_norm\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mparam_norm\u001b[0m \u001b[0;34m**\u001b[0m \u001b[0mnorm_type\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     28\u001b[0m         \u001b[0mtotal_norm\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtotal_norm\u001b[0m \u001b[0;34m**\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;36m1.\u001b[0m \u001b[0;34m/\u001b[0m \u001b[0mnorm_type\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "for epoch in range(15):\n",
    "    losses = []\n",
    "    for j in range(0, len(train_lines), batch_size):\n",
    "        # Get minibatch of examples\n",
    "        minibatch = get_parallel_minibatch(\n",
    "            lines=train_lines, src_word2id=src_word2id,\n",
    "            trg_word2id=trg_word2id, index=j, batch_size=batch_size\n",
    "        )\n",
    "        \n",
    "        if cuda_available:\n",
    "            minibatch['input_src'] = minibatch['input_src'].cuda()\n",
    "            minibatch['input_trg'] = minibatch['input_trg'].cuda()\n",
    "            minibatch['output_trg'] = minibatch['output_trg'].cuda()\n",
    "        \n",
    "        decoder_out = seq2seq(\n",
    "            input_src=minibatch['input_src'], input_trg=minibatch['input_trg'], src_lengths=minibatch['src_lens']\n",
    "        )\n",
    "        \n",
    "        loss = loss_criterion(\n",
    "            decoder_out.contiguous().view(-1, decoder_out.size(2)),\n",
    "            minibatch['output_trg'].contiguous().view(-1)\n",
    "        )\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        # Gradient clipping to avoid exploding gradients\n",
    "        torch.nn.utils.clip_grad_norm(seq2seq.parameters(), 5.)\n",
    "        optimizer.step()\n",
    "        losses.append(loss.data[0])\n",
    "    \n",
    "    dev_nll = []\n",
    "    for j in range(0, len(dev_lines), batch_size):\n",
    "        # Get minibatch of examples\n",
    "        minibatch = get_parallel_minibatch(\n",
    "            lines=dev_lines, src_word2id=src_word2id,\n",
    "            trg_word2id=trg_word2id, index=j, batch_size=batch_size,\n",
    "            volatile=True\n",
    "        )\n",
    "        \n",
    "        if cuda_available:\n",
    "            minibatch['input_src'] = minibatch['input_src'].cuda()\n",
    "            minibatch['input_trg'] = minibatch['input_trg'].cuda()\n",
    "            minibatch['output_trg'] = minibatch['output_trg'].cuda()\n",
    "        \n",
    "        decoder_out = seq2seq(\n",
    "            input_src=minibatch['input_src'], input_trg=minibatch['input_trg'], src_lengths=minibatch['src_lens']\n",
    "        )\n",
    "        \n",
    "        loss = loss_criterion(\n",
    "            decoder_out.contiguous().view(-1, decoder_out.size(2)),\n",
    "            minibatch['output_trg'].contiguous().view(-1)\n",
    "        )\n",
    "\n",
    "        dev_nll.append(loss.data[0])\n",
    "    \n",
    "    test_nll = []\n",
    "    for j in range(0, len(test_lines), batch_size):\n",
    "        # Get minibatch of examples\n",
    "        minibatch = get_parallel_minibatch(\n",
    "            lines=test_lines, src_word2id=src_word2id,\n",
    "            trg_word2id=trg_word2id, index=j, batch_size=batch_size,\n",
    "            volatile=True\n",
    "        )\n",
    "        \n",
    "        if cuda_available:\n",
    "            minibatch['input_src'] = minibatch['input_src'].cuda()\n",
    "            minibatch['input_trg'] = minibatch['input_trg'].cuda()\n",
    "            minibatch['output_trg'] = minibatch['output_trg'].cuda()\n",
    "        \n",
    "        decoder_out = seq2seq(\n",
    "            input_src=minibatch['input_src'], input_trg=minibatch['input_trg'], src_lengths=minibatch['src_lens']\n",
    "        )\n",
    "        \n",
    "        loss = loss_criterion(\n",
    "            decoder_out.contiguous().view(-1, decoder_out.size(2)),\n",
    "            minibatch['output_trg'].contiguous().view(-1)\n",
    "        )\n",
    "\n",
    "        test_nll.append(loss.data[0])\n",
    "    \n",
    "    print('Epoch : %d Training Loss : %.3f' % (epoch, np.mean(losses)))\n",
    "    print('Epoch : %d Dev Loss : %.3f' % (epoch, np.mean(dev_nll)))\n",
    "    print('Epoch : %d Test Loss : %.3f' % (epoch, np.mean(test_nll)))\n",
    "    print('-------------------------------------------------------------')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "### Let's see what the model produces for a few sentences in our dev set"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Prediction : Tom found Tom unlikely unlikely that Tom would interested planning by Mary has eat able to see the party \n",
      "Gold : I think it's highly unlikely that Tom was not aware that he wouldn't be allowed to enter the museum without his parents. \n",
      "---------------\n",
      "Prediction : It news was me a times I I the grandmother. \n",
      "Gold : The photo brought back many happy memories of my childhood. \n",
      "---------------\n",
      "Prediction : I thought to find a test in I was in the \n",
      "Gold : I expected to make new friends when I moved to Boston. \n",
      "---------------\n",
      "Prediction : I've never heard him speak of the job. \n",
      "Gold : I've never heard him complaining about his meals. \n",
      "---------------\n",
      "Prediction : I thought it would be an to one to go to \n",
      "Gold : I thought it would be fun for us to go skiing together. \n",
      "---------------\n",
      "Prediction : The man was out of his hand to take me to the station. \n",
      "Gold : The man went out of his way to take me to the station. \n",
      "---------------\n",
      "Prediction : Nobody must have had my cold. with mistake. \n",
      "Gold : Someone must have taken my umbrella by mistake. \n",
      "---------------\n",
      "Prediction : It's would surprised to she had to a good cook. \n",
      "Gold : I am surprised that she refused such a good offer. \n",
      "---------------\n",
      "Prediction : Tom has never been to to do this \n",
      "Gold : Tom has never been able to beat me. \n",
      "---------------\n",
      "Prediction : The was the night yesterday. had yesterday. \n",
      "Gold : Everyone loved the <unk> I baked yesterday. \n",
      "---------------\n",
      "Prediction : The been a picture from the \n",
      "Gold : I've ordered a book from Amazon.com. \n",
      "---------------\n",
      "Prediction : The factory produces CD in the in day. \n",
      "Gold : The factory produces thousands of bottles every month. \n",
      "---------------\n",
      "Prediction : The is the earth's surface is out in snow. \n",
      "Gold : Three-fourths of the earth's surface is covered with water. \n",
      "---------------\n",
      "Prediction : He not cut good but he bit \n",
      "Gold : He's not a doctor, but a nurse. \n",
      "---------------\n",
      "Prediction : I'll be in this tomorrow here afternoon. \n",
      "Gold : She'll be up and around this afternoon. \n",
      "---------------\n",
      "Prediction : The sun was so up in school. with. \n",
      "Gold : The situation was getting difficult to deal with. \n",
      "---------------\n",
      "Prediction : I told him to go the room. \n",
      "Gold : I told him to leave the room. \n",
      "---------------\n",
      "Prediction : She plays a every a older for day. \n",
      "Gold : She spends time with her grandmother every Sunday. \n",
      "---------------\n",
      "Prediction : Tom doesn't had no pretty with Mary. else. \n",
      "Gold : Tom hasn't had a fight with anybody lately. \n",
      "---------------\n",
      "Prediction : He asked me to pass him for salt. \n",
      "Gold : He asked me to pass him the salt. \n",
      "---------------\n",
      "Prediction : She's is just to letter to \n",
      "Gold : She is writing a letter now. \n",
      "---------------\n",
      "Prediction : Let's careful to to catch anything much. \n",
      "Gold : Be sure not to eat too much. \n",
      "---------------\n",
      "Prediction : It was not a joke. \n",
      "Gold : It was only a partial success. \n",
      "---------------\n",
      "Prediction : We won't not be \n",
      "Gold : We will never agree. \n",
      "---------------\n",
      "Prediction : She saw to the sight of a town. \n",
      "Gold : She froze at the sight of the bear. \n",
      "---------------\n",
      "Prediction : The bag's is \n",
      "Gold : Your reputation <unk> you. \n",
      "---------------\n",
      "Prediction : I need a bad for you. \n",
      "Gold : I have a gift for you. \n",
      "---------------\n",
      "Prediction : Quite man of people attended when the middle \n",
      "Gold : A bunch of people died in the explosion. \n",
      "---------------\n",
      "Prediction : We helped a new \n",
      "Gold : We bought a round table. \n",
      "---------------\n",
      "Prediction : Tom came up his about late. \n",
      "Gold : Tom showed up 15 minutes late. \n",
      "---------------\n",
      "Prediction : There is to to \n",
      "Gold : Everything has its limit. \n",
      "---------------\n",
      "Prediction : Tom doesn't like French. \n",
      "Gold : Tom doesn't like cheese. \n",
      "---------------\n",
      "Prediction : You go now. now. \n",
      "Gold : Don't go there now. \n",
      "---------------\n",
      "Prediction : Are you take a car? \n",
      "Gold : Can you ride a bicycle? \n",
      "---------------\n",
      "Prediction : She has a eye for antiques. beautiful. \n",
      "Gold : She has an eye for the beautiful. \n",
      "---------------\n",
      "Prediction : It was a very beautiful flower. \n",
      "Gold : It was a very beautiful flower. \n",
      "---------------\n",
      "Prediction : Look him as \n",
      "Gold : Give him time. \n",
      "---------------\n",
      "Prediction : Can me the salt, please. you? \n",
      "Gold : Pass me the salt, will you? \n",
      "---------------\n",
      "Prediction : They dragged their bottles of wine. \n",
      "Gold : They drank two bottles of wine. \n",
      "---------------\n",
      "Prediction : Where can going to a reason. \n",
      "Gold : How about going for a swim? \n",
      "---------------\n",
      "Prediction : He got to letter to his right. \n",
      "Gold : He moved the desk to the right. \n",
      "---------------\n",
      "Prediction : Dinner is ready. \n",
      "Gold : Dinner is ready. \n",
      "---------------\n",
      "Prediction : Do you know him? he is? \n",
      "Gold : Do you know who he is? \n",
      "---------------\n",
      "Prediction : I feed to my diary every day. \n",
      "Gold : I write in my diary every day. \n",
      "---------------\n",
      "Prediction : People one on \n",
      "Gold : No <unk> allowed. \n",
      "---------------\n",
      "Prediction : He is what with the way \n",
      "Gold : He is familiar with the subject. \n",
      "---------------\n",
      "Prediction : It would you were \n",
      "Gold : I wish you success. \n",
      "---------------\n",
      "Prediction : He fell off hard. \n",
      "Gold : He got very drunk. \n",
      "---------------\n",
      "Prediction : Tom's house was late. of fashion. \n",
      "Gold : Tom's clothes are out of fashion. \n",
      "---------------\n",
      "Prediction : I don't like any God. sports. sort of thing. \n",
      "Gold : I don't go in for that sort of thing. \n",
      "---------------\n",
      "Prediction : She's is a beautiful beauty. \n",
      "Gold : She is a real beauty. \n",
      "---------------\n",
      "Prediction : Speech is silver, silence \n",
      "Gold : Speech is silver, silence is gold. \n",
      "---------------\n",
      "Prediction : Tom is a short \n",
      "Gold : Tom has a hangover. \n",
      "---------------\n",
      "Prediction : I said mistaken. \n",
      "Gold : I was <unk> \n",
      "---------------\n",
      "Prediction : I'm a same \n",
      "Gold : I'm the youngest child in the family. \n",
      "---------------\n",
      "Prediction : Don't it easy. \n",
      "Gold : Take it <unk> \n",
      "---------------\n",
      "Prediction : Don't be angry. \n",
      "Gold : Don't be mad at me. \n",
      "---------------\n",
      "Prediction : I have a fever. fever. \n",
      "Gold : I have a high temperature. \n",
      "---------------\n",
      "Prediction : He is a good temper. \n",
      "Gold : He has a bad heart. \n",
      "---------------\n",
      "Prediction : What're are you doing? \n",
      "Gold : What are you doing? \n",
      "---------------\n",
      "Prediction : It's already seven. o'clock. \n",
      "Gold : It's already nine o'clock. \n",
      "---------------\n",
      "Prediction : Get out. \n",
      "Gold : Get out! \n",
      "---------------\n",
      "Prediction : Where have I? \n",
      "Gold : Where am I? \n",
      "---------------\n",
      "Prediction : How are you? \n",
      "Gold : How are you? \n",
      "---------------\n"
     ]
    }
   ],
   "source": [
    "# Get the first minibatch in the dev set.\n",
    "minibatch = get_parallel_minibatch(\n",
    "    lines=dev_lines, src_word2id=src_word2id,\n",
    "    trg_word2id=trg_word2id, index=0, batch_size=batch_size,\n",
    "    volatile=True\n",
    ")\n",
    "\n",
    "if cuda_available:\n",
    "    minibatch['input_src'] = minibatch['input_src'].cuda()\n",
    "    minibatch['input_trg'] = minibatch['input_trg'].cuda()\n",
    "    minibatch['output_trg'] = minibatch['output_trg'].cuda()\n",
    "\n",
    "# Run it through our model (in teacher forcing mode)\n",
    "res = seq2seq(\n",
    "    input_src=minibatch['input_src'], input_trg=minibatch['input_trg'], src_lengths=minibatch['src_lens']\n",
    ")\n",
    "\n",
    "# Pick the most likely word at each time step\n",
    "res = res.data.cpu().numpy().argmax(axis=-1)\n",
    "\n",
    "# Cast targets to numpy\n",
    "gold = minibatch['output_trg'].data.cpu().numpy()\n",
    "\n",
    "# Decode indices to words for predictions and gold\n",
    "res = [[trg_id2word[x] for x in line] for line in res]\n",
    "gold = [[trg_id2word[x] for x in line] for line in gold]\n",
    "\n",
    "for r, g in zip(res, gold):\n",
    "    if '</s>' in r:\n",
    "        index = r.index('</s>')\n",
    "    else:\n",
    "        index = len(r)\n",
    "    \n",
    "    print('Prediction : %s ' % (' '.join(r[:index])))\n",
    "\n",
    "    index = g.index('</s>')\n",
    "    print('Gold : %s ' % (' '.join(g[:index])))\n",
    "    print('---------------')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
