{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "import os\n",
    "from collections import OrderedDict\n",
    "\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torch.autograd import Variable\n",
    "from torch.nn.parameter import Parameter\n",
    "from torch.nn.utils.rnn import PackedSequence\n",
    "\n",
    "import neural_cls\n",
    "from neural_cls.util.utils import *\n",
    "from neural_cls.util import Trainer, Loader, Initializer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class RNNBase_BB(nn.Module):\n",
    "    \"\"\"Multi-layer (bi)directional RNN with Bayes-by-Backprop weights.\n",
    "\n",
    "    Every weight/bias tensor of a standard RNN/GRU/LSTM stack is\n",
    "    parameterised by a mean and a log-variance. During training,\n",
    "    forward() samples concrete weights from these Gaussians and records\n",
    "    the log-prior (self.lpw) and log-posterior (self.lqw) of the sample;\n",
    "    in eval mode the posterior means are used directly.\n",
    "\n",
    "    NOTE(review): forward() calls self._backend.RNN, a private PyTorch\n",
    "    0.3-era API that was removed in later releases - confirm the torch\n",
    "    version before reusing this code.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, mode, input_size, hidden_size, sigma_prior,\n",
    "                 num_layers=1, batch_first=False,\n",
    "                 dropout=0, bidirectional=True):\n",
    "        \n",
    "        super(RNNBase_BB, self).__init__()\n",
    "        \n",
    "        self.mode = mode\n",
    "        self.input_size = input_size\n",
    "        self.hidden_size = hidden_size\n",
    "        self.num_layers = num_layers\n",
    "        self.batch_first = batch_first\n",
    "        self.dropout = dropout\n",
    "        self.dropout_state = {}\n",
    "        self.bidirectional = bidirectional\n",
    "        num_directions = 2 if bidirectional else 1\n",
    "        self.num_directions = num_directions\n",
    "        self.sampled_weights = []\n",
    "        self.sigma_prior = sigma_prior\n",
    "\n",
    "        # Gate pre-activations per step: LSTM has 4 gates, GRU 3, vanilla RNN 1.\n",
    "        if mode == 'LSTM':\n",
    "            gate_size = 4 * hidden_size\n",
    "        elif mode == 'GRU':\n",
    "            gate_size = 3 * hidden_size\n",
    "        else:\n",
    "            gate_size = hidden_size\n",
    "        \n",
    "        self.means = []\n",
    "        self.logvars = []\n",
    "                \n",
    "        # Build mean/log-variance Parameters for every (layer, direction)\n",
    "        # pair and register them on self under torch-style names\n",
    "        # (e.g. weight_ih_l_mu0, weight_hh_l_logvar0_reverse).\n",
    "        for layer in range(num_layers):\n",
    "            for direction in range(num_directions):\n",
    "                layer_input_size = input_size if layer == 0 else hidden_size * num_directions\n",
    "\n",
    "                w_ih_mu = Parameter(torch.Tensor(gate_size, layer_input_size))\n",
    "                w_hh_mu = Parameter(torch.Tensor(gate_size, hidden_size))                \n",
    "                w_ih_logvar = Parameter(torch.Tensor(gate_size, layer_input_size))\n",
    "                w_hh_logvar = Parameter(torch.Tensor(gate_size, hidden_size))\n",
    "                                \n",
    "                b_ih_mu = Parameter(torch.Tensor(gate_size))\n",
    "                b_hh_mu = Parameter(torch.Tensor(gate_size))\n",
    "                b_ih_logvar = Parameter(torch.Tensor(gate_size))\n",
    "                b_hh_logvar = Parameter(torch.Tensor(gate_size))\n",
    "                \n",
    "                self.means += [w_ih_mu, w_hh_mu, b_ih_mu, b_hh_mu]\n",
    "                self.logvars += [w_ih_logvar, w_hh_logvar, b_ih_logvar, b_hh_logvar]\n",
    "                \n",
    "                layer_params = (w_ih_mu,  w_ih_logvar, w_hh_mu, w_hh_logvar, b_ih_mu, b_ih_logvar, b_hh_mu, b_hh_logvar)\n",
    "\n",
    "                suffix = '_reverse' if direction == 1 else ''\n",
    "                param_names = ['weight_ih_l_mu{}{}', 'weight_ih_l_logvar{}{}', 'weight_hh_l_mu{}{}', 'weight_hh_l_logvar{}{}']\n",
    "                param_names += ['bias_ih_l_mu{}{}', 'bias_ih_l_logvar{}{}', 'bias_hh_l_mu{}{}', 'bias_hh_l_logvar{}{}']\n",
    "                \n",
    "                param_names = [x.format(layer, suffix) for x in param_names]\n",
    "\n",
    "                for name, param in zip(param_names, layer_params):\n",
    "                    setattr(self, name, param)\n",
    "\n",
    "        self.reset_parameters()\n",
    "        # Log-prior / log-posterior of the most recent weight sample,\n",
    "        # refreshed by forward() while training.\n",
    "        self.lpw = 0\n",
    "        self.lqw = 0\n",
    "\n",
    "    def _apply(self, fn):\n",
    "        # Defers entirely to nn.Module._apply (no flat-weight rebuild here).\n",
    "        ret = super(RNNBase_BB, self)._apply(fn)\n",
    "        return ret\n",
    "\n",
    "    def reset_parameters(self):\n",
    "        \"\"\"Init means ~ U(-stdv, stdv); logvars so the initial std equals stdv.\"\"\"\n",
    "        stdv = 1.0 / math.sqrt(self.hidden_size)\n",
    "        logvar_init = math.log(stdv) * 2\n",
    "        for mean in self.means:\n",
    "            mean.data.uniform_(-stdv, stdv)\n",
    "        for logvar in self.logvars:\n",
    "            logvar.data.fill_(logvar_init)\n",
    "            \n",
    "    def get_all_weights(self, weights):\n",
    "        \"\"\"Regroup the flat weight list into [w_ih, w_hh, b_ih, b_hh] chunks,\n",
    "        one per (layer, direction); relies on the construction order of __init__.\"\"\"\n",
    "        \n",
    "        start = 0\n",
    "        all_weights = []\n",
    "        for layer in range(self.num_layers):\n",
    "            for direction in range(self.num_directions):\n",
    "                w_ih = weights[start]\n",
    "                w_hh = weights[start+1]\n",
    "                b_ih = weights[start+2]\n",
    "                b_hh = weights[start+3]\n",
    "                start += 4\n",
    "                all_weights.append([w_ih, w_hh, b_ih, b_hh])\n",
    "\n",
    "        return all_weights\n",
    "    \n",
    "    def sample(self, usecuda = True):\n",
    "        \"\"\"Draw one weight sample per parameter: w = mu + eps * exp(logvar/2).\n",
    "\n",
    "        NOTE(review): eps is drawn with std sigma_prior rather than the\n",
    "        unit-variance noise of standard Bayes-by-Backprop - confirm intended.\n",
    "        \"\"\"\n",
    "        self.sampled_weights = []\n",
    "        for i in range(len(self.means)):\n",
    "            mean = self.means[i]\n",
    "            logvar = self.logvars[i]\n",
    "            eps = torch.zeros(mean.size())\n",
    "            if usecuda:\n",
    "                eps = eps.cuda()\n",
    "\n",
    "            eps.normal_(0, self.sigma_prior)\n",
    "            std = logvar.mul(0.5).exp()\n",
    "            weight = mean + Variable(eps) * std\n",
    "            self.sampled_weights.append(weight)\n",
    "            \n",
    "    def _calculate_prior(self, weights):\n",
    "        \"\"\"Sum of log N(w; 0, sigma_prior) over all weights (log_gaussian\n",
    "        presumably comes from the neural_cls.util.utils star import).\"\"\"\n",
    "        lpw = 0.\n",
    "        for w in weights:\n",
    "            lpw += log_gaussian(w, 0, self.sigma_prior).sum()\n",
    "        return lpw\n",
    "    \n",
    "    def _calculate_posterior(self, weights):\n",
    "        \"\"\"Sum of log q(w | mu, logsigma) over the sampled weights.\"\"\"\n",
    "        lqw = 0.\n",
    "        for i,w in enumerate(weights):\n",
    "            lqw += log_gaussian_logsigma(w, self.means[i], 0.5*self.logvars[i]).sum()\n",
    "        return lqw\n",
    "\n",
    "    def forward(self, input, hx=None, usecuda = True):\n",
    "        \"\"\"Run the RNN; returns (output, hidden) like torch's RNN modules.\n",
    "\n",
    "        Training: sample weights and refresh self.lpw / self.lqw.\n",
    "        Eval: use the posterior means as weights.\n",
    "        Accepts either a padded tensor or a PackedSequence.\n",
    "        \"\"\"\n",
    "        if self.training:\n",
    "            self.sample(usecuda = usecuda)\n",
    "            weights = self.sampled_weights\n",
    "            self.lpw = self._calculate_prior(weights)\n",
    "            self.lqw = self._calculate_posterior(weights)\n",
    "        else:\n",
    "            weights = self.means\n",
    "\n",
    "        self.all_weights = self.get_all_weights(weights)\n",
    "        \n",
    "        is_packed = isinstance(input, PackedSequence)\n",
    "        if is_packed:\n",
    "            input, batch_sizes = input\n",
    "            max_batch_size = batch_sizes[0]\n",
    "        else:\n",
    "            batch_sizes = None\n",
    "            max_batch_size = input.size(0) if self.batch_first else input.size(1)\n",
    "\n",
    "        # Zero initial hidden state; an LSTM additionally needs a cell state.\n",
    "        if hx is None:\n",
    "            num_directions = 2 if self.bidirectional else 1\n",
    "            hx = torch.autograd.Variable(input.data.new(self.num_layers *\n",
    "                                                        num_directions,\n",
    "                                                        max_batch_size,\n",
    "                                                        self.hidden_size).zero_(), requires_grad=False)\n",
    "            if self.mode == 'LSTM':\n",
    "                hx = (hx, hx)\n",
    "\n",
    "        # Private PyTorch 0.3 backend entry point. flat_weight=None means the\n",
    "        # weights are not one contiguous chunk - presumably the source of the\n",
    "        # flatten_parameters() warning seen when this notebook runs.\n",
    "        func = self._backend.RNN(\n",
    "            self.mode,\n",
    "            self.input_size,\n",
    "            self.hidden_size,\n",
    "            num_layers=self.num_layers,\n",
    "            batch_first=self.batch_first,\n",
    "            dropout=self.dropout,\n",
    "            train=self.training,\n",
    "            bidirectional=self.bidirectional,\n",
    "            batch_sizes=batch_sizes,\n",
    "            dropout_state=self.dropout_state,\n",
    "            flat_weight=None\n",
    "        )\n",
    "        # Invoke the backend with the explicitly supplied weight tensors.\n",
    "        output, hidden = func(input, self.all_weights, hx)\n",
    "        if is_packed:\n",
    "            output = PackedSequence(output, batch_sizes)\n",
    "        return output, hidden\n",
    "\n",
    "\n",
    "class LSTM_BB(RNNBase_BB):\n",
    "    \"\"\"Bayes-by-Backprop LSTM: RNNBase_BB with mode fixed to 'LSTM'.\"\"\"\n",
    "\n",
    "    def __init__(self, *args, **kwargs):\n",
    "        super(LSTM_BB, self).__init__('LSTM', *args, **kwargs)\n",
    "\n",
    "class baseRNN_BB(nn.Module):\n",
    "    \"\"\"Shared configuration base for Bayes-by-Backprop recurrent encoders.\n",
    "\n",
    "    Stores sizing/dropout settings, resolves the RNN cell class from its\n",
    "    name (only 'lstm' is supported), and builds the input-dropout layer.\n",
    "    Subclasses must implement forward().\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, vocab_size, hidden_size, input_dropout_p,\n",
    "                 output_dropout_p, n_layers, rnn_cell, max_len=25):\n",
    "        super(baseRNN_BB, self).__init__()\n",
    "\n",
    "        # Guard clause: reject unsupported cell names up front.\n",
    "        if rnn_cell.lower() != 'lstm':\n",
    "            raise ValueError(\"Unsupported RNN Cell: {0}\".format(rnn_cell))\n",
    "        self.rnn_cell = LSTM_BB\n",
    "\n",
    "        self.vocab_size = vocab_size\n",
    "        self.hidden_size = hidden_size\n",
    "        self.n_layers = n_layers\n",
    "        self.max_len = max_len\n",
    "        self.input_dropout_p = input_dropout_p\n",
    "        self.output_dropout_p = output_dropout_p\n",
    "        self.input_dropout = nn.Dropout(p=input_dropout_p)\n",
    "\n",
    "    def forward(self, *args, **kwargs):\n",
    "        raise NotImplementedError()\n",
    "\n",
    "class EncoderRNN_BB(baseRNN_BB):\n",
    "    \"\"\"Embeds word ids and encodes them with a Bayes-by-Backprop LSTM,\n",
    "    returning the flattened final hidden state per batch element.\"\"\"\n",
    "\n",
    "    def __init__(self, vocab_size, embedding_size ,hidden_size, sigma_prior, input_dropout_p=0, \n",
    "                 output_dropout_p=0, n_layers=1, bidirectional=True, rnn_cell='lstm'):\n",
    "        \n",
    "        super(EncoderRNN_BB, self).__init__(vocab_size, hidden_size, input_dropout_p, \n",
    "                                             output_dropout_p, n_layers, rnn_cell)\n",
    "\n",
    "        self.embedding = nn.Embedding(vocab_size, embedding_size)\n",
    "        \n",
    "        self.rnn = self.rnn_cell(embedding_size, hidden_size, sigma_prior, n_layers,\n",
    "                                 bidirectional=bidirectional, dropout=output_dropout_p,\n",
    "                                 batch_first=True)\n",
    "\n",
    "    def forward(self, words, input_lengths, usecuda = True):\n",
    "        \"\"\"Encode a padded batch of word ids (sorted by decreasing length).\n",
    "\n",
    "        Returns a (batch, n_layers * num_directions * hidden_size) tensor\n",
    "        built from the LSTM's final hidden state; the per-step sequence\n",
    "        output is discarded.\n",
    "        \"\"\"\n",
    "        batch_size = words.size()[0]\n",
    "        embedded = self.embedding(words)\n",
    "        embedded = self.input_dropout(embedded)\n",
    "        embedded = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first= True)\n",
    "        # `output` here is the hidden state tuple (h_n, c_n) for the LSTM;\n",
    "        # take h_n, move batch to dim 0, and flatten layers/directions.\n",
    "        _, output = self.rnn(embedded, usecuda = usecuda)\n",
    "        output = output[0].transpose(0,1).contiguous().view(batch_size, -1)\n",
    "        \n",
    "        return output\n",
    "    \n",
    "    def get_lpw_lqw(self):\n",
    "        \"\"\"Return the RNN's log-prior / log-posterior from its last sample.\"\"\"\n",
    "        lpw = self.rnn.lpw\n",
    "        lqw = self.rnn.lqw\n",
    "        return lpw, lqw"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class BiLSTM_BB(nn.Module):\n",
    "    \"\"\"Bayesian (Bayes-by-Backprop) BiLSTM sentence classifier.\n",
    "\n",
    "    Encodes word-id sequences with EncoderRNN_BB, applies dropout and a\n",
    "    linear layer, and trains by minimising the BBB loss (KL term from the\n",
    "    sampled weights plus a Gaussian log-likelihood on one-hot targets),\n",
    "    averaged over `n_samples` weight samples.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, word_vocab_size, word_embedding_dim, word_hidden_dim, output_size, sigma_prior, \n",
    "                 pretrained=None, n_layers = 1, bidirectional = True, dropout_p = 0.5):\n",
    "        \n",
    "        super(BiLSTM_BB, self).__init__()\n",
    "        \n",
    "        self.word_vocab_size = word_vocab_size\n",
    "        self.word_embedding_dim = word_embedding_dim\n",
    "        self.word_hidden_dim = word_hidden_dim\n",
    "        self.sigma_prior = sigma_prior\n",
    "        \n",
    "        self.initializer = Initializer()\n",
    "        self.loader = Loader()\n",
    "        \n",
    "        self.word_encoder = EncoderRNN_BB(word_vocab_size, word_embedding_dim, word_hidden_dim, \n",
    "                                          sigma_prior = sigma_prior, n_layers = n_layers, \n",
    "                                          bidirectional = bidirectional)\n",
    "        \n",
    "        # Optionally warm-start the embedding table with pretrained vectors.\n",
    "        if pretrained is not None:\n",
    "            self.word_encoder.embedding.weight = nn.Parameter(torch.FloatTensor(pretrained))\n",
    "        \n",
    "        self.dropout = nn.Dropout(p=dropout_p)\n",
    "        \n",
    "        # Encoder output size: (num_directions * n_layers) * word_hidden_dim.\n",
    "        hidden_size = 2*n_layers*word_hidden_dim if bidirectional else n_layers*word_hidden_dim\n",
    "        self.linear = nn.Linear(hidden_size, output_size)\n",
    "        self.lossfunc = nn.CrossEntropyLoss()\n",
    "        \n",
    "    def forward_pass(self, words, wordslen, usecuda=True):\n",
    "        \"\"\"Encode -> dropout -> linear; returns unnormalised class scores.\"\"\"\n",
    "        word_features = self.word_encoder(words, wordslen, usecuda=usecuda)\n",
    "        word_features = self.dropout(word_features)\n",
    "        output = self.linear(word_features)\n",
    "        return output\n",
    "        \n",
    "    def forward(self, words, tags, tagset_size, wordslen, n_batches, n_samples = 3, usecuda=True):\n",
    "        \"\"\"Return the Bayes-by-Backprop loss averaged over n_samples samples.\"\"\"\n",
    "        batch_size, max_len = words.size()\n",
    "        s_log_pw, s_log_qw, s_log_likelihood = 0., 0., 0.\n",
    "        \n",
    "        # One-hot encode the targets for the Gaussian likelihood term.\n",
    "        if usecuda:\n",
    "            onehottags = Variable(torch.zeros(batch_size, tagset_size)).cuda()\n",
    "        else:\n",
    "            onehottags = Variable(torch.zeros(batch_size, tagset_size))\n",
    "        onehottags.scatter_(1, tags.unsqueeze(1), 1)\n",
    "        \n",
    "        # `range` (not the Python-2-only `xrange`) keeps this portable;\n",
    "        # n_samples is tiny so materialising the list costs nothing.\n",
    "        for _ in range(n_samples):\n",
    "            output = self.forward_pass(words, wordslen, usecuda = usecuda)\n",
    "            sample_log_pw, sample_log_qw = self.word_encoder.get_lpw_lqw()\n",
    "            sample_log_likelihood = log_gaussian(onehottags, output, self.sigma_prior).sum() * max_len\n",
    "            s_log_pw += sample_log_pw\n",
    "            s_log_qw += sample_log_qw\n",
    "            s_log_likelihood += sample_log_likelihood\n",
    "        \n",
    "        log_pw, log_qw, log_llh = s_log_pw/n_samples, s_log_qw/n_samples, s_log_likelihood/n_samples\n",
    "        loss = bayes_loss_function(log_pw, log_qw, log_llh, n_batches, batch_size)\n",
    "        \n",
    "        return loss\n",
    "            \n",
    "    def predict(self, words, wordslen, scoreonly=False, usecuda=True):\n",
    "        \"\"\"Return softmax confidence scores (and argmax predictions unless scoreonly).\"\"\"\n",
    "        # Reuse forward_pass so `usecuda` reaches the encoder; the original\n",
    "        # duplicated the pipeline here and dropped the flag, silently\n",
    "        # forcing usecuda=True inside the encoder.\n",
    "        output = self.forward_pass(words, wordslen, usecuda=usecuda)\n",
    "        \n",
    "        scores = torch.max(F.softmax(output, dim =1), dim=1)[0].data.cpu().numpy()\n",
    "        if scoreonly:\n",
    "            return scores\n",
    "        \n",
    "        prediction = torch.max(output, dim=1)[1].data.cpu().numpy().tolist()\n",
    "        return scores, prediction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "parameters = OrderedDict()\n",
    "\n",
    "# Word-embedding size and the pretrained GloVe vectors loaded below.\n",
    "parameters['wrdim'] = 300\n",
    "parameters['ptrnd'] = 'wordvectors/glove.6B.300d.txt'\n",
    "\n",
    "# wldim: LSTM hidden size; dpout/nepch: presumably dropout prob and\n",
    "# epoch count used by the trainer - TODO confirm against Trainer.\n",
    "parameters['dpout'] = 0.5\n",
    "parameters['wldim'] = 200\n",
    "parameters['nepch'] = 10\n",
    "\n",
    "# lrate: learning rate; opsiz: number of output classes;\n",
    "# sigmp: prior std e^-3 for the Bayes-by-Backprop weights.\n",
    "parameters['lrate'] = 0.001\n",
    "parameters['batch_size'] = 50\n",
    "parameters['opsiz'] = 2\n",
    "parameters['sigmp'] = float(np.exp(-3))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# `os` was used here without ever being imported in this notebook;\n",
    "# import it explicitly so the cell runs on a fresh kernel.\n",
    "import os\n",
    "\n",
    "# Dataset and results locations for the chosen corpus.\n",
    "use_dataset = 'mareview'\n",
    "dataset_path = os.path.join('datasets', use_dataset)\n",
    "result_path = os.path.join('neural_cls/results/', use_dataset)\n",
    "loader = Loader()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 9758 unique words (202057 in total)\n",
      "Loading pretrained embeddings from wordvectors/glove.6B.300d.txt...\n",
      "Found 2 unique named entity tags\n",
      "Loaded 400000 pretrained embeddings.\n"
     ]
    }
   ],
   "source": [
    "# Load train/test splits plus vocab/tag/embedding mappings for the dataset.\n",
    "if use_dataset == 'trec':\n",
    "    train_data, test_data, mappings = loader.load_trec(dataset_path, parameters['ptrnd'], \n",
    "                                                       parameters['wrdim'])\n",
    "elif use_dataset == 'mareview':\n",
    "    train_data, test_data, mappings = loader.load_mareview(dataset_path, parameters['ptrnd'], \n",
    "                                                       parameters['wrdim'])\n",
    "else:\n",
    "    raise NotImplementedError()\n",
    "    \n",
    "word_to_id = mappings['word_to_id']\n",
    "tag_to_id = mappings['tag_to_id']\n",
    "word_embeds = mappings['word_embeds']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Unpack hyperparameters and build the Bayesian BiLSTM classifier,\n",
    "# warm-started with the pretrained word embeddings.\n",
    "word_vocab_size = len(word_to_id)\n",
    "word_embedding_dim = parameters['wrdim']\n",
    "word_hidden_dim = parameters['wldim']\n",
    "output_size = parameters['opsiz']\n",
    "sigma_prior = parameters['sigmp']\n",
    "\n",
    "model = BiLSTM_BB(word_vocab_size, word_embedding_dim, word_hidden_dim,\n",
    "                 output_size, sigma_prior=sigma_prior, pretrained = word_embeds)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_name = 'BiLSTM_BB'\n",
    "model_dir = os.path.join(result_path, model_name)\n",
    "\n",
    "# Create the results directory tree if it does not exist yet\n",
    "# (Python 2 os.makedirs has no exist_ok, hence the explicit checks).\n",
    "if not os.path.exists(result_path):\n",
    "    os.makedirs(result_path)\n",
    "if not os.path.exists(model_dir):\n",
    "    os.makedirs(model_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initial learning rate is: 0.001\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/ipykernel/__main__.py:215: UserWarning: RNN module weights are not part of single contiguous chunk of memory. This means they need to be compacted at every call, possibly greatly increasing memory usage. To compact weights again call flatten_parameters().\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1000 :  92.9076879883\n",
      "2000 :  70.799197998\n",
      "3000 :  67.2443574219\n",
      "4000 :  70.1498837891\n",
      "5000 :  56.1160869141\n",
      "6000 :  55.0630898438\n",
      "7000 :  58.9332850342\n",
      "8000 :  53.4544295654\n",
      "9000 :  52.1350965576\n",
      "********************************************************************************\n",
      "Accuracy: 0.831495, Best Accuracy: 0.831495\n",
      "********************************************************************************\n",
      "********************************************************************************\n",
      "Accuracy: 0.788425, Best Accuracy: 0.788425\n",
      "********************************************************************************\n",
      "********************************************************************************\n",
      "Saving Best Weights\n",
      "********************************************************************************\n",
      "********************************************************************************\n",
      "Epoch 1 Complete: Time Taken 26\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/serialization.py:159: UserWarning: Couldn't retrieve source code for container of type BiLSTM_BB. It won't be checked for correctness upon loading.\n",
      "  \"type \" + obj.__name__ + \". It won't be checked \"\n",
      "/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/serialization.py:159: UserWarning: Couldn't retrieve source code for container of type EncoderRNN_BB. It won't be checked for correctness upon loading.\n",
      "  \"type \" + obj.__name__ + \". It won't be checked \"\n",
      "/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/serialization.py:159: UserWarning: Couldn't retrieve source code for container of type LSTM_BB. It won't be checked for correctness upon loading.\n",
      "  \"type \" + obj.__name__ + \". It won't be checked \"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "9958 :  57.7330526428\n",
      "10958 :  37.3931756592\n",
      "11958 :  42.540442627\n",
      "12958 :  39.8780645752\n",
      "13958 :  37.6225488281\n",
      "14958 :  39.0733669434\n",
      "15958 :  41.7455275879\n",
      "16958 :  38.0560771484\n",
      "17916 :  56.5936414185\n",
      "18916 :  34.4455311279\n",
      "********************************************************************************\n",
      "Accuracy: 0.919130, Best Accuracy: 0.919130\n",
      "********************************************************************************\n",
      "********************************************************************************\n",
      "Accuracy: 0.811195, Best Accuracy: 0.811195\n",
      "********************************************************************************\n",
      "********************************************************************************\n",
      "Saving Best Weights\n",
      "********************************************************************************\n",
      "********************************************************************************\n",
      "Epoch 2 Complete: Time Taken 26\n",
      "19916 :  23.9550842896\n",
      "20916 :  25.0934352417\n",
      "21916 :  21.4783797302\n",
      "22916 :  26.1734592285\n",
      "23874 :  21.0742547455\n",
      "24874 :  22.4724936829\n",
      "25874 :  22.7008666382\n",
      "26874 :  26.0300613403\n",
      "27874 :  24.33168927\n",
      "********************************************************************************\n",
      "Accuracy: 0.954309, Best Accuracy: 0.954309\n",
      "********************************************************************************\n",
      "********************************************************************************\n",
      "Accuracy: 0.795066, Best Accuracy: 0.811195\n",
      "********************************************************************************\n",
      "********************************************************************************\n",
      "Epoch 3 Complete: Time Taken 26\n",
      "28874 :  24.5455993652\n",
      "29874 :  12.7186560516\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-9-be01a367dd3b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      7\u001b[0m \u001b[0mtrainer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mTrainer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodel_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtag_to_id\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0musedataset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'mareview'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      8\u001b[0m losses, all_F = trainer.train_model(num_epochs, train_data, test_data, learning_rate,\n\u001b[0;32m----> 9\u001b[0;31m                                     batch_size = parameters['batch_size'])\n\u001b[0m\u001b[1;32m     10\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     11\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mplot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlosses\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ubuntu/Active-NLP/neural_cls/util/trainer.pyc\u001b[0m in \u001b[0;36mtrain_model\u001b[0;34m(self, num_epochs, train_data, test_data, learning_rate, checkpoint_folder, eval_train, plot_every, adjust_lr, batch_size)\u001b[0m\n\u001b[1;32m     65\u001b[0m                 \u001b[0mwordslen\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'wordslen'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     66\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 67\u001b[0;31m                 \u001b[0mscore\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mwords\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtags\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtagset_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwordslen\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mn_batches\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0musecuda\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0musecuda\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     68\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     69\u001b[0m                 \u001b[0mloss\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mscore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mwordslen\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/nn/modules/module.pyc\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m    355\u001b[0m             \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    356\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 357\u001b[0;31m             \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    358\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    359\u001b[0m             \u001b[0mhook_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-3-f181cdf33dab>\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, words, tags, tagset_size, wordslen, n_batches, n_samples, usecuda)\u001b[0m\n\u001b[1;32m     48\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     49\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mxrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mn_samples\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m             \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward_pass\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mwords\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwordslen\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0musecuda\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0musecuda\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     51\u001b[0m             \u001b[0msample_log_pw\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msample_log_qw\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mword_encoder\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_lpw_lqw\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     52\u001b[0m             \u001b[0msample_log_likelihood\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlog_gaussian\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0monehottags\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msigma_prior\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msum\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mmax_len\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-3-f181cdf33dab>\u001b[0m in \u001b[0;36mforward_pass\u001b[0;34m(self, words, wordslen, usecuda)\u001b[0m\n\u001b[1;32m     30\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     31\u001b[0m         \u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_len\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mwords\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 32\u001b[0;31m         \u001b[0mword_features\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mword_encoder\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mwords\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwordslen\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0musecuda\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0musecuda\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     33\u001b[0m         \u001b[0mword_features\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdropout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mword_features\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     34\u001b[0m         \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlinear\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mword_features\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/nn/modules/module.pyc\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m    355\u001b[0m             \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    356\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 357\u001b[0;31m             \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    358\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    359\u001b[0m             \u001b[0mhook_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-2-a39903630064>\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, words, input_lengths, usecuda)\u001b[0m\n\u001b[1;32m    213\u001b[0m         \u001b[0membedded\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minput_dropout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0membedded\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    214\u001b[0m         \u001b[0membedded\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpack_padded_sequence\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0membedded\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_lengths\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_first\u001b[0m\u001b[0;34m=\u001b[0m \u001b[0mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 215\u001b[0;31m         \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrnn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0membedded\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0musecuda\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0musecuda\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    216\u001b[0m         \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtranspose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcontiguous\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mview\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    217\u001b[0m 
\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/nn/modules/module.pyc\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m    355\u001b[0m             \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    356\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 357\u001b[0;31m             \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    358\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    359\u001b[0m             \u001b[0mhook_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-2-a39903630064>\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, input, hx, usecuda)\u001b[0m\n\u001b[1;32m    157\u001b[0m         )\n\u001b[1;32m    158\u001b[0m         \u001b[0;31m# change this line\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 159\u001b[0;31m         \u001b[0moutput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhidden\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mall_weights\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    160\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mis_packed\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    161\u001b[0m             \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mPackedSequence\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_sizes\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/nn/_functions/rnn.pyc\u001b[0m in \u001b[0;36mforward\u001b[0;34m(input, *fargs, **fkwargs)\u001b[0m\n\u001b[1;32m    383\u001b[0m             \u001b[0;32mreturn\u001b[0m \u001b[0mhack_onnx_rnn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mfargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    384\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 385\u001b[0;31m             \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mfargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mfkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    386\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    387\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/autograd/function.pyc\u001b[0m in \u001b[0;36m_do_forward\u001b[0;34m(self, *input)\u001b[0m\n\u001b[1;32m    326\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_nested_input\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    327\u001b[0m         \u001b[0mflat_input\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_iter_variables\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 328\u001b[0;31m         \u001b[0mflat_output\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mNestedIOFunction\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mflat_input\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    329\u001b[0m         \u001b[0mnested_output\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_nested_output\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    330\u001b[0m         \u001b[0mnested_variables\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_unflatten\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mflat_output\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_nested_output\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/autograd/function.pyc\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, *args)\u001b[0m\n\u001b[1;32m    348\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    349\u001b[0m         \u001b[0mnested_tensors\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_map_variable_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_nested_input\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 350\u001b[0;31m         \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward_extended\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mnested_tensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    351\u001b[0m         \u001b[0;32mdel\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_nested_input\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    352\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_nested_output\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/nn/_functions/rnn.pyc\u001b[0m in \u001b[0;36mforward_extended\u001b[0;34m(self, input, weight, hx)\u001b[0m\n\u001b[1;32m    292\u001b[0m             \u001b[0mhy\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mh\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnew\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mh\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mhx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    293\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 294\u001b[0;31m         \u001b[0mcudnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhy\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    295\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    296\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msave_for_backward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/backends/cudnn/rnn.pyc\u001b[0m in \u001b[0;36mforward\u001b[0;34m(fn, input, hx, weight, output, hy)\u001b[0m\n\u001b[1;32m    250\u001b[0m             \u001b[0mw\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzero_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    251\u001b[0m             \u001b[0mparams\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_parameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 252\u001b[0;31m             \u001b[0m_copyParams\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    253\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    254\u001b[0m             \u001b[0mfn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mw_desc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0minit_weight_descriptor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweight_buf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/backends/cudnn/rnn.pyc\u001b[0m in \u001b[0;36m_copyParams\u001b[0;34m(params_from, params_to)\u001b[0m\n\u001b[1;32m    184\u001b[0m         \u001b[0;31m# use biases, zip will terminate once layer_params_from ends and ignore them.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    185\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mparam_from\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparam_to\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayer_params_from\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayer_params_to\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 186\u001b[0;31m             \u001b[0;32massert\u001b[0m \u001b[0mparam_from\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mparam_to\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    187\u001b[0m             \u001b[0mparam_to\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcopy_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparam_from\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbroadcast\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    188\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/cuda/__init__.pyc\u001b[0m in \u001b[0;36mtype\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m    393\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    394\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 395\u001b[0;31m         \u001b[0;32mwith\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_device\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    396\u001b[0m             \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_CudaBase\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ubuntu/anaconda3/envs/pytorch_p27/lib/python2.7/site-packages/torch/cuda/__init__.pyc\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, idx)\u001b[0m\n\u001b[1;32m    198\u001b[0m     \"\"\"\n\u001b[1;32m    199\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 200\u001b[0;31m     \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0midx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    201\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0midx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0midx\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    202\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprev_idx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Training cell: move the model to GPU, build the optimizer, run training,\n",
    "# and save a loss curve. Relies on `model`, `parameters`, `result_path`,\n",
    "# `model_name`, `tag_to_id`, `train_data`, `test_data` defined in earlier cells.\n",
    "# NOTE(review): `plt` and `os` are used below but are not imported in the\n",
    "# visible imports cell -- confirm they are imported in another cell.\n",
    "model.cuda()\n",
    "learning_rate = parameters['lrate']\n",
    "num_epochs = parameters['nepch']\n",
    "print('Initial learning rate is: %s' %(learning_rate))\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n",
    "\n",
    "# Trainer comes from neural_cls.util (see imports cell); train_model returns\n",
    "# the per-step losses and F-scores for train/test.\n",
    "trainer = Trainer(model, optimizer, result_path, model_name, tag_to_id, usedataset='mareview') \n",
    "losses, all_F = trainer.train_model(num_epochs, train_data, test_data, learning_rate,\n",
    "                                    batch_size = parameters['batch_size'])\n",
    "    \n",
    "# Persist the loss curve next to the model's other artifacts.\n",
    "plt.plot(losses)\n",
    "plt.savefig(os.path.join(result_path, model_name, 'lossplot.png'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:pytorch_p27]",
   "language": "python",
   "name": "conda-env-pytorch_p27-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
