{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking in indexes: http://mirrors.tencentyun.com/pypi/simple\n",
      "Requirement already satisfied: keras==2.3.1 in /opt/conda/envs/tensorflow_py3/lib/python3.6/site-packages (2.3.1)\n",
      "Requirement already satisfied: six>=1.9.0 in /opt/conda/envs/tensorflow_py3/lib/python3.6/site-packages (from keras==2.3.1) (1.15.0)\n",
      "Requirement already satisfied: numpy>=1.9.1 in /opt/conda/envs/tensorflow_py3/lib/python3.6/site-packages (from keras==2.3.1) (1.18.5)\n",
      "Requirement already satisfied: keras-preprocessing>=1.0.5 in /opt/conda/envs/tensorflow_py3/lib/python3.6/site-packages (from keras==2.3.1) (1.1.0)\n",
      "Requirement already satisfied: keras-applications>=1.0.6 in /opt/conda/envs/tensorflow_py3/lib/python3.6/site-packages (from keras==2.3.1) (1.0.8)\n",
      "Requirement already satisfied: pyyaml in /opt/conda/envs/tensorflow_py3/lib/python3.6/site-packages (from keras==2.3.1) (5.3.1)\n",
      "Requirement already satisfied: scipy>=0.14 in /opt/conda/envs/tensorflow_py3/lib/python3.6/site-packages (from keras==2.3.1) (1.5.0)\n",
      "Requirement already satisfied: h5py in /opt/conda/envs/tensorflow_py3/lib/python3.6/site-packages (from keras==2.3.1) (2.10.0)\n"
     ]
    }
   ],
   "source": [
    "# Pinned install; %pip (unlike !pip) targets the active kernel's environment.\n",
    "%pip install keras==2.3.1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": "Using TensorFlow backend.\n"
    }
   ],
   "source": [
    "# Imports plus TF1-style GPU session setup (tf.ConfigProto / tf.Session\n",
    "# require TensorFlow 1.x; Keras 2.3.1 is paired with it above).\n",
    "import os\n",
    "import keras.backend as K\n",
    "\n",
    "from data import DATA_SET_DIR\n",
    "from elmo.lm_generator import LMDataGenerator\n",
    "from elmo.model import ELMo\n",
    "from tqdm import tqdm\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow import keras\n",
    "# Grow GPU memory on demand instead of pre-allocating the whole device.\n",
    "config = tf.ConfigProto()\n",
    "config.gpu_options.allow_growth = True\n",
    "sess = tf.Session(config = config)\n",
    "# Register this session as the default one for the Keras backend.\n",
    "keras.backend.set_session(sess)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import warnings\n",
    "# Silence all warnings (deprecation noise from TF1 / Keras 2.3).\n",
    "warnings.filterwarnings(\"ignore\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def printnow(s):\n",
    "    \"\"\"Print `s` under a timestamped banner (MM-DD-HH:MM:SS).\n",
    "\n",
    "    Used below to mark the start/end of long-running stages.\n",
    "    \"\"\"\n",
    "    import datetime  # local import keeps the cell self-contained\n",
    "\n",
    "    t = datetime.datetime.now().strftime(\"%m-%d-%H:%M:%S\")\n",
    "    # 40 '=' on each side of the current time.\n",
    "    print('=' * 40 + t + '=' * 40)\n",
    "    print(s)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": "100%|██████████| 4000000/4000000 [00:01<00:00, 3010385.58it/s]\n  9%|▉         | 162061/1750000 [00:00<00:00, 1620588.36it/s]划分数据集\n100%|██████████| 1750000/1750000 [00:00<00:00, 1779829.72it/s]\n100%|██████████| 250000/250000 [00:00<00:00, 1349765.34it/s]\n100%|██████████| 2000000/2000000 [00:01<00:00, 1830268.63it/s]\n"
    }
   ],
   "source": [
    "\n",
    "# Split the full token corpus (one sequence per line) into fixed-size files:\n",
    "# train = first 1,750,000 lines, valid = next 250,000, test = the remainder.\n",
    "data = []\n",
    "with open('./data/datasets/txt/advertiser_id.all.tokens', 'r') as f:\n",
    "    for i in tqdm(f.readlines()):\n",
    "        data.append(i[:-1])  # drop the trailing newline\n",
    "print('划分数据集')\n",
    "with open('./data/datasets/txt/advertiser_id.train.tokens', 'w') as f:\n",
    "    for i in tqdm(data[:1750000]):\n",
    "        f.write(i)\n",
    "        f.write('\\n')\n",
    "with open('./data/datasets/txt/advertiser_id.valid.tokens', 'w') as f:\n",
    "    for i in tqdm(data[1750000:2000000]):\n",
    "        f.write(i)\n",
    "        f.write('\\n')\n",
    "with open('./data/datasets/txt/advertiser_id.test.tokens', 'w') as f:\n",
    "    for i in tqdm(data[2000000:]):\n",
    "        f.write(i)\n",
    "        f.write('\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": "100%|██████████| 1750000/1750000 [00:00<00:00, 2798074.45it/s]\n100%|██████████| 250000/250000 [00:00<00:00, 2630086.41it/s]\n54833it [00:00, 2284351.96it/s]\n100%|██████████| 54837/54837 [00:00<00:00, 1459651.01it/s]\n"
    }
   ],
   "source": [
    "# Build the vocabulary from the train + valid token files.\n",
    "data = []\n",
    "with open('./data/datasets/txt/advertiser_id.train.tokens', 'r') as f:\n",
    "    for i in tqdm(f.readlines()):\n",
    "        data.append(i[:-1])\n",
    "with open('./data/datasets/txt/advertiser_id.valid.tokens', 'r') as f:\n",
    "    for i in tqdm(f.readlines()):\n",
    "        data.append(i[:-1])\n",
    "\n",
    "words = []\n",
    "for i in data:\n",
    "    words += i.split(' ')\n",
    "# Drop '<unk>' so it is not assigned a second id below.\n",
    "words = set(words) - set(('<unk>',))\n",
    "# Reserved ids: 0=<pad>, 1=<bos>, 2=<eos>, 3=<unk>; real tokens start at 4.\n",
    "# (The original dead `i = 0` before the enumerate loop has been removed.)\n",
    "vocab = {'<pad>': 0, '<bos>': 1, '<eos>': 2, '<unk>': 3}\n",
    "# NOTE: iterating a set is not reproducible across interpreter runs, so the\n",
    "# token->id mapping can differ between runs -- TODO confirm this is acceptable.\n",
    "for i, word in tqdm(enumerate(words)):\n",
    "    vocab[word] = i + 4\n",
    "with open('./data/datasets/txt/advertiser_id.vocab', 'w') as f:\n",
    "    for token in tqdm(vocab):\n",
    "        f.write('{} {}\\n'.format(token, vocab[token]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": "1%|          | 14784/2000000 [00:00<00:26, 73994.91it/s]========================================07-12-01:17:23========================================\ngo\n100%|██████████| 2000000/2000000 [00:30<00:00, 64637.80it/s]\n========================================07-12-01:17:54========================================\nturn to np array\n========================================07-12-01:17:57========================================\nsave data\n========================================07-12-01:18:22========================================\nend\n"
    }
   ],
   "source": [
    "printnow('go')\n",
    "# Encode each row into fixed-length (100) id sequences:\n",
    "#   cur : <bos> w1 ... wN <eos> 0...  (input tokens, zero-padded)\n",
    "#   fwd : cur shifted left one step   (next-token targets)\n",
    "#   back: cur shifted right one step  (previous-token targets)\n",
    "cur_ids = []\n",
    "for_ward_ids = []\n",
    "back_ward_ids = []\n",
    "for row in tqdm(data):\n",
    "    cur_temp_ids = np.zeros((100,))\n",
    "    split_row = row.split(' ')[:98]  # truncate so <bos>/<eos> fit in 100 slots\n",
    "    cur_temp_ids[0] = vocab['<bos>']\n",
    "    for i, word in enumerate(split_row):\n",
    "        cur_temp_ids[i + 1] = vocab.get(word, vocab['<unk>'])\n",
    "    # Index with len(split_row) rather than the loop variable: the original\n",
    "    # `i + 2` reused `i` after the loop, which is stale (or undefined) when a\n",
    "    # row is empty.\n",
    "    cur_temp_ids[len(split_row) + 1] = vocab['<eos>']\n",
    "    for_ward_temp_ids = np.concatenate([cur_temp_ids[1:], np.zeros((1,))])\n",
    "    back_ward_temp_ids = np.concatenate([np.zeros((1,)), cur_temp_ids[:-1]])\n",
    "\n",
    "    cur_ids.append(cur_temp_ids)\n",
    "    for_ward_ids.append(for_ward_temp_ids)\n",
    "    back_ward_ids.append(back_ward_temp_ids)\n",
    "\n",
    "printnow('turn to np array')\n",
    "np_cur_ids = np.array(cur_ids, dtype='int32')\n",
    "np_for_ward_ids = np.array(for_ward_ids, dtype='int32')\n",
    "np_back_ward_ids = np.array(back_ward_ids, dtype='int32')\n",
    "printnow('save data')\n",
    "import joblib\n",
    "joblib.dump(np_cur_ids, './data/datasets/train_np_cur_ids.jl.z')\n",
    "joblib.dump(np_for_ward_ids, './data/datasets/train_np_for_ward_ids.jl.z')\n",
    "joblib.dump(np_back_ward_ids, './data/datasets/train_np_back_ward_ids.jl.z')\n",
    "printnow('end')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": "100%|██████████| 2000000/2000000 [00:00<00:00, 3109846.21it/s]\n  1%|          | 13061/2000000 [00:00<00:30, 64870.87it/s]========================================07-12-01:18:23========================================\ngo\n100%|██████████| 2000000/2000000 [00:28<00:00, 69521.37it/s]\n========================================07-12-01:18:53========================================\nturn to np array\n========================================07-12-01:18:57========================================\nsave data\n========================================07-12-01:19:20========================================\nend\n"
    }
   ],
   "source": [
    "# Encode the test split with the same scheme as the training data above.\n",
    "data = []\n",
    "with open('./data/datasets/txt/advertiser_id.test.tokens', 'r') as f:\n",
    "    for i in tqdm(f.readlines()):\n",
    "        data.append(i[:-1])\n",
    "\n",
    "printnow('go')\n",
    "cur_ids = []\n",
    "for_ward_ids = []\n",
    "back_ward_ids = []\n",
    "for row in tqdm(data):\n",
    "    cur_temp_ids = np.zeros((100,))\n",
    "    split_row = row.split(' ')[:98]  # truncate so <bos>/<eos> fit in 100 slots\n",
    "    cur_temp_ids[0] = vocab['<bos>']\n",
    "    for i, word in enumerate(split_row):\n",
    "        cur_temp_ids[i + 1] = vocab.get(word, vocab['<unk>'])\n",
    "    # Index with len(split_row) rather than the loop variable: the original\n",
    "    # `i + 2` reused `i` after the loop, which is stale (or undefined) when a\n",
    "    # row is empty.\n",
    "    cur_temp_ids[len(split_row) + 1] = vocab['<eos>']\n",
    "    for_ward_temp_ids = np.concatenate([cur_temp_ids[1:], np.zeros((1,))])\n",
    "    back_ward_temp_ids = np.concatenate([np.zeros((1,)), cur_temp_ids[:-1]])\n",
    "\n",
    "    cur_ids.append(cur_temp_ids)\n",
    "    for_ward_ids.append(for_ward_temp_ids)\n",
    "    back_ward_ids.append(back_ward_temp_ids)\n",
    "\n",
    "printnow('turn to np array')\n",
    "np_cur_ids = np.array(cur_ids, dtype='int32')\n",
    "np_for_ward_ids = np.array(for_ward_ids, dtype='int32')\n",
    "np_back_ward_ids = np.array(back_ward_ids, dtype='int32')\n",
    "printnow('save data')\n",
    "import joblib\n",
    "joblib.dump(np_cur_ids, './data/datasets/test_np_cur_ids.jl.z')\n",
    "joblib.dump(np_for_ward_ids, './data/datasets/test_np_for_ward_ids.jl.z')\n",
    "joblib.dump(np_back_ward_ids, './data/datasets/test_np_back_ward_ids.jl.z')\n",
    "printnow('end')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Upload the zipped dataset via the TI SDK -- presumably to Tencent Cloud\n",
    "# object storage given the bucket name; verify against the TI session docs.\n",
    "from ti import session\n",
    "ti_session = session.Session()\n",
    "inputs = ti_session.upload_data(path='./data/datasets/elmo_data.zip', bucket='semi-final-1258613868', key_prefix=\"elmo\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import joblib\n",
    "\n",
    "# Debug mode keeps only the first 1000 rows of every array for fast iteration.\n",
    "debug = True\n",
    "limit = 1000 if debug else None\n",
    "\n",
    "def _load_ids(name):\n",
    "    \"\"\"Load a dumped id array, truncated to `limit` rows when debugging.\"\"\"\n",
    "    arr = joblib.load('./data/datasets/{}.jl.z'.format(name))\n",
    "    return arr[:limit] if limit is not None else arr\n",
    "\n",
    "train_np_for_ward_ids = _load_ids('train_np_for_ward_ids')\n",
    "train_np_back_ward_ids = _load_ids('train_np_back_ward_ids')\n",
    "train_np_cur_ids = _load_ids('train_np_cur_ids')\n",
    "\n",
    "test_np_for_ward_ids = _load_ids('test_np_for_ward_ids')\n",
    "test_np_back_ward_ids = _load_ids('test_np_back_ward_ids')\n",
    "test_np_cur_ids = _load_ids('test_np_cur_ids')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import train_test_split"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One train_test_split call over all three parallel arrays keeps the splits\n",
    "# aligned by construction (the original made three calls and relied on the\n",
    "# identical random_state producing the same permutation each time; the\n",
    "# resulting values are the same).\n",
    "(train_for, valid_for,\n",
    " train_back, valid_back,\n",
    " train_cur, valid_cur) = train_test_split(\n",
    "    train_np_for_ward_ids, train_np_back_ward_ids, train_np_cur_ids,\n",
    "    test_size=0.2, random_state=547)\n",
    "\n",
    "# Target arrays need a trailing channel axis: (N, 100) -> (N, 100, 1).\n",
    "train_for, valid_for, train_back, valid_back = (\n",
    "    train_for[:, :, np.newaxis], valid_for[:, :, np.newaxis],\n",
    "    train_back[:, :, np.newaxis], valid_back[:, :, np.newaxis])\n",
    "train = [[train_cur, train_for, train_back],[]]\n",
    "valid = [[valid_cur, valid_for, valid_back], []]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add the trailing channel axis to the test target arrays as well.\n",
    "test_np_for_ward_ids = test_np_for_ward_ids[:, :, np.newaxis]\n",
    "test_np_back_ward_ids = test_np_back_ward_ids[:, :, np.newaxis]\n",
    "test = [[test_np_cur_ids, test_np_for_ward_ids, test_np_back_ward_ids],[]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports: stdlib, then third-party (tensorflow / keras), then project-local.\n",
    "import os\n",
    "import time\n",
    "\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "\n",
    "from keras import backend as K\n",
    "from keras import activations, constraints, initializers, regularizers\n",
    "from keras.callbacks import ModelCheckpoint, EarlyStopping\n",
    "from keras.constraints import MinMaxNorm\n",
    "from keras.engine import InputSpec\n",
    "from keras.initializers import Constant\n",
    "from keras.layers import Dense, Input, SpatialDropout1D\n",
    "from keras.layers import LSTM, CuDNNLSTM, Activation\n",
    "from keras.layers import Lambda, Embedding, Conv2D, GlobalMaxPool1D\n",
    "from keras.layers import add, concatenate\n",
    "from keras.layers import Dropout\n",
    "from keras.layers.core import Layer\n",
    "from keras.layers.wrappers import TimeDistributed\n",
    "from keras.models import Model, load_model\n",
    "from keras.optimizers import Adagrad\n",
    "from keras.utils import to_categorical\n",
    "\n",
    "from data import MODELS_DIR\n",
    "\n",
    "class TimestepDropout(Dropout):\n",
    "    \"\"\"Word Dropout.\n",
    "\n",
    "    This version performs the same function as Dropout, however it drops\n",
    "    entire timesteps (e.g., words embeddings) instead of individual elements (features).\n",
    "\n",
    "    # Arguments\n",
    "        rate: float between 0 and 1. Fraction of the timesteps to drop.\n",
    "\n",
    "    # Input shape\n",
    "        3D tensor with shape:\n",
    "        `(samples, timesteps, channels)`\n",
    "\n",
    "    # Output shape\n",
    "        Same as input\n",
    "\n",
    "    # References\n",
    "        - N/A\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, rate, **kwargs):\n",
    "        super(TimestepDropout, self).__init__(rate, **kwargs)\n",
    "        # Restrict the layer to 3D inputs (samples, timesteps, channels).\n",
    "        self.input_spec = InputSpec(ndim=3)\n",
    "\n",
    "    def _get_noise_shape(self, inputs):\n",
    "        # One Bernoulli draw per (sample, timestep), broadcast across the\n",
    "        # feature axis, so whole timestep embeddings are zeroed together.\n",
    "        input_shape = K.shape(inputs)\n",
    "        noise_shape = (input_shape[0], input_shape[1], 1)\n",
    "        return noise_shape\n",
    "\n",
    "class Highway(Layer):\n",
    "    \"\"\"Highway network, a natural extension of LSTMs to feedforward networks.\n",
    "\n",
    "    Computes `y = H(x) * T(x) + x * (1 - T(x))`, where H is an affine\n",
    "    transform followed by `activation` and T is the \"transform\" gate.\n",
    "\n",
    "    # Arguments\n",
    "        activation: Activation function to use\n",
    "            (see [activations](../activations.md)).\n",
    "            Default: no activation is applied\n",
    "            (ie. \"linear\" activation: `a(x) = x`).\n",
    "        transform_activation: Activation function to use\n",
    "            for the transform unit\n",
    "            (see [activations](../activations.md)).\n",
    "            Default: sigmoid (`sigmoid`).\n",
    "            If you pass `None`, no activation is applied\n",
    "            (ie. \"linear\" activation: `a(x) = x`).\n",
    "        kernel_initializer: Initializer for the `kernel` weights matrix,\n",
    "            used for the linear transformation of the inputs\n",
    "            (see [initializers](../initializers.md)).\n",
    "        transform_initializer: Initializer for the `transform` weights matrix,\n",
    "            used for the linear transformation of the inputs\n",
    "            (see [initializers](../initializers.md)).\n",
    "        bias_initializer: Initializer for the bias vector\n",
    "            (see [initializers](../initializers.md)).\n",
    "        transform_bias_initializer: Initializer for the bias vector\n",
    "            (see [initializers](../initializers.md)).\n",
    "            Default: -2 constant.\n",
    "        kernel_regularizer: Regularizer function applied to\n",
    "            the `kernel` weights matrix\n",
    "            (see [regularizer](../regularizers.md)).\n",
    "        transform_regularizer: Regularizer function applied to\n",
    "            the `transform` weights matrix\n",
    "            (see [regularizer](../regularizers.md)).\n",
    "        bias_regularizer: Regularizer function applied to the bias vector\n",
    "            (see [regularizer](../regularizers.md)).\n",
    "        transform_bias_regularizer: Regularizer function applied to the transform bias vector\n",
    "            (see [regularizer](../regularizers.md)).\n",
    "        kernel_constraint: Constraint function applied to\n",
    "            the `kernel` weights matrix\n",
    "            (see [constraints](../constraints.md)).\n",
    "        bias_constraint: Constraint function applied to the bias vector\n",
    "            (see [constraints](../constraints.md)).\n",
    "    # Input shape\n",
    "        2D tensor with shape: `(nb_samples, input_dim)`.\n",
    "    # Output shape\n",
    "        2D tensor with shape: `(nb_samples, input_dim)`.\n",
    "    # References\n",
    "        - [Highway Networks](http://arxiv.org/pdf/1505.00387v2.pdf)\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "                 activation='relu',\n",
    "                 transform_activation='sigmoid',\n",
    "                 kernel_initializer='glorot_uniform',\n",
    "                 transform_initializer='glorot_uniform',\n",
    "                 bias_initializer='zeros',\n",
    "                 transform_bias_initializer=-2,\n",
    "                 kernel_regularizer=None,\n",
    "                 transform_regularizer=None,\n",
    "                 bias_regularizer=None,\n",
    "                 transform_bias_regularizer=None,\n",
    "                 kernel_constraint=None,\n",
    "                 bias_constraint=None,\n",
    "                 **kwargs):\n",
    "        # NOTE(review): requires the keras `activations`/`initializers`/\n",
    "        # `regularizers`/`constraints` submodules and `Constant`\n",
    "        # (keras.initializers) to be in scope at module level.\n",
    "        self.activation = activations.get(activation)\n",
    "        self.transform_activation = activations.get(transform_activation)\n",
    "\n",
    "        self.kernel_initializer = initializers.get(kernel_initializer)\n",
    "        self.transform_initializer = initializers.get(transform_initializer)\n",
    "        self.bias_initializer = initializers.get(bias_initializer)\n",
    "        # An int is treated as a constant initial gate bias; the default -2\n",
    "        # biases the sigmoid gate toward carrying the input through.\n",
    "        if isinstance(transform_bias_initializer, int):\n",
    "            self.transform_bias_initializer = Constant(value=transform_bias_initializer)\n",
    "        else:\n",
    "            self.transform_bias_initializer = initializers.get(transform_bias_initializer)\n",
    "\n",
    "        self.kernel_regularizer = regularizers.get(kernel_regularizer)\n",
    "        self.transform_regularizer = regularizers.get(transform_regularizer)\n",
    "        self.bias_regularizer = regularizers.get(bias_regularizer)\n",
    "        self.transform_bias_regularizer = regularizers.get(transform_bias_regularizer)\n",
    "\n",
    "        self.kernel_constraint = constraints.get(kernel_constraint)\n",
    "        self.bias_constraint = constraints.get(bias_constraint)\n",
    "\n",
    "        super(Highway, self).__init__(**kwargs)\n",
    "\n",
    "    def build(self, input_shape):\n",
    "        # Highway layers preserve dimensionality, so both weight matrices are\n",
    "        # square (input_dim x input_dim).\n",
    "        assert len(input_shape) == 2\n",
    "        input_dim = input_shape[-1]\n",
    "\n",
    "        self.W = self.add_weight(shape=(input_dim, input_dim),\n",
    "                                 name='{}_W'.format(self.name),\n",
    "                                 initializer=self.kernel_initializer,\n",
    "                                 regularizer=self.kernel_regularizer,\n",
    "                                 constraint=self.kernel_constraint)\n",
    "        # NOTE(review): the transform matrix reuses kernel_constraint -- there\n",
    "        # is no separate transform constraint parameter; confirm intended.\n",
    "        self.W_transform = self.add_weight(shape=(input_dim, input_dim),\n",
    "                                           name='{}_W_transform'.format(self.name),\n",
    "                                           initializer=self.transform_initializer,\n",
    "                                           regularizer=self.transform_regularizer,\n",
    "                                           constraint=self.kernel_constraint)\n",
    "\n",
    "        self.bias = self.add_weight(shape=(input_dim,),\n",
    "                                 name='{}_bias'.format(self.name),\n",
    "                                 initializer=self.bias_initializer,\n",
    "                                 regularizer=self.bias_regularizer,\n",
    "                                 constraint=self.bias_constraint)\n",
    "\n",
    "        self.bias_transform = self.add_weight(shape=(input_dim,),\n",
    "                                           name='{}_bias_transform'.format(self.name),\n",
    "                                           initializer=self.transform_bias_initializer,\n",
    "                                           regularizer=self.transform_bias_regularizer)\n",
    "\n",
    "        self.built = True\n",
    "\n",
    "    def call(self, x, mask=None):\n",
    "        # Gating: y = H(x) * T(x) + x * (1 - T(x)).\n",
    "        x_h = self.activation(K.dot(x, self.W) + self.bias)\n",
    "        x_trans = self.transform_activation(K.dot(x, self.W_transform) + self.bias_transform)\n",
    "        output = x_h * x_trans + (1 - x_trans) * x\n",
    "        return output\n",
    "\n",
    "    def get_config(self):\n",
    "        # Serialize constructor arguments so the layer can be restored via\n",
    "        # load_model(..., custom_objects={'Highway': Highway}).\n",
    "        config = {'activation': activations.serialize(self.activation),\n",
    "                  'transform_activation': activations.serialize(self.transform_activation),\n",
    "                  'kernel_initializer': initializers.serialize(self.kernel_initializer),\n",
    "                  'transform_initializer': initializers.serialize(self.transform_initializer),\n",
    "                  'bias_initializer': initializers.serialize(self.bias_initializer),\n",
    "                  'transform_bias_initializer': initializers.serialize(self.transform_bias_initializer),\n",
    "                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n",
    "                  'transform_regularizer': regularizers.serialize(self.transform_regularizer),\n",
    "                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n",
    "                  'transform_bias_regularizer': regularizers.serialize(self.transform_bias_regularizer),\n",
    "                  'kernel_constraint': constraints.serialize(self.kernel_constraint),\n",
    "                  'bias_constraint': constraints.serialize(self.bias_constraint)\n",
    "                  }\n",
    "        base_config = super(Highway, self).get_config()\n",
    "        return dict(list(base_config.items()) + list(config.items()))\n",
    "class Camouflage(Layer):\n",
    "    \"\"\"Re-zero padding timesteps of one tensor using a second as reference.\n",
    "\n",
    "       LSTM and Convolution layers may produce fake tensors for padding\n",
    "       timesteps; this layer multiplies them by a {0,1} mask derived from the\n",
    "       reference input, restoring zeros wherever the reference equals\n",
    "       `mask_value`.\n",
    "\n",
    "       inputs = Input()\n",
    "       lstms = LSTM(units=100, return_sequences=True)(inputs)\n",
    "       padded_lstms = Camouflage()([lstms, inputs])\n",
    "       ...\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, mask_value=0., **kwargs):\n",
    "        super(Camouflage, self).__init__(**kwargs)\n",
    "        self.mask_value = mask_value\n",
    "\n",
    "    def call(self, inputs):\n",
    "        target, reference = inputs[0], inputs[1]\n",
    "        # A timestep is kept when any feature of the reference differs from\n",
    "        # the mask value; otherwise it is zeroed out.\n",
    "        keep = K.not_equal(reference, self.mask_value)\n",
    "        keep = K.any(keep, axis=-1, keepdims=True)\n",
    "        return target * K.cast(keep, K.dtype(target))\n",
    "\n",
    "    def get_config(self):\n",
    "        base_config = super(Camouflage, self).get_config()\n",
    "        base_config.update({'mask_value': self.mask_value})\n",
    "        return base_config\n",
    "\n",
    "    def compute_output_shape(self, input_shape):\n",
    "        # Output shape matches the first (target) input.\n",
    "        return input_shape[0]\n",
    "class SampledSoftmax(Layer):\n",
    "    \"\"\"Sampled Softmax, a faster way to train a softmax classifier over a huge number of classes.\n",
    "\n",
    "    # Arguments\n",
    "        num_classes: number of classes\n",
    "        num_sampled: number of classes to be sampled at each batch\n",
    "        tied_to: layer to be tied with (e.g., Embedding layer)\n",
    "        kwargs:\n",
    "    # Input shape\n",
    "        2D tensor with shape: `(nb_samples, input_dim)`.\n",
    "    # Output shape\n",
    "        2D tensor with shape: `(nb_samples, input_dim)`.\n",
    "    # References\n",
    "        - [Tensorflow code](tf.nn.sampled_softmax_loss)\n",
    "        - [Sampled SoftMax](https://www.tensorflow.org/extras/candidate_sampling.pdf)\n",
    "    \"\"\"\n",
    "    def __init__(self, num_classes=50000, num_sampled=1000, tied_to=None, **kwargs):\n",
    "        super(SampledSoftmax, self).__init__(**kwargs)\n",
    "        self.num_sampled = num_sampled\n",
    "        self.num_classes = num_classes\n",
    "        self.tied_to = tied_to\n",
    "        # Sampling only makes sense when fewer classes are drawn than exist;\n",
    "        # when num_sampled == num_classes, a full softmax is used instead.\n",
    "        self.sampled = (self.num_classes != self.num_sampled)\n",
    "\n",
    "    def build(self, input_shape):\n",
    "        # When tied to an embedding layer, its weight matrix is reused as the\n",
    "        # output projection, so no separate W is allocated here.\n",
    "        if self.tied_to is None:\n",
    "            self.softmax_W = self.add_weight(shape=(self.num_classes, input_shape[0][-1]), name='W_soft', initializer='lecun_normal')\n",
    "        self.softmax_b = self.add_weight(shape=(self.num_classes,), name='b_soft', initializer='zeros')\n",
    "        self.built = True\n",
    "\n",
    "    def call(self, x, mask=None):\n",
    "        lstm_outputs, next_token_ids = x\n",
    "\n",
    "        def sampled_softmax(x):\n",
    "            # Training-time approximation: loss over a sampled class subset.\n",
    "            lstm_outputs_batch, next_token_ids_batch = x\n",
    "            batch_losses = tf.nn.sampled_softmax_loss(\n",
    "                self.softmax_W if self.tied_to is None else self.tied_to.weights[0], self.softmax_b,\n",
    "                next_token_ids_batch, lstm_outputs_batch,\n",
    "                num_classes=self.num_classes,\n",
    "                num_sampled=self.num_sampled,\n",
    "                partition_strategy='div')\n",
    "            batch_losses = tf.reduce_mean(batch_losses)\n",
    "            return [batch_losses, batch_losses]\n",
    "\n",
    "        def softmax(x):\n",
    "            # Exact softmax over all classes (used when sampling is disabled).\n",
    "            lstm_outputs_batch, next_token_ids_batch = x\n",
    "            logits = tf.matmul(lstm_outputs_batch,\n",
    "                                 tf.transpose(self.softmax_W) if self.tied_to is None else tf.transpose(self.tied_to.weights[0]))\n",
    "            logits = tf.nn.bias_add(logits, self.softmax_b)\n",
    "            batch_predictions = tf.nn.softmax(logits)\n",
    "            labels_one_hot = tf.one_hot(tf.cast(next_token_ids_batch, dtype=tf.int32), self.num_classes)\n",
    "            batch_losses = tf.nn.softmax_cross_entropy_with_logits(labels=labels_one_hot, logits=logits)\n",
    "            return [batch_losses, batch_predictions]\n",
    "\n",
    "        # Map over the batch; the loss is registered via add_loss because the\n",
    "        # target ids arrive as a regular input, not as a Keras target tensor.\n",
    "        losses, predictions = tf.map_fn(sampled_softmax if self.sampled else softmax, [lstm_outputs, next_token_ids])\n",
    "        self.add_loss(0.5 * tf.reduce_mean(losses[0]))\n",
    "        return lstm_outputs if self.sampled else predictions\n",
    "\n",
    "    def compute_output_shape(self, input_shape):\n",
    "        # Sampled (training) mode passes hidden states through unchanged;\n",
    "        # full-softmax mode emits per-class probabilities.\n",
    "        return input_shape[0] if self.sampled else (input_shape[0][0], input_shape[0][1], self.num_classes)\n",
    "\n",
    "\n",
    "\n",
    "    \n",
    "class ELMo(object):\n",
    "    def __init__(self, parameters):\n",
    "        self._model = None\n",
    "        self._elmo_model = None\n",
    "        self.parameters = parameters\n",
    "        self.compile_elmo()\n",
    "\n",
    "    def __del__(self):\n",
    "        K.clear_session()\n",
    "        del self._model\n",
    "\n",
    "    def char_level_token_encoder(self):\n",
    "        charset_size = self.parameters['charset_size']\n",
    "        char_embedding_size = self.parameters['char_embedding_size']\n",
    "        token_embedding_size = self.parameters['hidden_units_size']\n",
    "        n_highway_layers = self.parameters['n_highway_layers']\n",
    "        filters = self.parameters['cnn_filters']\n",
    "        token_maxlen = self.parameters['token_maxlen']\n",
    "\n",
    "        # Input Layer, word characters (samples, words, character_indices)\n",
    "        inputs = Input(shape=(None, token_maxlen,), dtype='int32')\n",
    "        # Embed characters (samples, words, characters, character embedding)\n",
    "        embeds = Embedding(input_dim=charset_size, output_dim=char_embedding_size)(inputs)\n",
    "        token_embeds = []\n",
    "        # Apply multi-filter 2D convolutions + 1D MaxPooling + tanh\n",
    "        for (window_size, filters_size) in filters:\n",
    "            convs = Conv2D(filters=filters_size, kernel_size=[window_size, char_embedding_size], strides=(1, 1),\n",
    "                           padding=\"same\")(embeds)\n",
    "            convs = TimeDistributed(GlobalMaxPool1D())(convs)\n",
    "            convs = Activation('tanh')(convs)\n",
    "            convs = Camouflage(mask_value=0)(inputs=[convs, inputs])\n",
    "            token_embeds.append(convs)\n",
    "        token_embeds = concatenate(token_embeds)\n",
    "        # Apply highways networks\n",
    "        for i in range(n_highway_layers):\n",
    "            token_embeds = TimeDistributed(Highway())(token_embeds)\n",
    "            token_embeds = Camouflage(mask_value=0)(inputs=[token_embeds, inputs])\n",
    "        # Project to token embedding dimensionality\n",
    "        token_embeds = TimeDistributed(Dense(units=token_embedding_size, activation='linear'))(token_embeds)\n",
    "        token_embeds = Camouflage(mask_value=0)(inputs=[token_embeds, inputs])\n",
    "\n",
    "        token_encoder = Model(inputs=inputs, outputs=token_embeds, name='token_encoding')\n",
    "        return token_encoder\n",
    "\n",
    "    def compile_elmo(self, print_summary=False):\n",
    "        \"\"\"\n",
    "        Compiles a Language Model RNN based on the given parameters\n",
    "        :param print_summary: if True, print the Keras model summary after compiling\n",
    "        :return: None (the compiled model is stored in self._model)\n",
    "        \"\"\"\n",
    "\n",
    "        if self.parameters['token_encoding'] == 'word':\n",
    "            # Train word embeddings from scratch\n",
    "            word_inputs = Input(shape=(None,), name='word_indices', dtype='int32')\n",
    "            embeddings = Embedding(self.parameters['vocab_size'], self.parameters['hidden_units_size'], trainable=True, name='token_encoding')\n",
    "            inputs = embeddings(word_inputs)\n",
    "\n",
    "            # Token embeddings for Input\n",
    "            drop_inputs = SpatialDropout1D(self.parameters['dropout_rate'])(inputs)\n",
    "            lstm_inputs = TimestepDropout(self.parameters['word_dropout_rate'])(drop_inputs)\n",
    "\n",
    "            # Pass outputs as inputs to apply sampled softmax\n",
    "            next_ids = Input(shape=(None, 1), name='next_ids', dtype='float32')\n",
    "            previous_ids = Input(shape=(None, 1), name='previous_ids', dtype='float32')\n",
    "        elif self.parameters['token_encoding'] == 'char':\n",
    "            # Train character-level representation\n",
    "            word_inputs = Input(shape=(None, self.parameters['token_maxlen'],), dtype='int32', name='char_indices')\n",
    "            inputs = self.char_level_token_encoder()(word_inputs)\n",
    "\n",
    "            # Token embeddings for Input\n",
    "            drop_inputs = SpatialDropout1D(self.parameters['dropout_rate'])(inputs)\n",
    "            lstm_inputs = TimestepDropout(self.parameters['word_dropout_rate'])(drop_inputs)\n",
    "\n",
    "            # Pass outputs as inputs to apply sampled softmax\n",
    "            next_ids = Input(shape=(None, 1), name='next_ids', dtype='float32')\n",
    "            previous_ids = Input(shape=(None, 1), name='previous_ids', dtype='float32')\n",
    "\n",
    "        # NOTE(review): a token_encoding other than 'word'/'char' would leave\n",
    "        # word_inputs/lstm_inputs undefined -- no else branch guards this.\n",
    "        # Reversed input for backward LSTMs\n",
    "        re_lstm_inputs = Lambda(function=ELMo.reverse)(lstm_inputs)\n",
    "        mask = Lambda(function=ELMo.reverse)(drop_inputs)\n",
    "\n",
    "        # Forward LSTMs\n",
    "        for i in range(self.parameters['n_lstm_layers']):\n",
    "            if self.parameters['cuDNN']:\n",
    "                lstm = CuDNNLSTM(units=self.parameters['lstm_units_size'], return_sequences=True,\n",
    "                                 kernel_constraint=MinMaxNorm(-1*self.parameters['cell_clip'],\n",
    "                                                              self.parameters['cell_clip']),\n",
    "                                 recurrent_constraint=MinMaxNorm(-1*self.parameters['cell_clip'],\n",
    "                                                                 self.parameters['cell_clip']))(lstm_inputs)\n",
    "            else:\n",
    "                lstm = LSTM(units=self.parameters['lstm_units_size'], return_sequences=True, activation=\"tanh\",\n",
    "                            recurrent_activation='sigmoid',\n",
    "                            kernel_constraint=MinMaxNorm(-1 * self.parameters['cell_clip'],\n",
    "                                                         self.parameters['cell_clip']),\n",
    "                            recurrent_constraint=MinMaxNorm(-1 * self.parameters['cell_clip'],\n",
    "                                                            self.parameters['cell_clip'])\n",
    "                            )(lstm_inputs)\n",
    "            lstm = Camouflage(mask_value=0)(inputs=[lstm, drop_inputs])\n",
    "            # Projection to hidden_units_size\n",
    "            proj = TimeDistributed(Dense(self.parameters['hidden_units_size'], activation='linear',\n",
    "                                         kernel_constraint=MinMaxNorm(-1 * self.parameters['proj_clip'],\n",
    "                                                                      self.parameters['proj_clip'])\n",
    "                                         ))(lstm)\n",
    "            # Merge Bi-LSTMs feature vectors with the previous ones\n",
    "            lstm_inputs = add([proj, lstm_inputs], name='f_block_{}'.format(i + 1))\n",
    "            # Apply variational drop-out between BI-LSTM layers\n",
    "            lstm_inputs = SpatialDropout1D(self.parameters['dropout_rate'])(lstm_inputs)\n",
    "\n",
    "        # Backward LSTMs (operate on the time-reversed inputs so plain forward\n",
    "        # LSTM layers -- including CuDNNLSTM, which has no go_backwards -- can be used)\n",
    "        for i in range(self.parameters['n_lstm_layers']):\n",
    "            if self.parameters['cuDNN']:\n",
    "                re_lstm = CuDNNLSTM(units=self.parameters['lstm_units_size'], return_sequences=True,\n",
    "                                    kernel_constraint=MinMaxNorm(-1*self.parameters['cell_clip'],\n",
    "                                                                 self.parameters['cell_clip']),\n",
    "                                    recurrent_constraint=MinMaxNorm(-1*self.parameters['cell_clip'],\n",
    "                                                                    self.parameters['cell_clip']))(re_lstm_inputs)\n",
    "            else:\n",
    "                re_lstm = LSTM(units=self.parameters['lstm_units_size'], return_sequences=True, activation='tanh',\n",
    "                               recurrent_activation='sigmoid',\n",
    "                               kernel_constraint=MinMaxNorm(-1 * self.parameters['cell_clip'],\n",
    "                                                            self.parameters['cell_clip']),\n",
    "                               recurrent_constraint=MinMaxNorm(-1 * self.parameters['cell_clip'],\n",
    "                                                               self.parameters['cell_clip'])\n",
    "                               )(re_lstm_inputs)\n",
    "            re_lstm = Camouflage(mask_value=0)(inputs=[re_lstm, mask])\n",
    "            # Projection to hidden_units_size\n",
    "            re_proj = TimeDistributed(Dense(self.parameters['hidden_units_size'], activation='linear',\n",
    "                                            kernel_constraint=MinMaxNorm(-1 * self.parameters['proj_clip'],\n",
    "                                                                         self.parameters['proj_clip'])\n",
    "                                            ))(re_lstm)\n",
    "            # Merge Bi-LSTMs feature vectors with the previous ones\n",
    "            re_lstm_inputs = add([re_proj, re_lstm_inputs], name='b_block_{}'.format(i + 1))\n",
    "            # Apply variational drop-out between BI-LSTM layers\n",
    "            re_lstm_inputs = SpatialDropout1D(self.parameters['dropout_rate'])(re_lstm_inputs)\n",
    "\n",
    "        # Reverse backward LSTMs' outputs = Make it forward again\n",
    "        re_lstm_inputs = Lambda(function=ELMo.reverse, name=\"reverse\")(re_lstm_inputs)\n",
    "\n",
    "        # Project to Vocabulary with Sampled Softmax; weight tying reuses the\n",
    "        # input embedding matrix (word encoding only)\n",
    "        sampled_softmax = SampledSoftmax(num_classes=self.parameters['vocab_size'],\n",
    "                                         num_sampled=int(self.parameters['num_sampled']),\n",
    "                                         tied_to=embeddings if self.parameters['weight_tying']\n",
    "                                         and self.parameters['token_encoding'] == 'word' else None)\n",
    "        outputs = sampled_softmax([lstm_inputs, next_ids])\n",
    "        re_outputs = sampled_softmax([re_lstm_inputs, previous_ids])\n",
    "\n",
    "        self._model = Model(inputs=[word_inputs, next_ids, previous_ids],\n",
    "                            outputs=[outputs, re_outputs])\n",
    "        # loss=None: presumably the SampledSoftmax layer registers its loss\n",
    "        # internally -- confirm in elmo.model.SampledSoftmax.\n",
    "        self._model.compile(optimizer=Adagrad(lr=self.parameters['lr'], clipvalue=self.parameters['clip_value']),\n",
    "                            loss=None)\n",
    "        if print_summary:\n",
    "            self._model.summary()\n",
    "\n",
    "    def train(self, train_data, valid_data):\n",
    "        \"\"\"\n",
    "        Train the bidirectional language model on in-memory arrays.\n",
    "        :param train_data: tuple (inputs, targets) fed to Keras fit()\n",
    "        :param valid_data: validation data, passed straight to fit()\n",
    "        :return: None\n",
    "        \"\"\"\n",
    "\n",
    "        # Add callbacks (early stopping, model checkpoint)\n",
    "        weights_file = os.path.join(MODELS_DIR, \"elmo_best_weights.hdf5\")\n",
    "        save_best_model = ModelCheckpoint(filepath=weights_file, monitor='val_loss', verbose=1,\n",
    "                                          save_best_only=True, mode='auto')\n",
    "        early_stopping = EarlyStopping(patience=self.parameters['patience'], restore_best_weights=True)\n",
    "\n",
    "        t_start = time.time()\n",
    "\n",
    "        # Fit model. BUG FIX: early_stopping was built but never registered, and\n",
    "        # workers/use_multiprocessing were hard-coded, ignoring the configured\n",
    "        # 'n_threads' and 'multi_processing' parameters.\n",
    "        n_workers = self.parameters['n_threads'] if self.parameters['n_threads'] else os.cpu_count()\n",
    "        self._model.fit(train_data[0], train_data[1],\n",
    "                        validation_data=valid_data,\n",
    "                        epochs=self.parameters['epochs'],\n",
    "                        workers=n_workers,\n",
    "                        use_multiprocessing=bool(self.parameters['multi_processing']),\n",
    "                        batch_size=self.parameters['batch_size'],\n",
    "                        callbacks=[save_best_model, early_stopping])\n",
    "\n",
    "        print('Training took {0} sec'.format(str(time.time() - t_start)))\n",
    "\n",
    "    def evaluate(self, test_data):\n",
    "        \"\"\"\n",
    "        Evaluate the language model on test data and print the forward and\n",
    "        backward perplexities.\n",
    "        :param test_data: tuple (x, y_true_forward, y_true_backward)\n",
    "        :return: None\n",
    "        \"\"\"\n",
    "\n",
    "        def unpad(x, y_true, y_pred):\n",
    "            # Truncate each sequence at its first zero (padding) position.\n",
    "            y_true_unpad = []\n",
    "            y_pred_unpad = []\n",
    "            for i, x_i in enumerate(x):\n",
    "                for j, x_ij in enumerate(x_i):\n",
    "                    if x_ij == 0:\n",
    "                        y_true_unpad.append(y_true[i][:j])\n",
    "                        y_pred_unpad.append(y_pred[i][:j])\n",
    "                        break\n",
    "                else:\n",
    "                    # BUG FIX: sequences containing no padding were silently\n",
    "                    # dropped by the original code; keep them whole instead.\n",
    "                    y_true_unpad.append(y_true[i])\n",
    "                    y_pred_unpad.append(y_pred[i])\n",
    "            return np.asarray(y_true_unpad), np.asarray(y_pred_unpad)\n",
    "\n",
    "        # Generate samples\n",
    "        x, y_true_forward, y_true_backward = test_data\n",
    "\n",
    "        # Predict outputs\n",
    "        y_pred_forward, y_pred_backward = self._model.predict([x, y_true_forward, y_true_backward], batch_size=4)\n",
    "        print('predict finish')\n",
    "\n",
    "        # Unpad sequences\n",
    "        y_true_forward, y_pred_forward = unpad(x, y_true_forward, y_pred_forward)\n",
    "        y_true_backward, y_pred_backward = unpad(x, y_true_backward, y_pred_backward)\n",
    "\n",
    "        # Compute and print perplexity ('Langauge' typo fixed)\n",
    "        print('Forward Language Model Perplexity: {}'.format(ELMo.perplexity(y_pred_forward, y_true_forward)))\n",
    "        print('Backward Language Model Perplexity: {}'.format(ELMo.perplexity(y_pred_backward, y_true_backward)))\n",
    "\n",
    "    def wrap_multi_elmo_encoder(self, print_summary=False, save=False):\n",
    "        \"\"\"\n",
    "        Wrap ELMo meta-model encoder, which returns an array of the 3 intermediate ELMo outputs\n",
    "        :param print_summary: print a summary of the new architecture\n",
    "        :param save: persist model\n",
    "        :return: None\n",
    "        \"\"\"\n",
    "\n",
    "        # Level 0: the token embeddings concatenated with themselves --\n",
    "        # presumably so the width matches the (forward || backward) LSTM levels\n",
    "        # collected below; confirm against downstream consumers.\n",
    "        elmo_embeddings = list()\n",
    "        elmo_embeddings.append(concatenate([self._model.get_layer('token_encoding').output, self._model.get_layer('token_encoding').output],\n",
    "                                           name='elmo_embeddings_level_0'))\n",
    "        # Levels 1..n: forward block output joined with the re-reversed\n",
    "        # backward block output of the same depth.\n",
    "        for i in range(self.parameters['n_lstm_layers']):\n",
    "            elmo_embeddings.append(concatenate([self._model.get_layer('f_block_{}'.format(i + 1)).output,\n",
    "                                                Lambda(function=ELMo.reverse)\n",
    "                                                (self._model.get_layer('b_block_{}'.format(i + 1)).output)],\n",
    "                                               name='elmo_embeddings_level_{}'.format(i + 1)))\n",
    "\n",
    "        # Re-apply the token-encoding zero mask to every ELMo level.\n",
    "        camos = list()\n",
    "        for i, elmo_embedding in enumerate(elmo_embeddings):\n",
    "            camos.append(Camouflage(mask_value=0.0, name='camo_elmo_embeddings_level_{}'.format(i + 1))([elmo_embedding,\n",
    "                                                                                                         self._model.get_layer(\n",
    "                                                                                                             'token_encoding').output]))\n",
    "\n",
    "        self._elmo_model = Model(inputs=[self._model.get_layer('word_indices').input], outputs=camos)\n",
    "\n",
    "        if print_summary:\n",
    "            self._elmo_model.summary()\n",
    "\n",
    "        if save:\n",
    "            self._elmo_model.save(os.path.join(MODELS_DIR, 'ELMo_Encoder.hd5'))\n",
    "            print('ELMo Encoder saved successfully')\n",
    "\n",
    "    def save(self, sampled_softmax=True):\n",
    "        \"\"\"\n",
    "        Persist model in disk\n",
    "        :param sampled_softmax: reload model using the full softmax function\n",
    "        :return: None\n",
    "        \"\"\"\n",
    "        if not sampled_softmax:\n",
    "            # Sampling the whole vocabulary is equivalent to a full softmax.\n",
    "            self.parameters['num_sampled'] = self.parameters['vocab_size']\n",
    "        self.compile_elmo()\n",
    "        best_weights = os.path.join(MODELS_DIR, 'elmo_best_weights.hdf5')\n",
    "        self._model.load_weights(best_weights)\n",
    "        self._model.save(os.path.join(MODELS_DIR, 'ELMo_LM_EVAL.hd5'))\n",
    "        print('ELMo Language Model saved successfully')\n",
    "\n",
    "    def load(self):\n",
    "        \"\"\"Load the persisted ELMo language model from MODELS_DIR.\"\"\"\n",
    "        custom_layers = {'TimestepDropout': TimestepDropout,\n",
    "                         'Camouflage': Camouflage}\n",
    "        self._model = load_model(os.path.join(MODELS_DIR, 'ELMo_LM.h5'),\n",
    "                                 custom_objects=custom_layers)\n",
    "\n",
    "    def load_elmo_encoder(self):\n",
    "        \"\"\"Load the persisted ELMo encoder meta-model from MODELS_DIR.\"\"\"\n",
    "        custom_layers = {'TimestepDropout': TimestepDropout,\n",
    "                         'Camouflage': Camouflage}\n",
    "        self._elmo_model = load_model(os.path.join(MODELS_DIR, 'ELMo_Encoder.hd5'),\n",
    "                                      custom_objects=custom_layers)\n",
    "\n",
    "    def get_outputs(self, test_data, output_type='word', state='last'):\n",
    "        \"\"\"\n",
    "        Get ELMo representations for the given inputs.\n",
    "        :param test_data: data generator / tuple whose first element is the model input\n",
    "        :param output_type: 'word' for word vectors or 'sentence' for sentence vectors\n",
    "        :param state: 'last' for 2nd LSTMs outputs or 'mean' for mean-pooling over inputs, 1st LSTMs and 2nd LSTMs\n",
    "        :return: tuple (vectors, raw per-level predictions)\n",
    "        \"\"\"\n",
    "        # Generate samples\n",
    "        x = test_data[0]\n",
    "\n",
    "        preds = np.asarray(self._elmo_model.predict(x))\n",
    "        if state == 'last':\n",
    "            elmo_vectors = preds[-1]\n",
    "        else:\n",
    "            elmo_vectors = np.mean(preds, axis=0)\n",
    "\n",
    "        # BUG FIX: the docstring and default advertise 'word', but the original\n",
    "        # code compared against 'words', so the default always fell through to\n",
    "        # the sentence branch. Accept both spellings for backward compatibility.\n",
    "        if output_type in ('word', 'words'):\n",
    "            return elmo_vectors, preds\n",
    "        else:\n",
    "            # Sentence vectors: mean-pool word vectors over the time axis.\n",
    "            return np.mean(elmo_vectors, axis=1), preds\n",
    "\n",
    "    @staticmethod\n",
    "    def reverse(inputs, axes=1):\n",
    "        \"\"\"Reverse a tensor along the given axis (time axis 1 by default).\"\"\"\n",
    "        return K.reverse(inputs, axes=axes)\n",
    "\n",
    "    @staticmethod\n",
    "    def perplexity(y_pred, y_true):\n",
    "        \"\"\"\n",
    "        Compute corpus perplexity from per-token predictions.\n",
    "        :param y_pred: iterable of per-sequence probability arrays (tokens x vocab)\n",
    "        :param y_true: iterable of per-sequence integer target ids\n",
    "        :return: perplexity as a float\n",
    "        \"\"\"\n",
    "\n",
    "        cross_entropies = []\n",
    "        for y_pred_seq, y_true_seq in zip(y_pred, y_true):\n",
    "            # Reshape targets to one-hot vectors\n",
    "            y_true_seq = to_categorical(y_true_seq, y_pred_seq.shape[-1])\n",
    "            # Compute cross_entropy for sentence words\n",
    "            cross_entropy = K.categorical_crossentropy(tf.convert_to_tensor(y_true_seq, dtype=tf.float32),\n",
    "                                                       tf.convert_to_tensor(y_pred_seq, dtype=tf.float32))\n",
    "            cross_entropies.extend(cross_entropy.eval(session=K.get_session()))\n",
    "\n",
    "        # Compute mean cross_entropy and perplexity.\n",
    "        # BUG FIX: K.categorical_crossentropy returns cross-entropy in nats\n",
    "        # (natural log), so perplexity is exp(CE); the original pow(2.0, CE)\n",
    "        # understated it.\n",
    "        cross_entropy = np.mean(np.asarray(cross_entropies), axis=-1)\n",
    "\n",
    "        return float(np.exp(cross_entropy))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "['/job:localhost/replica:0/task:0/device:GPU:0']"
     },
     "metadata": {},
     "execution_count": 9
    }
   ],
   "source": [
    "# List GPUs visible to the TensorFlow backend.\n",
    "# NOTE(review): _get_available_gpus() is a private keras 2.3.x API.\n",
    "K.tensorflow_backend._get_available_gpus()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "compile\nModel: \"model_2\"\n__________________________________________________________________________________________________\nLayer (type)                    Output Shape         Param #     Connected to                     \n==================================================================================================\nword_indices (InputLayer)       (None, None)         0                                            \n__________________________________________________________________________________________________\ntoken_encoding (Embedding)      (None, None, 200)    10967400    word_indices[0][0]               \n__________________________________________________________________________________________________\nspatial_dropout1d_6 (SpatialDro (None, None, 200)    0           token_encoding[0][0]             \n__________________________________________________________________________________________________\ntimestep_dropout_2 (TimestepDro (None, None, 200)    0           spatial_dropout1d_6[0][0]        \n__________________________________________________________________________________________________\nlambda_3 (Lambda)               (None, None, 200)    0           timestep_dropout_2[0][0]         \n__________________________________________________________________________________________________\ncu_dnnlstm_7 (CuDNNLSTM)        (None, None, 400)    963200      lambda_3[0][0]                   \n__________________________________________________________________________________________________\nlambda_4 (Lambda)               (None, None, 200)    0           spatial_dropout1d_6[0][0]        \n__________________________________________________________________________________________________\ncu_dnnlstm_5 (CuDNNLSTM)        (None, None, 400)    963200      timestep_dropout_2[0][0]         \n__________________________________________________________________________________________________\ncamouflage_7 (Camouflage)       (None, None, 400)    0   
        cu_dnnlstm_7[0][0]               \n                                                                 lambda_4[0][0]                   \n__________________________________________________________________________________________________\ncamouflage_5 (Camouflage)       (None, None, 400)    0           cu_dnnlstm_5[0][0]               \n                                                                 spatial_dropout1d_6[0][0]        \n__________________________________________________________________________________________________\ntime_distributed_7 (TimeDistrib (None, None, 200)    80200       camouflage_7[0][0]               \n__________________________________________________________________________________________________\ntime_distributed_5 (TimeDistrib (None, None, 200)    80200       camouflage_5[0][0]               \n__________________________________________________________________________________________________\nb_block_1 (Add)                 (None, None, 200)    0           time_distributed_7[0][0]         \n                                                                 lambda_3[0][0]                   \n__________________________________________________________________________________________________\nf_block_1 (Add)                 (None, None, 200)    0           time_distributed_5[0][0]         \n                                                                 timestep_dropout_2[0][0]         \n__________________________________________________________________________________________________\nspatial_dropout1d_9 (SpatialDro (None, None, 200)    0           b_block_1[0][0]                  \n__________________________________________________________________________________________________\nspatial_dropout1d_7 (SpatialDro (None, None, 200)    0           f_block_1[0][0]                  \n__________________________________________________________________________________________________\ncu_dnnlstm_8 (CuDNNLSTM)        (None, None, 400)    
963200      spatial_dropout1d_9[0][0]        \n__________________________________________________________________________________________________\ncu_dnnlstm_6 (CuDNNLSTM)        (None, None, 400)    963200      spatial_dropout1d_7[0][0]        \n__________________________________________________________________________________________________\ncamouflage_8 (Camouflage)       (None, None, 400)    0           cu_dnnlstm_8[0][0]               \n                                                                 lambda_4[0][0]                   \n__________________________________________________________________________________________________\ncamouflage_6 (Camouflage)       (None, None, 400)    0           cu_dnnlstm_6[0][0]               \n                                                                 spatial_dropout1d_6[0][0]        \n__________________________________________________________________________________________________\ntime_distributed_8 (TimeDistrib (None, None, 200)    80200       camouflage_8[0][0]               \n__________________________________________________________________________________________________\ntime_distributed_6 (TimeDistrib (None, None, 200)    80200       camouflage_6[0][0]               \n__________________________________________________________________________________________________\nb_block_2 (Add)                 (None, None, 200)    0           time_distributed_8[0][0]         \n                                                                 spatial_dropout1d_9[0][0]        \n__________________________________________________________________________________________________\nf_block_2 (Add)                 (None, None, 200)    0           time_distributed_6[0][0]         \n                                                                 spatial_dropout1d_7[0][0]        \n__________________________________________________________________________________________________\nspatial_dropout1d_10 (SpatialDr (None, None, 200)    
0           b_block_2[0][0]                  \n__________________________________________________________________________________________________\nspatial_dropout1d_8 (SpatialDro (None, None, 200)    0           f_block_2[0][0]                  \n__________________________________________________________________________________________________\nnext_ids (InputLayer)           (None, None, 1)      0                                            \n__________________________________________________________________________________________________\nreverse (Lambda)                (None, None, 200)    0           spatial_dropout1d_10[0][0]       \n__________________________________________________________________________________________________\nprevious_ids (InputLayer)       (None, None, 1)      0                                            \n__________________________________________________________________________________________________\nsampled_softmax_2 (SampledSoftm (None, None, 200)    11022237    spatial_dropout1d_8[0][0]        \n                                                                 next_ids[0][0]                   \n                                                                 reverse[0][0]                    \n                                                                 previous_ids[0][0]               \n==================================================================================================\nTotal params: 15,195,837\nTrainable params: 15,195,837\nNon-trainable params: 0\n__________________________________________________________________________________________________\ntrain\nWARNING:tensorflow:From /home/xieck13/miniconda3/envs/tensorflow_py3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:422: The name tf.global_variables is deprecated. 
Please use tf.compat.v1.global_variables instead.\n\nTrain on 800 samples, validate on 200 samples\nEpoch 1/1\n800/800 [==============================] - 5s 6ms/step - loss: 232.6873 - val_loss: 63.0209\n\nEpoch 00001: val_loss improved from inf to 63.02093, saving model to /home/xieck13/Workspace/ELMo-keras/data/models/elmo_best_weights.hdf5\nTraining took 8.92537260055542 sec\nsave\nWARNING:tensorflow:From <ipython-input-8-82935a718c3e>:264: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\n\nFuture major versions of TensorFlow will allow gradients to flow\ninto the labels input on backprop by default.\n\nSee `tf.nn.softmax_cross_entropy_with_logits_v2`.\n\nELMo Language Model saved successfully\n"
    }
   ],
   "source": [
    "\n",
    "# Hyper-parameters / configuration for the ELMo LM run.\n",
    "# NOTE(review): `train` and `valid` used at the bottom must be defined by an\n",
    "# earlier cell (not visible here); this cell fails on a fresh kernel otherwise.\n",
    "parameters = {\n",
    "    'multi_processing': True,\n",
    "    'n_threads': os.cpu_count(),\n",
    "    'cuDNN': True if len(K.tensorflow_backend._get_available_gpus()) else False,\n",
    "    'train_dataset': 'txt/advertiser_id.train.tokens',\n",
    "    'valid_dataset': 'txt/advertiser_id.valid.tokens',\n",
    "    'test_dataset': 'txt/advertiser_id.test.tokens',\n",
    "    'vocab': 'txt/advertiser_id.vocab',\n",
    "    'vocab_size': 54837,\n",
    "    'num_sampled': 1000,\n",
    "    'charset_size': 262,\n",
    "    'sentence_maxlen': 100,\n",
    "    'token_maxlen': 50,\n",
    "    'token_encoding': 'word',\n",
    "    'epochs': 1,\n",
    "    'patience': 2,\n",
    "    'batch_size': 36,\n",
    "    'clip_value': 1,\n",
    "    'cell_clip': 5,\n",
    "    'proj_clip': 5,\n",
    "    'lr': 0.2,\n",
    "    'shuffle': True,\n",
    "    'n_lstm_layers': 2,\n",
    "    'n_highway_layers': 2,\n",
    "    'cnn_filters': [[1, 32],\n",
    "                    [2, 32],\n",
    "                    [3, 64],\n",
    "                    [4, 128],\n",
    "                    [5, 256],\n",
    "                    [6, 512],\n",
    "                    [7, 512]\n",
    "                    ],\n",
    "    'lstm_units_size': 400,\n",
    "    'hidden_units_size': 200,\n",
    "    'char_embedding_size': 16,\n",
    "    'dropout_rate': 0.1,\n",
    "    'word_dropout_rate': 0.05,\n",
    "    'weight_tying': True,\n",
    "}\n",
    "\n",
    "# Set-up Generators\n",
    "# train_generator = LMDataGenerator(os.path.join(DATA_SET_DIR, parameters['train_dataset']),\n",
    "#                                   os.path.join(DATA_SET_DIR, parameters['vocab']),\n",
    "#                                   sentence_maxlen=parameters['sentence_maxlen'],\n",
    "#                                   token_maxlen=parameters['token_maxlen'],\n",
    "#                                   batch_size=parameters['batch_size'],\n",
    "#                                   shuffle=parameters['shuffle'],\n",
    "#                                   token_encoding=parameters['token_encoding'])\n",
    "\n",
    "# val_generator = LMDataGenerator(os.path.join(DATA_SET_DIR, parameters['valid_dataset']),\n",
    "#                                 os.path.join(DATA_SET_DIR, parameters['vocab']),\n",
    "#                                 sentence_maxlen=parameters['sentence_maxlen'],\n",
    "#                                 token_maxlen=parameters['token_maxlen'],\n",
    "#                                 batch_size=parameters['batch_size'],\n",
    "#                                 shuffle=parameters['shuffle'],\n",
    "#                                 token_encoding=parameters['token_encoding'])\n",
    "\n",
    "# test_generator = LMDataGenerator(os.path.join(DATA_SET_DIR, parameters['test_dataset']),\n",
    "#                                 os.path.join(DATA_SET_DIR, parameters['vocab']),\n",
    "#                                 sentence_maxlen=parameters['sentence_maxlen'],\n",
    "#                                 token_maxlen=parameters['token_maxlen'],\n",
    "#                                 batch_size=parameters['batch_size'],\n",
    "#                                 shuffle=parameters['shuffle'],\n",
    "#                                 token_encoding=parameters['token_encoding'])\n",
    "\n",
    "\n",
    "# Compile ELMo\n",
    "print('compile')\n",
    "elmo_model = ELMo(parameters)\n",
    "elmo_model.compile_elmo(print_summary=True)\n",
    "\n",
    "# Train ELMo\n",
    "print('train')\n",
    "elmo_model.train(train_data=train, valid_data=valid)\n",
    "\n",
    "# Persist ELMo Bidirectional Language Model in disk\n",
    "print('save')\n",
    "elmo_model.save(sampled_softmax=False)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "evaluate\n"
    },
    {
     "output_type": "error",
     "ename": "NameError",
     "evalue": "name 'elmo_model' is not defined",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-1-29ca49d37fdc>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'evaluate'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0melmo_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m: name 'elmo_model' is not defined"
     ]
    }
   ],
   "source": [
    "# NOTE(review): depends on `elmo_model` and `test` from earlier cells; running\n",
    "# this on a fresh kernel raises NameError (see the recorded traceback). Also,\n",
    "# ELMo.evaluate expects an (x, y_fwd, y_bwd) tuple -- confirm `test[0]` is one.\n",
    "print('evaluate')\n",
    "elmo_model.evaluate(test[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "Model: \"model_4\"\n__________________________________________________________________________________________________\nLayer (type)                    Output Shape         Param #     Connected to                     \n==================================================================================================\nword_indices (InputLayer)       (None, None)         0                                            \n__________________________________________________________________________________________________\ntoken_encoding (Embedding)      (None, None, 200)    10967400    word_indices[0][0]               \n__________________________________________________________________________________________________\nspatial_dropout1d_11 (SpatialDr (None, None, 200)    0           token_encoding[0][0]             \n__________________________________________________________________________________________________\ntimestep_dropout_3 (TimestepDro (None, None, 200)    0           spatial_dropout1d_11[0][0]       \n__________________________________________________________________________________________________\nlambda_5 (Lambda)               (None, None, 200)    0           timestep_dropout_3[0][0]         \n__________________________________________________________________________________________________\ncu_dnnlstm_11 (CuDNNLSTM)       (None, None, 400)    963200      lambda_5[0][0]                   \n__________________________________________________________________________________________________\nlambda_6 (Lambda)               (None, None, 200)    0           spatial_dropout1d_11[0][0]       \n__________________________________________________________________________________________________\ncu_dnnlstm_9 (CuDNNLSTM)        (None, None, 400)    963200      timestep_dropout_3[0][0]         \n__________________________________________________________________________________________________\ncamouflage_11 (Camouflage)      (None, None, 400)    0           
cu_dnnlstm_11[0][0]              \n                                                                 lambda_6[0][0]                   \n__________________________________________________________________________________________________\ncamouflage_9 (Camouflage)       (None, None, 400)    0           cu_dnnlstm_9[0][0]               \n                                                                 spatial_dropout1d_11[0][0]       \n__________________________________________________________________________________________________\ntime_distributed_11 (TimeDistri (None, None, 200)    80200       camouflage_11[0][0]              \n__________________________________________________________________________________________________\ntime_distributed_9 (TimeDistrib (None, None, 200)    80200       camouflage_9[0][0]               \n__________________________________________________________________________________________________\nb_block_1 (Add)                 (None, None, 200)    0           time_distributed_11[0][0]        \n                                                                 lambda_5[0][0]                   \n__________________________________________________________________________________________________\nf_block_1 (Add)                 (None, None, 200)    0           time_distributed_9[0][0]         \n                                                                 timestep_dropout_3[0][0]         \n__________________________________________________________________________________________________\nspatial_dropout1d_14 (SpatialDr (None, None, 200)    0           b_block_1[0][0]                  \n__________________________________________________________________________________________________\nspatial_dropout1d_12 (SpatialDr (None, None, 200)    0           f_block_1[0][0]                  \n__________________________________________________________________________________________________\ncu_dnnlstm_12 (CuDNNLSTM)       (None, None, 400)    963200      
spatial_dropout1d_14[0][0]       \n__________________________________________________________________________________________________\ncu_dnnlstm_10 (CuDNNLSTM)       (None, None, 400)    963200      spatial_dropout1d_12[0][0]       \n__________________________________________________________________________________________________\ncamouflage_12 (Camouflage)      (None, None, 400)    0           cu_dnnlstm_12[0][0]              \n                                                                 lambda_6[0][0]                   \n__________________________________________________________________________________________________\ncamouflage_10 (Camouflage)      (None, None, 400)    0           cu_dnnlstm_10[0][0]              \n                                                                 spatial_dropout1d_11[0][0]       \n__________________________________________________________________________________________________\ntime_distributed_12 (TimeDistri (None, None, 200)    80200       camouflage_12[0][0]              \n__________________________________________________________________________________________________\ntime_distributed_10 (TimeDistri (None, None, 200)    80200       camouflage_10[0][0]              \n__________________________________________________________________________________________________\nb_block_2 (Add)                 (None, None, 200)    0           time_distributed_12[0][0]        \n                                                                 spatial_dropout1d_14[0][0]       \n__________________________________________________________________________________________________\nlambda_7 (Lambda)               (None, None, 200)    0           b_block_1[0][0]                  \n__________________________________________________________________________________________________\nf_block_2 (Add)                 (None, None, 200)    0           time_distributed_10[0][0]        \n                                                                 
spatial_dropout1d_12[0][0]       \n__________________________________________________________________________________________________\nlambda_8 (Lambda)               (None, None, 200)    0           b_block_2[0][0]                  \n__________________________________________________________________________________________________\nelmo_embeddings_level_0 (Concat (None, None, 400)    0           token_encoding[0][0]             \n                                                                 token_encoding[0][0]             \n__________________________________________________________________________________________________\nelmo_embeddings_level_1 (Concat (None, None, 400)    0           f_block_1[0][0]                  \n                                                                 lambda_7[0][0]                   \n__________________________________________________________________________________________________\nelmo_embeddings_level_2 (Concat (None, None, 400)    0           f_block_2[0][0]                  \n                                                                 lambda_8[0][0]                   \n__________________________________________________________________________________________________\ncamo_elmo_embeddings_level_1 (C (None, None, 400)    0           elmo_embeddings_level_0[0][0]    \n                                                                 token_encoding[0][0]             \n__________________________________________________________________________________________________\ncamo_elmo_embeddings_level_2 (C (None, None, 400)    0           elmo_embeddings_level_1[0][0]    \n                                                                 token_encoding[0][0]             \n__________________________________________________________________________________________________\ncamo_elmo_embeddings_level_3 (C (None, None, 400)    0           elmo_embeddings_level_2[0][0]    \n                                                                 
token_encoding[0][0]             \n==================================================================================================\nTotal params: 15,141,000\nTrainable params: 15,141,000\nNon-trainable params: 0\n__________________________________________________________________________________________________\nELMo Encoder saved successfully\n"
    }
   ],
   "source": [
    "elmo_model.wrap_multi_elmo_encoder(print_summary=True, save=True)\n",
    "elmo_model.load_elmo_encoder()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "mean_emb, emb_mats = elmo_model.get_outputs(test[0], output_type='words', state='mean')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "(1000, 100, 400)"
     },
     "metadata": {},
     "execution_count": 25
    }
   ],
   "source": [
    "mean_emb.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "array([[[-16.793087  ,  15.469028  ,   6.586699  , ...,   9.4156685 ,\n          -5.0877647 ,   4.0806293 ],\n        [-25.946507  ,  26.7283    ,   9.983514  , ..., -12.227016  ,\n           0.12517738,  -3.2325528 ],\n        [-28.366516  ,  29.114775  ,   5.9488387 , ...,   1.5749168 ,\n          -5.6783786 ,   0.95954895],\n        ...,\n        [-28.617298  ,  23.901163  ,   6.3567624 , ...,  11.935164  ,\n           5.124914  ,   9.142811  ],\n        [-28.619017  ,  23.8971    ,   6.3506746 , ...,  14.0541525 ,\n           4.7058    ,  11.179049  ],\n        [-28.620733  ,  23.893045  ,   6.3445926 , ...,  17.241693  ,\n          -0.6677715 ,   8.502923  ]],\n\n       [[-16.793087  ,  15.469028  ,   6.586699  , ...,  12.899727  ,\n          -6.553994  ,   6.699628  ],\n        [-25.970016  ,  26.755335  ,   9.90082   , ..., -11.053856  ,\n          -4.1123486 ,  -5.694795  ],\n        [-27.1172    ,  29.04404   ,   7.7042985 , ..., -12.095973  ,\n          -3.8920786 ,  -7.626403  ],\n        ...,\n        [-28.61994   ,  23.898762  ,   6.3540106 , ...,  11.935164  ,\n           5.124914  ,   9.142811  ],\n        [-28.621655  ,  23.894676  ,   6.347871  , ...,  14.0541525 ,\n           4.7058    ,  11.179049  ],\n        [-28.623367  ,  23.890596  ,   6.3417377 , ...,  17.241693  ,\n          -0.6677715 ,   8.502923  ]],\n\n       [[-16.793087  ,  15.469028  ,   6.586699  , ...,   6.6081085 ,\n           0.37598693,   6.6891613 ],\n        [-26.07968   ,  26.844093  ,   9.898005  , ...,   0.3338145 ,\n          -3.3879504 ,   7.3424125 ],\n        [-26.398752  ,  27.967247  ,   7.487482  , ..., -10.7416935 ,\n         -10.666886  , -18.618855  ],\n        ...,\n        [-28.613876  ,  23.907576  ,   6.3363686 , ...,  11.935164  ,\n           5.124914  ,   9.142811  ],\n        [-28.615557  ,  23.903591  ,   6.3303633 , ...,  14.0541525 ,\n           4.7058    ,  11.179049  ],\n        [-28.617231  ,  23.899614  ,   6.3243637 , ...,  
17.241693  ,\n          -0.6677715 ,   8.502923  ]],\n\n       ...,\n\n       [[-16.793087  ,  15.469029  ,   6.5866995 , ...,  12.353892  ,\n          -5.0755324 ,   7.268743  ],\n        [-25.967415  ,  26.669243  ,   9.856211  , ...,  -5.1594357 ,\n          -4.492308  ,  -6.446598  ],\n        [-27.160378  ,  29.01619   ,   7.7570906 , ..., -11.71669   ,\n           3.2558236 ,  -1.0776737 ],\n        ...,\n        [-28.986538  ,  23.79695   ,   6.1578426 , ...,  11.935166  ,\n           5.124913  ,   9.142811  ],\n        [-28.988205  ,  23.79273   ,   6.15238   , ...,  14.054153  ,\n           4.7057996 ,  11.179047  ],\n        [-28.989868  ,  23.788523  ,   6.146924  , ...,  17.241693  ,\n          -0.6677714 ,   8.502924  ]],\n\n       [[-16.793087  ,  15.469029  ,   6.5866995 , ...,   9.112533  ,\n          -4.6198525 ,   5.581876  ],\n        [-20.288982  ,  24.478676  ,  12.305756  , ..., -12.936905  ,\n          -4.4525294 , -17.142239  ],\n        [-26.96672   ,  29.961977  ,   2.790993  , ..., -12.816844  ,\n          -3.020033  , -10.446811  ],\n        ...,\n        [-28.435146  ,  23.93958   ,   6.4744244 , ...,  11.935166  ,\n           5.124913  ,   9.142811  ],\n        [-28.461792  ,  23.946312  ,   6.4709854 , ...,  14.054153  ,\n           4.7057996 ,  11.179047  ],\n        [-28.486237  ,  23.952082  ,   6.467319  , ...,  17.241693  ,\n          -0.6677714 ,   8.502924  ]],\n\n       [[-16.793087  ,  15.469029  ,   6.5866995 , ...,  12.058703  ,\n          -5.9529104 ,   5.4074845 ],\n        [-26.045197  ,  26.735758  ,   9.874409  , ..., -12.779766  ,\n          -2.5051064 ,  -7.1145196 ],\n        [-27.175032  ,  29.003054  ,   7.7416496 , ..., -11.19169   ,\n          -3.3599205 ,  -5.5803494 ],\n        ...,\n        [-28.629162  ,  23.878538  ,   6.3248634 , ...,  11.935166  ,\n           5.124913  ,   9.142811  ],\n        [-28.63086   ,  23.874493  ,   6.318746  , ...,  14.054153  ,\n           4.7057996 ,  11.179047  ],\n        
[-28.632559  ,  23.870455  ,   6.3126383 , ...,  17.241693  ,\n          -0.6677714 ,   8.502924  ]]], dtype=float32)"
     },
     "metadata": {},
     "execution_count": 15
    }
   ],
   "source": [
    "def get_elmo_outputs(model, x, output_type='words', state='mean'):\n",
    "    \"\"\"Return ELMo vectors (plus raw per-level predictions) for a batch x.\n",
    "\n",
    "    state='last' keeps only the top LM level; otherwise levels are averaged.\n",
    "    output_type='words' keeps per-token vectors; otherwise tokens are averaged\n",
    "    into a single vector per sentence.\n",
    "    \"\"\"\n",
    "    # NOTE(review): reaches into the private _elmo_model, mirroring the\n",
    "    # original pasted method body -- confirm a public accessor exists.\n",
    "    preds = np.asarray(model._elmo_model.predict(x))\n",
    "    if state == 'last':\n",
    "        elmo_vectors = preds[-1]\n",
    "    else:\n",
    "        elmo_vectors = np.mean(preds, axis=0)\n",
    "    if output_type == 'words':\n",
    "        return elmo_vectors, preds\n",
    "    return np.mean(elmo_vectors, axis=1), preds\n",
    "\n",
    "elmo_vectors, preds = get_elmo_outputs(elmo_model, test[0][0])\n",
    "elmo_vectors"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "(1000, 100, 400)"
     },
     "metadata": {},
     "execution_count": 18
    }
   ],
   "source": [
    "np.mean(emb_mats, axis=0).shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "(1000, 100)"
     },
     "metadata": {},
     "execution_count": 16
    }
   ],
   "source": [
    "# Sentence-level embeddings: average the per-level predictions, then the\n",
    "# per-token vectors along the time axis (one vector per sentence).\n",
    "preds = np.asarray(elmo_model._elmo_model.predict(test[0][0]))\n",
    "elmo_vectors = np.mean(preds, axis=0)\n",
    "np.mean(elmo_vectors, axis=1).shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "evaluate\n"
    },
    {
     "output_type": "error",
     "ename": "IndexError",
     "evalue": "invalid index to scalar variable.",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mIndexError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-20-807f1b38b905>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0;31m# Evaluate Bidirectional Language Model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      2\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'evaluate'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0melmo_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      4\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m \u001b[0;31m# Build ELMo meta-model to deploy for production and persist in disk\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-9-8d3feef6953c>\u001b[0m in \u001b[0;36mevaluate\u001b[0;34m(self, test_data)\u001b[0m\n\u001b[1;32m    468\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    469\u001b[0m             \u001b[0mtest_batch\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtest_data\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 470\u001b[0;31m             \u001b[0mx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mextend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    471\u001b[0m             \u001b[0my_true_forward\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mextend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    472\u001b[0m             \u001b[0my_true_backward\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mextend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mIndexError\u001b[0m: invalid index to scalar variable."
     ]
    }
   ],
   "source": [
    "# Evaluate Bidirectional Language Model\n",
    "print('evaluate')\n",
    "elmo_model.evaluate(test)\n",
    "\n",
    "# Build ELMo meta-model to deploy for production and persist in disk\n",
    "print('??')\n",
    "elmo_model.wrap_multi_elmo_encoder(print_summary=True, save=True)\n",
    "\n",
    "# Load ELMo encoder\n",
    "print('load')\n",
    "elmo_model.load_elmo_encoder()\n",
    "\n",
    "# Get ELMo embeddings to feed as inputs for downstream tasks\n",
    "elmo_embeddings = elmo_model.get_outputs(test, output_type='words', state='mean')\n",
    "\n",
    "# BUILD & TRAIN NEW KERAS MODEL FOR DOWNSTREAM TASK (E.G., TEXT CLASSIFICATION)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "array([[    1, 19467, 32676, ...,     0,     0,     0],\n       [    1, 44510, 44510, ...,     0,     0,     0],\n       [    1, 34239, 38115, ...,     0,     0,     0],\n       ...,\n       [    1, 20464, 46086, ...,     0,     0,     0],\n       [    1,   867,  1950, ...,     0,     0,     0],\n       [    1,  7567, 19855, ...,     0,     0,     0]], dtype=int32)"
     },
     "metadata": {},
     "execution_count": 21
    }
   ],
   "source": [
    "test[0][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_generator = LMDataGenerator(os.path.join(DATA_SET_DIR, parameters['test_dataset']),\n",
    "                                os.path.join(DATA_SET_DIR, parameters['vocab']),\n",
    "                                sentence_maxlen=parameters['sentence_maxlen'],\n",
    "                                token_maxlen=parameters['token_maxlen'],\n",
    "                                batch_size=parameters['batch_size'],\n",
    "                                shuffle=parameters['shuffle'],\n",
    "                                token_encoding=parameters['token_encoding'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "??\nModel: \"model_4\"\n__________________________________________________________________________________________________\nLayer (type)                    Output Shape         Param #     Connected to                     \n==================================================================================================\nword_indices (InputLayer)       (None, None)         0                                            \n__________________________________________________________________________________________________\ntoken_encoding (Embedding)      (None, None, 200)    10967400    word_indices[0][0]               \n__________________________________________________________________________________________________\nspatial_dropout1d_11 (SpatialDr (None, None, 200)    0           token_encoding[0][0]             \n__________________________________________________________________________________________________\ntimestep_dropout_3 (TimestepDro (None, None, 200)    0           spatial_dropout1d_11[0][0]       \n__________________________________________________________________________________________________\nlambda_5 (Lambda)               (None, None, 200)    0           timestep_dropout_3[0][0]         \n__________________________________________________________________________________________________\ncu_dnnlstm_11 (CuDNNLSTM)       (None, None, 400)    963200      lambda_5[0][0]                   \n__________________________________________________________________________________________________\nlambda_6 (Lambda)               (None, None, 200)    0           spatial_dropout1d_11[0][0]       \n__________________________________________________________________________________________________\ncu_dnnlstm_9 (CuDNNLSTM)        (None, None, 400)    963200      timestep_dropout_3[0][0]         \n__________________________________________________________________________________________________\ncamouflage_11 (Camouflage)      (None, None, 400)    0        
   cu_dnnlstm_11[0][0]              \n                                                                 lambda_6[0][0]                   \n__________________________________________________________________________________________________\ncamouflage_9 (Camouflage)       (None, None, 400)    0           cu_dnnlstm_9[0][0]               \n                                                                 spatial_dropout1d_11[0][0]       \n__________________________________________________________________________________________________\ntime_distributed_11 (TimeDistri (None, None, 200)    80200       camouflage_11[0][0]              \n__________________________________________________________________________________________________\ntime_distributed_9 (TimeDistrib (None, None, 200)    80200       camouflage_9[0][0]               \n__________________________________________________________________________________________________\nb_block_1 (Add)                 (None, None, 200)    0           time_distributed_11[0][0]        \n                                                                 lambda_5[0][0]                   \n__________________________________________________________________________________________________\nf_block_1 (Add)                 (None, None, 200)    0           time_distributed_9[0][0]         \n                                                                 timestep_dropout_3[0][0]         \n__________________________________________________________________________________________________\nspatial_dropout1d_14 (SpatialDr (None, None, 200)    0           b_block_1[0][0]                  \n__________________________________________________________________________________________________\nspatial_dropout1d_12 (SpatialDr (None, None, 200)    0           f_block_1[0][0]                  \n__________________________________________________________________________________________________\ncu_dnnlstm_12 (CuDNNLSTM)       (None, None, 400)    963200   
   spatial_dropout1d_14[0][0]       \n__________________________________________________________________________________________________\ncu_dnnlstm_10 (CuDNNLSTM)       (None, None, 400)    963200      spatial_dropout1d_12[0][0]       \n__________________________________________________________________________________________________\ncamouflage_12 (Camouflage)      (None, None, 400)    0           cu_dnnlstm_12[0][0]              \n                                                                 lambda_6[0][0]                   \n__________________________________________________________________________________________________\ncamouflage_10 (Camouflage)      (None, None, 400)    0           cu_dnnlstm_10[0][0]              \n                                                                 spatial_dropout1d_11[0][0]       \n__________________________________________________________________________________________________\ntime_distributed_12 (TimeDistri (None, None, 200)    80200       camouflage_12[0][0]              \n__________________________________________________________________________________________________\ntime_distributed_10 (TimeDistri (None, None, 200)    80200       camouflage_10[0][0]              \n__________________________________________________________________________________________________\nb_block_2 (Add)                 (None, None, 200)    0           time_distributed_12[0][0]        \n                                                                 spatial_dropout1d_14[0][0]       \n__________________________________________________________________________________________________\nlambda_7 (Lambda)               (None, None, 200)    0           b_block_1[0][0]                  \n__________________________________________________________________________________________________\nf_block_2 (Add)                 (None, None, 200)    0           time_distributed_10[0][0]        \n                                                              
   spatial_dropout1d_12[0][0]       \n__________________________________________________________________________________________________\nlambda_8 (Lambda)               (None, None, 200)    0           b_block_2[0][0]                  \n__________________________________________________________________________________________________\nelmo_embeddings_level_0 (Concat (None, None, 400)    0           token_encoding[0][0]             \n                                                                 token_encoding[0][0]             \n__________________________________________________________________________________________________\nelmo_embeddings_level_1 (Concat (None, None, 400)    0           f_block_1[0][0]                  \n                                                                 lambda_7[0][0]                   \n__________________________________________________________________________________________________\nelmo_embeddings_level_2 (Concat (None, None, 400)    0           f_block_2[0][0]                  \n                                                                 lambda_8[0][0]                   \n__________________________________________________________________________________________________\ncamo_elmo_embeddings_level_1 (C (None, None, 400)    0           elmo_embeddings_level_0[0][0]    \n                                                                 token_encoding[0][0]             \n__________________________________________________________________________________________________\ncamo_elmo_embeddings_level_2 (C (None, None, 400)    0           elmo_embeddings_level_1[0][0]    \n                                                                 token_encoding[0][0]             \n__________________________________________________________________________________________________\ncamo_elmo_embeddings_level_3 (C (None, None, 400)    0           elmo_embeddings_level_2[0][0]    \n                                                              
   token_encoding[0][0]             \n==================================================================================================\nTotal params: 15,141,000\nTrainable params: 15,141,000\nNon-trainable params: 0\n__________________________________________________________________________________________________\nELMo Encoder saved successfully\nload\n"
    }
   ],
   "source": [
    "# Evaluate Bidirectional Language Model\n",
    "# print('evaluate')\n",
    "# elmo_model.evaluate(test_generator)\n",
    "\n",
    "# Build ELMo meta-model to deploy for production and persist in disk\n",
    "print('??')\n",
    "elmo_model.wrap_multi_elmo_encoder(print_summary=True, save=True)\n",
    "\n",
    "# Load ELMo encoder\n",
    "print('load')\n",
    "elmo_model.load_elmo_encoder()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "output_type": "error",
     "ename": "TypeError",
     "evalue": "'numpy.int32' object is not iterable",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-29-566fb3b7ec69>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0;31m# Get ELMo embeddings to feed as inputs for downstream tasks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0melmo_embeddings\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0melmo_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_outputs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput_type\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'word'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstate\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'mean'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      3\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      4\u001b[0m \u001b[0;31m# BUILD & TRAIN NEW KERAS MODEL FOR DOWNSTREAM TASK (E.G., TEXT CLASSIFICATION)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-8-48ae256abdc7>\u001b[0m in \u001b[0;36mget_outputs\u001b[0;34m(self, test_data, output_type, state)\u001b[0m\n\u001b[1;32m    553\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    554\u001b[0m             \u001b[0mtest_batch\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtest_data\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 555\u001b[0;31m             \u001b[0mx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mextend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    556\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    557\u001b[0m         \u001b[0mpreds\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0masarray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_elmo_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0masarray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mTypeError\u001b[0m: 'numpy.int32' object is not iterable"
     ]
    }
   ],
   "source": [
    "# Get ELMo embeddings to feed as inputs for downstream tasks\n",
    "elmo_embeddings = elmo_model.get_outputs(test_generator, output_type='words', state='mean')\n",
    "\n",
    "# BUILD & TRAIN NEW KERAS MODEL FOR DOWNSTREAM TASK (E.G., TEXT CLASSIFICATION)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": "1%|          | 54/7813 [00:13<32:41,  3.96it/s]\n"
    },
    {
     "output_type": "error",
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-32-431ae0bf1acf>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      2\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mtqdm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_generator\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m     \u001b[0mtest_batch\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtest_generator\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      4\u001b[0m     \u001b[0mx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mextend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workspace/ELMo-keras/elmo/lm_generator.py\u001b[0m in \u001b[0;36m__getitem__\u001b[0;34m(self, index)\u001b[0m\n\u001b[1;32m     47\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_id\u001b[0m \u001b[0;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_indices\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     48\u001b[0m             \u001b[0;31m# Read sentence (sample)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 49\u001b[0;31m             \u001b[0mword_indices_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_token_indices\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msent_id\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mbatch_id\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     50\u001b[0m             \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtoken_encoding\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'char'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     51\u001b[0m                 \u001b[0mword_char_indices_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_token_char_indices\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msent_id\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mbatch_id\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workspace/ELMo-keras/elmo/lm_generator.py\u001b[0m in \u001b[0;36mget_token_indices\u001b[0;34m(self, sent_id)\u001b[0m\n\u001b[1;32m     78\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mget_token_indices\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msent_id\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     79\u001b[0m         \u001b[0;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcorpus\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mfp\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 80\u001b[0;31m             \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mline\u001b[0m \u001b[0;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfp\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     81\u001b[0m                 \u001b[0;32mif\u001b[0m \u001b[0mi\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0msent_id\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     82\u001b[0m                     \u001b[0mtoken_ids\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msentence_maxlen\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mint32\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/miniconda3/envs/tensorflow_py3/lib/python3.6/codecs.py\u001b[0m in \u001b[0;36mdecode\u001b[0;34m(self, input, final)\u001b[0m\n\u001b[1;32m    319\u001b[0m         \u001b[0;31m# decode input (taking the buffer into account)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    320\u001b[0m         \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbuffer\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 321\u001b[0;31m         \u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mconsumed\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_buffer_decode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0merrors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfinal\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    322\u001b[0m         \u001b[0;31m# keep undecoded input until the next call\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    323\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbuffer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mconsumed\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Collect the word-index inputs ([0] of each batch's x) across the whole test set.\n",
    "# NOTE: the extend was previously commented out, which made the loop a no-op;\n",
    "# restored per the original code shown in this cell's traceback output.\n",
    "x = []\n",
    "for i in tqdm(range(len(test_generator))):\n",
    "    test_batch = test_generator[i][0]\n",
    "    x.extend(test_batch[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "7813"
     },
     "metadata": {},
     "execution_count": 31
    }
   ],
   "source": [
    "len(test_generator)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.6.5 64-bit ('tensorflow_py3': conda)",
   "language": "python",
   "name": "python36564bittensorflowpy3condae845837f4a004aac989f16f52c8df904"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5-final"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}