{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Sentiment Classification with Attention\n",
    "In this notebook I train a sentiment classification model based on a GRU network. I also add an attention layer so as to visualize what the model is learning and considers important for sentiment classification. Code adapted from [here](https://github.com/ilivans/tf-rnn-attention) (thanks to the author for releasing his code)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow.contrib.rnn import GRUCell\n",
    "from tensorflow.python.ops.rnn import dynamic_rnn as rnn\n",
    "from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn as bi_rnn\n",
    "from keras.datasets import imdb\n",
    "import os\n",
    "\n",
    "#from attention import attention\n",
    "#from utils import *\n",
    "os.environ['CUDA_VISIBLE_DEVICES'] = '' # avoids using GPU for this session"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "NUM_WORDS = 10000  # vocabulary cap passed to imdb.load_data\n",
    "INDEX_FROM = 3  # word ids in the loaded data are offset by 3 (ids 0/1/2 are assigned to PAD/START/UNK later)\n",
    "SEQUENCE_LENGTH = 250  # every review is padded/truncated to this many tokens\n",
    "EMBEDDING_DIM = 100  # size of the (randomly initialized) word embeddings\n",
    "HIDDEN_SIZE = 150  # GRU hidden units per direction\n",
    "ATTENTION_SIZE = 50  # size of the attention projection\n",
    "KEEP_PROB = 0.8  # dropout keep probability used during training\n",
    "BATCH_SIZE = 256\n",
    "NUM_EPOCHS = 3  # Model easily overfits without pre-trained word embeddings, so train for only a few epochs\n",
    "DELTA = 0.5  # smoothing factor for the exponential moving average of the training loss"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load the dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading data from https://s3.amazonaws.com/text-datasets/imdb.npz\n",
      "17047552/17464789 [============================>.] - ETA: 0s"
     ]
    }
   ],
   "source": [
    "(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=NUM_WORDS, index_from=INDEX_FROM)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ list([1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25, 100, 43, 838, 112, 50, 670, 2, 9, 35, 480, 284, 5, 150, 4, 172, 112, 167, 2, 336, 385, 39, 4, 172, 4536, 1111, 17, 546, 38, 13, 447, 4, 192, 50, 16, 6, 147, 2025, 19, 14, 22, 4, 1920, 4613, 469, 4, 22, 71, 87, 12, 16, 43, 530, 38, 76, 15, 13, 1247, 4, 22, 17, 515, 17, 12, 16, 626, 18, 2, 5, 62, 386, 12, 8, 316, 8, 106, 5, 4, 2223, 5244, 16, 480, 66, 3785, 33, 4, 130, 12, 16, 38, 619, 5, 25, 124, 51, 36, 135, 48, 25, 1415, 33, 6, 22, 12, 215, 28, 77, 52, 5, 14, 407, 16, 82, 2, 8, 4, 107, 117, 5952, 15, 256, 4, 2, 7, 3766, 5, 723, 36, 71, 43, 530, 476, 26, 400, 317, 46, 7, 4, 2, 1029, 13, 104, 88, 4, 381, 15, 297, 98, 32, 2071, 56, 26, 141, 6, 194, 7486, 18, 4, 226, 22, 21, 134, 476, 26, 480, 5, 144, 30, 5535, 18, 51, 36, 28, 224, 92, 25, 104, 4, 226, 65, 16, 38, 1334, 88, 12, 16, 283, 5, 16, 4472, 113, 103, 32, 15, 16, 5345, 19, 178, 32]),\n",
       "       list([1, 194, 1153, 194, 8255, 78, 228, 5, 6, 1463, 4369, 5012, 134, 26, 4, 715, 8, 118, 1634, 14, 394, 20, 13, 119, 954, 189, 102, 5, 207, 110, 3103, 21, 14, 69, 188, 8, 30, 23, 7, 4, 249, 126, 93, 4, 114, 9, 2300, 1523, 5, 647, 4, 116, 9, 35, 8163, 4, 229, 9, 340, 1322, 4, 118, 9, 4, 130, 4901, 19, 4, 1002, 5, 89, 29, 952, 46, 37, 4, 455, 9, 45, 43, 38, 1543, 1905, 398, 4, 1649, 26, 6853, 5, 163, 11, 3215, 2, 4, 1153, 9, 194, 775, 7, 8255, 2, 349, 2637, 148, 605, 2, 8003, 15, 123, 125, 68, 2, 6853, 15, 349, 165, 4362, 98, 5, 4, 228, 9, 43, 2, 1157, 15, 299, 120, 5, 120, 174, 11, 220, 175, 136, 50, 9, 4373, 228, 8255, 5, 2, 656, 245, 2350, 5, 4, 9837, 131, 152, 491, 18, 2, 32, 7464, 1212, 14, 9, 6, 371, 78, 22, 625, 64, 1382, 9, 8, 168, 145, 23, 4, 1690, 15, 16, 4, 1355, 5, 28, 6, 52, 154, 462, 33, 89, 78, 285, 16, 145, 95]),\n",
       "       list([1, 14, 47, 8, 30, 31, 7, 4, 249, 108, 7, 4, 5974, 54, 61, 369, 13, 71, 149, 14, 22, 112, 4, 2401, 311, 12, 16, 3711, 33, 75, 43, 1829, 296, 4, 86, 320, 35, 534, 19, 263, 4821, 1301, 4, 1873, 33, 89, 78, 12, 66, 16, 4, 360, 7, 4, 58, 316, 334, 11, 4, 1716, 43, 645, 662, 8, 257, 85, 1200, 42, 1228, 2578, 83, 68, 3912, 15, 36, 165, 1539, 278, 36, 69, 2, 780, 8, 106, 14, 6905, 1338, 18, 6, 22, 12, 215, 28, 610, 40, 6, 87, 326, 23, 2300, 21, 23, 22, 12, 272, 40, 57, 31, 11, 4, 22, 47, 6, 2307, 51, 9, 170, 23, 595, 116, 595, 1352, 13, 191, 79, 638, 89, 2, 14, 9, 8, 106, 607, 624, 35, 534, 6, 227, 7, 129, 113]),\n",
       "       ...,\n",
       "       list([1, 11, 6, 230, 245, 6401, 9, 6, 1225, 446, 2, 45, 2174, 84, 8322, 4007, 21, 4, 912, 84, 2, 325, 725, 134, 2, 1715, 84, 5, 36, 28, 57, 1099, 21, 8, 140, 8, 703, 5, 2, 84, 56, 18, 1644, 14, 9, 31, 7, 4, 9406, 1209, 2295, 2, 1008, 18, 6, 20, 207, 110, 563, 12, 8, 2901, 2, 8, 97, 6, 20, 53, 4767, 74, 4, 460, 364, 1273, 29, 270, 11, 960, 108, 45, 40, 29, 2961, 395, 11, 6, 4065, 500, 7, 2, 89, 364, 70, 29, 140, 4, 64, 4780, 11, 4, 2678, 26, 178, 4, 529, 443, 2, 5, 27, 710, 117, 2, 8123, 165, 47, 84, 37, 131, 818, 14, 595, 10, 10, 61, 1242, 1209, 10, 10, 288, 2260, 1702, 34, 2901, 2, 4, 65, 496, 4, 231, 7, 790, 5, 6, 320, 234, 2766, 234, 1119, 1574, 7, 496, 4, 139, 929, 2901, 2, 7750, 5, 4241, 18, 4, 8497, 2, 250, 11, 1818, 7561, 4, 4217, 5408, 747, 1115, 372, 1890, 1006, 541, 9303, 7, 4, 59, 2, 4, 3586, 2]),\n",
       "       list([1, 1446, 7079, 69, 72, 3305, 13, 610, 930, 8, 12, 582, 23, 5, 16, 484, 685, 54, 349, 11, 4120, 2959, 45, 58, 1466, 13, 197, 12, 16, 43, 23, 2, 5, 62, 30, 145, 402, 11, 4131, 51, 575, 32, 61, 369, 71, 66, 770, 12, 1054, 75, 100, 2198, 8, 4, 105, 37, 69, 147, 712, 75, 3543, 44, 257, 390, 5, 69, 263, 514, 105, 50, 286, 1814, 23, 4, 123, 13, 161, 40, 5, 421, 4, 116, 16, 897, 13, 2, 40, 319, 5872, 112, 6700, 11, 4803, 121, 25, 70, 3468, 4, 719, 3798, 13, 18, 31, 62, 40, 8, 7200, 4, 2, 7, 14, 123, 5, 942, 25, 8, 721, 12, 145, 5, 202, 12, 160, 580, 202, 12, 6, 52, 58, 2, 92, 401, 728, 12, 39, 14, 251, 8, 15, 251, 5, 2, 12, 38, 84, 80, 124, 12, 9, 23]),\n",
       "       list([1, 17, 6, 194, 337, 7, 4, 204, 22, 45, 254, 8, 106, 14, 123, 4, 2, 270, 2, 5, 2, 2, 732, 2098, 101, 405, 39, 14, 1034, 4, 1310, 9, 115, 50, 305, 12, 47, 4, 168, 5, 235, 7, 38, 111, 699, 102, 7, 4, 4039, 9245, 9, 24, 6, 78, 1099, 17, 2345, 2, 21, 27, 9685, 6139, 5, 2, 1603, 92, 1183, 4, 1310, 7, 4, 204, 42, 97, 90, 35, 221, 109, 29, 127, 27, 118, 8, 97, 12, 157, 21, 6789, 2, 9, 6, 66, 78, 1099, 4, 631, 1191, 5, 2642, 272, 191, 1070, 6, 7585, 8, 2197, 2, 2, 544, 5, 383, 1271, 848, 1468, 2, 497, 2, 8, 1597, 8778, 2, 21, 60, 27, 239, 9, 43, 8368, 209, 405, 10, 10, 12, 764, 40, 4, 248, 20, 12, 16, 5, 174, 1791, 72, 7, 51, 6, 1739, 22, 4, 204, 131, 9])], dtype=object)"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ list([1, 89, 27, 2, 9289, 17, 199, 132, 5, 4191, 16, 1339, 24, 8, 760, 4, 1385, 7, 4, 22, 1368, 2, 16, 5149, 17, 1635, 7, 2, 1368, 9, 4, 1357, 8, 14, 991, 13, 877, 38, 19, 27, 239, 13, 100, 235, 61, 483, 2, 4, 7, 4, 20, 131, 1102, 72, 8, 14, 251, 27, 1146, 7, 308, 16, 735, 1517, 17, 29, 144, 28, 77, 2305, 18, 12]),\n",
       "       list([1, 3452, 7, 2, 517, 522, 31, 314, 17, 1909, 2046, 2, 6829, 2, 83, 4, 2314, 673, 33, 27, 568, 1709, 2923, 32, 4, 189, 22, 11, 975, 4135, 29, 2376, 4, 1287, 7, 4, 2, 4217, 15, 1435, 455, 1394, 848, 1538, 4031, 96, 145, 11, 4, 204, 6156, 297, 5418, 29, 3044, 4, 1287, 8, 35, 4383, 1609, 121, 2, 1233, 980, 2, 2100, 2, 2, 2, 3681, 304, 4, 1287, 145, 8, 41, 1472, 50, 2, 2, 2, 2, 4364, 34, 2782, 2, 145, 295, 174, 772, 6, 2, 18, 274, 961, 90, 145, 8, 4041, 113, 155, 92, 140, 17, 2, 69, 3205, 2, 505, 46, 24, 8, 30, 4, 132, 7, 41, 1306, 103, 32, 38, 59, 9560, 90, 11, 6, 297, 7389, 33, 63, 2, 9, 329, 74, 654, 137, 2, 304, 6, 4548, 2, 2949, 2, 41, 772, 15, 274, 961, 41, 145, 8, 113, 11, 4, 2995, 7, 6, 668, 4217, 1810, 17, 6, 3452, 1082, 181, 8, 30, 1571, 11, 3161, 2350, 28, 8, 157, 295, 8, 79, 8, 6, 6068, 11, 162, 6869, 121, 2, 1249, 648, 69, 77, 3554, 19, 4, 2, 887, 8, 4416, 68, 4123, 145, 83, 406, 2350, 4, 2350, 7, 2, 2, 3509, 1851, 27, 980, 2, 2, 2, 37, 26, 199, 23, 4, 521, 39, 3408, 1697, 2297, 7, 568, 3864, 2, 308, 3659, 80, 81, 1780, 10, 10, 526, 34, 2, 2, 13, 119, 3452, 7, 2, 4, 229, 34, 1561, 2, 9, 87, 253, 55, 702, 728, 545, 441, 2072, 958, 7, 85, 189, 22, 19, 52, 5499, 39, 4, 636, 720, 121, 75, 67, 1655, 2, 9792, 2377, 39, 4, 2553, 4, 4971, 108, 2281, 2, 6997, 4626, 2, 39, 4, 6, 1726, 23, 4903, 890, 201, 488, 4664, 2377, 39, 4, 2195, 3135, 8, 4, 2974, 343, 39, 3452, 7, 5279, 2, 54, 12, 2360, 2, 4, 172, 136, 3452, 7, 2, 115, 304, 410, 615, 63, 9, 43, 17, 73, 50, 26, 775, 7, 31, 2433, 532, 2, 1994, 15, 2039, 4142, 93, 9032, 6, 171, 153, 908, 12, 152, 306, 1595, 8, 9155, 253, 33, 410, 4, 189, 512, 11, 831, 13, 119, 4, 136, 54, 3509, 2, 26, 260, 6, 2711, 2, 731, 2599, 15, 2, 5224, 29, 166, 163, 2, 795, 7320, 469, 198, 24, 8, 135, 15, 50, 218, 6, 1543, 52, 22, 11, 50, 17, 73, 88, 50, 91, 434, 9, 167, 2, 1030, 8, 987, 52, 841, 6, 147, 281, 7, 253, 199, 406, 3161, 732, 7, 105, 26, 1451, 4091, 17, 257, 2162, 2712, 68, 205, 732, 7, 4816, 712, 15, 4, 4951, 
7, 5512, 15, 36, 26, 1200, 496, 62, 540, 1203, 2536, 3452, 7, 2, 9, 87, 18, 4, 91, 173, 47, 15, 194, 352, 6713, 44, 12, 33, 44, 2476, 1782, 1782, 13, 144, 440, 38, 4, 64, 155, 15, 13, 80, 135, 9, 15, 49, 7, 4, 5076, 302, 34, 1842, 26, 6, 117, 3463, 2631, 13, 191, 377, 101, 1683, 139, 11, 3452, 7, 2, 345, 2670, 4, 22, 152, 9185, 4, 541, 599, 19, 6, 646, 2, 3681, 5573, 2, 83, 4472, 393, 11, 3532, 6, 2, 5003, 3490, 84, 2, 23, 2, 7, 3062, 294, 112, 2, 34, 6, 666, 2832, 6, 3314, 125, 5484, 2, 998, 2, 2, 4, 116, 9, 184, 52, 2, 17, 2, 9, 55, 163, 17, 29, 2, 4, 31, 2433, 46, 13, 82, 40, 4, 139, 19, 2, 33, 4, 454, 169, 41, 55, 1279, 54, 442, 1658, 32, 15, 7717, 5745, 13, 191, 30, 4, 64, 31, 1348, 13, 1276, 104, 3452, 7, 2, 9, 6, 777, 22, 964, 722, 39, 380, 8, 1363, 87, 1285, 189, 11, 3215, 4160, 33, 64, 7304, 234, 196, 12, 115, 461, 357, 42, 753, 6, 965, 1640, 7, 1923, 106, 12, 17, 515, 17, 25, 70]),\n",
       "       list([1, 1868, 256, 34, 31, 7, 4, 91, 2305, 1507, 7, 4, 236, 2068, 7, 14, 1117, 5, 82, 31, 7, 4, 91, 1020, 1507, 5051, 4686, 46, 7, 2415, 59, 9, 389, 9, 175, 173, 15, 59, 299, 4, 2, 7466, 9, 4, 3114, 5, 1805, 7, 4, 298, 438, 10, 10, 2, 3365, 9, 2, 5, 41, 658, 742, 217, 73, 1391, 34, 530, 284, 5, 82, 735, 2286, 1024, 1487, 3740, 2828, 7, 4, 5072, 255, 47, 6, 254, 58, 19, 4, 2, 3365, 7, 27, 31, 283, 155, 5, 4846, 27, 5569, 339, 4, 338, 577, 3996, 2, 2, 1516, 9477, 47, 96, 99, 76, 873, 7, 41, 57, 2010, 4, 65, 304, 6, 55, 821, 650, 23, 4, 4696, 7, 6, 4069, 11, 14, 20, 4, 64, 577, 47, 8, 276, 41, 113, 23, 1070, 8, 459, 18, 4, 738, 7, 409, 50, 9, 210, 31, 11, 175, 223, 37, 1590, 15, 243, 7, 4756, 3996, 9, 1612, 4, 454, 7, 4, 20, 21, 17, 58, 4097, 59, 630, 56, 1897, 41, 2, 113, 58, 8774, 8, 41, 223, 59, 60, 1643, 41, 1633, 89, 81, 25, 81, 27, 175, 251, 11, 5, 46, 5, 1337, 8132, 12, 15, 9, 51, 372, 81, 6, 176, 7, 51, 13, 683, 2504, 157, 5359, 75, 2170, 75, 4290, 75, 2, 75, 3218, 75, 8265, 75, 26, 4, 118, 369, 75, 26, 4, 4727, 2728, 49, 7, 178, 40, 199, 372, 11, 14, 20, 28, 4, 404, 4421, 26, 4, 1987, 2, 18, 4, 436, 223, 5, 82, 81, 32, 15, 2504, 157, 15, 9, 1868, 3996, 5, 111, 372, 11, 263, 926, 111, 7, 178, 28, 460, 825, 143, 15, 868, 7, 113, 54, 263, 846, 559, 5, 1131, 13, 28, 77, 50, 36, 43, 435, 99, 185, 13, 28, 348, 61, 846, 61, 1216, 21, 13, 115, 2717, 98, 17, 73, 17, 54, 13, 69, 8, 297, 68, 555, 5, 69, 8, 1135, 11, 68, 3730, 14, 20, 6048, 4, 635, 7, 113, 382, 12, 9, 619, 21, 15, 9, 89, 113, 9, 33, 211, 742, 6, 2489, 33, 2, 9, 2732, 415, 37, 739, 8, 104, 15, 27, 157, 9, 53, 674, 74, 1462, 334, 5, 47, 6, 55, 1300, 5385, 5695, 1841, 4, 372, 11, 27, 113, 29, 9, 24, 565, 195, 8, 5587, 48, 25, 181, 8, 67, 52, 116, 5, 4, 635, 7, 113, 81, 24, 717, 14, 20, 514, 139, 4, 3756, 582, 8, 1868, 2, 5, 32, 4, 231, 7, 6, 2702, 46, 7, 1912, 2714, 15, 13, 38, 5846, 75, 26, 32, 1912, 2, 514, 4414, 742, 12, 9, 64, 34, 170, 2, 15, 25, 923, 15, 25, 26, 66, 170, 4451, 742, 25, 
28, 6, 2, 4421, 21, 121, 9, 129, 483, 10, 10]),\n",
       "       ...,\n",
       "       list([1, 14, 390, 7, 2, 1194, 285, 4, 123, 9, 44, 8, 130, 45, 840, 811, 5, 32, 609, 9, 2244, 1888, 11, 14, 390, 4, 2, 663, 721, 35, 1356, 773, 884, 2, 8, 4, 2, 4, 2910, 90, 39, 4, 6953, 5059, 54, 3034, 29, 2, 11, 17, 6, 2, 5, 95, 83, 27, 2734, 2391, 29, 2, 3913, 6, 1513, 63, 484, 6635, 41, 46, 5, 2201, 1098, 41, 95, 2, 2, 3913, 51, 9, 317, 7, 4, 1513, 2, 266, 39, 4, 8543, 5, 560, 4, 2, 3341, 159, 385, 516, 4, 1042, 21, 112, 4, 671, 7, 31, 12, 43, 6367, 90, 4892, 266, 8, 2, 4, 85, 2481, 5, 494, 8, 169, 5, 2330, 90, 18, 147, 2, 2086, 9, 11, 4, 5593, 269, 8, 169, 3636, 54, 5, 2, 140, 46, 83, 4, 890, 8, 169, 4, 2734, 4, 2734, 659, 98, 103, 68, 985, 4, 4701, 923, 15, 6, 370, 1059, 285, 54, 36, 79, 145, 8, 2, 269, 8, 985, 4, 1776, 5, 103, 36, 2, 6, 6, 1718, 825, 2, 3234, 2, 1077, 41, 2, 8, 847, 84, 46, 7, 4, 96, 38, 59, 70, 79, 8, 4, 1550, 21, 36, 79, 68, 8, 522, 5, 2, 1442, 5, 43, 54, 9, 44, 8, 79, 324, 58, 9, 2, 8, 121, 36, 721, 884, 2, 8, 4, 2, 3682, 11, 5, 2, 5, 2, 21, 2, 9, 131, 11, 4, 5593, 38, 1098, 4, 1042, 5, 2, 46, 7, 4, 2, 54, 4892, 417, 266, 29, 191, 2, 9, 351, 5, 38, 9, 4, 671, 7, 289, 18, 150, 1276, 14, 390, 16, 619, 16, 4, 7, 32, 7, 98, 13, 62, 119, 8, 28, 41, 671, 7, 2, 13, 66, 92, 104, 2, 144, 7, 435, 8, 4, 5593, 88, 48, 59, 161, 586, 28, 556, 21, 2, 961, 4, 671, 7, 289, 295, 174, 5, 146, 654, 19, 4, 3769, 3724]),\n",
       "       list([1, 13, 435, 83, 14, 22, 1017, 1383, 18, 6, 2928, 1278, 11, 405, 5228, 7, 4039, 2228, 21, 51, 13, 188, 16, 53, 7, 6, 1162, 3905, 1010, 19, 230, 99, 76, 662, 5, 24, 195, 206, 45, 788, 15, 14, 22, 16, 93, 23, 6, 352, 4, 1979, 26, 6982, 5, 862, 324, 137, 4, 116, 889, 6, 176, 8, 30, 4630, 82, 4, 114, 2679, 23, 6, 3993, 7, 7202, 6, 336, 5, 107, 3197, 15, 2114, 6, 3817, 7, 1818, 103, 880, 49, 2, 36, 216, 638, 6, 2816, 9135, 34, 6, 185, 250, 5, 41, 8505, 5, 32, 14, 9, 579, 11, 2183, 34, 4, 185, 250, 3880, 2, 11, 35, 5356, 45, 788, 15, 907, 2393, 5, 1024, 2, 197, 36, 71, 231, 142, 66, 1621, 21, 466, 94, 118, 2048, 1226, 7, 609, 2527, 9, 43, 99, 357, 8, 1465, 4, 529, 4, 22, 2, 23, 18, 44, 2, 234, 5, 91, 7, 12, 3202, 7, 357, 105, 2, 125, 357, 5, 196, 2, 414, 4, 64, 52, 155, 13, 28, 8, 135, 44, 4, 22, 9, 19, 6093, 8, 4, 228, 63, 9, 52, 11, 1370, 4, 277, 9, 4, 64, 85, 52, 155, 44, 4, 20, 5, 198, 64, 88, 45, 4, 236, 155, 15, 571, 13, 586, 386, 259, 8780, 5430, 14, 180, 50, 16, 76, 128, 1157, 93, 11, 4, 4039]),\n",
       "       list([1, 1252, 54, 13, 435, 8, 67, 14, 20, 33, 4, 5165, 750, 11, 6637, 13, 122, 24, 535, 76, 13, 435, 8, 14, 20, 64, 88, 13, 2626, 1400, 45, 6, 5524, 20, 4092, 30, 52, 18, 6, 462, 95, 13, 1829, 180, 5, 296, 12, 5, 219, 138, 36, 2471, 2, 2, 3561, 8, 297, 2, 2, 29, 9, 242, 31, 7, 4, 2, 493, 23, 4, 194, 268, 76, 433, 11, 61, 652, 74, 2281, 42, 1655, 5, 47, 31, 194, 3079, 8, 85, 102, 15, 2, 72, 8, 6, 189, 20, 12, 287, 2, 2, 17, 294, 37, 9, 406, 29, 47, 6, 483, 57, 551, 89, 2509, 5, 948, 12, 9, 29, 764, 1460, 142, 15, 1655, 115, 127, 42, 739, 8, 123, 29, 764, 8534, 5, 1742, 151, 174, 199, 7, 98, 2140, 63, 25, 80, 1495, 48, 25, 67, 4, 20, 32, 11, 32, 6, 275, 585, 11, 61, 652, 74, 111, 7416, 5, 12, 770, 72, 11, 6, 171, 771, 17, 11, 37, 1452, 11, 4, 130])], dtype=object)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_test"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Sequence Preprocessing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Returns the largest wordid as the vocab_size\n",
    "def get_vocabulary_size(X):\n",
    "    # Vocabulary size = largest word id observed in X plus one, because ids\n",
    "    # are 0-based (id 0 is used as the padding symbol below).\n",
    "    return max([max(x) for x in X]) + 1  # plus the 0th word"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Removes words not in vocabulary\n",
    "def fit_in_vocabulary(X, voc_size):\n",
    "    # Drop any word id >= voc_size so that lookups never index past the\n",
    "    # embedding matrix built from the training vocabulary.\n",
    "    return [[w for w in x if w < voc_size] for x in X]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Zero padding in sequences\n",
    "def zero_pad(X, seq_len):\n",
    "    # Keep at most seq_len - 1 tokens and pad with zeros up to seq_len, so\n",
    "    # every padded row is exactly seq_len long and contains at least one 0.\n",
    "    # The guaranteed 0 is what makes the later list(x).index(0) length\n",
    "    # computations safe.\n",
    "    return np.array([x[:seq_len - 1] + [0] * max(seq_len - len(x), 1) for x in X])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "vocabulary_size = get_vocabulary_size(X_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10000\n"
     ]
    }
   ],
   "source": [
    "print(vocabulary_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_test = fit_in_vocabulary(X_test, vocabulary_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10000\n"
     ]
    }
   ],
   "source": [
    "print(get_vocabulary_size(X_test))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([1, 0, 0, ..., 0, 1, 0])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y_train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_train = zero_pad(X_train, SEQUENCE_LENGTH)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[   1,   14,   22, ...,    0,    0,    0],\n",
       "       [   1,  194, 1153, ...,    0,    0,    0],\n",
       "       [   1,   14,   47, ...,    0,    0,    0],\n",
       "       ..., \n",
       "       [   1,   11,    6, ...,    0,    0,    0],\n",
       "       [   1, 1446, 7079, ...,    0,    0,    0],\n",
       "       [   1,   17,    6, ...,    0,    0,    0]])"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_test = zero_pad(X_test, SEQUENCE_LENGTH)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Placeholders for Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_ph = tf.placeholder(tf.int32, [None, SEQUENCE_LENGTH])  # padded word-id sequences, one row per review\n",
    "target_ph = tf.placeholder(tf.float32, [None])  # binary sentiment labels\n",
    "seq_len_ph = tf.placeholder(tf.int32, [None])  # per-row sequence lengths fed to the dynamic RNN\n",
    "keep_prob_ph = tf.placeholder(tf.float32)  # dropout keep probability (KEEP_PROB at train time, 1.0 at eval)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Embedding Layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "embeddings_var = tf.Variable(tf.random_uniform([vocabulary_size, EMBEDDING_DIM], -1.0, 1.0), trainable=True)\n",
    "batch_embedded = tf.nn.embedding_lookup(embeddings_var, batch_ph)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor 'embedding_lookup:0' shape=(?, 250, 100) dtype=float32>"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "batch_embedded"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### BI-RNN Layers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "rnn_outputs, _ = bi_rnn(GRUCell(HIDDEN_SIZE), GRUCell(HIDDEN_SIZE),\n",
    "                        inputs=batch_embedded, sequence_length=seq_len_ph, dtype=tf.float32)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Attention Layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "def attention(inputs, attention_size, time_major=False, return_alphas=False):\n",
    "    \"\"\"Additive attention over RNN outputs.\n",
    "\n",
    "    inputs: (B,T,D) tensor of RNN outputs, or a (fw, bw) tuple from a Bi-RNN\n",
    "        (the two parts are concatenated on the last axis).\n",
    "    attention_size: dimensionality A of the attention projection.\n",
    "    time_major: if True, inputs arrive as (T,B,D) and are transposed first.\n",
    "    return_alphas: if True, also return the (B,T) attention weights so they\n",
    "        can be visualized.\n",
    "    Returns the attention-weighted sum of shape (B,D), optionally with alphas.\n",
    "    \"\"\"\n",
    "    if isinstance(inputs, tuple):\n",
    "        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.\n",
    "        inputs = tf.concat(inputs, 2)\n",
    "\n",
    "    if time_major:\n",
    "        # (T,B,D) => (B,T,D)\n",
    "        # Fix: `tf.array_ops` is not a public attribute of the tf module, so the\n",
    "        # original call raised AttributeError whenever time_major=True; use the\n",
    "        # public tf.transpose instead.\n",
    "        inputs = tf.transpose(inputs, [1, 0, 2])\n",
    "\n",
    "    hidden_size = inputs.shape[2].value  # D value - hidden size of the RNN layer\n",
    "\n",
    "    # Trainable parameters\n",
    "    W_omega = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))\n",
    "    b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n",
    "    u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n",
    "\n",
    "    # Applying fully connected layer with non-linear activation to each of the B*T timestamps;\n",
    "    #  the shape of `v` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size\n",
    "    v = tf.tanh(tf.tensordot(inputs, W_omega, axes=1) + b_omega)\n",
    "    # For each of the timestamps its vector of size A from `v` is reduced with `u` vector\n",
    "    vu = tf.tensordot(v, u_omega, axes=1)   # (B,T) shape\n",
    "    alphas = tf.nn.softmax(vu)              # (B,T) shape also\n",
    "\n",
    "    # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape\n",
    "    output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)\n",
    "\n",
    "    if not return_alphas:\n",
    "        return output\n",
    "    else:\n",
    "        return output, alphas"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "### attention layer\n",
    "attention_output, alphas = attention(rnn_outputs, ATTENTION_SIZE, return_alphas=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Add DROPOUT (avoids overfitting)\n",
    "drop = tf.nn.dropout(attention_output, keep_prob_ph)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Fully Connected Layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "W = tf.Variable(tf.truncated_normal([HIDDEN_SIZE * 2, 1], stddev=0.1))  # Hidden size is multiplied by 2 for Bi-RNN\n",
    "b = tf.Variable(tf.constant(0., shape=[1]))\n",
    "y_hat = tf.nn.xw_plus_b(drop, W, b)  # raw logit per example, shape (B, 1)\n",
    "y_hat = tf.squeeze(y_hat)  # drop the trailing unit dimension -> shape (B,)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Cross-entropy loss and optimizer initialization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cross-entropy loss and optimizer initialization\n",
    "loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_hat, labels=target_ph))\n",
    "optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Accuracy Metric"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Round the sigmoid output (i.e. threshold at 0.5) and compare with the labels.\n",
    "accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(tf.sigmoid(y_hat)), target_ph), tf.float32))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Data Preparation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Determine batches\n",
    "def batch_generator(X, y, batch_size):\n",
    "    \"\"\"Primitive batch generator \n",
    "    \"\"\"\n",
    "    size = X.shape[0]\n",
    "    # Copy so that shuffling never mutates the caller's arrays.\n",
    "    X_copy = X.copy()\n",
    "    y_copy = y.copy()\n",
    "    indices = np.arange(size)\n",
    "    np.random.shuffle(indices)\n",
    "    X_copy = X_copy[indices]\n",
    "    y_copy = y_copy[indices]\n",
    "    i = 0\n",
    "    # Infinite generator: when fewer than batch_size rows remain, the tail\n",
    "    # (size % batch_size rows) is dropped and the data is reshuffled for a\n",
    "    # fresh pass.\n",
    "    while True:\n",
    "        if i + batch_size <= size:\n",
    "            yield X_copy[i:i + batch_size], y_copy[i:i + batch_size]\n",
    "            i += batch_size\n",
    "        else:\n",
    "            i = 0\n",
    "            indices = np.arange(size)\n",
    "            np.random.shuffle(indices)\n",
    "            X_copy = X_copy[indices]\n",
    "            y_copy = y_copy[indices]\n",
    "            continue"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Actual lengths of sequences\n",
    "# zero_pad guarantees at least one 0 per row, so index(0) never raises.\n",
    "# The +1 includes one padding token in the reported length.\n",
    "seq_len_test = np.array([list(x).index(0) + 1 for x in X_test])\n",
    "seq_len_train = np.array([list(x).index(0) + 1 for x in X_train])\n",
    "\n",
    "# Batch generators\n",
    "train_batch_generator = batch_generator(X_train, y_train, BATCH_SIZE)\n",
    "test_batch_generator = batch_generator(X_test, y_test, BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
    "saver = tf.train.Saver()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Start learning...\n",
      "epoch: 0\tloss: 0.426, val_loss: 0.437, acc: 0.705, val_acc: 0.795\n",
      "epoch: 1\tloss: 0.320, val_loss: 0.359, acc: 0.841, val_acc: 0.847\n",
      "epoch: 2\tloss: 0.273, val_loss: 0.318, acc: 0.888, val_acc: 0.863\n"
     ]
    }
   ],
   "source": [
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    print(\"Start learning...\")\n",
    "    for epoch in range(NUM_EPOCHS):\n",
    "        loss_train = 0\n",
    "        loss_test = 0\n",
    "        accuracy_train = 0\n",
    "        accuracy_test = 0\n",
    "\n",
    "        print(\"epoch: {}\\t\".format(epoch), end=\"\")\n",
    "\n",
    "        # Training\n",
    "        num_batches = X_train.shape[0] // BATCH_SIZE\n",
    "        for b in range(num_batches):\n",
    "            x_batch, y_batch = next(train_batch_generator)\n",
    "            seq_len = np.array([list(x).index(0) + 1 for x in x_batch])  # actual lengths of sequences\n",
    "            loss_tr, acc, _ = sess.run([loss, accuracy, optimizer],\n",
    "                                       feed_dict={batch_ph: x_batch,\n",
    "                                                  target_ph: y_batch,\n",
    "                                                  seq_len_ph: seq_len,\n",
    "                                                  keep_prob_ph: KEEP_PROB})\n",
    "            accuracy_train += acc\n",
    "            # Exponential moving average of the training loss (smoothing factor DELTA).\n",
    "            loss_train = loss_tr * DELTA + loss_train * (1 - DELTA)\n",
    "        accuracy_train /= num_batches\n",
    "\n",
    "        # Testing\n",
    "        num_batches = X_test.shape[0] // BATCH_SIZE\n",
    "        for b in range(num_batches):\n",
    "            x_batch, y_batch = next(test_batch_generator)\n",
    "            seq_len = np.array([list(x).index(0) + 1 for x in x_batch])  # actual lengths of sequences\n",
    "            loss_test_batch, acc = sess.run([loss, accuracy],\n",
    "                                            feed_dict={batch_ph: x_batch,\n",
    "                                                       target_ph: y_batch,\n",
    "                                                       seq_len_ph: seq_len,\n",
    "                                                       keep_prob_ph: 1.0})  # dropout disabled at evaluation time\n",
    "            accuracy_test += acc\n",
    "            loss_test += loss_test_batch\n",
    "        accuracy_test /= num_batches\n",
    "        loss_test /= num_batches\n",
    "\n",
    "        print(\"loss: {:.3f}, val_loss: {:.3f}, acc: {:.3f}, val_acc: {:.3f}\".format(\n",
    "            loss_train, loss_test, accuracy_train, accuracy_test\n",
    "        ))\n",
    "    saver.save(sess, \"model/test-rnn\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "97"
      ]
     },
     "execution_count": 96,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_test.shape[0] // BATCH_SIZE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "24832"
      ]
     },
     "execution_count": 97,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "num_batches * 256"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Visualize Attention output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "saver = tf.train.import_meta_graph(\"model/test-rnn.meta\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from model/test-rnn\n"
     ]
    }
   ],
   "source": [
    "### calculate alpha coefficients for the first test example\n",
    "with tf.Session() as sess:\n",
    "    saver.restore(sess, \"model/test-rnn\")\n",
    "    x_batch_test, y_batch_test = X_test[:1], y_test[:1] # or sum((x_batch_test != 0)[0]) + 1\n",
    "    # NOTE(review): this shadows the seq_len_test array computed in an earlier\n",
    "    # cell -- confirm that is intended.\n",
    "    seq_len_test = np.array([list(x).index(0) + 1 for x in x_batch_test])\n",
    "    alphas_test = sess.run([alphas], feed_dict={batch_ph: x_batch_test, target_ph: y_batch_test,\n",
    "                                                seq_len_ph: seq_len_test, keep_prob_ph: 1.0})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [],
   "source": [
    "alphas_values = alphas_test[0][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([1])"
      ]
     },
     "execution_count": 92,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y_batch_test"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Mapping from word to index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build correct mapping from word to index and inverse\n",
    "# imdb.load_data shifted all word ids by INDEX_FROM, so shift the raw Keras\n",
    "# index the same way and reserve ids 0-2 for the special markers.\n",
    "original_word_index = imdb.get_word_index()\n",
    "word_index = {word:index + INDEX_FROM for word, index in original_word_index.items()}\n",
    "word_index[\":PAD:\"] = 0\n",
    "word_index[\":START:\"] = 1\n",
    "word_index[\":UNK:\"] = 2\n",
    "index_word = {value:key for key,value in word_index.items()}\n",
    "# Represent the sample by words rather than indices\n",
    "words = list(map(index_word.get, x_batch_test[0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([   1,   89,   27,    2, 9289,   17,  199,  132,    5, 4191,   16,\n",
       "       1339,   24,    8,  760,    4, 1385,    7,    4,   22, 1368,    2,\n",
       "         16, 5149,   17, 1635,    7,    2, 1368,    9,    4, 1357,    8,\n",
       "         14,  991,   13,  877,   38,   19,   27,  239,   13,  100,  235,\n",
       "         61,  483,    2,    4,    7,    4,   20,  131, 1102,   72,    8,\n",
       "         14,  251,   27, 1146,    7,  308,   16,  735, 1517,   17,   29,\n",
       "        144,   28,   77, 2305,   18,   12,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0])"
      ]
     },
     "execution_count": 62,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Index-encoded review: real tokens first, then 0 (:PAD:) out to SEQUENCE_LENGTH\n",
    "x_batch_test[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(9286, 9289)"
      ]
     },
     "execution_count": 69,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: word_index shifts every original index up by INDEX_FROM\n",
    "original_word_index[\"evolved\"],word_index[\"evolved\"] # difference of 3 (INDEX_FROM) because of the special tokens"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "88584"
      ]
     },
     "execution_count": 91,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Size of the full word index returned by imdb.get_word_index()\n",
    "# (much larger than the NUM_WORDS = 10000 kept when loading the data)\n",
    "len(original_word_index)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[':START:',\n",
       " 'how',\n",
       " 'his',\n",
       " ':UNK:',\n",
       " 'evolved',\n",
       " 'as',\n",
       " 'both',\n",
       " 'man',\n",
       " 'and',\n",
       " 'ape']"
      ]
     },
     "execution_count": 70,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# First ten decoded tokens of the review\n",
    "words[:10]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Save Visualization as HTML"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save visualization as HTML: each word highlighted with a yellow background\n",
    "# whose opacity is its max-normalized attention weight.\n",
    "import html  # escape tokens so HTML special characters can't break the markup\n",
    "with open(\"visualization.html\", \"w\") as html_file:\n",
    "    for word, alpha in zip(words, alphas_values / alphas_values.max()):\n",
    "        if word == \":START:\":  # skip the start-of-sequence marker\n",
    "            continue\n",
    "        elif word == \":PAD:\":  # first pad token marks the end of the review\n",
    "            break\n",
    "        html_file.write('<font style=\"background: rgba(255, 255, 0, %f)\">%s</font>\\n' % (alpha, html.escape(word)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<font style=\"background: rgba(255, 255, 0, 0.137532)\">how</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.244724)\">his</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.209821)\">:UNK:</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.309763)\">evolved</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.257780)\">as</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.038192)\">both</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.022234)\">man</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.163844)\">and</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.054250)\">ape</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.098180)\">was</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.036675)\">outstanding</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.010458)\">not</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.018669)\">to</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.187986)\">mention</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.204342)\">the</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.279103)\">scenery</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.248269)\">of</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.214707)\">the</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.487835)\">film</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.126003)\">christopher</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.084002)\">:UNK:</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.349048)\">was</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.124762)\">astonishing</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.094618)\">as</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.107658)\">lord</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.133789)\">of</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.023283)\">:UNK:</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.017145)\">christopher</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.125181)\">is</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.339479)\">the</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.232825)\">soul</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.092834)\">to</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.592080)\">this</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.205868)\">masterpiece</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.159037)\">i</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.053323)\">became</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.139376)\">so</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.075827)\">with</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.042023)\">his</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.012152)\">performance</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.022969)\">i</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.088159)\">could</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.075313)\">feel</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.070331)\">my</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.261460)\">heart</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.173020)\">:UNK:</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.523096)\">the</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.963473)\">of</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.716253)\">the</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.857790)\">movie</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.230923)\">still</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.219952)\">moves</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.097631)\">me</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.035794)\">to</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.283783)\">this</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.140937)\">day</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.334726)\">his</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.417742)\">portrayal</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 1.000000)\">of</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.975759)\">john</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.965445)\">was</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.448975)\">oscar</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.084113)\">worthy</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.068766)\">as</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.019348)\">he</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.035080)\">should</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.018581)\">have</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.011531)\">been</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.013981)\">nominated</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.013575)\">for</font>\n",
       "<font style=\"background: rgba(255, 255, 0, 0.037687)\">it</font>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Render the saved attention heat-map inline in the notebook.\n",
    "# IPython.display is the public API location (IPython.core.display is internal).\n",
    "from IPython.display import display, HTML\n",
    "import codecs\n",
    "# Context manager ensures the file handle is closed after reading\n",
    "with codecs.open(\"visualization.html\", 'r', encoding='utf-8') as f:\n",
    "    display(HTML(f.read()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
