{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "D7tqLMoKF6uq"
   },
   "source": [
    "Deep Learning\n",
    "=============\n",
    "\n",
    "Assignment 6\n",
    "------------\n",
    "\n",
     "After training a skip-gram model in `5_word2vec.ipynb`, the goal of this notebook is to train an LSTM character model over [Text8](http://mattmahoney.net/dc/textdata) data."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "MvEblsgEXxrd"
   },
   "outputs": [],
   "source": [
    "# These are all the modules we'll be using later. Make sure you can import them\n",
    "# before proceeding further.\n",
    "from __future__ import print_function\n",
    "import os\n",
    "import numpy as np\n",
    "import random\n",
    "import string\n",
    "import tensorflow as tf\n",
    "import zipfile\n",
    "from six.moves import range\n",
    "from six.moves.urllib.request import urlretrieve"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "output_extras": [
      {
       "item_id": 1
      }
     ]
    },
    "colab_type": "code",
    "executionInfo": {
     "elapsed": 5993,
     "status": "ok",
     "timestamp": 1445965582896,
     "user": {
      "color": "#1FA15D",
      "displayName": "Vincent Vanhoucke",
      "isAnonymous": false,
      "isMe": true,
      "permissionId": "05076109866853157986",
      "photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg",
      "sessionId": "6f6f07b359200c46",
      "userId": "102167687554210253930"
     },
     "user_tz": 420
    },
    "id": "RJ-o3UBUFtCw",
    "outputId": "d530534e-0791-4a94-ca6d-1c8f1b908a9e"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found and verified text8.zip\n"
     ]
    }
   ],
   "source": [
     "url = 'http://mattmahoney.net/dc/'\n",
     "\n",
     "def maybe_download(filename, expected_bytes):\n",
     "  \"\"\"Download a file if not present, and make sure it's the right size.\"\"\"\n",
     "  if not os.path.exists(filename):\n",
     "    # Not on disk yet: fetch from the mirror; urlretrieve returns the local path.\n",
     "    filename, _ = urlretrieve(url + filename, filename)\n",
     "  statinfo = os.stat(filename)\n",
     "  if statinfo.st_size == expected_bytes:\n",
     "    print('Found and verified %s' % filename)\n",
     "  else:\n",
     "    # A size mismatch usually means a truncated download or an HTML error page.\n",
     "    print(statinfo.st_size)\n",
     "    raise Exception(\n",
     "      'Failed to verify ' + filename + '. Can you get to it with a browser?')\n",
     "  return filename\n",
     "\n",
     "# text8.zip: ~31MB compressed corpus of cleaned Wikipedia text.\n",
     "filename = maybe_download('text8.zip', 31344016)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "output_extras": [
      {
       "item_id": 1
      }
     ]
    },
    "colab_type": "code",
    "executionInfo": {
     "elapsed": 5982,
     "status": "ok",
     "timestamp": 1445965582916,
     "user": {
      "color": "#1FA15D",
      "displayName": "Vincent Vanhoucke",
      "isAnonymous": false,
      "isMe": true,
      "permissionId": "05076109866853157986",
      "photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg",
      "sessionId": "6f6f07b359200c46",
      "userId": "102167687554210253930"
     },
     "user_tz": 420
    },
    "id": "Mvf09fjugFU_",
    "outputId": "8f75db58-3862-404b-a0c3-799380597390"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Data size 100000000\n"
     ]
    }
   ],
   "source": [
     "def read_data(filename):\n",
     "  \"\"\"Extract the first file of a zip archive as a single string.\"\"\"\n",
     "  with zipfile.ZipFile(filename) as f:\n",
     "    name = f.namelist()[0]\n",
     "    # tf.compat.as_str decodes the raw bytes into a text string.\n",
     "    data = tf.compat.as_str(f.read(name))\n",
     "  return data\n",
     "  \n",
     "text = read_data(filename)\n",
     "print('Data size %d' % len(text))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "ga2CYACE-ghb"
   },
   "source": [
    "Create a small validation set."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "output_extras": [
      {
       "item_id": 1
      }
     ]
    },
    "colab_type": "code",
    "executionInfo": {
     "elapsed": 6184,
     "status": "ok",
     "timestamp": 1445965583138,
     "user": {
      "color": "#1FA15D",
      "displayName": "Vincent Vanhoucke",
      "isAnonymous": false,
      "isMe": true,
      "permissionId": "05076109866853157986",
      "photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg",
      "sessionId": "6f6f07b359200c46",
      "userId": "102167687554210253930"
     },
     "user_tz": 420
    },
    "id": "w-oBpfFG-j43",
    "outputId": "bdb96002-d021-4379-f6de-a977924f0d02"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "99999000 ons anarchists advocate social relations based upon voluntary as\n",
      "1000  anarchism originated as a term of abuse first used against earl\n"
     ]
    }
   ],
   "source": [
     "# Hold out the first valid_size characters for validation; train on the rest.\n",
     "valid_size = 1000\n",
     "valid_text = text[:valid_size]\n",
     "train_text = text[valid_size:]\n",
     "train_size = len(train_text)\n",
     "print(train_size, train_text[:64])\n",
     "print(valid_size, valid_text[:64])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "Zdw6i4F8glpp"
   },
   "source": [
    "Utility functions to map characters to vocabulary IDs and back."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "output_extras": [
      {
       "item_id": 1
      }
     ]
    },
    "colab_type": "code",
    "executionInfo": {
     "elapsed": 6276,
     "status": "ok",
     "timestamp": 1445965583249,
     "user": {
      "color": "#1FA15D",
      "displayName": "Vincent Vanhoucke",
      "isAnonymous": false,
      "isMe": true,
      "permissionId": "05076109866853157986",
      "photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg",
      "sessionId": "6f6f07b359200c46",
      "userId": "102167687554210253930"
     },
     "user_tz": 420
    },
    "id": "gAL1EECXeZsD",
    "outputId": "88fc9032-feb9-45ff-a9a0-a26759cc1f2e"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Unexpected character: ï\n",
      "1 26 0 0\n",
      "a z  \n"
     ]
    }
   ],
   "source": [
     "vocabulary_size = len(string.ascii_lowercase) + 1 # [a-z] + ' '\n",
     "first_letter = ord(string.ascii_lowercase[0])\n",
     "\n",
     "def char2id(char):\n",
     "  \"\"\"Map a character to its vocabulary id: ' ' -> 0, 'a'..'z' -> 1..26.\"\"\"\n",
     "  if char in string.ascii_lowercase:\n",
     "    return ord(char) - first_letter + 1\n",
     "  elif char == ' ':\n",
     "    return 0\n",
     "  else:\n",
     "    # Out-of-vocabulary characters are reported and mapped to the space id.\n",
     "    print('Unexpected character: %s' % char)\n",
     "    return 0\n",
     "  \n",
     "def id2char(dictid):\n",
     "  \"\"\"Inverse of char2id: 1..26 -> 'a'..'z', 0 -> ' '.\"\"\"\n",
     "  if dictid > 0:\n",
     "    return chr(dictid + first_letter - 1)\n",
     "  else:\n",
     "    return ' '\n",
     "\n",
     "print(char2id('a'), char2id('z'), char2id(' '), char2id('ï'))\n",
     "print(id2char(1), id2char(26), id2char(0))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "lFwoyygOmWsL"
   },
   "source": [
    "Function to generate a training batch for the LSTM model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "output_extras": [
      {
       "item_id": 1
      }
     ]
    },
    "colab_type": "code",
    "executionInfo": {
     "elapsed": 6473,
     "status": "ok",
     "timestamp": 1445965583467,
     "user": {
      "color": "#1FA15D",
      "displayName": "Vincent Vanhoucke",
      "isAnonymous": false,
      "isMe": true,
      "permissionId": "05076109866853157986",
      "photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg",
      "sessionId": "6f6f07b359200c46",
      "userId": "102167687554210253930"
     },
     "user_tz": 420
    },
    "id": "d9wMtjy5hCj9",
    "outputId": "3dd79c80-454a-4be0-8b71-4a4a357b3367"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['ons anarchi', 'when milita', 'lleria arch', ' abbeys and', 'married urr', 'hel and ric', 'y and litur', 'ay opened f', 'tion from t', 'migration t', 'new york ot', 'he boeing s', 'e listed wi', 'eber has pr', 'o be made t', 'yer who rec', 'ore signifi', 'a fierce cr', ' two six ei', 'aristotle s', 'ity can be ', ' and intrac', 'tion of the', 'dy to pass ', 'f certain d', 'at it will ', 'e convince ', 'ent told hi', 'ampaign and', 'rver side s', 'ious texts ', 'o capitaliz', 'a duplicate', 'gh ann es d', 'ine january', 'ross zero t', 'cal theorie', 'ast instanc', ' dimensiona', 'most holy m', 't s support', 'u is still ', 'e oscillati', 'o eight sub', 'of italy la', 's the tower', 'klahoma pre', 'erprise lin', 'ws becomes ', 'et in a naz', 'the fabian ', 'etchy to re', ' sharman ne', 'ised empero', 'ting in pol', 'd neo latin', 'th risky ri', 'encyclopedi', 'fense the a', 'duating fro', 'treet grid ', 'ations more', 'appeal of d', 'si have mad']\n",
      "['ists advoca', 'ary governm', 'hes nationa', 'd monasteri', 'raca prince', 'chard baer ', 'rgical lang', 'for passeng', 'the nationa', 'took place ', 'ther well k', 'seven six s', 'ith a gloss', 'robably bee', 'to recogniz', 'ceived the ', 'icant than ', 'ritic of th', 'ight in sig', 's uncaused ', ' lost as in', 'cellular ic', 'e size of t', ' him a stic', 'drugs confu', ' take to co', ' the priest', 'im to name ', 'd barred at', 'standard fo', ' such as es', 'ze on the g', 'e of the or', 'd hiver one', 'y eight mar', 'the lead ch', 'es classica', 'ce the non ', 'al analysis', 'mormons bel', 't or at lea', ' disagreed ', 'ing system ', 'btypes base', 'anguages th', 'r commissio', 'ess one nin', 'nux suse li', ' the first ', 'zi concentr', ' society ne', 'elatively s', 'etworks sha', 'or hirohito', 'litical ini', 'n most of t', 'iskerdoo ri', 'ic overview', 'air compone', 'om acnm acc', ' centerline', 'e than any ', 'devotional ', 'de such dev']\n",
      "[' a']\n",
      "['an']\n"
     ]
    }
   ],
   "source": [
    "batch_size=64\n",
    "num_unrollings=10\n",
    "\n",
    "class BatchGenerator(object):\n",
    "  def __init__(self, text, batch_size, num_unrollings):\n",
    "    self._text = text\n",
    "    self._text_size = len(text)\n",
    "    self._batch_size = batch_size\n",
    "    self._num_unrollings = num_unrollings\n",
    "    segment = self._text_size // batch_size\n",
    "    self._cursor = [ offset * segment for offset in range(batch_size)]\n",
    "    self._last_batch = self._next_batch()\n",
    "  \n",
    "  def _next_batch(self):\n",
    "    \"\"\"Generate a single batch from the current cursor position in the data.\"\"\"\n",
    "    batch = np.zeros(shape=(self._batch_size, vocabulary_size), dtype=np.float)\n",
    "    for b in range(self._batch_size):\n",
    "      batch[b, char2id(self._text[self._cursor[b]])] = 1.0\n",
    "      self._cursor[b] = (self._cursor[b] + 1) % self._text_size\n",
    "    return batch\n",
    "  \n",
    "  def next(self):\n",
    "    \"\"\"Generate the next array of batches from the data. The array consists of\n",
    "    the last batch of the previous array, followed by num_unrollings new ones.\n",
    "    \"\"\"\n",
    "    batches = [self._last_batch]\n",
    "    for step in range(self._num_unrollings):\n",
    "      batches.append(self._next_batch())\n",
    "    self._last_batch = batches[-1]\n",
    "    return batches\n",
    "\n",
    "def characters(probabilities):\n",
    "  \"\"\"Turn a 1-hot encoding or a probability distribution over the possible\n",
    "  characters back into its (most likely) character representation.\"\"\"\n",
    "  return [id2char(c) for c in np.argmax(probabilities, 1)]\n",
    "\n",
    "def batches2string(batches):\n",
    "  \"\"\"Convert a sequence of batches back into their (most likely) string\n",
    "  representation.\"\"\"\n",
    "  s = [''] * batches[0].shape[0]\n",
    "  for b in batches:\n",
    "    s = [''.join(x) for x in zip(s, characters(b))]\n",
    "  return s\n",
    "\n",
    "train_batches = BatchGenerator(train_text, batch_size, num_unrollings)\n",
    "valid_batches = BatchGenerator(valid_text, 1, 1)\n",
    "\n",
    "print(batches2string(train_batches.next()))\n",
    "print(batches2string(train_batches.next()))\n",
    "print(batches2string(valid_batches.next()))\n",
    "print(batches2string(valid_batches.next()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "I always find it useful to display the shape or the contents of the variables to better understand their structure:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(64, 27)\n",
      "1562484\n",
      "26\n",
      "[[ 0.  0.  0.  0.]\n",
      " [ 0.  0.  0.  0.]]\n"
     ]
    }
   ],
   "source": [
    "print(train_batches.next()[1].shape)\n",
    "print(len(train_text) // batch_size)\n",
    "print(len(string.ascii_lowercase))\n",
    "print(np.zeros(shape=(2, 4), dtype=np.float))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "KyVd8FxT5QBc"
   },
   "outputs": [],
   "source": [
    "def logprob(predictions, labels):\n",
    "  \"\"\"Log-probability of the true labels in a predicted batch.\"\"\"\n",
    "  predictions[predictions < 1e-10] = 1e-10\n",
    "  return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]\n",
    "\n",
    "def sample_distribution(distribution):\n",
    "  \"\"\"Sample one element from a distribution assumed to be an array of normalized\n",
    "  probabilities.\n",
    "  \"\"\"\n",
    "  r = random.uniform(0, 1)\n",
    "  s = 0\n",
    "  for i in range(len(distribution)):\n",
    "    s += distribution[i]\n",
    "    if s >= r:\n",
    "      return i\n",
    "  return len(distribution) - 1\n",
    "\n",
    "def sample(prediction):\n",
    "  \"\"\"Turn a (column) prediction into 1-hot encoded samples.\"\"\"\n",
    "  p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n",
    "  p[0, sample_distribution(prediction[0])] = 1.0\n",
    "  return p\n",
    "\n",
    "def random_distribution():\n",
    "  \"\"\"Generate a random column of probabilities.\"\"\"\n",
    "  b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n",
    "  return b/np.sum(b, 1)[:,None]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "K8f67YXaDr4C"
   },
   "source": [
    "Simple LSTM Model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "Q5rxZK6RDuGe"
   },
   "outputs": [],
   "source": [
     "num_nodes = 64\n",
     "\n",
     "graph = tf.Graph()\n",
     "with graph.as_default():\n",
     "  \n",
     "  # Parameters:\n",
     "  # Input gate: input, previous output, and bias.\n",
     "  ix = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n",
     "  im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
     "  ib = tf.Variable(tf.zeros([1, num_nodes]))\n",
     "  # Forget gate: input, previous output, and bias.\n",
     "  fx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n",
     "  fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
     "  fb = tf.Variable(tf.zeros([1, num_nodes]))\n",
     "  # Memory cell: input, state and bias.                             \n",
     "  cx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n",
     "  cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
     "  cb = tf.Variable(tf.zeros([1, num_nodes]))\n",
     "  # Output gate: input, previous output, and bias.\n",
     "  ox = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n",
     "  om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
     "  ob = tf.Variable(tf.zeros([1, num_nodes]))\n",
     "  # Variables saving state across unrollings.\n",
     "  # trainable=False: these carry state between steps, they are not learned.\n",
     "  saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n",
     "  saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n",
     "  # Classifier weights and biases.\n",
     "  w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))\n",
     "  b = tf.Variable(tf.zeros([vocabulary_size]))\n",
     "  \n",
     "  # Definition of the cell computation.\n",
     "  def lstm_cell(i, o, state):\n",
     "    \"\"\"Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf\n",
     "    Note that in this formulation, we omit the various connections between the\n",
     "    previous state and the gates.\"\"\"\n",
     "    input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)\n",
     "    forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)\n",
     "    update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb\n",
     "    # New cell state: forget part of the old state, add the gated update.\n",
     "    state = forget_gate * state + input_gate * tf.tanh(update)\n",
     "    output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)\n",
     "    return output_gate * tf.tanh(state), state\n",
     "\n",
     "  # Input data.\n",
     "  # num_unrollings + 1 placeholders: position t serves both as the input at\n",
     "  # step t and as the label for step t - 1.\n",
     "  train_data = list()\n",
     "  for _ in range(num_unrollings + 1):\n",
     "    train_data.append(\n",
     "      tf.placeholder(tf.float32, shape=[batch_size,vocabulary_size]))\n",
     "  train_inputs = train_data[:num_unrollings]\n",
     "  train_labels = train_data[1:]  # labels are inputs shifted by one time step.\n",
     "\n",
     "  # Unrolled LSTM loop.\n",
     "  outputs = list()\n",
     "  output = saved_output\n",
     "  state = saved_state\n",
     "  for i in train_inputs:\n",
     "    output, state = lstm_cell(i, output, state)\n",
     "    outputs.append(output)\n",
     "\n",
     "  # State saving across unrollings.\n",
     "  # The control dependency forces the final output/state to be written back\n",
     "  # before the loss is evaluated, so the next step resumes where this ended.\n",
     "  with tf.control_dependencies([saved_output.assign(output),\n",
     "                                saved_state.assign(state)]):\n",
     "    # Classifier.\n",
     "    # NOTE: tf.concat(0, ...) and softmax_cross_entropy_with_logits(logits,\n",
     "    # labels) use the pre-TF-1.0 positional argument order.\n",
     "    logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)\n",
     "    loss = tf.reduce_mean(\n",
     "      tf.nn.softmax_cross_entropy_with_logits(\n",
     "        logits, tf.concat(0, train_labels)))\n",
     "\n",
     "  # Optimizer.\n",
     "  # Learning rate decays by 10x every 5000 steps (staircase: 10.0 -> 1.0 ...).\n",
     "  global_step = tf.Variable(0)\n",
     "  learning_rate = tf.train.exponential_decay(\n",
     "    10.0, global_step, 5000, 0.1, staircase=True)\n",
     "  optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
     "  gradients, v = zip(*optimizer.compute_gradients(loss))\n",
     "  # Clip the global gradient norm at 1.25 before applying the update.\n",
     "  gradients, _ = tf.clip_by_global_norm(gradients, 1.25)\n",
     "  optimizer = optimizer.apply_gradients(\n",
     "    zip(gradients, v), global_step=global_step)\n",
     "\n",
     "  # Predictions.\n",
     "  train_prediction = tf.nn.softmax(logits)\n",
     "  \n",
     "  # Sampling and validation eval: batch 1, no unrolling.\n",
     "  sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])\n",
     "  saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))\n",
     "  saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))\n",
     "  # Op to zero the sampling state before generating a fresh sequence.\n",
     "  reset_sample_state = tf.group(\n",
     "    saved_sample_output.assign(tf.zeros([1, num_nodes])),\n",
     "    saved_sample_state.assign(tf.zeros([1, num_nodes])))\n",
     "  sample_output, sample_state = lstm_cell(\n",
     "    sample_input, saved_sample_output, saved_sample_state)\n",
     "  with tf.control_dependencies([saved_sample_output.assign(sample_output),\n",
     "                                saved_sample_state.assign(sample_state)]):\n",
     "    sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, w, b))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "output_extras": [
      {
       "item_id": 41
      },
      {
       "item_id": 80
      },
      {
       "item_id": 126
      },
      {
       "item_id": 144
      }
     ]
    },
    "colab_type": "code",
    "executionInfo": {
     "elapsed": 199909,
     "status": "ok",
     "timestamp": 1445965877333,
     "user": {
      "color": "#1FA15D",
      "displayName": "Vincent Vanhoucke",
      "isAnonymous": false,
      "isMe": true,
      "permissionId": "05076109866853157986",
      "photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg",
      "sessionId": "6f6f07b359200c46",
      "userId": "102167687554210253930"
     },
     "user_tz": 420
    },
    "id": "RD9zQCZTEaEm",
    "outputId": "5e868466-2532-4545-ce35-b403cf5d9de6"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Average loss at step 0: 3.299134 learning rate: 10.000000\n",
      "Minibatch perplexity: 27.09\n",
      "================================================================================\n",
      "mevttjgr t il  larmnwrdojvrpvgdiyybpgpxmcax rxnrjseysxaquxmuu eec vk  iuu  ayuck\n",
      "x ouvkoteofyoc mo uadcowx bwiodxfen ncnggkx  otarh zik crtomutrqkeqlsipsmnrr a u\n",
      "btdib tqkslaoeoguairoa xtgsiiagaoc  jkt xhunrmslsooge  n vuspliot aotmtmtiqbprrz\n",
      "cdyzl quipemeomapmnd hamozgtkoe mhnr lsbvdzlr  mx pbnl ak giyoapcm zfdfiyl nayz \n",
      "npnstkdidbmyurgthnybgm  eqcn ooo lmaqnfheclas qwevoxdiywzpinr odtcnrplow zidwjjc\n",
      "================================================================================\n",
      "Validation set perplexity: 20.16\n",
      "Average loss at step 100: 2.605366 learning rate: 10.000000\n",
      "Minibatch perplexity: 10.10\n",
      "Validation set perplexity: 10.63\n",
      "Average loss at step 200: 2.252164 learning rate: 10.000000\n",
      "Minibatch perplexity: 9.49\n",
      "Validation set perplexity: 9.15\n",
      "Average loss at step 300: 2.100869 learning rate: 10.000000\n",
      "Minibatch perplexity: 7.66\n",
      "Validation set perplexity: 8.00\n",
      "Average loss at step 400: 1.996958 learning rate: 10.000000\n",
      "Minibatch perplexity: 7.59\n",
      "Validation set perplexity: 7.57\n",
      "Average loss at step 500: 1.928817 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.17\n",
      "Validation set perplexity: 6.95\n",
      "Average loss at step 600: 1.905662 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.23\n",
      "Validation set perplexity: 6.56\n",
      "Average loss at step 700: 1.855662 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.31\n",
      "Validation set perplexity: 6.32\n",
      "Average loss at step 800: 1.816159 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.17\n",
      "Validation set perplexity: 6.35\n",
      "Average loss at step 900: 1.823400 learning rate: 10.000000\n",
      "Minibatch perplexity: 7.10\n",
      "Validation set perplexity: 6.06\n",
      "Average loss at step 1000: 1.817724 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.88\n",
      "================================================================================\n",
      "frics b a humplicage sperigeations hald an in to and hind of with aimbe and hay \n",
      "quary gramchil clittroming the stith in mustounk in a cecesey churised chir atpa\n",
      "dater tracity priffroaic vedich the d hitpress intovitha mand gow sevan cens dis\n",
      "ure virtrival in maker thei stend over he is apcort verdist heffetle pripprinad \n",
      "main wile stht apprising brefisifisheng phish the an x nulition dvengeen widenh \n",
      "================================================================================\n",
      "Validation set perplexity: 5.84\n",
      "Average loss at step 1100: 1.770611 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.70\n",
      "Validation set perplexity: 5.65\n",
      "Average loss at step 1200: 1.744925 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.08\n",
      "Validation set perplexity: 5.33\n",
      "Average loss at step 1300: 1.724434 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.78\n",
      "Validation set perplexity: 5.45\n",
      "Average loss at step 1400: 1.732781 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.69\n",
      "Validation set perplexity: 5.34\n",
      "Average loss at step 1500: 1.732636 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.08\n",
      "Validation set perplexity: 5.21\n",
      "Average loss at step 1600: 1.739676 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.13\n",
      "Validation set perplexity: 5.20\n",
      "Average loss at step 1700: 1.706354 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.52\n",
      "Validation set perplexity: 5.26\n",
      "Average loss at step 1800: 1.672674 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.14\n",
      "Validation set perplexity: 5.05\n",
      "Average loss at step 1900: 1.648071 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.97\n",
      "Validation set perplexity: 5.10\n",
      "Average loss at step 2000: 1.684935 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.79\n",
      "================================================================================\n",
      "boll stilled hersequita cauding heemater knowe mosters the piblinet and universe\n",
      "entins boble comerion commeteping filmy the user the tolaking chrest the reluted\n",
      "lic four pa dingul one nine eight four nine vived three nine ang the missed east\n",
      "s the relies ma houlds mengued wers about the the magity for the the milers for \n",
      "st of culine are american duking yernia chrest beam both chyss of prebe as bongo\n",
      "================================================================================\n",
      "Validation set perplexity: 4.94\n",
      "Average loss at step 2100: 1.680651 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.10\n",
      "Validation set perplexity: 4.78\n",
      "Average loss at step 2200: 1.673425 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.88\n",
      "Validation set perplexity: 4.92\n",
      "Average loss at step 2300: 1.638613 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.70\n",
      "Validation set perplexity: 4.71\n",
      "Average loss at step 2400: 1.657819 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.23\n",
      "Validation set perplexity: 4.73\n",
      "Average loss at step 2500: 1.680447 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.68\n",
      "Validation set perplexity: 4.63\n",
      "Average loss at step 2600: 1.652392 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.77\n",
      "Validation set perplexity: 4.69\n",
      "Average loss at step 2700: 1.655129 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.30\n",
      "Validation set perplexity: 4.66\n",
      "Average loss at step 2800: 1.646930 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.83\n",
      "Validation set perplexity: 4.56\n",
      "Average loss at step 2900: 1.646308 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.55\n",
      "Validation set perplexity: 4.58\n",
      "Average loss at step 3000: 1.646835 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.66\n",
      "================================================================================\n",
      "zality the sezare lation dialton on houl lan and in three wone insoration is thr\n",
      " ray in hovesen with a nations the one nithe five one nine two six nine three x \n",
      "ribsly maching and the loaked and as at berchulative he white cocrorage and ager\n",
      "ve pater later the norrangea which the ctrime divided deathnort two two eccetral\n",
      "x fotractions monto die and a as ral featurera nine in two nine two inctipulty h\n",
      "================================================================================\n",
      "Validation set perplexity: 4.66\n",
      "Average loss at step 3100: 1.626374 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.74\n",
      "Validation set perplexity: 4.63\n",
      "Average loss at step 3200: 1.638042 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.79\n",
      "Validation set perplexity: 4.60\n",
      "Average loss at step 3300: 1.635808 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.95\n",
      "Validation set perplexity: 4.50\n",
      "Average loss at step 3400: 1.662044 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.25\n",
      "Validation set perplexity: 4.60\n",
      "Average loss at step 3500: 1.652251 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.70\n",
      "Validation set perplexity: 4.64\n",
      "Average loss at step 3600: 1.665707 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.81\n",
      "Validation set perplexity: 4.58\n",
      "Average loss at step 3700: 1.637895 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.39\n",
      "Validation set perplexity: 4.63\n",
      "Average loss at step 3800: 1.640260 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.72\n",
      "Validation set perplexity: 4.50\n",
      "Average loss at step 3900: 1.634847 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.02\n",
      "Validation set perplexity: 4.56\n",
      "Average loss at step 4000: 1.647302 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.64\n",
      "================================================================================\n",
      "da in first suppretions a vectors with requated pyowing arreen the relations sos\n",
      " rages as ses of alroh his namb to bellition febri for foll was evropiliation wa\n",
      " zero zero zero zero zero two four zero feistrible the vouption zerezserfat rent\n",
      "jation antchary constant for whimis allo rightorage wolls or greatura guilt mupa\n",
      "dust in the a currerce untorchs staristly or the ontone juke evidenting orlan pa\n",
      "================================================================================\n",
      "Validation set perplexity: 4.50\n",
      "Average loss at step 4100: 1.629127 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.30\n",
      "Validation set perplexity: 4.68\n",
      "Average loss at step 4200: 1.633017 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.81\n",
      "Validation set perplexity: 4.44\n",
      "Average loss at step 4300: 1.615406 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.13\n",
      "Validation set perplexity: 4.55\n",
      "Average loss at step 4400: 1.604910 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.82\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation set perplexity: 4.35\n",
      "Average loss at step 4500: 1.613096 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.22\n",
      "Validation set perplexity: 4.55\n",
      "Average loss at step 4600: 1.610029 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.99\n",
      "Validation set perplexity: 4.47\n",
      "Average loss at step 4700: 1.622415 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.11\n",
      "Validation set perplexity: 4.43\n",
      "Average loss at step 4800: 1.629460 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.00\n",
      "Validation set perplexity: 4.39\n",
      "Average loss at step 4900: 1.633675 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.09\n",
      "Validation set perplexity: 4.46\n",
      "Average loss at step 5000: 1.604236 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.36\n",
      "================================================================================\n",
      "med to will suld the syptanks a death sitil posses of grows one six six zero and\n",
      "hes and millbura ex coted scenturus skulturery s dewailding yaiut prosed by the \n",
      "x of at rullic with that there and winster reades glanded by the gromai pyirate \n",
      "sport fourty to antide gaee frind resir nam qoepboth for nature which however to\n",
      "zed roilinizu from the macgion to indstandard socad origin blect ps s low elemes\n",
      "================================================================================\n",
      "Validation set perplexity: 4.54\n",
      "Average loss at step 5100: 1.604379 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.01\n",
      "Validation set perplexity: 4.34\n",
      "Average loss at step 5200: 1.589715 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.72\n",
      "Validation set perplexity: 4.23\n",
      "Average loss at step 5300: 1.577970 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.19\n",
      "Validation set perplexity: 4.22\n",
      "Average loss at step 5400: 1.578119 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.13\n",
      "Validation set perplexity: 4.20\n",
      "Average loss at step 5500: 1.558165 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.94\n",
      "Validation set perplexity: 4.18\n",
      "Average loss at step 5600: 1.578970 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.55\n",
      "Validation set perplexity: 4.19\n",
      "Average loss at step 5700: 1.565745 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.66\n",
      "Validation set perplexity: 4.18\n",
      "Average loss at step 5800: 1.581570 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.03\n",
      "Validation set perplexity: 4.19\n",
      "Average loss at step 5900: 1.572714 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.72\n",
      "Validation set perplexity: 4.20\n",
      "Average loss at step 6000: 1.544598 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.99\n",
      "================================================================================\n",
      "cainoptoge for ppplinaht purchibe prowects officely words of a economic proposed\n",
      "very most the life of ited his six grauguts them treage law speed and johny one \n",
      "x withi mavitos see since emperor used that send celen ed precerso brother danch\n",
      "ficually has as one eibht year pearian foree by founumed atherition of till cons\n",
      "when injuptically them nectif high one five wn nzedraw rock of compopic than inc\n",
      "================================================================================\n",
      "Validation set perplexity: 4.20\n",
      "Average loss at step 6100: 1.559681 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.16\n",
      "Validation set perplexity: 4.20\n",
      "Average loss at step 6200: 1.534572 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.25\n",
      "Validation set perplexity: 4.21\n",
      "Average loss at step 6300: 1.543815 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.78\n",
      "Validation set perplexity: 4.17\n",
      "Average loss at step 6400: 1.540488 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.08\n",
      "Validation set perplexity: 4.15\n",
      "Average loss at step 6500: 1.556721 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.61\n",
      "Validation set perplexity: 4.16\n",
      "Average loss at step 6600: 1.593627 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.57\n",
      "Validation set perplexity: 4.15\n",
      "Average loss at step 6700: 1.573838 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.43\n",
      "Validation set perplexity: 4.15\n",
      "Average loss at step 6800: 1.605053 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.93\n",
      "Validation set perplexity: 4.17\n",
      "Average loss at step 6900: 1.583883 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.60\n",
      "Validation set perplexity: 4.21\n",
      "Average loss at step 7000: 1.569005 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.78\n",
      "================================================================================\n",
      "ck such ic ways consusce the againster frendures his to the century and deng and\n",
      "wing one nine zero one seven two j dir when courly in panharso durini zero zero \n",
      "cottury on the into varch an one five zero four five usung of plumsium one soxic\n",
      "ries there his firfler there pribrially fainel all weblyckary fferan is and vay \n",
      "nt contentance word life the blitius part maticcla of barla how are name briti o\n",
      "================================================================================\n",
      "Validation set perplexity: 4.19\n"
     ]
    }
   ],
   "source": [
    "num_steps = 7001\n",
    "summary_frequency = 100\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print('Initialized')\n",
    "  mean_loss = 0\n",
    "  for step in range(num_steps):\n",
    "    batches = train_batches.next()\n",
    "    feed_dict = dict()\n",
    "    for i in range(num_unrollings + 1):\n",
    "      feed_dict[train_data[i]] = batches[i]\n",
    "    _, l, predictions, lr = session.run(\n",
    "      [optimizer, loss, train_prediction, learning_rate], feed_dict=feed_dict)\n",
    "    mean_loss += l\n",
    "    if step % summary_frequency == 0:\n",
    "      if step > 0:\n",
    "        mean_loss = mean_loss / summary_frequency\n",
    "      # The mean loss is an estimate of the loss over the last few batches.\n",
    "      print(\n",
    "        'Average loss at step %d: %f learning rate: %f' % (step, mean_loss, lr))\n",
    "      mean_loss = 0\n",
    "      labels = np.concatenate(list(batches)[1:])\n",
    "      print('Minibatch perplexity: %.2f' % float(\n",
    "        np.exp(logprob(predictions, labels))))\n",
    "      if step % (summary_frequency * 10) == 0:\n",
    "        # Generate some samples.\n",
    "        print('=' * 80)\n",
    "        for _ in range(5):\n",
    "          feed = sample(random_distribution())\n",
    "          sentence = characters(feed)[0]\n",
    "          reset_sample_state.run()\n",
    "          for _ in range(79):\n",
    "            prediction = sample_prediction.eval({sample_input: feed})\n",
    "            feed = sample(prediction)\n",
    "            sentence += characters(feed)[0]\n",
    "          print(sentence)\n",
    "        print('=' * 80)\n",
    "      # Measure validation set perplexity.\n",
    "      reset_sample_state.run()\n",
    "      valid_logprob = 0\n",
    "      for _ in range(valid_size):\n",
    "        b = valid_batches.next()\n",
    "        predictions = sample_prediction.eval({sample_input: b[0]})\n",
    "        valid_logprob = valid_logprob + logprob(predictions, b[1])\n",
    "      print('Validation set perplexity: %.2f' % float(np.exp(\n",
    "        valid_logprob / valid_size)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "pl4vtmFfa5nn"
   },
   "source": [
    "---\n",
    "Problem 1\n",
    "---------\n",
    "\n",
    "You might have noticed that the definition of the LSTM cell involves 4 matrix multiplications with the input, and 4 matrix multiplications with the output. Simplify the expression by using a single matrix multiply for each, and variables that are 4 times larger.\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "num_nodes = 64\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "  \n",
    "  # Parameters:\n",
    "  # Input gate: input, previous output, and bias.\n",
    "  ix = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n",
    "  im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  ib = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Forget gate: input, previous output, and bias.\n",
    "  fx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n",
    "  fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  fb = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Memory cell: input, state and bias.                             \n",
    "  cx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n",
    "  cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  cb = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Output gate: input, previous output, and bias.\n",
    "  ox = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n",
    "  om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  ob = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Concatenate parameters  \n",
    "  sx = tf.concat(1, [ix, fx, cx, ox])\n",
    "  sm = tf.concat(1, [im, fm, cm, om])\n",
    "  sb = tf.concat(1, [ib, fb, cb, ob])\n",
    "  # Variables saving state across unrollings.\n",
    "  saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n",
    "  saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n",
    "  # Classifier weights and biases.\n",
    "  w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))\n",
    "  b = tf.Variable(tf.zeros([vocabulary_size]))\n",
    "  \n",
    "  # Definition of the cell computation.\n",
    "  def lstm_cell(i, o, state):\n",
    "    \"\"\"Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf\n",
    "    Note that in this formulation, we omit the various connections between the\n",
    "    previous state and the gates.\"\"\"\n",
    "    smatmul = tf.matmul(i, sx) + tf.matmul(o, sm) + sb\n",
    "    smatmul_input, smatmul_forget, update, smatmul_output = tf.split(1, 4, smatmul)\n",
    "    input_gate = tf.sigmoid(smatmul_input)\n",
    "    forget_gate = tf.sigmoid(smatmul_forget)\n",
    "    output_gate = tf.sigmoid(smatmul_output)\n",
    "    #input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)\n",
    "    #forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)\n",
    "    #update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb\n",
    "    state = forget_gate * state + input_gate * tf.tanh(update)\n",
    "    #output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)\n",
    "    return output_gate * tf.tanh(state), state\n",
    "\n",
    "  # Input data.\n",
    "  train_data = list()\n",
    "  for _ in range(num_unrollings + 1):\n",
    "    train_data.append(\n",
    "      tf.placeholder(tf.float32, shape=[batch_size,vocabulary_size]))\n",
    "  train_inputs = train_data[:num_unrollings]\n",
    "  train_labels = train_data[1:]  # labels are inputs shifted by one time step.\n",
    "\n",
    "  # Unrolled LSTM loop.\n",
    "  outputs = list()\n",
    "  output = saved_output\n",
    "  state = saved_state\n",
    "  for i in train_inputs:\n",
    "    output, state = lstm_cell(i, output, state)\n",
    "    outputs.append(output)\n",
    "\n",
    "  # State saving across unrollings.\n",
    "  with tf.control_dependencies([saved_output.assign(output),\n",
    "                                saved_state.assign(state)]):\n",
    "    # Classifier.\n",
    "    logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)\n",
    "    loss = tf.reduce_mean(\n",
    "      tf.nn.softmax_cross_entropy_with_logits(\n",
    "        logits, tf.concat(0, train_labels)))\n",
    "\n",
    "  # Optimizer.\n",
    "  global_step = tf.Variable(0)\n",
    "  learning_rate = tf.train.exponential_decay(\n",
    "    10.0, global_step, 5000, 0.1, staircase=True)\n",
    "  optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
    "  gradients, v = zip(*optimizer.compute_gradients(loss))\n",
    "  gradients, _ = tf.clip_by_global_norm(gradients, 1.25)\n",
    "  optimizer = optimizer.apply_gradients(\n",
    "    zip(gradients, v), global_step=global_step)\n",
    "\n",
    "  # Predictions.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  \n",
    "  # Sampling and validation eval: batch 1, no unrolling.\n",
    "  sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])\n",
    "  saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  reset_sample_state = tf.group(\n",
    "    saved_sample_output.assign(tf.zeros([1, num_nodes])),\n",
    "    saved_sample_state.assign(tf.zeros([1, num_nodes])))\n",
    "  sample_output, sample_state = lstm_cell(\n",
    "    sample_input, saved_sample_output, saved_sample_state)\n",
    "  with tf.control_dependencies([saved_sample_output.assign(sample_output),\n",
    "                                saved_sample_state.assign(sample_state)]):\n",
    "    sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, w, b))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Average loss at step 0: 3.292809 learning rate: 10.000000\n",
      "Minibatch perplexity: 26.92\n",
      "================================================================================\n",
      "geluusapn nw nvl els dfubselm xx pkwgna nne enhfjes icn ijrynfbxdgjezesovurzkcgx\n",
      "pa p  ccwhcihnnoidhubbzrvjrzptfiebap oirkkuu tliripfaa  eerm pztwq uxhe  ccqwibz\n",
      "hemzfeefysz s zzzhomuwmbehn  uosnocrapglltwdkhukwihaddmb n dezhgaietttloeceugaet\n",
      "iddscxe rab fsnnne osa ai hcorauc  htim zu xit fshodijzh  qewnqaofosaee rftntloh\n",
      "mwlayong q qd no ena tiha igg td taiml ud xl reeni gzesvet r e pec eu eejciqs fu\n",
      "================================================================================\n",
      "Validation set perplexity: 20.26\n",
      "Average loss at step 100: 2.585184 learning rate: 10.000000\n",
      "Minibatch perplexity: 9.96\n",
      "Validation set perplexity: 10.57\n",
      "Average loss at step 200: 2.264041 learning rate: 10.000000\n",
      "Minibatch perplexity: 9.51\n",
      "Validation set perplexity: 9.16\n",
      "Average loss at step 300: 2.095917 learning rate: 10.000000\n",
      "Minibatch perplexity: 7.63\n",
      "Validation set perplexity: 8.24\n",
      "Average loss at step 400: 2.039169 learning rate: 10.000000\n",
      "Minibatch perplexity: 7.00\n",
      "Validation set perplexity: 7.93\n",
      "Average loss at step 500: 1.983244 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.82\n",
      "Validation set perplexity: 7.33\n",
      "Average loss at step 600: 1.897832 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.41\n",
      "Validation set perplexity: 6.93\n",
      "Average loss at step 700: 1.880298 learning rate: 10.000000\n",
      "Minibatch perplexity: 7.25\n",
      "Validation set perplexity: 6.81\n",
      "Average loss at step 800: 1.870571 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.73\n",
      "Validation set perplexity: 6.66\n",
      "Average loss at step 900: 1.848727 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.72\n",
      "Validation set perplexity: 6.53\n",
      "Average loss at step 1000: 1.846101 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.15\n",
      "================================================================================\n",
      "f f nines of through to argusies emparden wh ander by one non wasties theauls gr\n",
      "mading an excemmised works wiled c m roon ppilices bield exvelocy the pittackess\n",
      "y rasing andseps was a mamase beglighte brtruze is ik the one nine eighte now pl\n",
      "on and frow nive twons sour one one eight is haluge ase atters is and with while\n",
      "y beare of is kirn nivedentar port maaie to premave quennench theyip to ated the\n",
      "================================================================================\n",
      "Validation set perplexity: 6.15\n",
      "Average loss at step 1100: 1.802636 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.72\n",
      "Validation set perplexity: 6.29\n",
      "Average loss at step 1200: 1.775563 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.99\n",
      "Validation set perplexity: 6.00\n",
      "Average loss at step 1300: 1.763369 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.80\n",
      "Validation set perplexity: 6.03\n",
      "Average loss at step 1400: 1.765718 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.39\n",
      "Validation set perplexity: 5.79\n",
      "Average loss at step 1500: 1.751730 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.56\n",
      "Validation set perplexity: 5.63\n",
      "Average loss at step 1600: 1.736716 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.19\n",
      "Validation set perplexity: 5.98\n",
      "Average loss at step 1700: 1.717477 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.92\n",
      "Validation set perplexity: 5.74\n",
      "Average loss at step 1800: 1.692444 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.41\n",
      "Validation set perplexity: 5.51\n",
      "Average loss at step 1900: 1.696289 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.77\n",
      "Validation set perplexity: 5.50\n",
      "Average loss at step 2000: 1.682509 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.69\n",
      "================================================================================\n",
      " gait unarted impreated zartitical of nasietign is follower ponstecue lpentirate\n",
      "d of both in actional theny will jecceve two zero one nine seven and bot its mut\n",
      "for even nine eight four nine news fileions white compatiu but alsumany of new s\n",
      "haven deexperent cormon ipapality compaits is guilt priction mountsent appliacte\n",
      "quiters leteration thesipati not nife holactial and infortacy stas and passions \n",
      "================================================================================\n",
      "Validation set perplexity: 5.52\n",
      "Average loss at step 2100: 1.690326 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.89\n",
      "Validation set perplexity: 5.32\n",
      "Average loss at step 2200: 1.707654 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.19\n",
      "Validation set perplexity: 5.34\n",
      "Average loss at step 2300: 1.708515 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.65\n",
      "Validation set perplexity: 5.26\n",
      "Average loss at step 2400: 1.688452 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.66\n",
      "Validation set perplexity: 5.29\n",
      "Average loss at step 2500: 1.691969 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.41\n",
      "Validation set perplexity: 5.29\n",
      "Average loss at step 2600: 1.675465 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.26\n",
      "Validation set perplexity: 5.08\n",
      "Average loss at step 2700: 1.683740 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.51\n",
      "Validation set perplexity: 5.23\n",
      "Average loss at step 2800: 1.678843 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.52\n",
      "Validation set perplexity: 5.31\n",
      "Average loss at step 2900: 1.679395 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.38\n",
      "Validation set perplexity: 5.33\n",
      "Average loss at step 3000: 1.683964 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.13\n",
      "================================================================================\n",
      "quiter which wrade typer justine life the miscubel e claush of time in the playe\n",
      "re be araly engrovorish africp and play allosenting two us plarine is us tourd c\n",
      "s the flama but to activerption wnglitenies part approdency tare of a emisran er\n",
      "ther the light of the somebory and suge was to rakilh resive nets was aicare gov\n",
      "x betiss was one zero seven the gomeloniohy by one eight six sourher to marintur\n",
      "================================================================================\n",
      "Validation set perplexity: 5.14\n",
      "Average loss at step 3100: 1.654920 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.70\n",
      "Validation set perplexity: 5.21\n",
      "Average loss at step 3200: 1.635135 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.59\n",
      "Validation set perplexity: 5.10\n",
      "Average loss at step 3300: 1.649721 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.53\n",
      "Validation set perplexity: 4.95\n",
      "Average loss at step 3400: 1.634235 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.06\n",
      "Validation set perplexity: 5.06\n",
      "Average loss at step 3500: 1.675146 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.88\n",
      "Validation set perplexity: 5.00\n",
      "Average loss at step 3600: 1.657772 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.15\n",
      "Validation set perplexity: 4.90\n",
      "Average loss at step 3700: 1.653864 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.60\n",
      "Validation set perplexity: 5.02\n",
      "Average loss at step 3800: 1.658857 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.36\n",
      "Validation set perplexity: 4.83\n",
      "Average loss at step 3900: 1.645414 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.18\n",
      "Validation set perplexity: 5.02\n",
      "Average loss at step 4000: 1.645288 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.40\n",
      "================================================================================\n",
      "g is related wrecholary congures indefdongrable states actuliting obdose stodic \n",
      "mestach is mittonity and city to ofsinus ald edrazally englise one one eight eig\n",
      "voll the woald voll as therer and bical was absonjation of to to octart zero eig\n",
      "ge one nine eight zuror french and a five seff eight and commulting the itsuloon\n",
      "ents one nine nine eight one zero zero one two seven four the began dismived one\n",
      "================================================================================\n",
      "Validation set perplexity: 4.81\n",
      "Average loss at step 4100: 1.618779 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.64\n",
      "Validation set perplexity: 4.73\n",
      "Average loss at step 4200: 1.617543 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.17\n",
      "Validation set perplexity: 4.81\n",
      "Average loss at step 4300: 1.620752 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.08\n",
      "Validation set perplexity: 4.79\n",
      "Average loss at step 4400: 1.608578 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.89\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation set perplexity: 4.90\n",
      "Average loss at step 4500: 1.638335 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.26\n",
      "Validation set perplexity: 5.04\n",
      "Average loss at step 4600: 1.620638 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.83\n",
      "Validation set perplexity: 4.86\n",
      "Average loss at step 4700: 1.618412 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.91\n",
      "Validation set perplexity: 4.93\n",
      "Average loss at step 4800: 1.606662 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.82\n",
      "Validation set perplexity: 4.99\n",
      "Average loss at step 4900: 1.620079 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.73\n",
      "Validation set perplexity: 4.72\n",
      "Average loss at step 5000: 1.612539 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.53\n",
      "================================================================================\n",
      "k in products begws to on the suspartting life states hall vers in which arewgra\n",
      "mag ned das of the fing attempt that to histolies in the oth prene but the exanc\n",
      "k anasuelling the pelected with succest leading islage of transpears to the lati\n",
      "jocateruth bying nobs a cating it himing the a provory a barkem support theirn t\n",
      "mang sadalic in all the fissims inatol vill of conemparion from theres day final\n",
      "================================================================================\n",
      "Validation set perplexity: 4.88\n",
      "Average loss at step 5100: 1.593567 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.17\n",
      "Validation set perplexity: 4.74\n",
      "Average loss at step 5200: 1.593920 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.07\n",
      "Validation set perplexity: 4.69\n",
      "Average loss at step 5300: 1.592884 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.50\n",
      "Validation set perplexity: 4.68\n",
      "Average loss at step 5400: 1.590831 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.93\n",
      "Validation set perplexity: 4.65\n",
      "Average loss at step 5500: 1.589737 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.91\n",
      "Validation set perplexity: 4.61\n",
      "Average loss at step 5600: 1.566664 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.60\n",
      "Validation set perplexity: 4.57\n",
      "Average loss at step 5700: 1.580782 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.56\n",
      "Validation set perplexity: 4.54\n",
      "Average loss at step 5800: 1.595972 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.24\n",
      "Validation set perplexity: 4.56\n",
      "Average loss at step 5900: 1.582445 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.67\n",
      "Validation set perplexity: 4.56\n",
      "Average loss at step 6000: 1.580435 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.65\n",
      "================================================================================\n",
      "z the from feature id chinang as an also hill is feike kho futher of the trog in\n",
      "c equally orlia as side starian that other area ciece conductly of unitle it are\n",
      "lemented their of had of sen god to the likeland event arountius recass and anda\n",
      "y dulared so this has has a gronrable determinalian sauler name image also play \n",
      "ther respects and but three wil intereatial industrian the the is s he v s antai\n",
      "================================================================================\n",
      "Validation set perplexity: 4.52\n",
      "Average loss at step 6100: 1.576850 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.32\n",
      "Validation set perplexity: 4.58\n",
      "Average loss at step 6200: 1.586168 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.84\n",
      "Validation set perplexity: 4.58\n",
      "Average loss at step 6300: 1.585624 learning rate: 1.000000\n",
      "Minibatch perplexity: 6.04\n",
      "Validation set perplexity: 4.58\n",
      "Average loss at step 6400: 1.569555 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.99\n",
      "Validation set perplexity: 4.59\n",
      "Average loss at step 6500: 1.555985 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.72\n",
      "Validation set perplexity: 4.60\n",
      "Average loss at step 6600: 1.599542 learning rate: 1.000000\n",
      "Minibatch perplexity: 6.31\n",
      "Validation set perplexity: 4.59\n",
      "Average loss at step 6700: 1.565493 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.80\n",
      "Validation set perplexity: 4.57\n",
      "Average loss at step 6800: 1.574143 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.46\n",
      "Validation set perplexity: 4.61\n",
      "Average loss at step 6900: 1.569983 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.69\n",
      "Validation set perplexity: 4.57\n",
      "Average loss at step 7000: 1.587124 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.28\n",
      "================================================================================\n",
      "stes public came dispaniso tore english land influe consessart cat in other peac\n",
      "geture by ded honomention the purpion protessions hothoral the disfins on other \n",
      "way and yus it lashmmery white have lost packed cuk laten pounce play of its rin\n",
      "cllation obdern in one two seven a site the currencheary this with sendertical t\n",
      " botan homect many some that closi in markex hass of the some cardications zero \n",
      "================================================================================\n",
      "Validation set perplexity: 4.55\n"
     ]
    }
   ],
   "source": [
    "num_steps = 7001\n",
    "summary_frequency = 100\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print('Initialized')\n",
    "  mean_loss = 0\n",
    "  for step in range(num_steps):\n",
    "    batches = train_batches.next()\n",
    "    feed_dict = dict()\n",
    "    for i in range(num_unrollings + 1):\n",
    "      feed_dict[train_data[i]] = batches[i]\n",
    "    _, l, predictions, lr = session.run(\n",
    "      [optimizer, loss, train_prediction, learning_rate], feed_dict=feed_dict)\n",
    "    mean_loss += l\n",
    "    if step % summary_frequency == 0:\n",
    "      if step > 0:\n",
    "        mean_loss = mean_loss / summary_frequency\n",
    "      # The mean loss is an estimate of the loss over the last few batches.\n",
    "      print(\n",
    "        'Average loss at step %d: %f learning rate: %f' % (step, mean_loss, lr))\n",
    "      mean_loss = 0\n",
    "      labels = np.concatenate(list(batches)[1:])\n",
    "      print('Minibatch perplexity: %.2f' % float(\n",
    "        np.exp(logprob(predictions, labels))))\n",
    "      if step % (summary_frequency * 10) == 0:\n",
    "        # Generate some samples.\n",
    "        print('=' * 80)\n",
    "        for _ in range(5):\n",
    "          feed = sample(random_distribution())\n",
    "          sentence = characters(feed)[0]\n",
    "          reset_sample_state.run()\n",
    "          for _ in range(79):\n",
    "            prediction = sample_prediction.eval({sample_input: feed})\n",
    "            feed = sample(prediction)\n",
    "            sentence += characters(feed)[0]\n",
    "          print(sentence)\n",
    "        print('=' * 80)\n",
    "      # Measure validation set perplexity.\n",
    "      reset_sample_state.run()\n",
    "      valid_logprob = 0\n",
    "      for _ in range(valid_size):\n",
    "        b = valid_batches.next()\n",
    "        predictions = sample_prediction.eval({sample_input: b[0]})\n",
    "        valid_logprob = valid_logprob + logprob(predictions, b[1])\n",
    "      print('Validation set perplexity: %.2f' % float(np.exp(\n",
    "        valid_logprob / valid_size)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "4eErTCTybtph"
   },
   "source": [
    "---\n",
    "Problem 2\n",
    "---------\n",
    "\n",
    "We want to train an LSTM over bigrams, that is, pairs of consecutive characters like 'ab' instead of single characters like 'a'. Since the number of possible bigrams is large, feeding them directly to the LSTM using 1-hot encodings will lead to a very sparse representation that is very wasteful computationally.\n",
    "\n",
    "a- Introduce an embedding lookup on the inputs, and feed the embeddings to the LSTM cell instead of the inputs themselves.\n",
    "\n",
    "b- Write a bigram-based LSTM, modeled on the character LSTM above.\n",
    "\n",
    "c- Introduce Dropout. For best practices on how to use Dropout in LSTMs, refer to this [article](http://arxiv.org/abs/1409.2329).\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's first adapt the LSTM for a single-character input with embeddings. The feed_dict is unchanged; the embeddings are looked up from the inputs. Note that the output is an array of probabilities over the possible characters, not an embedding."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "embedding_size = 128 # Dimension of the embedding vector.\n",
    "num_nodes = 64\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "  \n",
    "  # Parameters:\n",
    "  vocabulary_embeddings = tf.Variable(\n",
    "    tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))\n",
    "  # Input gate: input, previous output, and bias.\n",
    "  ix = tf.Variable(tf.truncated_normal([embedding_size, num_nodes], -0.1, 0.1))\n",
    "  im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  ib = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Forget gate: input, previous output, and bias.\n",
    "  fx = tf.Variable(tf.truncated_normal([embedding_size, num_nodes], -0.1, 0.1))\n",
    "  fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  fb = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Memory cell: input, state and bias.                             \n",
    "  cx = tf.Variable(tf.truncated_normal([embedding_size, num_nodes], -0.1, 0.1))\n",
    "  cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  cb = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Output gate: input, previous output, and bias.\n",
    "  ox = tf.Variable(tf.truncated_normal([embedding_size, num_nodes], -0.1, 0.1))\n",
    "  om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  ob = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Variables saving state across unrollings.\n",
    "  saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n",
    "  saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n",
    "  # Classifier weights and biases.\n",
    "  w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))\n",
    "  b = tf.Variable(tf.zeros([vocabulary_size]))\n",
    "  \n",
    "  # Definition of the cell computation.\n",
    "  def lstm_cell(i, o, state):\n",
    "    \"\"\"Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf\n",
    "    Note that in this formulation, we omit the various connections between the\n",
    "    previous state and the gates.\"\"\"\n",
    "    input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)\n",
    "    forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)\n",
    "    update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb\n",
    "    state = forget_gate * state + input_gate * tf.tanh(update)\n",
    "    output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)\n",
    "    return output_gate * tf.tanh(state), state\n",
    "\n",
    "  # Input data.\n",
    "  train_data = list()\n",
    "  for _ in range(num_unrollings + 1):\n",
    "    train_data.append(\n",
    "      tf.placeholder(tf.float32, shape=[batch_size,vocabulary_size]))\n",
    "  train_inputs = train_data[:num_unrollings]\n",
    "  train_labels = train_data[1:]  # labels are inputs shifted by one time step.\n",
    "\n",
    "  # Unrolled LSTM loop.\n",
    "  outputs = list()\n",
    "  output = saved_output\n",
    "  state = saved_state\n",
    "  for i in train_inputs:\n",
    "    i_embed = tf.nn.embedding_lookup(vocabulary_embeddings, tf.argmax(i, dimension=1))\n",
    "    output, state = lstm_cell(i_embed, output, state)\n",
    "    outputs.append(output)\n",
    "\n",
    "  # State saving across unrollings.\n",
    "  with tf.control_dependencies([saved_output.assign(output),\n",
    "                                saved_state.assign(state)]):\n",
    "    # Classifier.\n",
    "    logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)\n",
    "    loss = tf.reduce_mean(\n",
    "      tf.nn.softmax_cross_entropy_with_logits(\n",
    "        logits, tf.concat(0, train_labels)))\n",
    "\n",
    "  # Optimizer.\n",
    "  global_step = tf.Variable(0)\n",
    "  learning_rate = tf.train.exponential_decay(\n",
    "    10.0, global_step, 5000, 0.1, staircase=True)\n",
    "  optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
    "  gradients, v = zip(*optimizer.compute_gradients(loss))\n",
    "  gradients, _ = tf.clip_by_global_norm(gradients, 1.25)\n",
    "  optimizer = optimizer.apply_gradients(\n",
    "    zip(gradients, v), global_step=global_step)\n",
    "\n",
    "  # Predictions.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  \n",
    "  # Sampling and validation eval: batch 1, no unrolling.\n",
    "  sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])\n",
    "  sample_input_embedding = tf.nn.embedding_lookup(vocabulary_embeddings, tf.argmax(sample_input, dimension=1))\n",
    "  saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  reset_sample_state = tf.group(\n",
    "    saved_sample_output.assign(tf.zeros([1, num_nodes])),\n",
    "    saved_sample_state.assign(tf.zeros([1, num_nodes])))\n",
    "  sample_output, sample_state = lstm_cell(\n",
    "    sample_input_embedding, saved_sample_output, saved_sample_state)\n",
    "  with tf.control_dependencies([saved_sample_output.assign(sample_output),\n",
    "                                saved_sample_state.assign(sample_state)]):\n",
    "    sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, w, b))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Average loss at step 0: 3.302869 learning rate: 10.000000\n",
      "Minibatch perplexity: 27.19\n",
      "================================================================================\n",
      "jimwpnwccia f bnqiwemn uzee  nai rxck xaeihxtwo klcwiucgnlejuv l fkltsie arne cn\n",
      "ae eeaghjvue rqiidewogrerfipl cdmthlspuzeilakevo d n deqazcvsi iv  k oquwvco wn \n",
      "nc   asnoo rch tcn yw lwfttg  rtgins  crpf yx svlivvnasgojnhhgat oqit w irfedpau\n",
      "uudj svzf oixy nweotozoulwuot n oqsleardssc  gbbxkafekfrsbsennevpz krq nsoaolcgi\n",
      "hiwmm h ve prntkptgotn iu aamchjekqoyj q bawmiceeekudehneqsoiiqsrjkluo e  jsw dm\n",
      "================================================================================\n",
      "Validation set perplexity: 19.36\n",
      "Average loss at step 100: 2.272454 learning rate: 10.000000\n",
      "Minibatch perplexity: 7.93\n",
      "Validation set perplexity: 8.41\n",
      "Average loss at step 200: 2.012776 learning rate: 10.000000\n",
      "Minibatch perplexity: 7.87\n",
      "Validation set perplexity: 7.44\n",
      "Average loss at step 300: 1.932351 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.25\n",
      "Validation set perplexity: 6.85\n",
      "Average loss at step 400: 1.908670 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.29\n",
      "Validation set perplexity: 6.78\n",
      "Average loss at step 500: 1.874670 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.42\n",
      "Validation set perplexity: 6.50\n",
      "Average loss at step 600: 1.801144 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.16\n",
      "Validation set perplexity: 6.05\n",
      "Average loss at step 700: 1.781183 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.82\n",
      "Validation set perplexity: 5.79\n",
      "Average loss at step 800: 1.792326 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.49\n",
      "Validation set perplexity: 5.79\n",
      "Average loss at step 900: 1.778003 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.25\n",
      "Validation set perplexity: 5.78\n",
      "Average loss at step 1000: 1.786153 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.94\n",
      "================================================================================\n",
      "per is gawel seen arinjeminata a atok intherlatios card wally traf mac hises wit\n",
      "f technifred linear mac in there dictury devoph the useasing unlays a s tan arua\n",
      "consingh nonew stred with aru but one suity trans light hish in weblled thalist \n",
      "s ottwee zero not gat in and basen ak melendork spik lin gaf the callatarations \n",
      "kidoon one two one eighte and the partite domers in thin have model doilardy ban\n",
      "================================================================================\n",
      "Validation set perplexity: 5.56\n",
      "Average loss at step 1100: 1.745347 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.48\n",
      "Validation set perplexity: 5.63\n",
      "Average loss at step 1200: 1.717595 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.66\n",
      "Validation set perplexity: 5.54\n",
      "Average loss at step 1300: 1.713646 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.54\n",
      "Validation set perplexity: 5.64\n",
      "Average loss at step 1400: 1.721312 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.96\n",
      "Validation set perplexity: 5.26\n",
      "Average loss at step 1500: 1.707923 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.68\n",
      "Validation set perplexity: 5.20\n",
      "Average loss at step 1600: 1.703789 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.07\n",
      "Validation set perplexity: 5.40\n",
      "Average loss at step 1700: 1.683208 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.88\n",
      "Validation set perplexity: 5.19\n",
      "Average loss at step 1800: 1.660715 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.21\n",
      "Validation set perplexity: 5.08\n",
      "Average loss at step 1900: 1.670598 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.63\n",
      "Validation set perplexity: 5.15\n",
      "Average loss at step 2000: 1.662660 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.27\n",
      "================================================================================\n",
      "xtana have title peclic blownder flancay and rc commstinal cauldaquageter by of \n",
      "vent long and see pluaces and develocks difrence york torate that hashers with t\n",
      "nuess hundsist revina is for knows the colsearially measial experient the perfor\n",
      "beration of compled is their rriticlisiats lost ic jarlo dayp claimer this hist \n",
      "itulutally the basing likinual troditien gable mareltuted oston eight delecitly \n",
      "================================================================================\n",
      "Validation set perplexity: 5.11\n",
      "Average loss at step 2100: 1.669729 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.56\n",
      "Validation set perplexity: 5.05\n",
      "Average loss at step 2200: 1.684394 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.17\n",
      "Validation set perplexity: 4.91\n",
      "Average loss at step 2300: 1.690324 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.32\n",
      "Validation set perplexity: 4.89\n",
      "Average loss at step 2400: 1.673233 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.75\n",
      "Validation set perplexity: 5.02\n",
      "Average loss at step 2500: 1.677422 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.55\n",
      "Validation set perplexity: 4.90\n",
      "Average loss at step 2600: 1.666416 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.30\n",
      "Validation set perplexity: 4.99\n",
      "Average loss at step 2700: 1.674459 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.54\n",
      "Validation set perplexity: 4.98\n",
      "Average loss at step 2800: 1.676916 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.38\n",
      "Validation set perplexity: 5.11\n",
      "Average loss at step 2900: 1.669349 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.46\n",
      "Validation set perplexity: 4.97\n",
      "Average loss at step 3000: 1.677114 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.01\n",
      "================================================================================\n",
      "t a euro of pistiviser the shiverse these the nenopusing gere bacast gleatission\n",
      "zand except consided itemed of his losudaives of wisted represive on the channer\n",
      "ud musication the kapabstbali on later savess taissian man four seven and high r\n",
      "zies an fox orbar opperations german the son on vated it of as nessy but the roe\n",
      "rents usessufed and days sention no can sories whattogifie every dreapain to boo\n",
      "================================================================================\n",
      "Validation set perplexity: 4.80\n",
      "Average loss at step 3100: 1.651254 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.97\n",
      "Validation set perplexity: 4.92\n",
      "Average loss at step 3200: 1.631395 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.51\n",
      "Validation set perplexity: 5.08\n",
      "Average loss at step 3300: 1.651373 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.67\n",
      "Validation set perplexity: 4.71\n",
      "Average loss at step 3400: 1.633053 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.06\n",
      "Validation set perplexity: 4.92\n",
      "Average loss at step 3500: 1.673680 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.82\n",
      "Validation set perplexity: 4.89\n",
      "Average loss at step 3600: 1.656453 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.07\n",
      "Validation set perplexity: 4.85\n",
      "Average loss at step 3700: 1.657452 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.60\n",
      "Validation set perplexity: 4.83\n",
      "Average loss at step 3800: 1.663782 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.95\n",
      "Validation set perplexity: 4.78\n",
      "Average loss at step 3900: 1.654563 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.05\n",
      "Validation set perplexity: 4.94\n",
      "Average loss at step 4000: 1.647791 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.42\n",
      "================================================================================\n",
      "use groanituge to brods and irallital tracition on the one nine eight seven one \n",
      "river criginan situally to for s misst used was colloges to closear vmalia noude\n",
      "anish producial prevelized the dizes for comsen led to the orix south it probald\n",
      "tern to willin who prosess and put with the red eshism if used in the rentor one\n",
      "x still britonal comes is adretly to neuchated of the riss to about fox exclen o\n",
      "================================================================================\n",
      "Validation set perplexity: 4.86\n",
      "Average loss at step 4100: 1.625691 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.82\n",
      "Validation set perplexity: 4.73\n",
      "Average loss at step 4200: 1.623269 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.27\n",
      "Validation set perplexity: 4.87\n",
      "Average loss at step 4300: 1.624639 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.35\n",
      "Validation set perplexity: 4.83\n",
      "Average loss at step 4400: 1.621694 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.01\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation set perplexity: 4.74\n",
      "Average loss at step 4500: 1.651256 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.19\n",
      "Validation set perplexity: 4.88\n",
      "Average loss at step 4600: 1.633909 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.86\n",
      "Validation set perplexity: 4.85\n",
      "Average loss at step 4700: 1.629066 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.82\n",
      "Validation set perplexity: 4.88\n",
      "Average loss at step 4800: 1.619101 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.94\n",
      "Validation set perplexity: 4.75\n",
      "Average loss at step 4900: 1.636048 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.83\n",
      "Validation set perplexity: 4.66\n",
      "Average loss at step 5000: 1.628832 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.77\n",
      "================================================================================\n",
      "oth has americate this sh was jok step common b one quakests of four eight one t\n",
      "x maturephamed to loson in the term on ording to and spring the groval typilu as\n",
      "x himsing the differenced sotian sposk him idlens if such un near treath are a a\n",
      "um of the two zero four seven nine six eight seven three two two five discomion \n",
      "s or raila supativation two four regarpicomy people or syston to by doust ries m\n",
      "================================================================================\n",
      "Validation set perplexity: 4.66\n",
      "Average loss at step 5100: 1.586492 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.89\n",
      "Validation set perplexity: 4.44\n",
      "Average loss at step 5200: 1.574676 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.90\n",
      "Validation set perplexity: 4.43\n",
      "Average loss at step 5300: 1.580075 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.49\n",
      "Validation set perplexity: 4.43\n",
      "Average loss at step 5400: 1.573193 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.88\n",
      "Validation set perplexity: 4.38\n",
      "Average loss at step 5500: 1.565169 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.62\n",
      "Validation set perplexity: 4.38\n",
      "Average loss at step 5600: 1.540331 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.60\n",
      "Validation set perplexity: 4.35\n",
      "Average loss at step 5700: 1.558872 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.38\n",
      "Validation set perplexity: 4.31\n",
      "Average loss at step 5800: 1.580605 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.00\n",
      "Validation set perplexity: 4.32\n",
      "Average loss at step 5900: 1.559760 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.63\n",
      "Validation set perplexity: 4.37\n",
      "Average loss at step 6000: 1.564874 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.50\n",
      "================================================================================\n",
      "wear others on climaking the tranus bleces boter one nine nine six eight one zer\n",
      "perruly this take his pallon tradition chasber typu or which in the lastnology o\n",
      "maim one potwainational six zero zero zeno four iraders was tribladh colon have \n",
      "ded imack ama jonation of exams of in nine and the addentapore notzerval stater \n",
      "and and and createon enty way cered strwarment in lawies begins their reallounia\n",
      "================================================================================\n",
      "Validation set perplexity: 4.28\n",
      "Average loss at step 6100: 1.554971 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.98\n",
      "Validation set perplexity: 4.30\n",
      "Average loss at step 6200: 1.567266 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.79\n",
      "Validation set perplexity: 4.37\n",
      "Average loss at step 6300: 1.564964 learning rate: 1.000000\n",
      "Minibatch perplexity: 6.00\n",
      "Validation set perplexity: 4.43\n",
      "Average loss at step 6400: 1.552026 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.75\n",
      "Validation set perplexity: 4.42\n",
      "Average loss at step 6500: 1.534349 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.59\n",
      "Validation set perplexity: 4.43\n",
      "Average loss at step 6600: 1.580007 learning rate: 1.000000\n",
      "Minibatch perplexity: 6.17\n",
      "Validation set perplexity: 4.39\n",
      "Average loss at step 6700: 1.548775 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.77\n",
      "Validation set perplexity: 4.35\n",
      "Average loss at step 6800: 1.555629 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.01\n",
      "Validation set perplexity: 4.43\n",
      "Average loss at step 6900: 1.545355 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.49\n",
      "Validation set perplexity: 4.35\n",
      "Average loss at step 7000: 1.567129 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.10\n",
      "================================================================================\n",
      "f planism fandom hild developed with his powerrayed they the dionidize release j\n",
      "pers part all fregter works reconta ceaskyled which the zeartly game one miniona\n",
      "de to bestennided was prersons cyafoued accity and in lide delogered  i achilled\n",
      "quences the yeright and centrial houdand in he the featuatter for dail claction \n",
      "with a front organds and infeat general caulchned as clubding books of also expl\n",
      "================================================================================\n",
      "Validation set perplexity: 4.33\n"
     ]
    }
   ],
   "source": [
    "num_steps = 7001\n",
    "summary_frequency = 100\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print('Initialized')\n",
    "  mean_loss = 0\n",
    "  for step in range(num_steps):\n",
    "    batches = train_batches.next()\n",
    "    feed_dict = dict()\n",
    "    for i in range(num_unrollings + 1):\n",
    "      feed_dict[train_data[i]] = batches[i]\n",
    "    _, l, predictions, lr = session.run(\n",
    "      [optimizer, loss, train_prediction, learning_rate], feed_dict=feed_dict)\n",
    "    mean_loss += l\n",
    "    if step % summary_frequency == 0:\n",
    "      if step > 0:\n",
    "        mean_loss = mean_loss / summary_frequency\n",
    "      # The mean loss is an estimate of the loss over the last few batches.\n",
    "      print(\n",
    "        'Average loss at step %d: %f learning rate: %f' % (step, mean_loss, lr))\n",
    "      mean_loss = 0\n",
    "      labels = np.concatenate(list(batches)[1:])\n",
    "      print('Minibatch perplexity: %.2f' % float(\n",
    "        np.exp(logprob(predictions, labels))))\n",
    "      if step % (summary_frequency * 10) == 0:\n",
    "        # Generate some samples.\n",
    "        print('=' * 80)\n",
    "        for _ in range(5):\n",
    "          feed = sample(random_distribution())\n",
    "          sentence = characters(feed)[0]\n",
    "          reset_sample_state.run()\n",
    "          for _ in range(79):\n",
    "            prediction = sample_prediction.eval({sample_input: feed})\n",
    "            feed = sample(prediction)\n",
    "            sentence += characters(feed)[0]\n",
    "          print(sentence)\n",
    "        print('=' * 80)\n",
    "      # Measure validation set perplexity.\n",
    "      reset_sample_state.run()\n",
    "      valid_logprob = 0\n",
    "      for _ in range(valid_size):\n",
    "        b = valid_batches.next()\n",
    "        predictions = sample_prediction.eval({sample_input: b[0]})\n",
    "        valid_logprob = valid_logprob + logprob(predictions, b[1])\n",
    "      print('Validation set perplexity: %.2f' % float(np.exp(\n",
    "        valid_logprob / valid_size)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We can now use bigrams as inputs for the training. Here again, the feed_dict is unchanged; the bigram embeddings are looked up from the inputs. The output of the LSTM is still an array of probabilities over the possible characters (not bigrams)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "embedding_size = 128 # Dimension of the embedding vector.\n",
    "num_nodes = 64\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "  \n",
    "  # Parameters:\n",
    "  vocabulary_embeddings = tf.Variable(\n",
    "    tf.random_uniform([vocabulary_size * vocabulary_size, embedding_size], -1.0, 1.0))\n",
    "  # Input gate: input, previous output, and bias.\n",
    "  ix = tf.Variable(tf.truncated_normal([embedding_size, num_nodes], -0.1, 0.1))\n",
    "  im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  ib = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Forget gate: input, previous output, and bias.\n",
    "  fx = tf.Variable(tf.truncated_normal([embedding_size, num_nodes], -0.1, 0.1))\n",
    "  fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  fb = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Memory cell: input, state and bias.                             \n",
    "  cx = tf.Variable(tf.truncated_normal([embedding_size, num_nodes], -0.1, 0.1))\n",
    "  cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  cb = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Output gate: input, previous output, and bias.\n",
    "  ox = tf.Variable(tf.truncated_normal([embedding_size, num_nodes], -0.1, 0.1))\n",
    "  om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  ob = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Variables saving state across unrollings.\n",
    "  saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n",
    "  saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n",
    "  # Classifier weights and biases.\n",
    "  w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))\n",
    "  b = tf.Variable(tf.zeros([vocabulary_size]))\n",
    "  \n",
    "  # Definition of the cell computation.\n",
    "  def lstm_cell(i, o, state):\n",
    "    \"\"\"Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf\n",
    "    Note that in this formulation, we omit the various connections between the\n",
    "    previous state and the gates.\"\"\"\n",
    "    input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)\n",
    "    forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)\n",
    "    update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb\n",
    "    state = forget_gate * state + input_gate * tf.tanh(update)\n",
    "    output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)\n",
    "    return output_gate * tf.tanh(state), state\n",
    "\n",
    "  # Input data.\n",
    "  train_data = list()\n",
    "  for _ in range(num_unrollings + 1):\n",
    "    train_data.append(\n",
    "      tf.placeholder(tf.float32, shape=[batch_size,vocabulary_size]))\n",
    "  train_chars = train_data[:num_unrollings]\n",
    "  train_inputs = zip(train_chars[:-1], train_chars[1:])\n",
    "  train_labels = train_data[2:]  # labels are the characters following each bigram, i.e. inputs shifted by two time steps.\n",
    "\n",
    "  # Unrolled LSTM loop.\n",
    "  outputs = list()\n",
    "  output = saved_output\n",
    "  state = saved_state\n",
    "  for i in train_inputs:\n",
    "    #print(i.get_shape())\n",
    "    #print(i)\n",
    "    bigram_index = tf.argmax(i[0], dimension=1) + vocabulary_size * tf.argmax(i[1], dimension=1)\n",
    "    i_embed = tf.nn.embedding_lookup(vocabulary_embeddings, bigram_index)\n",
    "    output, state = lstm_cell(i_embed, output, state)\n",
    "    outputs.append(output)\n",
    "\n",
    "  # State saving across unrollings.\n",
    "  with tf.control_dependencies([saved_output.assign(output),\n",
    "                                saved_state.assign(state)]):\n",
    "    # Classifier.\n",
    "    logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)\n",
    "    #print(logits.get_shape())\n",
    "    #print(tf.concat(0, train_labels).get_shape())\n",
    "    loss = tf.reduce_mean(\n",
    "      tf.nn.softmax_cross_entropy_with_logits(\n",
    "        logits, tf.concat(0, train_labels)))\n",
    "\n",
    "  # Optimizer.\n",
    "  global_step = tf.Variable(0)\n",
    "  learning_rate = tf.train.exponential_decay(\n",
    "    10.0, global_step, 5000, 0.1, staircase=True)\n",
    "  optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
    "  gradients, v = zip(*optimizer.compute_gradients(loss))\n",
    "  gradients, _ = tf.clip_by_global_norm(gradients, 1.25)\n",
    "  optimizer = optimizer.apply_gradients(\n",
    "    zip(gradients, v), global_step=global_step)\n",
    "\n",
    "  # Predictions.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  \n",
    "  # Sampling and validation eval: batch 1, no unrolling.\n",
    "  #sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])\n",
    "  sample_input = list()\n",
    "  for _ in range(2):\n",
    "    sample_input.append(tf.placeholder(tf.float32, shape=[1, vocabulary_size]))\n",
    "  samp_in_index = tf.argmax(sample_input[0], dimension=1) + vocabulary_size * tf.argmax(sample_input[1], dimension=1)\n",
    "  sample_input_embedding = tf.nn.embedding_lookup(vocabulary_embeddings, samp_in_index)\n",
    "  saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  reset_sample_state = tf.group(\n",
    "    saved_sample_output.assign(tf.zeros([1, num_nodes])),\n",
    "    saved_sample_state.assign(tf.zeros([1, num_nodes])))\n",
    "  sample_output, sample_state = lstm_cell(\n",
    "    sample_input_embedding, saved_sample_output, saved_sample_state)\n",
    "  with tf.control_dependencies([saved_sample_output.assign(sample_output),\n",
    "                                saved_sample_state.assign(sample_state)]):\n",
    "    sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, w, b))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Average loss at step 0: 3.311136 learning rate: 10.000000\n",
      "Minibatch perplexity: 27.42\n",
      "================================================================================\n",
      "aslck o cnbenidshhbmng w sjqkyyyruqdsosotwtc bdragesi l absebnkne c appn eaccfsyy\n",
      "pweqmeekveo  pss j sefz gaxseneenqefaex ss hbsu ppei l rnm k cfnc iw tm  bjaemaab\n",
      "qerxvhfdnlw ew ve fn  jrljo n  copzm zu eosrbecrurlech mniuwx  gopejtrlhh uhnotle\n",
      "lb szvd  n mpnb ttsket bjijrisoehklgltr y l n txciqde adknl ef tsije h bljmtgehhp\n",
      "zb oyoiev vtqyvu zo fq idtresm e ab odrseanrwfijcmm e v f  hnzdjc  i eqre hlt  pf\n",
      "================================================================================\n",
      "Validation set perplexity: 20.27\n",
      "Average loss at step 100: 2.294804 learning rate: 10.000000\n",
      "Minibatch perplexity: 8.12\n",
      "Validation set perplexity: 9.04\n",
      "Average loss at step 200: 1.947544 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.39\n",
      "Validation set perplexity: 8.26\n",
      "Average loss at step 300: 1.845564 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.65\n",
      "Validation set perplexity: 7.76\n",
      "Average loss at step 400: 1.793018 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.73\n",
      "Validation set perplexity: 7.61\n",
      "Average loss at step 500: 1.810686 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.03\n",
      "Validation set perplexity: 7.54\n",
      "Average loss at step 600: 1.753716 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.93\n",
      "Validation set perplexity: 7.48\n",
      "Average loss at step 700: 1.735744 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.94\n",
      "Validation set perplexity: 7.19\n",
      "Average loss at step 800: 1.730202 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.75\n",
      "Validation set perplexity: 7.20\n",
      "Average loss at step 900: 1.739970 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.78\n",
      "Validation set perplexity: 7.29\n",
      "Average loss at step 1000: 1.670093 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.07\n",
      "================================================================================\n",
      "vr to or yearly one two from the own cannigins joided it it eighke his which thre\n",
      "hg termins homore two zero importor first tickenced conber in ol was carsa ments \n",
      "fpies madiod invember foemular withnih two distructions of plook v these is taugh\n",
      "ny m sprish symbolssoles whabfnaturni less s functionals american in the designed\n",
      "cmy theollow more time that interganecaused to primoviduassian coamazetels design\n",
      "================================================================================\n",
      "Validation set perplexity: 7.67\n",
      "Average loss at step 1100: 1.655045 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.00\n",
      "Validation set perplexity: 7.98\n",
      "Average loss at step 1200: 1.689142 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.77\n",
      "Validation set perplexity: 7.73\n",
      "Average loss at step 1300: 1.666455 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.26\n",
      "Validation set perplexity: 8.10\n",
      "Average loss at step 1400: 1.651536 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.44\n",
      "Validation set perplexity: 8.18\n",
      "Average loss at step 1500: 1.652237 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.97\n",
      "Validation set perplexity: 8.03\n",
      "Average loss at step 1600: 1.653537 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.05\n",
      "Validation set perplexity: 7.49\n",
      "Average loss at step 1700: 1.675908 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.19\n",
      "Validation set perplexity: 7.39\n",
      "Average loss at step 1800: 1.643648 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.15\n",
      "Validation set perplexity: 7.58\n",
      "Average loss at step 1900: 1.648063 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.85\n",
      "Validation set perplexity: 7.24\n",
      "Average loss at step 2000: 1.662531 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.95\n",
      "================================================================================\n",
      "hritical and rouns rated the vally hand wrote some the force usells dig sweft pla\n",
      "vpnn campies of broarobiagn diret afser f and also michael harning josbendard the\n",
      " homideal conterian proacher or botand mucces life part aboat bact instylre are c\n",
      "jnzis dyxone chartle the law cudygain tenpclre hen the war who or lockended by po\n",
      "bhnight a deedirectory in gold jabae of part directbistered to prevests of the pr\n",
      "================================================================================\n",
      "Validation set perplexity: 7.34\n",
      "Average loss at step 2100: 1.650277 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.65\n",
      "Validation set perplexity: 7.10\n",
      "Average loss at step 2200: 1.628062 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.95\n",
      "Validation set perplexity: 6.99\n",
      "Average loss at step 2300: 1.632113 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.04\n",
      "Validation set perplexity: 7.04\n",
      "Average loss at step 2400: 1.642932 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.87\n",
      "Validation set perplexity: 7.21\n",
      "Average loss at step 2500: 1.670891 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.87\n",
      "Validation set perplexity: 6.97\n",
      "Average loss at step 2600: 1.636022 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.30\n",
      "Validation set perplexity: 7.24\n",
      "Average loss at step 2700: 1.656880 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.40\n",
      "Validation set perplexity: 7.18\n",
      "Average loss at step 2800: 1.617383 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.96\n",
      "Validation set perplexity: 7.13\n",
      "Average loss at step 2900: 1.621163 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.24\n",
      "Validation set perplexity: 6.96\n",
      "Average loss at step 3000: 1.629138 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.45\n",
      "================================================================================\n",
      " known is an of it engli it his ming pat bished one two clamic zero zero as belio\n",
      "ehabito the continue from the receolom dman external joing two very on the clisa \n",
      "ux   sydnolynhalleming press of populating the has costoted mname arting newed th\n",
      "greatred on the used seven the buy to nere of exterversial drught french was subm\n",
      "nce epipenshinecause lawing webee ruil the is as ired a organization length playe\n",
      "================================================================================\n",
      "Validation set perplexity: 6.95\n",
      "Average loss at step 3100: 1.625598 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.82\n",
      "Validation set perplexity: 6.93\n",
      "Average loss at step 3200: 1.625998 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.90\n",
      "Validation set perplexity: 7.00\n",
      "Average loss at step 3300: 1.608816 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.83\n",
      "Validation set perplexity: 7.02\n",
      "Average loss at step 3400: 1.614180 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.89\n",
      "Validation set perplexity: 7.37\n",
      "Average loss at step 3500: 1.612674 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.00\n",
      "Validation set perplexity: 7.23\n",
      "Average loss at step 3600: 1.607690 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.19\n",
      "Validation set perplexity: 7.12\n",
      "Average loss at step 3700: 1.608650 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.94\n",
      "Validation set perplexity: 7.14\n",
      "Average loss at step 3800: 1.604318 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.28\n",
      "Validation set perplexity: 7.32\n",
      "Average loss at step 3900: 1.598782 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.06\n",
      "Validation set perplexity: 7.12\n",
      "Average loss at step 4000: 1.604309 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.61\n",
      "================================================================================\n",
      "tformating order       yely secuimulty paged allengthm to amp enterbit with succe\n",
      "jpas ya of later horizen the called hemaldratical week between the write island t\n",
      "cy these royle he aparaine nine the cared the country mona naviyah sed that opera\n",
      "xmle five two may containderandfallo that the workill builted arts the firess tri\n",
      "ope ubeed their prain in also israily of bepadike reneers august two two expister\n",
      "================================================================================\n",
      "Validation set perplexity: 7.23\n",
      "Average loss at step 4100: 1.604055 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.11\n",
      "Validation set perplexity: 7.39\n",
      "Average loss at step 4200: 1.587587 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.15\n",
      "Validation set perplexity: 7.40\n",
      "Average loss at step 4300: 1.582474 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.01\n",
      "Validation set perplexity: 7.25\n",
      "Average loss at step 4400: 1.610317 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.69\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation set perplexity: 7.23\n",
      "Average loss at step 4500: 1.612617 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.77\n",
      "Validation set perplexity: 7.47\n",
      "Average loss at step 4600: 1.619330 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.73\n",
      "Validation set perplexity: 7.27\n",
      "Average loss at step 4700: 1.584229 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.88\n",
      "Validation set perplexity: 7.37\n",
      "Average loss at step 4800: 1.572571 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.76\n",
      "Validation set perplexity: 7.40\n",
      "Average loss at step 4900: 1.590912 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.91\n",
      "Validation set perplexity: 7.10\n",
      "Average loss at step 5000: 1.617723 learning rate: 1.000000\n",
      "Minibatch perplexity: 6.10\n",
      "================================================================================\n",
      "ure stage murch free regardy a woman tc millet neturn oin popular the days to the\n",
      "cw year introduccurs hesibii first whave end sociaturing two zero seven three aut\n",
      "ecessing case the seases this to is gables usuw acdaposition was cycles due his s\n",
      "lces and of the four coryre in spanef by coulvy batterd of coster common must com\n",
      "pfers the gernment of light at whajor sncle from of leks weber a salliance outsin\n",
      "================================================================================\n",
      "Validation set perplexity: 7.33\n",
      "Average loss at step 5100: 1.617452 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.69\n",
      "Validation set perplexity: 7.15\n",
      "Average loss at step 5200: 1.615472 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.91\n",
      "Validation set perplexity: 6.96\n",
      "Average loss at step 5300: 1.582262 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.71\n",
      "Validation set perplexity: 6.95\n",
      "Average loss at step 5400: 1.580441 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.86\n",
      "Validation set perplexity: 6.98\n",
      "Average loss at step 5500: 1.566747 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.06\n",
      "Validation set perplexity: 6.99\n",
      "Average loss at step 5600: 1.583948 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.71\n",
      "Validation set perplexity: 6.97\n",
      "Average loss at step 5700: 1.550708 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.60\n",
      "Validation set perplexity: 6.97\n",
      "Average loss at step 5800: 1.548957 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.68\n",
      "Validation set perplexity: 6.92\n",
      "Average loss at step 5900: 1.578632 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.59\n",
      "Validation set perplexity: 7.01\n",
      "Average loss at step 6000: 1.544305 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.47\n",
      "================================================================================\n",
      "dlengan the unitivery in the marat and mustreliship is scalbute mo a bettle deast\n",
      "zgata is on five six zero one one one seven mv speed force emerascille yemasian g\n",
      "aut jewissurqb nonwords leads mytinder an atatual territoizated jr litch his conc\n",
      "jdevil articless from the longiant with that controlas need to annet of the banni\n",
      "tx of outle d indirective includes it janities some of to bere public of their wa\n",
      "================================================================================\n",
      "Validation set perplexity: 6.99\n",
      "Average loss at step 6100: 1.558997 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.78\n",
      "Validation set perplexity: 7.03\n",
      "Average loss at step 6200: 1.591446 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.82\n",
      "Validation set perplexity: 6.94\n",
      "Average loss at step 6300: 1.592537 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.95\n",
      "Validation set perplexity: 6.92\n",
      "Average loss at step 6400: 1.621768 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.95\n",
      "Validation set perplexity: 6.91\n",
      "Average loss at step 6500: 1.619973 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.92\n",
      "Validation set perplexity: 6.92\n",
      "Average loss at step 6600: 1.584102 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.40\n",
      "Validation set perplexity: 6.89\n",
      "Average loss at step 6700: 1.573061 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.86\n",
      "Validation set perplexity: 6.88\n",
      "Average loss at step 6800: 1.559578 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.44\n",
      "Validation set perplexity: 6.87\n",
      "Average loss at step 6900: 1.547742 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.61\n",
      "Validation set perplexity: 6.82\n",
      "Average loss at step 7000: 1.556669 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.53\n",
      "================================================================================\n",
      "aped all calso major one nine three here of theated bunds wars including the long\n",
      "ain one eight zero zero zero zero zero zero million heweavskatombytragest burnale\n",
      "cness halburieged stative was be jupite rary anti of side poligior at the strirfe\n",
      "qr the zion will com article one v refere constances when the permismslao eventor\n",
      "qes would becesspute there japana by have in athous and services with change to t\n",
      "================================================================================\n",
      "Validation set perplexity: 6.79\n"
     ]
    }
   ],
   "source": [
    "import collections\n",
    "num_steps = 7001\n",
    "summary_frequency = 100\n",
    "\n",
    "valid_batches = BatchGenerator(valid_text, 1, 2)\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print('Initialized')\n",
    "  mean_loss = 0\n",
    "  for step in range(num_steps):\n",
    "    batches = train_batches.next()\n",
    "    feed_dict = dict()\n",
    "    for i in range(num_unrollings + 1):\n",
    "      feed_dict[train_data[i]] = batches[i]\n",
    "    _, l, predictions, lr = session.run(\n",
    "      [optimizer, loss, train_prediction, learning_rate], feed_dict=feed_dict)\n",
    "    mean_loss += l\n",
    "    if step % summary_frequency == 0:\n",
    "      if step > 0:\n",
    "        mean_loss = mean_loss / summary_frequency\n",
    "      # The mean loss is an estimate of the loss over the last few batches.\n",
    "      print(\n",
    "        'Average loss at step %d: %f learning rate: %f' % (step, mean_loss, lr))\n",
    "      mean_loss = 0\n",
    "      labels = np.concatenate(list(batches)[2:])\n",
    "      print('Minibatch perplexity: %.2f' % float(\n",
    "        np.exp(logprob(predictions, labels))))\n",
    "      if step % (summary_frequency * 10) == 0:\n",
    "        # Generate some samples.\n",
    "        print('=' * 80)\n",
    "        for _ in range(5):\n",
    "          #feed = sample(random_distribution())\n",
    "          feed = collections.deque(maxlen=2)\n",
    "          for _ in range(2):  \n",
    "            feed.append(random_distribution())\n",
    "          #sentence = characters(feed)[0]\n",
    "          sentence = characters(feed[0])[0] + characters(feed[1])[0]\n",
    "          #print(sentence)\n",
    "          #print(feed)\n",
    "          reset_sample_state.run()\n",
    "          for _ in range(79):\n",
    "            prediction = sample_prediction.eval({\n",
    "                    sample_input[0]: feed[0],\n",
    "                    sample_input[1]: feed[1]\n",
    "                })\n",
    "            #feed = sample(prediction)\n",
    "            feed.append(sample(prediction))\n",
    "            #sentence += characters(feed)[0]\n",
    "            sentence += characters(feed[1])[0]\n",
    "          print(sentence)\n",
    "        print('=' * 80)\n",
    "      # Measure validation set perplexity.\n",
    "      reset_sample_state.run()\n",
    "      valid_logprob = 0\n",
    "      for _ in range(valid_size):\n",
    "        b = valid_batches.next()\n",
    "        predictions = sample_prediction.eval({\n",
    "                    sample_input[0]: b[0],\n",
    "                    sample_input[1]: b[1]\n",
    "            })\n",
    "        valid_logprob = valid_logprob + logprob(predictions, b[2])\n",
    "      print('Validation set perplexity: %.2f' % float(np.exp(\n",
    "        valid_logprob / valid_size)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "It works, but the validation perplexity is a bit worse."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's try dropout, on the inputs/outputs only, not between cells."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "embedding_size = 128 # Dimension of the embedding vector.\n",
    "num_nodes = 64\n",
    "keep_prob_train = 1.0\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "  \n",
    "  # Parameters:\n",
    "  vocabulary_embeddings = tf.Variable(\n",
    "    tf.random_uniform([vocabulary_size * vocabulary_size, embedding_size], -1.0, 1.0))\n",
    "  # Input gate: input, previous output, and bias.\n",
    "  ix = tf.Variable(tf.truncated_normal([embedding_size, num_nodes], -0.1, 0.1))\n",
    "  im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  ib = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Forget gate: input, previous output, and bias.\n",
    "  fx = tf.Variable(tf.truncated_normal([embedding_size, num_nodes], -0.1, 0.1))\n",
    "  fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  fb = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Memory cell: input, state and bias.                             \n",
    "  cx = tf.Variable(tf.truncated_normal([embedding_size, num_nodes], -0.1, 0.1))\n",
    "  cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  cb = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Output gate: input, previous output, and bias.\n",
    "  ox = tf.Variable(tf.truncated_normal([embedding_size, num_nodes], -0.1, 0.1))\n",
    "  om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n",
    "  ob = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  # Variables saving state across unrollings.\n",
    "  saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n",
    "  saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n",
    "  # Classifier weights and biases.\n",
    "  w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))\n",
    "  b = tf.Variable(tf.zeros([vocabulary_size]))\n",
    "  \n",
    "  # Definition of the cell computation.\n",
    "  def lstm_cell(i, o, state):\n",
    "    \"\"\"Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf\n",
    "    Note that in this formulation, we omit the various connections between the\n",
    "    previous state and the gates.\"\"\"\n",
    "    input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)\n",
    "    forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)\n",
    "    update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb\n",
    "    state = forget_gate * state + input_gate * tf.tanh(update)\n",
    "    output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)\n",
    "    return output_gate * tf.tanh(state), state\n",
    "  \n",
    "  # Input data.\n",
    "  train_data = list()\n",
    "  for _ in range(num_unrollings + 1):\n",
    "    train_data.append(\n",
    "      tf.placeholder(tf.float32, shape=[batch_size,vocabulary_size]))\n",
    "  train_chars = train_data[:num_unrollings]\n",
    "  train_inputs = zip(train_chars[:-1], train_chars[1:])\n",
    "  train_labels = train_data[2:]  # labels are inputs shifted by one time step.\n",
    "\n",
    "  # Unrolled LSTM loop.\n",
    "  outputs = list()\n",
    "  output = saved_output\n",
    "  state = saved_state\n",
    "  for i in train_inputs:\n",
    "    bigram_index = tf.argmax(i[0], dimension=1) + vocabulary_size * tf.argmax(i[1], dimension=1)\n",
    "    i_embed = tf.nn.embedding_lookup(vocabulary_embeddings, bigram_index)\n",
    "    drop_i = tf.nn.dropout(i_embed, keep_prob_train)\n",
    "    output, state = lstm_cell(drop_i, output, state)\n",
    "    outputs.append(output)\n",
    "\n",
    "  # State saving across unrollings.\n",
    "  with tf.control_dependencies([saved_output.assign(output),\n",
    "                                saved_state.assign(state)]):\n",
    "    # Classifier.\n",
    "    logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)\n",
    "    drop_logits = tf.nn.dropout(logits, keep_prob_train)\n",
    "    loss = tf.reduce_mean(\n",
    "      tf.nn.softmax_cross_entropy_with_logits(\n",
    "        logits, tf.concat(0, train_labels)))\n",
    "\n",
    "  # Optimizer.\n",
    "  global_step = tf.Variable(0)\n",
    "  learning_rate = tf.train.exponential_decay(\n",
    "    10.0, global_step, 15000, 0.1, staircase=True)\n",
    "  optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
    "  gradients, v = zip(*optimizer.compute_gradients(loss))\n",
    "  gradients, _ = tf.clip_by_global_norm(gradients, 1.25)\n",
    "  optimizer = optimizer.apply_gradients(\n",
    "    zip(gradients, v), global_step=global_step)\n",
    "\n",
    "  # Predictions.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  \n",
    "  # Sampling and validation eval: batch 1, no unrolling.\n",
    "  #sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])\n",
    "  keep_prob_sample = tf.placeholder(tf.float32)\n",
    "  sample_input = list()\n",
    "  for _ in range(2):\n",
    "    sample_input.append(tf.placeholder(tf.float32, shape=[1, vocabulary_size]))\n",
    "  samp_in_index = tf.argmax(sample_input[0], dimension=1) + vocabulary_size * tf.argmax(sample_input[1], dimension=1)\n",
    "  sample_input_embedding = tf.nn.embedding_lookup(vocabulary_embeddings, samp_in_index)\n",
    "  saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))\n",
    "  reset_sample_state = tf.group(\n",
    "    saved_sample_output.assign(tf.zeros([1, num_nodes])),\n",
    "    saved_sample_state.assign(tf.zeros([1, num_nodes])))\n",
    "  sample_output, sample_state = lstm_cell(\n",
    "    sample_input_embedding, saved_sample_output, saved_sample_state)\n",
    "  with tf.control_dependencies([saved_sample_output.assign(sample_output),\n",
    "                                saved_sample_state.assign(sample_state)]):\n",
    "    sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, w, b))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Average loss at step 0: 3.312982 learning rate: 10.000000\n",
      "Minibatch perplexity: 27.47\n",
      "================================================================================\n",
      "ua jksnei   ab fwoiwvy yxesjn o a euk xey t   rwroep tx v wqjczgtje khcef hbq uay\n",
      "m ep taksr    e  den eoptuj  t   yr   ftwoes admernkqj es  tftq eb uvk dcsc onesg\n",
      "xi gstmfwtbruhzbbpy c o waki h ka  avmwued egeieokwt ht loyfadoo v v ffnd p no os\n",
      "uyvq q gebyjdu o   a   tnpfukn nt h r a a kyy  lstedo yo vle imc dvfeptorgvd  ng \n",
      "pbkbt  nm mlutwr m z kkeovxsp ha msacki ptbnnlrsse a thrnkq nhvoaz jyls nr  wwtdt\n",
      "================================================================================\n",
      "Validation set perplexity: 20.29\n",
      "Average loss at step 100: 2.272221 learning rate: 10.000000\n",
      "Minibatch perplexity: 7.86\n",
      "Validation set perplexity: 8.97\n",
      "Average loss at step 200: 1.965282 learning rate: 10.000000\n",
      "Minibatch perplexity: 7.21\n",
      "Validation set perplexity: 8.20\n",
      "Average loss at step 300: 1.880914 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.20\n",
      "Validation set perplexity: 7.52\n",
      "Average loss at step 400: 1.823542 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.85\n",
      "Validation set perplexity: 7.53\n",
      "Average loss at step 500: 1.764858 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.35\n",
      "Validation set perplexity: 7.36\n",
      "Average loss at step 600: 1.758226 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.16\n",
      "Validation set perplexity: 7.71\n",
      "Average loss at step 700: 1.740162 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.38\n",
      "Validation set perplexity: 7.41\n",
      "Average loss at step 800: 1.725485 learning rate: 10.000000\n",
      "Minibatch perplexity: 6.34\n",
      "Validation set perplexity: 7.21\n",
      "Average loss at step 900: 1.713611 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.07\n",
      "Validation set perplexity: 7.10\n",
      "Average loss at step 1000: 1.687168 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.26\n",
      "================================================================================\n",
      "jtance but sen esix on sienzos as the appoin pafter in and de shwty and controbio\n",
      "hluo coporation indetwo four legan respecialish a self disaded innother of landed\n",
      "system wille the to the internal by naturances film i neace port confuntn but thr\n",
      "ggrees intenry the species mors change in only are doma memakildout pensignal  re\n",
      "vs encinstreal is franguistonacy word becausion nor he against subston lianguages\n",
      "================================================================================\n",
      "Validation set perplexity: 7.38\n",
      "Average loss at step 1100: 1.691909 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.56\n",
      "Validation set perplexity: 7.02\n",
      "Average loss at step 1200: 1.687979 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.84\n",
      "Validation set perplexity: 6.99\n",
      "Average loss at step 1300: 1.691881 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.65\n",
      "Validation set perplexity: 7.21\n",
      "Average loss at step 1400: 1.662722 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.42\n",
      "Validation set perplexity: 7.21\n",
      "Average loss at step 1500: 1.648547 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.43\n",
      "Validation set perplexity: 7.46\n",
      "Average loss at step 1600: 1.640710 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.45\n",
      "Validation set perplexity: 7.36\n",
      "Average loss at step 1700: 1.648203 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.94\n",
      "Validation set perplexity: 7.05\n",
      "Average loss at step 1800: 1.669454 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.19\n",
      "Validation set perplexity: 7.08\n",
      "Average loss at step 1900: 1.648866 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.08\n",
      "Validation set perplexity: 6.93\n",
      "Average loss at step 2000: 1.662779 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.39\n",
      "================================================================================\n",
      "hward glyxbn which place were encer of the known facule kipsing the very arroott \n",
      "ojecn firfcs a between perpainspirthirst new york conventer gotlogy the rich and \n",
      "zasay gip spond it bit things are crizeven this the mimurgan the the been on equi\n",
      "cjamp basegorent with promenty translater mixyr to younts of addithonal text part\n",
      "bn lew juim a during complette so skateb sherially no of modern apband person it \n",
      "================================================================================\n",
      "Validation set perplexity: 7.08\n",
      "Average loss at step 2100: 1.644568 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.55\n",
      "Validation set perplexity: 6.88\n",
      "Average loss at step 2200: 1.668902 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.34\n",
      "Validation set perplexity: 6.65\n",
      "Average loss at step 2300: 1.642988 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.42\n",
      "Validation set perplexity: 6.91\n",
      "Average loss at step 2400: 1.644059 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.90\n",
      "Validation set perplexity: 7.01\n",
      "Average loss at step 2500: 1.654602 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.55\n",
      "Validation set perplexity: 6.95\n",
      "Average loss at step 2600: 1.641264 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.20\n",
      "Validation set perplexity: 7.24\n",
      "Average loss at step 2700: 1.626378 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.32\n",
      "Validation set perplexity: 7.17\n",
      "Average loss at step 2800: 1.623586 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.95\n",
      "Validation set perplexity: 7.18\n",
      "Average loss at step 2900: 1.619106 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.86\n",
      "Validation set perplexity: 7.18\n",
      "Average loss at step 3000: 1.642543 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.17\n",
      "================================================================================\n",
      "ey and indlon americe but gdp a the raters would halso film teractional posses pr\n",
      "ehaving shemakish frade nodeve be at is the religions through replements zero mor\n",
      "ld more co performal are invirtunes ner for exister less theory wisle safediller \n",
      "pv or for mand me or jat of sound power and the rypf the one enguage elected by l\n",
      "dbually inuental embers with suitaliamentations unning humonor literalier scotle \n",
      "================================================================================\n",
      "Validation set perplexity: 6.88\n",
      "Average loss at step 3100: 1.606926 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.41\n",
      "Validation set perplexity: 7.16\n",
      "Average loss at step 3200: 1.627913 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.48\n",
      "Validation set perplexity: 7.16\n",
      "Average loss at step 3300: 1.624794 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.36\n",
      "Validation set perplexity: 7.37\n",
      "Average loss at step 3400: 1.620182 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.65\n",
      "Validation set perplexity: 7.11\n",
      "Average loss at step 3500: 1.609055 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.22\n",
      "Validation set perplexity: 7.09\n",
      "Average loss at step 3600: 1.628013 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.10\n",
      "Validation set perplexity: 7.35\n",
      "Average loss at step 3700: 1.596388 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.72\n",
      "Validation set perplexity: 7.40\n",
      "Average loss at step 3800: 1.597623 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.45\n",
      "Validation set perplexity: 7.22\n",
      "Average loss at step 3900: 1.587828 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.33\n",
      "Validation set perplexity: 6.96\n",
      "Average loss at step 4000: 1.604747 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.42\n",
      "================================================================================\n",
      "xcion of steed foundanur minard drade me system scen less with unife receive mell\n",
      "ive of grivary smalled is is engreasour projensroman matheries norman particularl\n",
      "were husinq is a rone eight dane malta showestroms nationally some for havall nha\n",
      "journ and righta in the coprist the grainese list fown parties is agt the nighh m\n",
      "nes have four the known for the one malund be spirl or rost also space malbat cho\n",
      "================================================================================\n",
      "Validation set perplexity: 6.95\n",
      "Average loss at step 4100: 1.622482 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.01\n",
      "Validation set perplexity: 7.31\n",
      "Average loss at step 4200: 1.597596 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.18\n",
      "Validation set perplexity: 7.42\n",
      "Average loss at step 4300: 1.567093 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.48\n",
      "Validation set perplexity: 6.93\n",
      "Average loss at step 4400: 1.596256 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.96\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation set perplexity: 6.79\n",
      "Average loss at step 4500: 1.581081 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.79\n",
      "Validation set perplexity: 6.84\n",
      "Average loss at step 4600: 1.584997 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.13\n",
      "Validation set perplexity: 6.83\n",
      "Average loss at step 4700: 1.597184 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.73\n",
      "Validation set perplexity: 7.24\n",
      "Average loss at step 4800: 1.596506 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.33\n",
      "Validation set perplexity: 7.14\n",
      "Average loss at step 4900: 1.613372 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.76\n",
      "Validation set perplexity: 7.21\n",
      "Average loss at step 5000: 1.626127 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.61\n",
      "================================================================================\n",
      "gba manity of make ruley wroccry no cusinem of interon four ceneeracill one seven\n",
      "rrory oln five tez becallon of positivity ma cant is ivarsharday berty homedical \n",
      "slamatie vanded one the was cribed flories a fall aced evices chycle was for mand\n",
      "rd where stars of mitment lack true botto was its the tates the oneote userve cup\n",
      "drat into the right maten if press a strict furthinenes are company records a cat\n",
      "================================================================================\n",
      "Validation set perplexity: 6.83\n",
      "Average loss at step 5100: 1.596921 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.95\n",
      "Validation set perplexity: 6.56\n",
      "Average loss at step 5200: 1.614671 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.47\n",
      "Validation set perplexity: 6.75\n",
      "Average loss at step 5300: 1.584918 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.38\n",
      "Validation set perplexity: 6.59\n",
      "Average loss at step 5400: 1.581693 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.49\n",
      "Validation set perplexity: 6.62\n",
      "Average loss at step 5500: 1.583900 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.20\n",
      "Validation set perplexity: 7.06\n",
      "Average loss at step 5600: 1.567113 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.67\n",
      "Validation set perplexity: 7.04\n",
      "Average loss at step 5700: 1.602579 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.05\n",
      "Validation set perplexity: 6.84\n",
      "Average loss at step 5800: 1.590295 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.45\n",
      "Validation set perplexity: 6.79\n",
      "Average loss at step 5900: 1.595461 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.50\n",
      "Validation set perplexity: 7.07\n",
      "Average loss at step 6000: 1.549138 learning rate: 10.000000\n",
      "Minibatch perplexity: 3.94\n",
      "================================================================================\n",
      "ol carrinage tate a five jured in urredution one five five linst the first crifie\n",
      "ms histon its see spree is bettapple javan undop quantily ilp one gvsted are aror\n",
      "zooly and an then civilling of had this gage also stose genucancy norther non one\n",
      "nny the giam computwo uses uniting dome five therlan or one even zero five three \n",
      "xual pr events emphil played activing fetcounviane one zero two mekber out the be\n",
      "================================================================================\n",
      "Validation set perplexity: 7.38\n",
      "Average loss at step 6100: 1.612336 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.65\n",
      "Validation set perplexity: 7.24\n",
      "Average loss at step 6200: 1.609007 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.27\n",
      "Validation set perplexity: 7.55\n",
      "Average loss at step 6300: 1.596068 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.23\n",
      "Validation set perplexity: 7.28\n",
      "Average loss at step 6400: 1.609819 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.84\n",
      "Validation set perplexity: 7.59\n",
      "Average loss at step 6500: 1.602706 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.46\n",
      "Validation set perplexity: 6.91\n",
      "Average loss at step 6600: 1.595589 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.92\n",
      "Validation set perplexity: 6.91\n",
      "Average loss at step 6700: 1.584979 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.65\n",
      "Validation set perplexity: 6.92\n",
      "Average loss at step 6800: 1.601643 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.29\n",
      "Validation set perplexity: 6.90\n",
      "Average loss at step 6900: 1.626150 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.89\n",
      "Validation set perplexity: 7.19\n",
      "Average loss at step 7000: 1.612793 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.06\n",
      "================================================================================\n",
      "octics and conductua i pihements dan in walk hundly from the envists trimmedice e\n",
      "qsame of coin an in with yoz  ls station in which one eight nine two eight nons s\n",
      "pfie tds in or just and deteries linuctive mathemati dimanyam lighter the excentr\n",
      " elected paper and east charible streates force putic lasers hijoshed to require \n",
      "nry in the community alpapave belot two martional stolefil into pits discendue th\n",
      "================================================================================\n",
      "Validation set perplexity: 7.38\n",
      "Average loss at step 7100: 1.595608 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.84\n",
      "Validation set perplexity: 7.24\n",
      "Average loss at step 7200: 1.585799 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.06\n",
      "Validation set perplexity: 7.38\n",
      "Average loss at step 7300: 1.590463 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.42\n",
      "Validation set perplexity: 6.95\n",
      "Average loss at step 7400: 1.603288 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.04\n",
      "Validation set perplexity: 7.01\n",
      "Average loss at step 7500: 1.600733 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.74\n",
      "Validation set perplexity: 7.00\n",
      "Average loss at step 7600: 1.579837 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.80\n",
      "Validation set perplexity: 7.05\n",
      "Average loss at step 7700: 1.600444 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.52\n",
      "Validation set perplexity: 7.26\n",
      "Average loss at step 7800: 1.579847 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.48\n",
      "Validation set perplexity: 7.23\n",
      "Average loss at step 7900: 1.589854 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.23\n",
      "Validation set perplexity: 7.16\n",
      "Average loss at step 8000: 1.588062 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.92\n",
      "================================================================================\n",
      "da with was in united in the tated eventhine pright complith try played lands exa\n",
      "mqntive that sleadka v it realed of a mainland layears and controped elected thro\n",
      "hxdt the balt whitears only whitish before have ranged paranken chanlack monity i\n",
      "fp docury and may annecter biping a sevent the last studies help ellexing on afli\n",
      "qz malack source become is accivilon cist critical no number theretten originatio\n",
      "================================================================================\n",
      "Validation set perplexity: 7.42\n",
      "Average loss at step 8100: 1.586327 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.79\n",
      "Validation set perplexity: 7.29\n",
      "Average loss at step 8200: 1.586580 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.12\n",
      "Validation set perplexity: 6.98\n",
      "Average loss at step 8300: 1.576413 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.45\n",
      "Validation set perplexity: 6.97\n",
      "Average loss at step 8400: 1.585819 learning rate: 10.000000\n",
      "Minibatch perplexity: 3.80\n",
      "Validation set perplexity: 7.17\n",
      "Average loss at step 8500: 1.609451 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.14\n",
      "Validation set perplexity: 6.76\n",
      "Average loss at step 8600: 1.603979 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.58\n",
      "Validation set perplexity: 6.81\n",
      "Average loss at step 8700: 1.581825 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.63\n",
      "Validation set perplexity: 6.77\n",
      "Average loss at step 8800: 1.619011 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.01\n",
      "Validation set perplexity: 7.06\n",
      "Average loss at step 8900: 1.618812 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.05\n",
      "Validation set perplexity: 6.87\n",
      "Average loss at step 9000: 1.584490 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.29\n",
      "================================================================================\n",
      "qwo iii its vill of jackner two zero zero three eight one nine seven fiftdrinal a\n",
      "gdon instrumented w and in the prises human precililling eight nine one five seve\n",
      "zled a placed in eviven commoncted delvy wither a sophy homes the individed of an\n",
      "qlar dess had are sloagemonist plusifies addresse lighters in faire prerime remai\n",
      "pqs a supceives a followed to culture of at it is an aplist unsiber that relexlm \n",
      "================================================================================\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation set perplexity: 7.00\n",
      "Average loss at step 9100: 1.583810 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.74\n",
      "Validation set perplexity: 6.99\n",
      "Average loss at step 9200: 1.570461 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.88\n",
      "Validation set perplexity: 6.89\n",
      "Average loss at step 9300: 1.617379 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.10\n",
      "Validation set perplexity: 7.36\n",
      "Average loss at step 9400: 1.598170 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.73\n",
      "Validation set perplexity: 6.98\n",
      "Average loss at step 9500: 1.577919 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.97\n",
      "Validation set perplexity: 7.10\n",
      "Average loss at step 9600: 1.571378 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.09\n",
      "Validation set perplexity: 7.05\n",
      "Average loss at step 9700: 1.574191 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.62\n",
      "Validation set perplexity: 6.93\n",
      "Average loss at step 9800: 1.584836 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.96\n",
      "Validation set perplexity: 7.09\n",
      "Average loss at step 9900: 1.567870 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.82\n",
      "Validation set perplexity: 6.96\n",
      "Average loss at step 10000: 1.562707 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.21\n",
      "================================================================================\n",
      "obles gao where again algember the fer nuble plantapogenes up current unised his \n",
      "al he dichon are ney assissed yore v compilost arabe the camp external women nicd\n",
      "hk of refers became bode to first officiated b for santakeyr meeting of sentrable\n",
      "ux had the goore a summer escult handel b one two chinese of molement cds charles\n",
      "jyteting catholically writent the reney weir contrenmy used external obfoage arac\n",
      "================================================================================\n",
      "Validation set perplexity: 7.45\n",
      "Average loss at step 10100: 1.593186 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.86\n",
      "Validation set perplexity: 7.10\n",
      "Average loss at step 10200: 1.594843 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.30\n",
      "Validation set perplexity: 7.15\n",
      "Average loss at step 10300: 1.581624 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.53\n",
      "Validation set perplexity: 7.64\n",
      "Average loss at step 10400: 1.575502 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.90\n",
      "Validation set perplexity: 7.49\n",
      "Average loss at step 10500: 1.568392 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.73\n",
      "Validation set perplexity: 7.32\n",
      "Average loss at step 10600: 1.550164 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.47\n",
      "Validation set perplexity: 7.26\n",
      "Average loss at step 10700: 1.565363 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.83\n",
      "Validation set perplexity: 7.36\n",
      "Average loss at step 10800: 1.577329 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.51\n",
      "Validation set perplexity: 7.31\n",
      "Average loss at step 10900: 1.587192 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.75\n",
      "Validation set perplexity: 7.28\n",
      "Average loss at step 11000: 1.576626 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.33\n",
      "================================================================================\n",
      "bjects in of john which arouch deviantion capestation of bill first on s it blang\n",
      "ture to the student spajor than fent on has sincy a son rable defendent symboldli\n",
      "hvlate upid et barth from the system less of elponteration marriawi peason mronuf\n",
      "im of danube in the succe deanuit most lebseven reald and his presentle ighe ce w\n",
      "xfirstial puruclaw emphanneric reachabour a preves in safe farkwarrent were or pr\n",
      "================================================================================\n",
      "Validation set perplexity: 6.81\n",
      "Average loss at step 11100: 1.588533 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.34\n",
      "Validation set perplexity: 6.71\n",
      "Average loss at step 11200: 1.563745 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.76\n",
      "Validation set perplexity: 6.80\n",
      "Average loss at step 11300: 1.567089 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.71\n",
      "Validation set perplexity: 6.67\n",
      "Average loss at step 11400: 1.576980 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.33\n",
      "Validation set perplexity: 7.12\n",
      "Average loss at step 11500: 1.584354 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.42\n",
      "Validation set perplexity: 6.92\n",
      "Average loss at step 11600: 1.573431 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.72\n",
      "Validation set perplexity: 6.84\n",
      "Average loss at step 11700: 1.570600 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.87\n",
      "Validation set perplexity: 7.17\n",
      "Average loss at step 11800: 1.589635 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.28\n",
      "Validation set perplexity: 7.32\n",
      "Average loss at step 11900: 1.569730 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.84\n",
      "Validation set perplexity: 7.21\n",
      "Average loss at step 12000: 1.587864 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.83\n",
      "================================================================================\n",
      "r may was recordinga graphy which actober regimily portuk a moth of mile in the c\n",
      "pqbitory s upcps a banry let one fourth attempt was east which to main one seven \n",
      "jpage cated january militas rule x dimend rebelleaseve the treaehonant maure sear\n",
      "lroon constance many calin sec symborbacks is at bill victing uki rominogible emp\n",
      "qxo the which helping with one one nine eussine out and from cemories to safe be \n",
      "================================================================================\n",
      "Validation set perplexity: 6.97\n",
      "Average loss at step 12100: 1.581989 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.55\n",
      "Validation set perplexity: 7.31\n",
      "Average loss at step 12200: 1.582599 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.39\n",
      "Validation set perplexity: 7.34\n",
      "Average loss at step 12300: 1.577003 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.85\n",
      "Validation set perplexity: 7.26\n",
      "Average loss at step 12400: 1.565756 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.47\n",
      "Validation set perplexity: 7.07\n",
      "Average loss at step 12500: 1.566605 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.52\n",
      "Validation set perplexity: 7.25\n",
      "Average loss at step 12600: 1.588731 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.61\n",
      "Validation set perplexity: 7.32\n",
      "Average loss at step 12700: 1.565792 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.51\n",
      "Validation set perplexity: 7.33\n",
      "Average loss at step 12800: 1.563516 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.89\n",
      "Validation set perplexity: 7.61\n",
      "Average loss at step 12900: 1.576647 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.25\n",
      "Validation set perplexity: 7.46\n",
      "Average loss at step 13000: 1.584965 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.54\n",
      "================================================================================\n",
      "khausa of handle and advbiole at different one the means additionally public wave\n",
      "knoping wash signn and relations for children arlyss existanificially in the publ\n",
      "havized in nehis that encourality all linkeurope asporte exercillion involving sh\n",
      "ogrocnaed of pupse three four the can chieved introsses to is carogic portution t\n",
      "fkc kers released to havidic ancient publisk one three six one muniverapollo leti\n",
      "================================================================================\n",
      "Validation set perplexity: 7.34\n",
      "Average loss at step 13100: 1.600982 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.01\n",
      "Validation set perplexity: 7.48\n",
      "Average loss at step 13200: 1.575546 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.75\n",
      "Validation set perplexity: 7.64\n",
      "Average loss at step 13300: 1.585092 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.11\n",
      "Validation set perplexity: 7.78\n",
      "Average loss at step 13400: 1.621015 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.41\n",
      "Validation set perplexity: 7.97\n",
      "Average loss at step 13500: 1.626365 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.79\n",
      "Validation set perplexity: 7.45\n",
      "Average loss at step 13600: 1.599021 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.36\n",
      "Validation set perplexity: 7.53\n",
      "Average loss at step 13700: 1.598067 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.59\n",
      "Validation set perplexity: 7.44\n",
      "Average loss at step 13800: 1.585107 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.88\n",
      "Validation set perplexity: 7.48\n",
      "Average loss at step 13900: 1.552006 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.05\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation set perplexity: 7.33\n",
      "Average loss at step 14000: 1.592433 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.16\n",
      "================================================================================\n",
      "cludes in the actore signed by nez is american outrotes being employ modern do or\n",
      "rlaba peaced givers they of the atoke lassed there and a techniqar united commin \n",
      "ppears one zero b tpn two five richo b suggester is gonuman one one eight of ethn\n",
      "dmunds borem in khaund shame laba seven four s fund of constil gaminal perill giu\n",
      "zonog communists in one eight nine th century ography dispract of a set officies \n",
      "================================================================================\n",
      "Validation set perplexity: 7.98\n",
      "Average loss at step 14100: 1.588329 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.33\n",
      "Validation set perplexity: 7.51\n",
      "Average loss at step 14200: 1.577372 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.20\n",
      "Validation set perplexity: 7.34\n",
      "Average loss at step 14300: 1.588705 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.69\n",
      "Validation set perplexity: 7.32\n",
      "Average loss at step 14400: 1.586844 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.31\n",
      "Validation set perplexity: 7.26\n",
      "Average loss at step 14500: 1.582774 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.39\n",
      "Validation set perplexity: 7.54\n",
      "Average loss at step 14600: 1.572321 learning rate: 10.000000\n",
      "Minibatch perplexity: 5.23\n",
      "Validation set perplexity: 6.93\n",
      "Average loss at step 14700: 1.578148 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.67\n",
      "Validation set perplexity: 6.84\n",
      "Average loss at step 14800: 1.596074 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.99\n",
      "Validation set perplexity: 7.02\n",
      "Average loss at step 14900: 1.601744 learning rate: 10.000000\n",
      "Minibatch perplexity: 4.97\n",
      "Validation set perplexity: 7.31\n",
      "Average loss at step 15000: 1.593445 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.21\n",
      "================================================================================\n",
      "dp to anet including the sposal d one kacing leadinasted germl secides under band\n",
      "y fresson its becoma smoked intender this nooknhat the judes concoljanes an india\n",
      "nman or martibsagas the prirc prescructional ontable the towever willer a pruscos\n",
      "ioning in the best the counly the fyeadowaring of the left rice seji flowed in se\n",
      "ay enorwees and cultureanni whiler they and in the folfive one five atblays intee\n",
      "================================================================================\n",
      "Validation set perplexity: 7.13\n",
      "Average loss at step 15100: 1.558383 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.11\n",
      "Validation set perplexity: 6.96\n",
      "Average loss at step 15200: 1.550127 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.20\n",
      "Validation set perplexity: 6.83\n",
      "Average loss at step 15300: 1.570603 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.89\n",
      "Validation set perplexity: 6.87\n",
      "Average loss at step 15400: 1.560847 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.37\n",
      "Validation set perplexity: 6.80\n",
      "Average loss at step 15500: 1.578417 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.65\n",
      "Validation set perplexity: 6.81\n",
      "Average loss at step 15600: 1.576686 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.62\n",
      "Validation set perplexity: 6.75\n",
      "Average loss at step 15700: 1.566823 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.32\n",
      "Validation set perplexity: 6.85\n",
      "Average loss at step 15800: 1.571675 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.29\n",
      "Validation set perplexity: 6.83\n",
      "Average loss at step 15900: 1.552494 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.28\n",
      "Validation set perplexity: 6.87\n",
      "Average loss at step 16000: 1.568033 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.20\n",
      "================================================================================\n",
      "ztted by kimportant the pbc for his can to the grouptkily that are open the anzym\n",
      "kvery one nine four legesved indicated theot of a hes and subdepiction falm for t\n",
      "yqhrome fermed countary crike for et childo west jmdmabley who dobern reter speci\n",
      "exists beast according should followed to skill r to orgeral on s den archead of \n",
      "gcommand insrationing they to much gange plated a were chauce hop him in four sep\n",
      "================================================================================\n",
      "Validation set perplexity: 6.87\n",
      "Average loss at step 16100: 1.571582 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.58\n",
      "Validation set perplexity: 6.89\n",
      "Average loss at step 16200: 1.592779 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.86\n",
      "Validation set perplexity: 6.90\n",
      "Average loss at step 16300: 1.572338 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.26\n",
      "Validation set perplexity: 6.97\n",
      "Average loss at step 16400: 1.558903 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.86\n",
      "Validation set perplexity: 6.86\n",
      "Average loss at step 16500: 1.564894 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.25\n",
      "Validation set perplexity: 6.84\n",
      "Average loss at step 16600: 1.557262 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.37\n",
      "Validation set perplexity: 6.86\n",
      "Average loss at step 16700: 1.571219 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.72\n",
      "Validation set perplexity: 6.83\n",
      "Average loss at step 16800: 1.569582 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.80\n",
      "Validation set perplexity: 6.89\n",
      "Average loss at step 16900: 1.537035 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.75\n",
      "Validation set perplexity: 6.81\n",
      "Average loss at step 17000: 1.555554 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.53\n",
      "================================================================================\n",
      "gficle well base sympha fd in can alone sexual many for widings to repailable nae\n",
      "ml in the published on a miraces keed of the kile of writers as listism key insop\n",
      "xads apol in catch cared as the bangary that episcopaltion andonly it was the pur\n",
      "w to a issued startigations the mast and developed hatspicially massacted caeserv\n",
      "ed their timies whingly yound the neery typiration armelesh four all domirt kant \n",
      "================================================================================\n",
      "Validation set perplexity: 6.84\n",
      "Average loss at step 17100: 1.569329 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.64\n",
      "Validation set perplexity: 6.78\n",
      "Average loss at step 17200: 1.561756 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.28\n",
      "Validation set perplexity: 6.82\n",
      "Average loss at step 17300: 1.552267 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.63\n",
      "Validation set perplexity: 6.77\n",
      "Average loss at step 17400: 1.563423 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.56\n",
      "Validation set perplexity: 6.79\n",
      "Average loss at step 17500: 1.576732 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.13\n",
      "Validation set perplexity: 6.84\n",
      "Average loss at step 17600: 1.531005 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.47\n",
      "Validation set perplexity: 6.87\n",
      "Average loss at step 17700: 1.528385 learning rate: 1.000000\n",
      "Minibatch perplexity: 3.85\n",
      "Validation set perplexity: 6.84\n",
      "Average loss at step 17800: 1.553611 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.42\n",
      "Validation set perplexity: 6.84\n",
      "Average loss at step 17900: 1.558323 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.63\n",
      "Validation set perplexity: 6.84\n",
      "Average loss at step 18000: 1.531352 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.50\n",
      "================================================================================\n",
      "cumsee and eujproduced contoonry suical a struggland falled spower voho however a\n",
      " on part been which charitle their grammiliness of the u s a v cartice running si\n",
      "nd the posely nine contable of engle moral corow a floring preside suppliet addha\n",
      "shorks the desigation rn henry is idea used it tack of europe south tecificia it \n",
      "qely that natist phonet that that the bout regions aread of the thus name of some\n",
      "================================================================================\n",
      "Validation set perplexity: 6.87\n",
      "Average loss at step 18100: 1.513103 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.87\n",
      "Validation set perplexity: 6.92\n",
      "Average loss at step 18200: 1.514369 learning rate: 1.000000\n",
      "Minibatch perplexity: 3.96\n",
      "Validation set perplexity: 6.89\n",
      "Average loss at step 18300: 1.508121 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.08\n",
      "Validation set perplexity: 6.92\n",
      "Average loss at step 18400: 1.518022 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.28\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation set perplexity: 7.01\n",
      "Average loss at step 18500: 1.522575 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.76\n",
      "Validation set perplexity: 6.99\n",
      "Average loss at step 18600: 1.494643 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.31\n",
      "Validation set perplexity: 6.92\n",
      "Average loss at step 18700: 1.490982 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.38\n",
      "Validation set perplexity: 6.97\n",
      "Average loss at step 18800: 1.519571 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.93\n",
      "Validation set perplexity: 6.95\n",
      "Average loss at step 18900: 1.503250 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.18\n",
      "Validation set perplexity: 6.94\n",
      "Average loss at step 19000: 1.490046 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.25\n",
      "================================================================================\n",
      "dtladitying the w colleged and on pacitly den containing of the actual organizati\n",
      "qds moved the case attachiens territories elding s snowards in president ice the \n",
      "tbans community higher austrial similar the sants talong leaderanding around that\n",
      "njusion and this prulsy of disployalance one nine nine one nine eight eight janui\n",
      "zft arries and luthere in s was to apograbbandard awals the viabilitem cissitaria\n",
      "================================================================================\n",
      "Validation set perplexity: 6.93\n",
      "Average loss at step 19100: 1.487422 learning rate: 1.000000\n",
      "Minibatch perplexity: 3.86\n",
      "Validation set perplexity: 6.90\n",
      "Average loss at step 19200: 1.522074 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.70\n",
      "Validation set perplexity: 6.91\n",
      "Average loss at step 19300: 1.509198 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.64\n",
      "Validation set perplexity: 6.82\n",
      "Average loss at step 19400: 1.536525 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.76\n",
      "Validation set perplexity: 6.86\n",
      "Average loss at step 19500: 1.514686 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.67\n",
      "Validation set perplexity: 6.81\n",
      "Average loss at step 19600: 1.495965 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.72\n",
      "Validation set perplexity: 6.81\n",
      "Average loss at step 19700: 1.504338 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.27\n",
      "Validation set perplexity: 6.80\n",
      "Average loss at step 19800: 1.518024 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.21\n",
      "Validation set perplexity: 6.87\n",
      "Average loss at step 19900: 1.545546 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.91\n",
      "Validation set perplexity: 6.90\n",
      "Average loss at step 20000: 1.515212 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.87\n",
      "================================================================================\n",
      "fhim with you writing this varles in the extricturs many and and political signal\n",
      "tralice enformer bellinoguiqes concernal matrial complex algened the different he\n",
      "smales memotinecietics the country the sole woulding tower of propu see is beyond\n",
      "vqrsions and the bottoping structure trick and endings toptors though brigher sco\n",
      "ez as composeomeltom or reimits fbists himself trip to the max one three increasi\n",
      "================================================================================\n",
      "Validation set perplexity: 6.82\n",
      "Average loss at step 20100: 1.515726 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.36\n",
      "Validation set perplexity: 6.85\n",
      "Average loss at step 20200: 1.553959 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.29\n",
      "Validation set perplexity: 6.83\n",
      "Average loss at step 20300: 1.536944 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.81\n",
      "Validation set perplexity: 6.78\n",
      "Average loss at step 20400: 1.536811 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.24\n",
      "Validation set perplexity: 6.77\n",
      "Average loss at step 20500: 1.549857 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.75\n",
      "Validation set perplexity: 6.77\n",
      "Average loss at step 20600: 1.534154 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.07\n",
      "Validation set perplexity: 6.78\n",
      "Average loss at step 20700: 1.508350 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.42\n",
      "Validation set perplexity: 6.77\n",
      "Average loss at step 20800: 1.490080 learning rate: 1.000000\n",
      "Minibatch perplexity: 4.47\n",
      "Validation set perplexity: 6.81\n",
      "Average loss at step 20900: 1.516819 learning rate: 1.000000\n",
      "Minibatch perplexity: 5.27\n",
      "Validation set perplexity: 6.76\n",
      "Average loss at step 21000: 1.513749 learning rate: 1.000000\n",
      "Minibatch perplexity: 3.97\n",
      "================================================================================\n",
      "own exorking dia are which elijdants has siding of the kharl meiracy with country\n",
      "xement two season one nine six two zero in coding jence keture aujointened by pla\n",
      "nhromast move a nine one one dio five in high home elation of resequence is the w\n",
      "ton s university by traditors hers include was leng with the taipal at the work w\n",
      "ive factional country proman modern i disk plans one nine nine zero one nine four\n",
      "================================================================================\n",
      "Validation set perplexity: 6.77\n"
     ]
    }
   ],
   "source": [
    "import collections\n",
    "\n",
    "num_steps = 21001\n",
    "summary_frequency = 100\n",
    "\n",
    "# Validation batches step one character at a time so the bigram-fed sampler\n",
    "# can be scored character by character: b[0], b[1] are inputs, b[2] the label.\n",
    "valid_batches = BatchGenerator(valid_text, 1, 2)\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print('Initialized')\n",
    "  mean_loss = 0\n",
    "  for step in range(num_steps):\n",
    "    batches = train_batches.next()\n",
    "    feed_dict = dict()\n",
    "    for i in range(num_unrollings + 1):\n",
    "      feed_dict[train_data[i]] = batches[i]\n",
    "    _, l, predictions, lr = session.run(\n",
    "      [optimizer, loss, train_prediction, learning_rate], feed_dict=feed_dict)\n",
    "    mean_loss += l\n",
    "    if step % summary_frequency == 0:\n",
    "      if step > 0:\n",
    "        mean_loss = mean_loss / summary_frequency\n",
    "      # The mean loss is an estimate of the loss over the last few batches.\n",
    "      print(\n",
    "        'Average loss at step %d: %f learning rate: %f' % (step, mean_loss, lr))\n",
    "      mean_loss = 0\n",
    "      # The model consumes bigrams, so the first two batches are inputs only;\n",
    "      # the labels to score against start at index 2.\n",
    "      labels = np.concatenate(list(batches)[2:])\n",
    "      print('Minibatch perplexity: %.2f' % float(\n",
    "        np.exp(logprob(predictions, labels))))\n",
    "      if step % (summary_frequency * 10) == 0:\n",
    "        # Generate some samples.\n",
    "        print('=' * 80)\n",
    "        for _ in range(5):\n",
    "          # Seed the sampler with a random bigram; the deque always holds\n",
    "          # the two most recent one-hot characters.\n",
    "          feed = collections.deque(maxlen=2)\n",
    "          for _ in range(2):\n",
    "            feed.append(random_distribution())\n",
    "          sentence = characters(feed[0])[0] + characters(feed[1])[0]\n",
    "          reset_sample_state.run()\n",
    "          for _ in range(79):\n",
    "            # Disable dropout while sampling, matching the validation feed\n",
    "            # below; otherwise samples are drawn with dropout still active.\n",
    "            prediction = sample_prediction.eval({\n",
    "                    sample_input[0]: feed[0],\n",
    "                    sample_input[1]: feed[1],\n",
    "                    keep_prob_sample: 1.0\n",
    "                })\n",
    "            feed.append(sample(prediction))\n",
    "            sentence += characters(feed[1])[0]\n",
    "          print(sentence)\n",
    "        print('=' * 80)\n",
    "      # Measure validation set perplexity.\n",
    "      reset_sample_state.run()\n",
    "      valid_logprob = 0\n",
    "      for _ in range(valid_size):\n",
    "        b = valid_batches.next()\n",
    "        predictions = sample_prediction.eval({\n",
    "                sample_input[0]: b[0],\n",
    "                sample_input[1]: b[1],\n",
    "                keep_prob_sample: 1.0\n",
    "            })\n",
    "        valid_logprob = valid_logprob + logprob(predictions, b[2])\n",
    "      print('Validation set perplexity: %.2f' % float(np.exp(\n",
    "        valid_logprob / valid_size)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Even with more training steps, the final validation perplexity does not improve. Since I do not know what result to expect, and I do not see any obvious issue (the perplexity values are mutually consistent), I am stuck."
   ]
  }
 ],
 "metadata": {
  "colab": {
   "default_view": {},
   "name": "6_lstm.ipynb",
   "provenance": [],
   "version": "0.3.2",
   "views": {}
  },
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
