{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## LSTM + Deep CNN Text Classification with Keras"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[i] Loaded Parameters:\n",
      " 40000 30 0.2 50 \n",
      " dataset/glove/glove.twitter.27B.50d.txt\n",
      "[i] Importing Modules...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n",
      "Using TensorFlow backend.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[i] Using Keras version 2.1.4\n",
      "[i] Finished Importing Modules\n",
      "[i] . Reading from csv file...Done!\n",
      "[i] Found 30692 unique tokens.\n",
      "[+] Shape of data tensor: (40000, 30)\n",
      "[+] Shape of label tensor: (40000, 4)\n",
      "[+] Number of entries in each category:\n",
      "[+] Training:\n",
      " [ 7762. 10890. 12239.  1109.]\n",
      "[+] Validation:\n",
      " [1882. 2734. 3060.  324.]\n",
      "[i] Loading GloVe from: dataset/glove/glove.twitter.27B.50d.txt ...Done.\n",
      "[+] Proceeding with Embedding Matrix...Completed!\n",
      "Finished running setup.\n"
     ]
    }
   ],
   "source": [
    "%run Setup.ipynb"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Load embedding matrix into an `Embedding` layer. Toggle `trainable=False` to prevent the weights from being updated during training."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Frozen embedding layer initialised from the GloVe matrix built in Setup.ipynb.\n",
    "# The +1 on the vocabulary size leaves room for index 0 (presumably the\n",
    "# padding/unknown index produced by the tokenizer — confirm in Setup.ipynb).\n",
    "embedding_layer = Embedding(len(word_index) + 1,\n",
    "                            EMBEDDING_DIM,\n",
    "                            weights=[embedding_matrix],\n",
    "                            input_length=MAX_SEQUENCE_LENGTH,\n",
    "                            trainable=False)  # keep the pretrained GloVe vectors fixed during training"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### LSTM + Deep CNN Structure\n",
    "[Reference](https://github.com/richliao/textClassifier), [LSTM](http://colah.github.io/posts/2015-08-Understanding-LSTMs/), [CNN Source](https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html) and [Notes](http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/)\n",
    "\n",
    "Deeper CNN as described in [CNN for Sentence Classification](http://www.aclweb.org/anthology/D14-1181) (Yoon Kim, 2014), multiple filters have been applied. This can be implemented using Keras `Merge` Layer."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Input: integer token ids, padded to MAX_SEQUENCE_LENGTH (30 per the Setup output).\n",
    "sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n",
    "embedded_sequences = embedding_layer(sequence_input)\n",
    "\n",
    "# Bidirectional LSTM, 4 units per direction -> (batch, 30, 8) per the model summary.\n",
    "# return_sequences=True so the Conv1D stack below can slide over the time steps.\n",
    "l_lstm1 = Bidirectional(LSTM(4,dropout=0.3,recurrent_dropout=0.3,return_sequences=True))(embedded_sequences)\n",
    "\n",
    "# Parallel 1-D convolutions with window sizes 3/4/5 (multi-filter setup from\n",
    "# Kim 2014), each L2-regularised with 32 filters.\n",
    "convs, filter_sizes = [], [3,4,5]\n",
    "for fsz in filter_sizes:\n",
    "    l_conv = Conv1D(filters=32,kernel_size=fsz,\n",
    "                    activation='relu',kernel_regularizer=regularizers.l2(0.01))(l_lstm1)\n",
    "    convs.append(l_conv)\n",
    "\n",
    "# NOTE(review): axis=1 concatenates along the *time* axis, stacking the\n",
    "# 28/27/26-step feature maps into one 81-step sequence (see the model summary\n",
    "# output below), rather than along the channel axis as in the original Kim\n",
    "# architecture — confirm this is intended.\n",
    "l_merge = Concatenate(axis=1)(convs)\n",
    "l_pool1 = MaxPooling1D(2)(l_merge)\n",
    "l_drop1 = Dropout(0.4)(l_pool1)\n",
    "l_flat = Flatten()(l_drop1)\n",
    "l_dense = Dense(16, activation='relu')(l_flat)\n",
    "\n",
    "preds = Dense(4, activation='softmax')(l_dense)  # 4-way softmax, one unit per label category"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_lr_metric(optimizer):\n",
    "    \"\"\"Return a Keras metric function that reports the optimizer's `lr` variable.\n",
    "\n",
    "    The inner function is deliberately named `lr`: Keras uses the metric\n",
    "    function's name as its label, producing the `lr`/`val_lr` columns seen in\n",
    "    the training log below.\n",
    "    \"\"\"\n",
    "    def lr(y_true, y_pred):\n",
    "        # Metric signature is (y_true, y_pred) but only the optimizer state is read.\n",
    "        return optimizer.lr\n",
    "    return lr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "model = Model(sequence_input, preds)\n",
    "# Adadelta with lr=1.0, rho=0.95; epsilon=None defers to the backend default.\n",
    "# decay=0.0 here because per-epoch decay is applied externally by the\n",
    "# LearningRateScheduler callback (step_cyclic).\n",
    "adadelta = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)\n",
    "lr_metric = get_lr_metric(adadelta)  # surface the current learning rate in the training log\n",
    "model.compile(loss='categorical_crossentropy',\n",
    "              optimizer=adadelta,\n",
    "              metrics=['acc', lr_metric])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def step_cyclic(epoch):\n",
    "    \"\"\"LR schedule: slow inverse-time decay with a 10x boost every 33rd epoch.\"\"\"\n",
    "    try:\n",
    "        l_r, decay = 1.0, 0.0001\n",
    "        # NOTE(review): epoch 0 also satisfies epoch%33==0, so the very first\n",
    "        # epoch trains at lr ~10 (visible as `lr: 10.0000` on Epoch 1 in the\n",
    "        # log below) — confirm this is intended and not just the 33-epoch cycle.\n",
    "        if epoch%33==0:multiplier = 10\n",
    "        else:multiplier = 1\n",
    "        rate = float(multiplier * l_r * 1/(1 + decay * epoch))\n",
    "        #print(\"Epoch\",epoch+1,\"- learning_rate\",rate)\n",
    "        return rate\n",
    "    except Exception as e:\n",
    "        # Fall back to a safe constant rate instead of aborting training.\n",
    "        print(\"Error in lr_schedule:\",str(e))\n",
    "        return float(1.0)\n",
    "    \n",
    "def initial_boost(epoch):\n",
    "    \"\"\"Alternative schedule: 6x learning rate on the first epoch, 1.0 after.\n",
    "\n",
    "    Not attached to any callback in this cell — kept for experimentation.\n",
    "    \"\"\"\n",
    "    if epoch==0: return float(6.0)\n",
    "    else: return float(1.0)\n",
    "        \n",
    "# TensorBoard logging (histograms/gradients every 4 epochs), best-model\n",
    "# checkpointing on validation loss, and the cyclic LR schedule defined above.\n",
    "tensorboard = callbacks.TensorBoard(log_dir='./logs', histogram_freq=4, batch_size=16, write_grads=True , write_graph=True)\n",
    "model_checkpoints = callbacks.ModelCheckpoint(\"checkpoint.h5\", monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)\n",
    "lr_schedule = callbacks.LearningRateScheduler(step_cyclic)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_3 (InputLayer)            (None, 30)           0                                            \n",
      "__________________________________________________________________________________________________\n",
      "embedding_1 (Embedding)         (None, 30, 50)       1534650     input_3[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "bidirectional_3 (Bidirectional) (None, 30, 8)        1760        embedding_1[2][0]                \n",
      "__________________________________________________________________________________________________\n",
      "conv1d_7 (Conv1D)               (None, 28, 32)       800         bidirectional_3[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv1d_8 (Conv1D)               (None, 27, 32)       1056        bidirectional_3[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv1d_9 (Conv1D)               (None, 26, 32)       1312        bidirectional_3[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_3 (Concatenate)     (None, 81, 32)       0           conv1d_7[0][0]                   \n",
      "                                                                 conv1d_8[0][0]                   \n",
      "                                                                 conv1d_9[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "max_pooling1d_3 (MaxPooling1D)  (None, 40, 32)       0           concatenate_3[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "dropout_3 (Dropout)             (None, 40, 32)       0           max_pooling1d_3[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "flatten_1 (Flatten)             (None, 1280)         0           dropout_3[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "dense_4 (Dense)                 (None, 16)           20496       flatten_1[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "dense_5 (Dense)                 (None, 4)            68          dense_4[0][0]                    \n",
      "==================================================================================================\n",
      "Total params: 1,560,142\n",
      "Trainable params: 25,492\n",
      "Non-trainable params: 1,534,650\n",
      "__________________________________________________________________________________________________\n",
      "Training Progress:\n",
      "Train on 32000 samples, validate on 8000 samples\n",
      "Epoch 1/200\n",
      "32000/32000 [==============================] - 25s 790us/step - loss: 1.1756 - acc: 0.4495 - lr: 10.0000 - val_loss: 1.1393 - val_acc: 0.5085 - val_lr: 10.0000\n",
      "Epoch 2/200\n",
      "32000/32000 [==============================] - 20s 619us/step - loss: 1.0939 - acc: 0.5132 - lr: 0.9999 - val_loss: 1.0762 - val_acc: 0.5271 - val_lr: 0.9999\n",
      "Epoch 3/200\n",
      "32000/32000 [==============================] - 21s 655us/step - loss: 1.0820 - acc: 0.5202 - lr: 0.9998 - val_loss: 1.0660 - val_acc: 0.5351 - val_lr: 0.9998\n",
      "Epoch 4/200\n",
      "32000/32000 [==============================] - 21s 643us/step - loss: 1.0738 - acc: 0.5223 - lr: 0.9997 - val_loss: 1.0598 - val_acc: 0.5419 - val_lr: 0.9997\n",
      "Epoch 5/200\n",
      "32000/32000 [==============================] - 20s 639us/step - loss: 1.0692 - acc: 0.5298 - lr: 0.9996 - val_loss: 1.0518 - val_acc: 0.5474 - val_lr: 0.9996\n",
      "Epoch 6/200\n",
      "32000/32000 [==============================] - 20s 640us/step - loss: 1.0647 - acc: 0.5315 - lr: 0.9995 - val_loss: 1.0485 - val_acc: 0.5499 - val_lr: 0.9995\n",
      "Epoch 7/200\n",
      "32000/32000 [==============================] - 20s 634us/step - loss: 1.0595 - acc: 0.5347 - lr: 0.9994 - val_loss: 1.0456 - val_acc: 0.5533 - val_lr: 0.9994\n",
      "Epoch 8/200\n",
      "32000/32000 [==============================] - 20s 634us/step - loss: 1.0547 - acc: 0.5408 - lr: 0.9993 - val_loss: 1.0386 - val_acc: 0.5541 - val_lr: 0.9993\n",
      "Epoch 9/200\n",
      "32000/32000 [==============================] - 20s 634us/step - loss: 1.0515 - acc: 0.5409 - lr: 0.9992 - val_loss: 1.0353 - val_acc: 0.5539 - val_lr: 0.9992\n",
      "Epoch 10/200\n",
      "32000/32000 [==============================] - 20s 636us/step - loss: 1.0507 - acc: 0.5391 - lr: 0.9991 - val_loss: 1.0348 - val_acc: 0.5565 - val_lr: 0.9991\n",
      "Epoch 11/200\n",
      "32000/32000 [==============================] - 20s 624us/step - loss: 1.0475 - acc: 0.5427 - lr: 0.9990 - val_loss: 1.0316 - val_acc: 0.5556 - val_lr: 0.9990\n",
      "Epoch 12/200\n",
      "32000/32000 [==============================] - 20s 632us/step - loss: 1.0472 - acc: 0.5434 - lr: 0.9989 - val_loss: 1.0328 - val_acc: 0.5566 - val_lr: 0.9989\n",
      "Epoch 13/200\n",
      "32000/32000 [==============================] - 20s 627us/step - loss: 1.0411 - acc: 0.5450 - lr: 0.9988 - val_loss: 1.0269 - val_acc: 0.5619 - val_lr: 0.9988\n",
      "Epoch 14/200\n",
      "32000/32000 [==============================] - 21s 669us/step - loss: 1.0420 - acc: 0.5444 - lr: 0.9987 - val_loss: 1.0247 - val_acc: 0.5633 - val_lr: 0.9987\n",
      "Epoch 15/200\n",
      "32000/32000 [==============================] - 21s 646us/step - loss: 1.0409 - acc: 0.5488 - lr: 0.9986 - val_loss: 1.0279 - val_acc: 0.5621 - val_lr: 0.9986\n",
      "Epoch 16/200\n",
      "32000/32000 [==============================] - 21s 648us/step - loss: 1.0410 - acc: 0.5458 - lr: 0.9985 - val_loss: 1.0341 - val_acc: 0.5546 - val_lr: 0.9985\n",
      "Epoch 17/200\n",
      "32000/32000 [==============================] - 21s 642us/step - loss: 1.0370 - acc: 0.5470 - lr: 0.9984 - val_loss: 1.0247 - val_acc: 0.5595 - val_lr: 0.9984\n",
      "Epoch 18/200\n",
      "32000/32000 [==============================] - 20s 627us/step - loss: 1.0376 - acc: 0.5477 - lr: 0.9983 - val_loss: 1.0244 - val_acc: 0.5594 - val_lr: 0.9983\n",
      "Epoch 19/200\n",
      "32000/32000 [==============================] - 20s 622us/step - loss: 1.0354 - acc: 0.5512 - lr: 0.9982 - val_loss: 1.0196 - val_acc: 0.5654 - val_lr: 0.9982\n",
      "Epoch 20/200\n",
      "32000/32000 [==============================] - 20s 624us/step - loss: 1.0358 - acc: 0.5491 - lr: 0.9981 - val_loss: 1.0210 - val_acc: 0.5666 - val_lr: 0.9981\n",
      "Epoch 21/200\n",
      "32000/32000 [==============================] - 20s 623us/step - loss: 1.0343 - acc: 0.5485 - lr: 0.9980 - val_loss: 1.0210 - val_acc: 0.5648 - val_lr: 0.9980\n",
      "Epoch 22/200\n",
      "32000/32000 [==============================] - 20s 632us/step - loss: 1.0331 - acc: 0.5499 - lr: 0.9979 - val_loss: 1.0175 - val_acc: 0.5686 - val_lr: 0.9979\n",
      "Epoch 23/200\n",
      "32000/32000 [==============================] - 20s 634us/step - loss: 1.0336 - acc: 0.5523 - lr: 0.9978 - val_loss: 1.0183 - val_acc: 0.5661 - val_lr: 0.9978\n",
      "Epoch 24/200\n",
      "32000/32000 [==============================] - 20s 637us/step - loss: 1.0314 - acc: 0.5511 - lr: 0.9977 - val_loss: 1.0186 - val_acc: 0.5675 - val_lr: 0.9977\n",
      "Epoch 25/200\n",
      "32000/32000 [==============================] - 21s 649us/step - loss: 1.0308 - acc: 0.5528 - lr: 0.9976 - val_loss: 1.0198 - val_acc: 0.5685 - val_lr: 0.9976\n",
      "Epoch 26/200\n",
      "32000/32000 [==============================] - 20s 640us/step - loss: 1.0329 - acc: 0.5527 - lr: 0.9975 - val_loss: 1.0260 - val_acc: 0.5650 - val_lr: 0.9975\n",
      "Epoch 27/200\n",
      "32000/32000 [==============================] - 20s 634us/step - loss: 1.0323 - acc: 0.5510 - lr: 0.9974 - val_loss: 1.0199 - val_acc: 0.5663 - val_lr: 0.9974\n",
      "Epoch 28/200\n",
      "32000/32000 [==============================] - 20s 640us/step - loss: 1.0291 - acc: 0.5526 - lr: 0.9973 - val_loss: 1.0170 - val_acc: 0.5699 - val_lr: 0.9973\n",
      "Epoch 29/200\n",
      "32000/32000 [==============================] - 21s 656us/step - loss: 1.0293 - acc: 0.5526 - lr: 0.9972 - val_loss: 1.0189 - val_acc: 0.5685 - val_lr: 0.9972\n",
      "Epoch 30/200\n",
      "32000/32000 [==============================] - 21s 655us/step - loss: 1.0280 - acc: 0.5537 - lr: 0.9971 - val_loss: 1.0145 - val_acc: 0.5706 - val_lr: 0.9971\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 31/200\n",
      "32000/32000 [==============================] - 21s 658us/step - loss: 1.0281 - acc: 0.5549 - lr: 0.9970 - val_loss: 1.0134 - val_acc: 0.5695 - val_lr: 0.9970\n",
      "Epoch 32/200\n",
      "32000/32000 [==============================] - 21s 664us/step - loss: 1.0265 - acc: 0.5598 - lr: 0.9969 - val_loss: 1.0163 - val_acc: 0.5686 - val_lr: 0.9969\n",
      "Epoch 33/200\n",
      "32000/32000 [==============================] - 21s 660us/step - loss: 1.0254 - acc: 0.5564 - lr: 0.9968 - val_loss: 1.0136 - val_acc: 0.5741 - val_lr: 0.9968\n",
      "Epoch 34/200\n",
      "32000/32000 [==============================] - 21s 645us/step - loss: 1.0778 - acc: 0.5305 - lr: 9.9671 - val_loss: 1.0495 - val_acc: 0.5595 - val_lr: 9.9671\n",
      "Epoch 35/200\n",
      "32000/32000 [==============================] - 20s 636us/step - loss: 1.0458 - acc: 0.5498 - lr: 0.9966 - val_loss: 1.0232 - val_acc: 0.5698 - val_lr: 0.9966\n",
      "Epoch 36/200\n",
      "32000/32000 [==============================] - 20s 630us/step - loss: 1.0395 - acc: 0.5519 - lr: 0.9965 - val_loss: 1.0207 - val_acc: 0.5725 - val_lr: 0.9965\n",
      "Epoch 37/200\n",
      "32000/32000 [==============================] - 20s 634us/step - loss: 1.0360 - acc: 0.5554 - lr: 0.9964 - val_loss: 1.0227 - val_acc: 0.5693 - val_lr: 0.9964\n",
      "Epoch 38/200\n",
      "32000/32000 [==============================] - 21s 646us/step - loss: 1.0322 - acc: 0.5543 - lr: 0.9963 - val_loss: 1.0157 - val_acc: 0.5723 - val_lr: 0.9963\n",
      "Epoch 39/200\n",
      "32000/32000 [==============================] - 20s 636us/step - loss: 1.0326 - acc: 0.5559 - lr: 0.9962 - val_loss: 1.0143 - val_acc: 0.5726 - val_lr: 0.9962\n",
      "Epoch 40/200\n",
      "32000/32000 [==============================] - 20s 631us/step - loss: 1.0291 - acc: 0.5564 - lr: 0.9961 - val_loss: 1.0150 - val_acc: 0.5716 - val_lr: 0.9961\n",
      "Epoch 41/200\n",
      "32000/32000 [==============================] - 20s 631us/step - loss: 1.0296 - acc: 0.5552 - lr: 0.9960 - val_loss: 1.0130 - val_acc: 0.5749 - val_lr: 0.9960\n",
      "Epoch 42/200\n",
      "32000/32000 [==============================] - 20s 626us/step - loss: 1.0276 - acc: 0.5573 - lr: 0.9959 - val_loss: 1.0114 - val_acc: 0.5750 - val_lr: 0.9959\n",
      "Epoch 43/200\n",
      "32000/32000 [==============================] - 20s 626us/step - loss: 1.0262 - acc: 0.5570 - lr: 0.9958 - val_loss: 1.0169 - val_acc: 0.5730 - val_lr: 0.9958\n",
      "Epoch 44/200\n",
      "32000/32000 [==============================] - 20s 623us/step - loss: 1.0252 - acc: 0.5590 - lr: 0.9957 - val_loss: 1.0105 - val_acc: 0.5789 - val_lr: 0.9957\n",
      "Epoch 45/200\n",
      "32000/32000 [==============================] - 20s 627us/step - loss: 1.0247 - acc: 0.5584 - lr: 0.9956 - val_loss: 1.0163 - val_acc: 0.5688 - val_lr: 0.9956\n",
      "Epoch 46/200\n",
      "32000/32000 [==============================] - 20s 639us/step - loss: 1.0236 - acc: 0.5582 - lr: 0.9955 - val_loss: 1.0119 - val_acc: 0.5726 - val_lr: 0.9955\n",
      "Epoch 47/200\n",
      "32000/32000 [==============================] - 21s 657us/step - loss: 1.0241 - acc: 0.5579 - lr: 0.9954 - val_loss: 1.0108 - val_acc: 0.5736 - val_lr: 0.9954\n",
      "Epoch 48/200\n",
      "32000/32000 [==============================] - 20s 635us/step - loss: 1.0226 - acc: 0.5599 - lr: 0.9953 - val_loss: 1.0119 - val_acc: 0.5764 - val_lr: 0.9953\n",
      "Epoch 49/200\n",
      "32000/32000 [==============================] - 21s 666us/step - loss: 1.0229 - acc: 0.5584 - lr: 0.9952 - val_loss: 1.0091 - val_acc: 0.5769 - val_lr: 0.9952\n",
      "Epoch 50/200\n",
      "32000/32000 [==============================] - 21s 668us/step - loss: 1.0254 - acc: 0.5573 - lr: 0.9951 - val_loss: 1.0127 - val_acc: 0.5746 - val_lr: 0.9951\n",
      "Epoch 51/200\n",
      "32000/32000 [==============================] - 21s 652us/step - loss: 1.0250 - acc: 0.5548 - lr: 0.9950 - val_loss: 1.0100 - val_acc: 0.5799 - val_lr: 0.9950\n",
      "Epoch 52/200\n",
      "32000/32000 [==============================] - 21s 648us/step - loss: 1.0249 - acc: 0.5574 - lr: 0.9949 - val_loss: 1.0079 - val_acc: 0.5789 - val_lr: 0.9949\n",
      "Epoch 53/200\n",
      "32000/32000 [==============================] - 20s 638us/step - loss: 1.0236 - acc: 0.5586 - lr: 0.9948 - val_loss: 1.0123 - val_acc: 0.5739 - val_lr: 0.9948\n",
      "Epoch 54/200\n",
      "32000/32000 [==============================] - 21s 657us/step - loss: 1.0207 - acc: 0.5588 - lr: 0.9947 - val_loss: 1.0112 - val_acc: 0.5795 - val_lr: 0.9947\n",
      "Epoch 55/200\n",
      "32000/32000 [==============================] - 21s 657us/step - loss: 1.0217 - acc: 0.5594 - lr: 0.9946 - val_loss: 1.0088 - val_acc: 0.5790 - val_lr: 0.9946\n",
      "Epoch 56/200\n",
      "32000/32000 [==============================] - 21s 651us/step - loss: 1.0218 - acc: 0.5596 - lr: 0.9945 - val_loss: 1.0084 - val_acc: 0.5753 - val_lr: 0.9945\n",
      "Epoch 57/200\n",
      "32000/32000 [==============================] - 21s 656us/step - loss: 1.0228 - acc: 0.5593 - lr: 0.9944 - val_loss: 1.0130 - val_acc: 0.5714 - val_lr: 0.9944\n",
      "Epoch 58/200\n",
      "32000/32000 [==============================] - 21s 669us/step - loss: 1.0221 - acc: 0.5601 - lr: 0.9943 - val_loss: 1.0118 - val_acc: 0.5705 - val_lr: 0.9943\n",
      "Epoch 59/200\n",
      "32000/32000 [==============================] - 21s 656us/step - loss: 1.0233 - acc: 0.5586 - lr: 0.9942 - val_loss: 1.0098 - val_acc: 0.5750 - val_lr: 0.9942\n",
      "Epoch 60/200\n",
      "32000/32000 [==============================] - 21s 648us/step - loss: 1.0219 - acc: 0.5586 - lr: 0.9941 - val_loss: 1.0092 - val_acc: 0.5754 - val_lr: 0.9941\n",
      "Epoch 61/200\n",
      "32000/32000 [==============================] - 21s 653us/step - loss: 1.0217 - acc: 0.5580 - lr: 0.9940 - val_loss: 1.0089 - val_acc: 0.5758 - val_lr: 0.9940\n",
      "Epoch 62/200\n",
      "32000/32000 [==============================] - 21s 662us/step - loss: 1.0200 - acc: 0.5619 - lr: 0.9939 - val_loss: 1.0069 - val_acc: 0.5783 - val_lr: 0.9939\n",
      "Epoch 63/200\n",
      "32000/32000 [==============================] - 21s 655us/step - loss: 1.0214 - acc: 0.5593 - lr: 0.9938 - val_loss: 1.0081 - val_acc: 0.5769 - val_lr: 0.9938\n",
      "Epoch 64/200\n",
      "32000/32000 [==============================] - 21s 663us/step - loss: 1.0217 - acc: 0.5602 - lr: 0.9937 - val_loss: 1.0087 - val_acc: 0.5740 - val_lr: 0.9937\n",
      "Epoch 65/200\n",
      "32000/32000 [==============================] - 21s 658us/step - loss: 1.0196 - acc: 0.5608 - lr: 0.9936 - val_loss: 1.0046 - val_acc: 0.5781 - val_lr: 0.9936\n",
      "Epoch 66/200\n",
      "32000/32000 [==============================] - 22s 674us/step - loss: 1.0194 - acc: 0.5598 - lr: 0.9935 - val_loss: 1.0054 - val_acc: 0.5793 - val_lr: 0.9935\n",
      "Epoch 67/200\n",
      "32000/32000 [==============================] - 21s 657us/step - loss: 1.0671 - acc: 0.5373 - lr: 9.9344 - val_loss: 1.0351 - val_acc: 0.5654 - val_lr: 9.9344\n",
      "Epoch 68/200\n",
      "32000/32000 [==============================] - 21s 651us/step - loss: 1.0369 - acc: 0.5533 - lr: 0.9933 - val_loss: 1.0164 - val_acc: 0.5778 - val_lr: 0.9933\n",
      "Epoch 69/200\n",
      "32000/32000 [==============================] - 21s 654us/step - loss: 1.0317 - acc: 0.5551 - lr: 0.9932 - val_loss: 1.0141 - val_acc: 0.5779 - val_lr: 0.9932\n",
      "Epoch 70/200\n",
      "32000/32000 [==============================] - 21s 659us/step - loss: 1.0273 - acc: 0.5557 - lr: 0.9931 - val_loss: 1.0100 - val_acc: 0.5783 - val_lr: 0.9931\n",
      "Epoch 71/200\n",
      "32000/32000 [==============================] - 21s 662us/step - loss: 1.0271 - acc: 0.5558 - lr: 0.9930 - val_loss: 1.0132 - val_acc: 0.5754 - val_lr: 0.9930\n",
      "Epoch 72/200\n",
      "32000/32000 [==============================] - 21s 671us/step - loss: 1.0241 - acc: 0.5617 - lr: 0.9930 - val_loss: 1.0095 - val_acc: 0.5774 - val_lr: 0.9930\n",
      "Epoch 73/200\n",
      "32000/32000 [==============================] - 21s 654us/step - loss: 1.0245 - acc: 0.5576 - lr: 0.9929 - val_loss: 1.0108 - val_acc: 0.5756 - val_lr: 0.9929\n",
      "Epoch 74/200\n",
      "32000/32000 [==============================] - 20s 636us/step - loss: 1.0235 - acc: 0.5588 - lr: 0.9928 - val_loss: 1.0106 - val_acc: 0.5785 - val_lr: 0.9928\n",
      "Epoch 75/200\n",
      "32000/32000 [==============================] - 20s 631us/step - loss: 1.0229 - acc: 0.5600 - lr: 0.9927 - val_loss: 1.0102 - val_acc: 0.5788 - val_lr: 0.9927\n",
      "Epoch 76/200\n",
      "32000/32000 [==============================] - 20s 623us/step - loss: 1.0217 - acc: 0.5599 - lr: 0.9926 - val_loss: 1.0112 - val_acc: 0.5758 - val_lr: 0.9926\n",
      "Epoch 77/200\n",
      "32000/32000 [==============================] - 20s 630us/step - loss: 1.0195 - acc: 0.5625 - lr: 0.9925 - val_loss: 1.0071 - val_acc: 0.5795 - val_lr: 0.9925\n",
      "Epoch 78/200\n",
      "32000/32000 [==============================] - 20s 630us/step - loss: 1.0218 - acc: 0.5588 - lr: 0.9924 - val_loss: 1.0100 - val_acc: 0.5763 - val_lr: 0.9924\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 79/200\n",
      "32000/32000 [==============================] - 20s 621us/step - loss: 1.0198 - acc: 0.5592 - lr: 0.9923 - val_loss: 1.0070 - val_acc: 0.5808 - val_lr: 0.9923\n",
      "Epoch 80/200\n",
      "32000/32000 [==============================] - 21s 655us/step - loss: 1.0182 - acc: 0.5602 - lr: 0.9922 - val_loss: 1.0069 - val_acc: 0.5801 - val_lr: 0.9922\n",
      "Epoch 81/200\n",
      "32000/32000 [==============================] - 21s 643us/step - loss: 1.0193 - acc: 0.5612 - lr: 0.9921 - val_loss: 1.0087 - val_acc: 0.5791 - val_lr: 0.9921\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-13-b684b2d14854>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      3\u001b[0m model.fit(x_train, y_train, validation_data=(x_val, y_val),\n\u001b[1;32m      4\u001b[0m           \u001b[0mepochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m200\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m50\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m           callbacks=[tensorboard, model_checkpoints, lr_schedule])\n\u001b[0m",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)\u001b[0m\n\u001b[1;32m   1710\u001b[0m                               \u001b[0minitial_epoch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minitial_epoch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1711\u001b[0m                               \u001b[0msteps_per_epoch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msteps_per_epoch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1712\u001b[0;31m                               validation_steps=validation_steps)\n\u001b[0m\u001b[1;32m   1713\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1714\u001b[0m     def evaluate(self, x=None, y=None,\n",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36m_fit_loop\u001b[0;34m(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)\u001b[0m\n\u001b[1;32m   1253\u001b[0m                             \u001b[0;32mfor\u001b[0m \u001b[0ml\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mo\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout_labels\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mval_outs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1254\u001b[0m                                 \u001b[0mepoch_logs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'val_'\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0ml\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mo\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1255\u001b[0;31m             \u001b[0mcallbacks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mon_epoch_end\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepoch\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepoch_logs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1256\u001b[0m             \u001b[0;32mif\u001b[0m \u001b[0mcallback_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstop_training\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1257\u001b[0m                 \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/callbacks.py\u001b[0m in \u001b[0;36mon_epoch_end\u001b[0;34m(self, epoch, logs)\u001b[0m\n\u001b[1;32m     75\u001b[0m         \u001b[0mlogs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlogs\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     76\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mcallback\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcallbacks\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 77\u001b[0;31m             \u001b[0mcallback\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mon_epoch_end\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepoch\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlogs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     78\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     79\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mon_batch_begin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlogs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/callbacks.py\u001b[0m in \u001b[0;36mon_epoch_end\u001b[0;34m(self, epoch, logs)\u001b[0m\n\u001b[1;32m    846\u001b[0m                     \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmerged\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    847\u001b[0m                     \u001b[0msummary_str\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 848\u001b[0;31m                     \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwriter\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_summary\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msummary_str\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepoch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    849\u001b[0m                     \u001b[0mi\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    850\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/summary/writer/writer.py\u001b[0m in \u001b[0;36madd_summary\u001b[0;34m(self, summary, global_step)\u001b[0m\n\u001b[1;32m    116\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msummary\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbytes\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    117\u001b[0m       \u001b[0msumm\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msummary_pb2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSummary\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 118\u001b[0;31m       \u001b[0msumm\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mParseFromString\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msummary\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    119\u001b[0m       \u001b[0msummary\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msumm\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    120\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/google/protobuf/message.py\u001b[0m in \u001b[0;36mParseFromString\u001b[0;34m(self, serialized)\u001b[0m\n\u001b[1;32m    183\u001b[0m     \"\"\"\n\u001b[1;32m    184\u001b[0m     \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mClear\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 185\u001b[0;31m     \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mMergeFromString\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mserialized\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    186\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    187\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0mSerializeToString\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/google/protobuf/internal/python_message.py\u001b[0m in \u001b[0;36mMergeFromString\u001b[0;34m(self, serialized)\u001b[0m\n\u001b[1;32m   1081\u001b[0m     \u001b[0mlength\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mserialized\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1082\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1083\u001b[0;31m       \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_InternalParse\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mserialized\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlength\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mlength\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1084\u001b[0m         \u001b[0;31m# The only reason _InternalParse would return early is if it\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1085\u001b[0m         \u001b[0;31m# encountered an end-group tag.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/google/protobuf/internal/python_message.py\u001b[0m in \u001b[0;36mInternalParse\u001b[0;34m(self, buffer, pos, end)\u001b[0m\n\u001b[1;32m   1118\u001b[0m         \u001b[0mpos\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnew_pos\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1119\u001b[0m       \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1120\u001b[0;31m         \u001b[0mpos\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfield_decoder\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbuffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnew_pos\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mend\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfield_dict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1121\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mfield_desc\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1122\u001b[0m           \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_UpdateOneofState\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfield_desc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/google/protobuf/internal/decoder.py\u001b[0m in \u001b[0;36mDecodeRepeatedField\u001b[0;34m(buffer, pos, end, message, field_dict)\u001b[0m\n\u001b[1;32m    610\u001b[0m           \u001b[0;32mraise\u001b[0m \u001b[0m_DecodeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Truncated message.'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    611\u001b[0m         \u001b[0;31m# Read sub-message.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 612\u001b[0;31m         \u001b[0;32mif\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_InternalParse\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbuffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnew_pos\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mnew_pos\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    613\u001b[0m           \u001b[0;31m# The only reason _InternalParse would return early is if it\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    614\u001b[0m           \u001b[0;31m# encountered an end-group tag.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/google/protobuf/internal/python_message.py\u001b[0m in \u001b[0;36mInternalParse\u001b[0;34m(self, buffer, pos, end)\u001b[0m\n\u001b[1;32m   1118\u001b[0m         \u001b[0mpos\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnew_pos\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1119\u001b[0m       \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1120\u001b[0;31m         \u001b[0mpos\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfield_decoder\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbuffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnew_pos\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mend\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfield_dict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1121\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mfield_desc\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1122\u001b[0m           \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_UpdateOneofState\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfield_desc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/google/protobuf/internal/decoder.py\u001b[0m in \u001b[0;36mDecodeField\u001b[0;34m(buffer, pos, end, message, field_dict)\u001b[0m\n\u001b[1;32m    631\u001b[0m         \u001b[0;32mraise\u001b[0m \u001b[0m_DecodeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Truncated message.'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    632\u001b[0m       \u001b[0;31m# Read sub-message.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 633\u001b[0;31m       \u001b[0;32mif\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_InternalParse\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbuffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnew_pos\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mnew_pos\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    634\u001b[0m         \u001b[0;31m# The only reason _InternalParse would return early is if it encountered\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    635\u001b[0m         \u001b[0;31m# an end-group tag.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/google/protobuf/internal/python_message.py\u001b[0m in \u001b[0;36mInternalParse\u001b[0;34m(self, buffer, pos, end)\u001b[0m\n\u001b[1;32m   1118\u001b[0m         \u001b[0mpos\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnew_pos\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1119\u001b[0m       \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1120\u001b[0;31m         \u001b[0mpos\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfield_decoder\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbuffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnew_pos\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mend\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfield_dict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1121\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mfield_desc\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1122\u001b[0m           \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_UpdateOneofState\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfield_desc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/google/protobuf/internal/decoder.py\u001b[0m in \u001b[0;36mDecodePackedField\u001b[0;34m(buffer, pos, end, message, field_dict)\u001b[0m\n\u001b[1;32m    209\u001b[0m           \u001b[0;32mraise\u001b[0m \u001b[0m_DecodeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Truncated message.'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    210\u001b[0m         \u001b[0;32mwhile\u001b[0m \u001b[0mpos\u001b[0m \u001b[0;34m<\u001b[0m \u001b[0mendpoint\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 211\u001b[0;31m           \u001b[0;34m(\u001b[0m\u001b[0melement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdecode_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbuffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    212\u001b[0m           \u001b[0mvalue\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0melement\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    213\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mpos\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0mendpoint\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Print layer-by-layer architecture and parameter counts before training.\n",
    "model.summary()\n",
    "print(\"Training Progress:\")\n",
    "# Train with the callbacks configured in earlier cells (TensorBoard logging,\n",
    "# checkpointing, LR schedule — definitions not visible in this cell).\n",
    "# NOTE(review): epochs=200 is a long run; the recorded output shows it was\n",
    "# interrupted with KeyboardInterrupt — presumably the checkpoint callback\n",
    "# preserved intermediate weights. Confirm checkpoint config before rerunning.\n",
    "model.fit(x_train, y_train, validation_data=(x_val, y_val),\n",
    "          epochs=200, batch_size=50,\n",
    "          callbacks=[tensorboard, model_checkpoints, lr_schedule])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the trained model (architecture + weights + optimizer state) to HDF5.\n",
    "# NOTE(review): filename 'ltsm-c.h5' looks like a typo for 'lstm-c.h5' —\n",
    "# verify no other notebook/script loads it under this name before renaming.\n",
    "model.save('ltsm-c.h5')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
