{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Feature: Out-Of-Fold Predictions from a CNN (+Magic Inputs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "In addition to the convolutional architecture, we'll append some of the leaky features to the intermediate feature layer."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<img src=\"assets/cnn-with-magic.png\" alt=\"Network Architecture\" style=\"height: 1200px;\" />"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Imports"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This utility package imports `numpy`, `pandas`, `matplotlib` and a helper `kg` module into the root namespace."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from pygoose import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import gc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "from sklearn.metrics import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "from keras import backend as K\n",
    "from keras.models import Model, Sequential\n",
    "from keras.layers import *\n",
    "from keras.callbacks import EarlyStopping, ModelCheckpoint"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Config"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Automatically discover the paths to various data folders and compose the project structure."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "project = kg.Project.discover()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Identifier for storing these features on disk and referring to them later."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "feature_list_id = 'oofp_nn_cnn_with_magic'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Make subsequent NN runs reproducible."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "RANDOM_SEED = 42"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "np.random.seed(RANDOM_SEED)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Read data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Word embedding lookup matrix."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "embedding_matrix = kg.io.load(project.aux_dir + 'fasttext_vocab_embedding_matrix.pickle')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Padded sequences of word indices for every question."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "X_train_q1 = kg.io.load(project.preprocessed_data_dir + 'sequences_q1_fasttext_train.pickle')\n",
    "X_train_q2 = kg.io.load(project.preprocessed_data_dir + 'sequences_q2_fasttext_train.pickle')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "X_test_q1 = kg.io.load(project.preprocessed_data_dir + 'sequences_q1_fasttext_test.pickle')\n",
    "X_test_q2 = kg.io.load(project.preprocessed_data_dir + 'sequences_q2_fasttext_test.pickle')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "y_train = kg.io.load(project.features_dir + 'y_train.pickle')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Magic features."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "magic_feature_lists = [\n",
    "    'magic_frequencies',\n",
    "    'magic_cooccurrence_matrix',\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "X_train_magic, X_test_magic, _ = project.load_feature_lists(magic_feature_lists)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "X_train_magic = X_train_magic.values\n",
    "X_test_magic = X_test_magic.values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "scaler = StandardScaler()\n",
    "scaler.fit(np.vstack([X_train_magic, X_test_magic]))\n",
    "X_train_magic = scaler.transform(X_train_magic)\n",
    "X_test_magic = scaler.transform(X_test_magic)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Word embedding properties."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "EMBEDDING_DIM = embedding_matrix.shape[-1]\n",
    "VOCAB_LENGTH = embedding_matrix.shape[0]\n",
    "MAX_SEQUENCE_LENGTH = X_train_q1.shape[-1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "300 101564 30\n"
     ]
    }
   ],
   "source": [
    "print(EMBEDDING_DIM, VOCAB_LENGTH, MAX_SEQUENCE_LENGTH)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Define models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "init_weights = initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=2)\n",
    "init_bias = 'zeros'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def create_embedding_block():\n",
    "    input_seq = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')\n",
    "    \n",
    "    embedding_seq = Embedding(\n",
    "        VOCAB_LENGTH,\n",
    "        EMBEDDING_DIM,\n",
    "        weights=[embedding_matrix],\n",
    "        input_length=MAX_SEQUENCE_LENGTH,\n",
    "        trainable=False,\n",
    "    )(input_seq)\n",
    "    \n",
    "    output_seq = embedding_seq\n",
    "    return input_seq, output_seq"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def create_model_question_conv_branch(input_seq, params):\n",
    "    conv_1 = Conv1D(\n",
    "        params['num_conv_filters'],\n",
    "        kernel_size=params['conv_kernel_size'],\n",
    "        padding='same',\n",
    "    )(input_seq)\n",
    "    \n",
    "    bn_1 = BatchNormalization()(conv_1)\n",
    "    relu_1 = Activation('relu')(bn_1)\n",
    "    dropout_1 = Dropout(params['conv_dropout_rate'])(relu_1)\n",
    "\n",
    "    conv_2 = Conv1D(\n",
    "        params['num_conv_filters'],\n",
    "        kernel_size=params['conv_kernel_size'],\n",
    "        padding='same',\n",
    "    )(dropout_1)\n",
    "    \n",
    "    bn_2 = BatchNormalization()(conv_2)\n",
    "    relu_2 = Activation('relu')(bn_2)\n",
    "    dropout_2 = Dropout(params['conv_dropout_rate'])(relu_2)\n",
    "    \n",
    "    flatten = Flatten()(dropout_2)\n",
    "    output = flatten\n",
    "    \n",
    "    return output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def create_model_question_timedist_max_branch(input_seq, params):\n",
    "    timedist = TimeDistributed(Dense(EMBEDDING_DIM))(input_seq)\n",
    "    bn = BatchNormalization()(timedist)\n",
    "    relu = Activation('relu')(bn)\n",
    "    dropout = Dropout(params['timedist_dropout_rate'])(relu)\n",
    "\n",
    "    lambda_max = Lambda(\n",
    "        lambda x: K.max(x, axis=1),\n",
    "        output_shape=(EMBEDDING_DIM, )\n",
    "    )(dropout)\n",
    "    \n",
    "    output = lambda_max\n",
    "    return output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def create_dense_block(input_layer, num_units, dropout_rate):\n",
    "    dense = Dense(\n",
    "        num_units,\n",
    "        kernel_initializer=init_weights,\n",
    "        bias_initializer=init_bias,\n",
    "    )(input_layer)\n",
    "    bn = BatchNormalization()(dense)\n",
    "    relu = Activation('relu')(bn)\n",
    "    dropout = Dropout(dropout_rate)(relu)\n",
    "    output = dropout\n",
    "    \n",
    "    return output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def create_model(params):\n",
    "    input_q1, emb_q1 = create_embedding_block()\n",
    "    input_q2, emb_q2 = create_embedding_block()\n",
    "    \n",
    "    # Feature extractors.\n",
    "    conv_q1_output = create_model_question_conv_branch(emb_q1, params)\n",
    "    conv_q2_output = create_model_question_conv_branch(emb_q2, params)\n",
    "    \n",
    "    timedist_q1_output = create_model_question_timedist_max_branch(emb_q1, params)\n",
    "    timedist_q2_output = create_model_question_timedist_max_branch(emb_q2, params)\n",
    "    \n",
    "    # Mid-level transforms.\n",
    "    conv_merged = concatenate([conv_q1_output, conv_q2_output])\n",
    "    conv_dense_1 = create_dense_block(conv_merged, params['num_dense_1'], params['dense_dropout_rate'])\n",
    "    conv_dense_2 = create_dense_block(conv_dense_1, params['num_dense_2'], params['dense_dropout_rate'])\n",
    "\n",
    "    td_merged = concatenate([timedist_q1_output, timedist_q2_output])\n",
    "    td_dense_1 = create_dense_block(td_merged, params['num_dense_1'], params['dense_dropout_rate'])\n",
    "    td_dense_2 = create_dense_block(td_dense_1, params['num_dense_2'], params['dense_dropout_rate'])\n",
    "\n",
    "    # Magic features.\n",
    "    magic_input = Input(shape=(X_train_magic.shape[-1], ))\n",
    "    \n",
    "    # Main dense block.\n",
    "    merged_main = concatenate([conv_dense_2, td_dense_2, magic_input])\n",
    "    dense_main_1 = create_dense_block(merged_main, params['num_dense_1'], params['dense_dropout_rate'])\n",
    "    dense_main_2 = create_dense_block(dense_main_1, params['num_dense_2'], params['dense_dropout_rate'])\n",
    "    dense_main_3 = create_dense_block(dense_main_2, params['num_dense_3'], params['dense_dropout_rate'])\n",
    "    \n",
    "    output = Dense(\n",
    "        1,\n",
    "        kernel_initializer=init_weights,\n",
    "        bias_initializer=init_bias,\n",
    "        activation='sigmoid',\n",
    "    )(dense_main_3)\n",
    "    \n",
    "    model = Model(\n",
    "        inputs=[input_q1, input_q2, magic_input],\n",
    "        outputs=output,\n",
    "    )\n",
    "    \n",
    "    model.compile(\n",
    "        loss='binary_crossentropy',\n",
    "        optimizer='nadam',\n",
    "        metrics=['accuracy']\n",
    "    )\n",
    "\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def predict(model, X_q1, X_q2, X_magic):\n",
    "    \"\"\"\n",
    "    Mirror the pairs, compute two separate predictions, and average them.\n",
    "    \"\"\"\n",
    "    \n",
    "    y1 = model.predict([X_q1, X_q2, X_magic], batch_size=1024, verbose=1).reshape(-1)   \n",
    "    y2 = model.predict([X_q2, X_q1, X_magic], batch_size=1024, verbose=1).reshape(-1)    \n",
    "    return (y1 + y2) / 2"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Partition the data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "NUM_FOLDS = 5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "kfold = StratifiedKFold(\n",
    "    n_splits=NUM_FOLDS,\n",
    "    shuffle=True,\n",
    "    random_state=RANDOM_SEED\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Create placeholders for out-of-fold predictions."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "y_train_oofp = np.zeros_like(y_train, dtype='float64')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "y_test_oofp = np.zeros((len(X_test_q1), NUM_FOLDS))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Define hyperparameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "BATCH_SIZE = 2048"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "MAX_EPOCHS = 200"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "model_params = {\n",
    "    'num_conv_filters': 32,\n",
    "    'num_dense_1': 256,\n",
    "    'num_dense_2': 128,\n",
    "    'num_dense_3': 100,\n",
    "    'conv_kernel_size': 3,\n",
    "    'conv_dropout_rate': 0.25,\n",
    "    'timedist_dropout_rate': 0.25,\n",
    "    'dense_dropout_rate': 0.25,\n",
    "}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The path where the best weights of the current model will be saved."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "model_checkpoint_path = project.temp_dir + 'fold-checkpoint-' + feature_list_id + '.h5'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Fit the folds and compute out-of-fold predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Fitting fold 1 of 5\n",
      "\n",
      "Train on 646862 samples, validate on 161718 samples\n",
      "Epoch 1/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.3594 - acc: 0.8420- ETA: 8s - loss: 0.3689 - acc: 0 - Epoch 00000: val_loss improved from inf to 0.41719, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 36s - loss: 0.3594 - acc: 0.8420 - val_loss: 0.4172 - val_acc: 0.7607\n",
      "Epoch 2/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.3105 - acc: 0.8605Epoch 00001: val_loss improved from 0.41719 to 0.30802, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 34s - loss: 0.3104 - acc: 0.8605 - val_loss: 0.3080 - val_acc: 0.8545\n",
      "Epoch 3/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2916 - acc: 0.8694- ETA: 3s - loss: 0.2 - ETA: 1s - loss: 0.2918Epoch 00002: val_loss improved from 0.30802 to 0.28898, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 34s - loss: 0.2916 - acc: 0.8694 - val_loss: 0.2890 - val_acc: 0.8694\n",
      "Epoch 4/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2787 - acc: 0.8755Epoch 00003: val_loss improved from 0.28898 to 0.28317, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 34s - loss: 0.2787 - acc: 0.8755 - val_loss: 0.2832 - val_acc: 0.8730\n",
      "Epoch 5/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2687 - acc: 0.8806Epoch 00004: val_loss improved from 0.28317 to 0.27955, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 34s - loss: 0.2687 - acc: 0.8806 - val_loss: 0.2795 - val_acc: 0.8753\n",
      "Epoch 6/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2604 - acc: 0.8848Epoch 00005: val_loss improved from 0.27955 to 0.27764, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 34s - loss: 0.2604 - acc: 0.8848 - val_loss: 0.2776 - val_acc: 0.8750\n",
      "Epoch 7/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2533 - acc: 0.8883Epoch 00006: val_loss improved from 0.27764 to 0.27308, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 34s - loss: 0.2533 - acc: 0.8883 - val_loss: 0.2731 - val_acc: 0.8780\n",
      "Epoch 8/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2464 - acc: 0.8923Epoch 00007: val_loss improved from 0.27308 to 0.27079, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 34s - loss: 0.2464 - acc: 0.8923 - val_loss: 0.2708 - val_acc: 0.8788\n",
      "Epoch 9/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2410 - acc: 0.8948Epoch 00008: val_loss did not improve\n",
      "646862/646862 [==============================] - 34s - loss: 0.2410 - acc: 0.8948 - val_loss: 0.2744 - val_acc: 0.8766\n",
      "Epoch 10/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2355 - acc: 0.8969Epoch 00009: val_loss did not improve\n",
      "646862/646862 [==============================] - 34s - loss: 0.2355 - acc: 0.8968 - val_loss: 0.2711 - val_acc: 0.8791\n",
      "Epoch 11/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2305 - acc: 0.8995Epoch 00010: val_loss did not improve\n",
      "646862/646862 [==============================] - 34s - loss: 0.2305 - acc: 0.8995 - val_loss: 0.2744 - val_acc: 0.8780\n",
      "Epoch 12/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2258 - acc: 0.9014Epoch 00011: val_loss did not improve\n",
      "646862/646862 [==============================] - 34s - loss: 0.2258 - acc: 0.9013 - val_loss: 0.2712 - val_acc: 0.8811\n",
      "Epoch 00011: early stopping\n",
      "80859/80859 [==============================] - 1s     \n",
      "2345796/2345796 [==============================] - 39s    \n",
      "2344960/2345796 [============================>.] - ETA: 0s\n",
      "Fitting fold 2 of 5\n",
      "\n",
      "Train on 646862 samples, validate on 161718 samples\n",
      "Epoch 1/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.3581 - acc: 0.8424Epoch 00000: val_loss improved from inf to 0.34933, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 36s - loss: 0.3580 - acc: 0.8425 - val_loss: 0.3493 - val_acc: 0.8389\n",
      "Epoch 2/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.3105 - acc: 0.8603Epoch 00001: val_loss improved from 0.34933 to 0.30979, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 35s - loss: 0.3105 - acc: 0.8603 - val_loss: 0.3098 - val_acc: 0.8569\n",
      "Epoch 3/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2905 - acc: 0.8699Epoch 00002: val_loss improved from 0.30979 to 0.29465, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 35s - loss: 0.2905 - acc: 0.8699 - val_loss: 0.2947 - val_acc: 0.8666\n",
      "Epoch 4/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2772 - acc: 0.8769Epoch 00003: val_loss improved from 0.29465 to 0.28098, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 35s - loss: 0.2772 - acc: 0.8769 - val_loss: 0.2810 - val_acc: 0.8750\n",
      "Epoch 5/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2670 - acc: 0.8820Epoch 00004: val_loss improved from 0.28098 to 0.28072, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646862/646862 [==============================] - 35s - loss: 0.2670 - acc: 0.8820 - val_loss: 0.2807 - val_acc: 0.8760\n",
      "Epoch 6/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2580 - acc: 0.8861Epoch 00005: val_loss did not improve\n",
      "646862/646862 [==============================] - 34s - loss: 0.2581 - acc: 0.8861 - val_loss: 0.2819 - val_acc: 0.8753\n",
      "Epoch 7/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2512 - acc: 0.8894Epoch 00006: val_loss did not improve\n",
      "646862/646862 [==============================] - 34s - loss: 0.2511 - acc: 0.8895 - val_loss: 0.2877 - val_acc: 0.8719\n",
      "Epoch 8/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2453 - acc: 0.8923Epoch 00007: val_loss did not improve\n",
      "646862/646862 [==============================] - 34s - loss: 0.2453 - acc: 0.8923 - val_loss: 0.2858 - val_acc: 0.8724\n",
      "Epoch 00007: early stopping\n",
      "2345796/2345796 [==============================] - 39s    \n",
      "2344960/2345796 [============================>.] - ETA: 0s\n",
      "Fitting fold 3 of 5\n",
      "\n",
      "Train on 646864 samples, validate on 161716 samples\n",
      "Epoch 1/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.3590 - acc: 0.8422Epoch 00000: val_loss improved from inf to 0.34252, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646864/646864 [==============================] - 36s - loss: 0.3589 - acc: 0.8422 - val_loss: 0.3425 - val_acc: 0.8415\n",
      "Epoch 2/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.3109 - acc: 0.8605Epoch 00001: val_loss improved from 0.34252 to 0.29929, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "646864/646864 [==============================] - 35s - loss: 0.3108 - acc: 0.8605 - val_loss: 0.2993 - val_acc: 0.8657\n",
      "Epoch 3/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2918 - acc: 0.8698Epoch 00002: val_loss improved from 0.29929 to 0.28642, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646864/646864 [==============================] - 35s - loss: 0.2918 - acc: 0.8699 - val_loss: 0.2864 - val_acc: 0.8716\n",
      "Epoch 4/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2787 - acc: 0.8763Epoch 00003: val_loss improved from 0.28642 to 0.28345, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646864/646864 [==============================] - 35s - loss: 0.2787 - acc: 0.8763 - val_loss: 0.2835 - val_acc: 0.8724\n",
      "Epoch 5/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2691 - acc: 0.8813Epoch 00004: val_loss improved from 0.28345 to 0.28249, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646864/646864 [==============================] - 35s - loss: 0.2691 - acc: 0.8813 - val_loss: 0.2825 - val_acc: 0.8754\n",
      "Epoch 6/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2606 - acc: 0.8852Epoch 00005: val_loss improved from 0.28249 to 0.27647, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646864/646864 [==============================] - 35s - loss: 0.2606 - acc: 0.8852 - val_loss: 0.2765 - val_acc: 0.8769\n",
      "Epoch 7/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2531 - acc: 0.8889Epoch 00006: val_loss improved from 0.27647 to 0.27338, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646864/646864 [==============================] - 35s - loss: 0.2531 - acc: 0.8889 - val_loss: 0.2734 - val_acc: 0.8790\n",
      "Epoch 8/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2464 - acc: 0.8920Epoch 00007: val_loss improved from 0.27338 to 0.26881, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646864/646864 [==============================] - 35s - loss: 0.2464 - acc: 0.8919 - val_loss: 0.2688 - val_acc: 0.8802\n",
      "Epoch 9/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2410 - acc: 0.8947Epoch 00008: val_loss did not improve\n",
      "646864/646864 [==============================] - 34s - loss: 0.2410 - acc: 0.8946 - val_loss: 0.2780 - val_acc: 0.8736\n",
      "Epoch 10/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2352 - acc: 0.8973Epoch 00009: val_loss did not improve\n",
      "646864/646864 [==============================] - 34s - loss: 0.2352 - acc: 0.8973 - val_loss: 0.2724 - val_acc: 0.8810\n",
      "Epoch 11/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2305 - acc: 0.8998Epoch 00010: val_loss did not improve\n",
      "646864/646864 [==============================] - 34s - loss: 0.2306 - acc: 0.8998 - val_loss: 0.2738 - val_acc: 0.8765\n",
      "Epoch 12/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2258 - acc: 0.9018Epoch 00011: val_loss did not improve\n",
      "646864/646864 [==============================] - 34s - loss: 0.2258 - acc: 0.9018 - val_loss: 0.2813 - val_acc: 0.8786\n",
      "Epoch 00011: early stopping\n",
      "80858/80858 [==============================] - 1s     \n",
      "2344960/2345796 [============================>.] - ETA: 0s\n",
      "Fitting fold 4 of 5\n",
      "\n",
      "Train on 646866 samples, validate on 161714 samples\n",
      "Epoch 1/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.3595 - acc: 0.8422Epoch 00000: val_loss improved from inf to 0.33843, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 36s - loss: 0.3594 - acc: 0.8423 - val_loss: 0.3384 - val_acc: 0.8495\n",
      "Epoch 2/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.3118 - acc: 0.8597Epoch 00001: val_loss improved from 0.33843 to 0.30925, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.3118 - acc: 0.8597 - val_loss: 0.3093 - val_acc: 0.8599\n",
      "Epoch 3/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2917 - acc: 0.8693Epoch 00002: val_loss improved from 0.30925 to 0.29044, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.2917 - acc: 0.8693 - val_loss: 0.2904 - val_acc: 0.8682\n",
      "Epoch 4/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2790 - acc: 0.8762Epoch 00003: val_loss improved from 0.29044 to 0.28157, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.2791 - acc: 0.8761 - val_loss: 0.2816 - val_acc: 0.8746\n",
      "Epoch 5/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2689 - acc: 0.8810Epoch 00004: val_loss improved from 0.28157 to 0.27513, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.2690 - acc: 0.8810 - val_loss: 0.2751 - val_acc: 0.8777\n",
      "Epoch 6/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2606 - acc: 0.8848Epoch 00005: val_loss improved from 0.27513 to 0.27183, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.2605 - acc: 0.8848 - val_loss: 0.2718 - val_acc: 0.8791\n",
      "Epoch 7/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2533 - acc: 0.8885Epoch 00006: val_loss improved from 0.27183 to 0.27021, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.2532 - acc: 0.8885 - val_loss: 0.2702 - val_acc: 0.8800\n",
      "Epoch 8/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2466 - acc: 0.8915Epoch 00007: val_loss did not improve\n",
      "646866/646866 [==============================] - 34s - loss: 0.2466 - acc: 0.8915 - val_loss: 0.2715 - val_acc: 0.8788\n",
      "Epoch 9/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2413 - acc: 0.8938Epoch 00008: val_loss improved from 0.27021 to 0.26384, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.2413 - acc: 0.8939 - val_loss: 0.2638 - val_acc: 0.8830\n",
      "Epoch 10/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2355 - acc: 0.8972Epoch 00009: val_loss did not improve\n",
      "646866/646866 [==============================] - 34s - loss: 0.2355 - acc: 0.8972 - val_loss: 0.2657 - val_acc: 0.8819\n",
      "Epoch 11/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2306 - acc: 0.8995Epoch 00010: val_loss did not improve\n",
      "646866/646866 [==============================] - 34s - loss: 0.2306 - acc: 0.8995 - val_loss: 0.2658 - val_acc: 0.8829\n",
      "Epoch 12/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2260 - acc: 0.9017Epoch 00011: val_loss did not improve\n",
      "646866/646866 [==============================] - 34s - loss: 0.2260 - acc: 0.9017 - val_loss: 0.2663 - val_acc: 0.8840\n",
      "Epoch 13/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2218 - acc: 0.9034Epoch 00012: val_loss did not improve\n",
      "646866/646866 [==============================] - 34s - loss: 0.2218 - acc: 0.9034 - val_loss: 0.2716 - val_acc: 0.8832\n",
      "Epoch 00012: early stopping\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "80857/80857 [==============================] - 1s     \n",
      "2345796/2345796 [==============================] - 38s    \n",
      "2342912/2345796 [============================>.] - ETA: 0s\n",
      "Fitting fold 5 of 5\n",
      "\n",
      "Train on 646866 samples, validate on 161714 samples\n",
      "Epoch 1/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.3588 - acc: 0.8420Epoch 00000: val_loss improved from inf to 0.38813, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 36s - loss: 0.3587 - acc: 0.8421 - val_loss: 0.3881 - val_acc: 0.7911\n",
      "Epoch 2/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.3109 - acc: 0.8605Epoch 00001: val_loss improved from 0.38813 to 0.30509, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.3109 - acc: 0.8605 - val_loss: 0.3051 - val_acc: 0.8650\n",
      "Epoch 3/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2915 - acc: 0.8695Epoch 00002: val_loss improved from 0.30509 to 0.28836, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.2915 - acc: 0.8695 - val_loss: 0.2884 - val_acc: 0.8711\n",
      "Epoch 4/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2792 - acc: 0.8755Epoch 00003: val_loss improved from 0.28836 to 0.28503, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.2792 - acc: 0.8755 - val_loss: 0.2850 - val_acc: 0.8725\n",
      "Epoch 5/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2694 - acc: 0.8807Epoch 00004: val_loss improved from 0.28503 to 0.27480, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.2694 - acc: 0.8807 - val_loss: 0.2748 - val_acc: 0.8785\n",
      "Epoch 6/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2608 - acc: 0.8848Epoch 00005: val_loss did not improve\n",
      "646866/646866 [==============================] - 34s - loss: 0.2608 - acc: 0.8848 - val_loss: 0.2778 - val_acc: 0.8751\n",
      "Epoch 7/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2530 - acc: 0.8882Epoch 00006: val_loss did not improve\n",
      "646866/646866 [==============================] - 34s - loss: 0.2531 - acc: 0.8882 - val_loss: 0.2778 - val_acc: 0.8756\n",
      "Epoch 8/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2471 - acc: 0.8912Epoch 00007: val_loss improved from 0.27480 to 0.27173, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.2471 - acc: 0.8912 - val_loss: 0.2717 - val_acc: 0.8783\n",
      "Epoch 9/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2413 - acc: 0.8941Epoch 00008: val_loss improved from 0.27173 to 0.26839, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 35s - loss: 0.2413 - acc: 0.8941 - val_loss: 0.2684 - val_acc: 0.8811\n",
      "Epoch 10/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2361 - acc: 0.8969Epoch 00009: val_loss did not improve\n",
      "646866/646866 [==============================] - 34s - loss: 0.2362 - acc: 0.8968 - val_loss: 0.2718 - val_acc: 0.8774\n",
      "Epoch 11/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2312 - acc: 0.8995Epoch 00010: val_loss improved from 0.26839 to 0.26481, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_cnn_with_magic.h5\n",
      "646866/646866 [==============================] - 34s - loss: 0.2313 - acc: 0.8995 - val_loss: 0.2648 - val_acc: 0.8823\n",
      "Epoch 12/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2268 - acc: 0.9011Epoch 00011: val_loss did not improve\n",
      "646866/646866 [==============================] - 34s - loss: 0.2268 - acc: 0.9011 - val_loss: 0.2696 - val_acc: 0.8792\n",
      "Epoch 13/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2226 - acc: 0.9028Epoch 00012: val_loss did not improve\n",
      "646866/646866 [==============================] - 34s - loss: 0.2226 - acc: 0.9028 - val_loss: 0.2661 - val_acc: 0.8832\n",
      "Epoch 14/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2181 - acc: 0.9055Epoch 00013: val_loss did not improve\n",
      "646866/646866 [==============================] - 34s - loss: 0.2182 - acc: 0.9055 - val_loss: 0.2735 - val_acc: 0.8777\n",
      "Epoch 15/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2154 - acc: 0.9067Epoch 00014: val_loss did not improve\n",
      "646866/646866 [==============================] - 34s - loss: 0.2154 - acc: 0.9067 - val_loss: 0.2687 - val_acc: 0.8837\n",
      "Epoch 00014: early stopping\n",
      "2345796/2345796 [==============================] - 38s    \n",
      "2342912/2345796 [============================>.] - ETA: 0sCPU times: user 24min 59s, sys: 3min 2s, total: 28min 2s\n",
      "Wall time: 42min 8s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "# Train one CNN per CV fold; collect out-of-fold (OOF) predictions for\n",
    "# the training set and per-fold predictions for the test set.\n",
    "for fold_num, (ix_train, ix_val) in enumerate(kfold.split(X_train_q1, y_train)):\n",
    "\n",
    "    # Augment the training set by mirroring the pairs: the duplicate\n",
    "    # relation is symmetric, so (q1, q2) and (q2, q1) are both valid\n",
    "    # examples with the same label.\n",
    "    X_fold_train_q1 = np.vstack([X_train_q1[ix_train], X_train_q2[ix_train]])\n",
    "    X_fold_train_q2 = np.vstack([X_train_q2[ix_train], X_train_q1[ix_train]])\n",
    "    X_fold_train_magic = np.vstack([X_train_magic[ix_train], X_train_magic[ix_train]])\n",
    "\n",
    "    X_fold_val_q1 = np.vstack([X_train_q1[ix_val], X_train_q2[ix_val]])\n",
    "    X_fold_val_q2 = np.vstack([X_train_q2[ix_val], X_train_q1[ix_val]])\n",
    "    X_fold_val_magic = np.vstack([X_train_magic[ix_val], X_train_magic[ix_val]])\n",
    "\n",
    "    # Ground truth should also be \"mirrored\".\n",
    "    y_fold_train = np.concatenate([y_train[ix_train], y_train[ix_train]])\n",
    "    y_fold_val = np.concatenate([y_train[ix_val], y_train[ix_val]])\n",
    "\n",
    "    print()\n",
    "    print(f'Fitting fold {fold_num + 1} of {kfold.n_splits}')\n",
    "    print()\n",
    "\n",
    "    # Compile a fresh model for this fold.\n",
    "    model = create_model(model_params)\n",
    "\n",
    "    # Train.\n",
    "    model.fit(\n",
    "        [X_fold_train_q1, X_fold_train_q2, X_fold_train_magic], y_fold_train,\n",
    "        validation_data=([X_fold_val_q1, X_fold_val_q2, X_fold_val_magic], y_fold_val),\n",
    "\n",
    "        batch_size=BATCH_SIZE,\n",
    "        epochs=MAX_EPOCHS,\n",
    "        verbose=1,\n",
    "\n",
    "        callbacks=[\n",
    "            # Stop training when the validation loss stops improving.\n",
    "            EarlyStopping(\n",
    "                monitor='val_loss',\n",
    "                min_delta=0.001,\n",
    "                patience=3,\n",
    "                verbose=1,\n",
    "                mode='auto',\n",
    "            ),\n",
    "            # Save the weights of the best epoch.\n",
    "            ModelCheckpoint(\n",
    "                model_checkpoint_path,\n",
    "                monitor='val_loss',\n",
    "                save_best_only=True,\n",
    "                verbose=2,\n",
    "            ),\n",
    "        ],\n",
    "    )\n",
    "\n",
    "    # Reload the best checkpoint: training may have continued past the\n",
    "    # best epoch before early stopping triggered.\n",
    "    model.load_weights(model_checkpoint_path)\n",
    "\n",
    "    # Compute out-of-fold predictions (original pair order, no mirroring).\n",
    "    y_train_oofp[ix_val] = predict(model, X_train_q1[ix_val], X_train_q2[ix_val], X_train_magic[ix_val])\n",
    "    y_test_oofp[:, fold_num] = predict(model, X_test_q1, X_test_q2, X_test_magic)\n",
    "\n",
    "    # Clear GPU memory and release ALL fold-level arrays before the next\n",
    "    # iteration (the magic inputs and labels were previously kept alive).\n",
    "    K.clear_session()\n",
    "    del X_fold_train_q1\n",
    "    del X_fold_train_q2\n",
    "    del X_fold_train_magic\n",
    "    del X_fold_val_q1\n",
    "    del X_fold_val_q2\n",
    "    del X_fold_val_magic\n",
    "    del y_fold_train\n",
    "    del y_fold_val\n",
    "    del model\n",
    "    gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CV score: 0.264377776985\n"
     ]
    }
   ],
   "source": [
    "# Out-of-fold log loss over the full training set.\n",
    "cv_score = log_loss(y_train, y_train_oofp)\n",
    "print(f'CV score: {cv_score}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Save features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# A single feature column, named after this feature list's id.\n",
    "feature_names = [feature_list_id]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# One column: the out-of-fold prediction for each training pair.\n",
    "features_train = y_train_oofp.reshape(-1, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Average the per-fold test predictions into a single feature column.\n",
    "features_test = y_test_oofp.mean(axis=1).reshape(-1, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Persist the train/test feature matrices under the feature list id.\n",
    "project.save_features(features_train, features_test, feature_names, feature_list_id)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Explore"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<matplotlib.axes._subplots.AxesSubplot at 0x7fe999025c50>"
      ]
     },
     "execution_count": 41,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAqAAAAGoCAYAAACZh1c1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3X90lPWZ///XTBIyM8SFhEBbWhpCaAtWTSCopKZAybG2\nNj8U06NGKcVFE0TZtkI1pRgsUnQl6UrbRfSARwOtLXElECmbokKhkJYYKRXE1s5Uu2VpM0SiTCaT\nmNzfP/xmPs4GwgDJeyaT5+Ocnp7c7ztzXdOLhFffN/c9NsuyLAEAAACG2CPdAAAAAIYWAigAAACM\nIoACAADAKAIoAAAAjCKAAgAAwCgCKAAAAIyKj3QDsai5+f0Br2Gz2TRq1HCdPOkTT9KKXswp+jGj\nwYE5RT9mNDiYnNPo0ZecdY0d0EHKbv/wD5GdCUY15hT9mNHgwJyiHzMaHKJlTvwxAQAAgFEEUAAA\nABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYxWfB\nAwAARMgdj7xspM7GB2af1/l/+tMxPfbYD+XxuPWpT31aS5aU67LLLu+3ftgBBQAAQFAgEND9939H\n119fqJ07d6u4+GY98MB31NbW1m81CKAAAAAIampqlM1m0403Fis+Pl75+UVKSUnRgQO/7bcaBFAA\nAAAEvfPOXzV+/ISQY5/+dJreeeev/VaDAAoAAIAgv98vh8MRciwx0aH29vZ+q0EABQAAQJDD4VAg\nEAg5Fgi0y+l09lsNAigAAACC0tLS9c47b4cce+edt5WePuEs33H+eAzTIFZwX22kW+h35/uYCAAA\n0L+ys69UZ2eHamqe0w03FGvnzhfV0tKiq67K6bca7IACAAAgaNiwYVqzZq127arXV786W88//ws9\n8khVv16CZwcUAAAgQqL1yt/EiZ/RE09sHLDXZwcUAAAARhFAAQAAYBQBFAAAAEYRQAEAAGAUARQA\nAABGEUABAABgFAEUAAAARhFAAQAAYBQBFAAAAEYRQAEAAGAUARQAAABGEUABAABgFAEUAAAARhFA\nAQAAYBQBFAAAAEYRQAEAAGAUARQAAABGEUABAABgFAEUAAAARhFAAQAAYBQBFAAAAEYRQAEAAGAU\nARQAAABGEUABAABgFAEUAAAARhFAAQAAYBQBFAAAAEYRQAEAAGBURALo4cOHlZubG/z6xIkTuvvu\nu3X11Vfrmmuu0cqVK9XR0SFJsixLlZWVmj59uq688ko9/PDD6urqCn5vXV2d8vLylJWVpdLSUnm9\n3uDa0aNHVVxcrKysLBUVFenQoUPBtdbWVi1atEjZ2dmaNWuWtmzZElw7V00AAABcOKMB1LIs1dTU\n6I477lBnZ2fw+NKlS/Xxj39cv/nNb7R161b98Y9/1E9/+lNJ0ubNm7V7925t27ZNO3bsUFNTkzZu\n3ChJOnbsmCoqKlRVVaWGhgalpqaqvLxckhQIBFRWVqY5c+bo4MGDmjt3rhYuXCifzydJWr58uVwu\nl/bv36+1a9dqzZo1wYDaV00AAABcHKMB9IknntCzzz6rsrKy4LGOjg45nU4tXLhQiYmJGj16tAoK\nCvTaa69JkmprazVv3jyNGTNGo0ePVmlpqV544QVJ0vbt25WXl6fMzEw5HA4tWbJEe/fuldfrVUND\ng+x2u0pKSpSQkKDi4mKlpqZqz5498vl82rVrlxYvXqzExERdccUVys/P19atW89ZEwAAABcn3mSx\nm266SWVlZfr9738fPDZs2DA9+eSTIee98sormjRpkiTJ7XZr4sSJwbX09HR5PB5ZliW3260pU6YE\n15KTkzVixAh5PB55PB5lZGSEvG56errcbrfGjx+v+Ph4jRs3LmStvr7+nDVtNts536fNZpN9gKO9\n3X7uPgajuLjYel89c4rVecUCZjQ4MK
fox4wGh2iZk9EAOmbMmD7XLcvSqlWr5Ha79dhjj0mS/H6/\nHA5H8Byn06nu7m51dHT0WutZ9/v9amtrk9PpDFlzOBxqb29XW1tbr+/rWTtXzcTExHO+z1GjhocV\nVNFbSkpSpFsYECNHDo90CzgHZjQ4MKfox4wGh0jPyWgA7Ut7e7u++93v6s0331R1dbVGjRol6cNg\nGAgEguf5/X7Fx8crMTExJDR+dN3lcsnpdPZaa29vD6599DU/unaumuE4edLHDugFamk5HekW+pXd\nbtPIkcN16pRP3d1WpNvBGTCjwYE5RT9mNDiYnFNfm0pREUBPnTqlBQsWyOVy6Re/+IVGjhwZXMvI\nyJDH41FmZqYkyePxaMKECSFrPVpaWtTa2qqMjAz5fD5t2rQppI7H41F+fr7S0tLU2dmp48ePa+zY\nscG1nsvufdUMh2VZ4qb5C9PVFZu/tLq7rZh9b7GCGQ0OzCn6MaPBIdJzivhzQC3L0r333qvU1FRt\n2LAhJHxKUmFhoTZs2KATJ07I6/Vq/fr1KioqkiTl5+ervr5ejY2NCgQCqqqq0owZM5ScnKycnBx1\ndHSourpanZ2dqqmpkdfrVW5urpKSkpSXl6fKykr5/X4dPnxYdXV1KigoOGdNAAAAXJyI74C+9tpr\n+v3vf6/ExERdddVVweOXXnqpNm/erJKSEnm9XhUXF6uzs1MFBQWaP3++JGny5MlauXKlli1bpubm\nZk2bNk2rV6+W9OHNTU899ZRWrFihqqoqpaWlad26dcHL7CtXrlRFRYVmzpwpl8ulpUuXBnc8+6oJ\nAACAi2OzLIt98n7W3Pz+gNeIi7Np3qqXBryOaRsfmB3pFvpVXJxNKSlJamk5zSWpKMWMBgfmFP2Y\n0eBgck6jR19y1rWIX4IHAADA0EIABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAA\nGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhFAAUA\nAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQ\nAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhF\nAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACA\nUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBREQmghw8fVm5ubvDr1tZWLVq0SNnZ2Zo1a5a2bNkS\nXLMsS5WVlZo+fbquvPJKPfzww+rq6gqu19XVKS8vT1lZWSotLZXX6w2uHT16VMXFxcrKylJRUZEO\nHTrULzUBAABw4YwGUMuyVFNTozvuuEOdnZ3B48uXL5fL5dL+/fu1du1arVmzJhgWN2/erN27d2vb\ntm3asWOHmpqatHHjRknSsWPHVFFRoaqqKjU0NCg1NVXl5eWSpEAgoLKyMs2ZM0cHDx7U3LlztXDh\nQvl8vouqCQAAgItjNIA+8cQTevbZZ1VWVhY85vP5tGvXLi1evFiJiYm64oorlJ+fr61bt0qSamtr\nNW/ePI0ZM0ajR49WaWmpXnjhBUnS9u3blZeXp8zMTDkcDi1ZskR79+6V1+tVQ0OD7Ha7SkpKlJCQ\noOLiYqWmpmrPnj0XVRMAAAAXJ95ksZtuukllZWX6/e9/Hzz29ttvKz4+XuPGjQseS09PV319vSTJ\n7XZr4sSJIWsej0eWZcntdmvKlCnBteTkZI0YMUIej0cej0cZGRkh9dPT0+V2uzV+/PgLrmmz2c75\nPm02m+wDHO3t9nP3MRjFxcXW++qZU6zOKxYwo8GBOUU/ZjQ4RMucjAbQMWPG9DrW1tYmh8MRcszh\ncK
i9vV2S5Pf7Q9adTqe6u7vV0dHRa61n3e/3q62tTU6n84yvezE1ExMTz/k+R40aHlZQRW8pKUmR\nbmFAjBw5PNIt4ByY0eDAnKIfMxocIj0nowH0TJxOpwKBQMix9vZ2uVwuSR8Gw4+u+/1+xcfHKzEx\nMSQ0fnTd5XLJ6XT2Wut53YupGY6TJ33sgF6glpbTkW6hX9ntNo0cOVynTvnU3W1Fuh2cATMaHJhT\n9GNGg4PJOfW1qRTxAJqWlqbOzk4dP35cY8eOlSR5PJ7gJfCMjAx5PB5lZmYG1yZMmBCy1qOlpUWt\nra3KyMiQz+fTpk2bQmp5PB7l5+dfVM1wWJYlbpq/MF1dsflLq7vbitn3FiuY0eDAnKIfMxocIj2n\niD8HNCkpSXl5eaqsrJTf79fhw4dVV1engoICSVJhYaE2bNigEydOyOv1av369SoqKpIk5efnq76+\nXo2NjQoEAqqqqtKMGTOUnJysnJwcdXR0qLq6Wp2dnaqpqZHX61Vubu5F1QQAAMDFifgOqCStXLlS\nFRUVmjlzplwul5YuXRrcfSwpKZHX61VxcbE6OztVUFCg+fPnS5ImT56slStXatmyZWpubta0adO0\nevVqSdKwYcP01FNPacWKFaqqqlJaWprWrVsXvMx+oTUBAABwcWyWZbFP3s+am98f8BpxcTbNW/XS\ngNcxbeMDsyPdQr+Ki7MpJSVJLS2nuSQVpZjR4MCcoh8zGhxMzmn06EvOuhbxS/AAAAAYWgigAAAA\nMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoA\nAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAAowig\nAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCK\nAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAA\nowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKOiJoA2\nNTVpzpw5mjp1qq677jpt375dktTa2qpFixYpOztbs2bN0pYtW4LfY1mWKisrNX36dF155ZV6+OGH\n1dXVFVyvq6tTXl6esrKyVFpaKq/XG1w7evSoiouLlZWVpaKiIh06dCi41ldNAAAAXJyoCKBdXV1a\ntGiR7rrrLjU1NWnVqlV64IEH9D//8z9avny5XC6X9u/fr7Vr12rNmjXBsLh582bt3r1b27Zt044d\nO9TU1KSNGzdKko4dO6aKigpVVVWpoaFBqampKi8vlyQFAgGVlZVpzpw5OnjwoObOnauFCxfK5/NJ\nUp81AQAAcHGiIoC+9957amlpUVdXlyzLks1mU0JCguLi4rRr1y4tXrxYiYmJuuKKK5Sfn6+tW7dK\nkmprazVv3jyNGTNGo0ePVmlpqV544QVJ0vbt25WXl6fMzEw5HA4tWbJEe/fuldfrVUNDg+x2u0pK\nSpSQkKDi4mKlpqZqz5498vl8fdYEAADAxQk7gBYVFWnDhg06ceJEvzeRnJyskpISfec739HnP/95\n3XbbbVq+fLneffddxcfHa9y4ccFz09PT5Xa7JUlut1sTJ04MWfN4PLIsq9dacnKyRowYIY/HI4/H\no4yMjJAeel737bff7rMmAAAALk58uCd+/etf14svvqiqqipNnTpV+fn5+spXvqIRI0ZcdBPd3d1y\nOBx6/PHHNXv2bO3fv1/33Xef1q1bJ4fDEXKuw+FQe3u7JMnv94esO51OdXd3q6Ojo9daz7rf71db\nW5ucTucZX7etra3PmuGw2WyyD/Dest1uG9gCERIXF1vvq2dOsTqv
WMCMBgfmFP2Y0eAQLXMKO4De\nfvvtuv322/X3v/9dO3bs0C9+8QutWrVKubm5KigoUF5enoYNG3ZBTdTX1+vw4cO6//77JUmzZs3S\nrFmz9OMf/1iBQCDk3Pb2drlcLkkfBsOPrvv9fsXHxysxMfGModHv98vlcsnpdPZa63ldp9PZZ81w\njBo1XDYbP4AXIiUlKdItDIiRI4dHugWcAzMaHJhT9GNGg0Ok5xR2AO3xyU9+UnfeeacKCwv13HPP\naePGjXr55ZeVlJSkG264Qffee+9574r+7//+rzo6OkIbi4/X5z//eb366qs6fvy4xo4dK0nyeDzB\nS+sZGRnyeDzKzMwMrk2YMCFkrUdLS4taW1uVkZEhn8+nTZs2hdTzeDzKz89XWlqaOjs7z1ozHCdP\n+tgBvUAtLacj3UK/stttGjlyuE6d8qm724p0OzgDZjQ4MKfox4wGB5Nz6mtT6bwCqNfr1c6dO7Vj\nxw4dOnRIkyZN0re+9S197WtfU3Nzs1atWqWysjL9/Oc/P68Gv/CFL6iyslLPP/988M70X//613rm\nmWf097//XZWVlXr44Yf15z//WXV1dXryySclSYWFhdqwYYOmT5+u+Ph4rV+/XkVFRZKk/Px83X77\n7brpppt0+eWXq6qqSjNmzFBycrJycnLU0dGh6upq3XLLLaqtrZXX61Vubq5cLpfy8vLOWjMclmXp\nI0+Dwnno6orNX1rd3VbMvrdYwYwGB+YU/ZjR4BDpOdksywqr+je+8Q29+uqr+vjHP678/HwVFhb2\nupFn586dWrZsmV599dXzbuTll1/W448/rr/97W8aO3as/u3f/k3XXnutTp06pYqKCh04cEAul0v3\n3HOPiouLJX34+Ka1a9fq+eefV2dnpwoKClReXq64uDhJ0o4dO/T444+rublZ06ZN0+rVqzVq1ChJ\nHz6macWKFXrzzTeVlpamFStWKCsrS5L6rBmO5ub3z/v9n6+4OJvmrXppwOuYtvGB2ZFuoV/FxdmU\nkpKklpbT/EKOUsxocGBO0Y8ZDQ4m5zR69CVnXQs7gD700EMqKCjQ1KlTz3pOS0uLfD5fyB3kQxEB\n9MIRQGEaMxocmFP0Y0aDQ7QE0LD/pWJFRYWam5v1yiuvBI8tX75cu3btCn6dkpIy5MMnAAAA+hZ2\nAH366adVXl6uU6dOBY/9y7/8i+6//34999xzA9IcAAAAYk/YAbS6ulqVlZW68cYbg8eWLl2qRx99\nVBs2bBiQ5gAAABB7wg6g7777rtLS0nodnzhxov75z3/2a1MAAACIXWEH0MzMTG3YsEFdH3m+kGVZ\nevbZZ3XppZcOSHMAAACIPWE/B/SBBx7QN7/5Te3bt0+TJ0+WJL355pvq6Og4r2dkAgAAYGgLO4BO\nmjRJv/rVr7Rjxw795S9/UUJCgmbOnKmCggIlJcXmxycCAACg/53XJyElJyfrtttuG6heAAAAMASE\nHUDfeecdrVmzRq+//ro6Ozv1f59fv2/fvn5vDgAAALEn7ABaXl6ulpYWzZ8/n0vuAAAAuGBhB9A/\n/vGPqqmp0Wc/+9mB7AcAAAAxLuzHMI0dO1anT58eyF4AAAAwBIS9A3rffffpoYce0j333KO0tDQl\nJCSErKenp/d7cwAAAIg9YQfQe++9N+S/Jclms8myLNlsNr3xxhv93x0AAABiTtgB9KWXXhrIPgAA\nADBEhB1AP/nJT0qS/vGPf8jj8SgrK0unT59WamrqgDUHAACA2BP2TUhtbW361re+pZkzZ+qOO+5Q\nc3OzHnzwQZWUlKilpWUgewQAAEAMCTuAPvbYY/rHP/6hX/3qV0pMTJT04Y1JgUBAP/zhDwesQQAA\nAMSWsAPoSy+9pPLy8pC73TMyMvTQQw9p7969A9IcAAAAYk/YAfT06dNn/AQku92uDz74oF+bAgAA\nQOwKO4Dm5ubqiSeeUFdXV/DY
u+++q8cee0zXXHPNgDQHAACA2BN2AP3+97+vv/71r8rJyVF7e7sW\nLFigL33pS2ptbdWyZcsGskcAAADEkLAfwzRmzBj98pe/1IEDB+R2u/XBBx8oIyND11xzjWw220D2\nCAAAgBgSdgDtkZOTo5ycnIHoBQAAAENA2AF00qRJfe508lGcAAAACEfYAfSpp54K+bqrq0vvvPOO\nqqur9e1vf7vfGwMAAEBsCjuAfvGLXzzj8YkTJ6qyslLXX399vzUFAACA2BX2XfBn84lPfEJ//vOf\n+6MXAAAADAFh74Du27ev17HTp09r8+bNmjRpUr82BQAAgNgVdgBdsGBBr2MJCQm6/PLL9YMf/KBf\nmwIAAEDsCjuAHjt2bCD7AAAAwBARdgD1eDxhv2h6evoFNQMAAIDYF3YA/epXvxp8DqhlWZLU67mg\nlmXJZrPxTFAAAACcVdgB9Mc//rGqqqq0dOlSZWdnKyEhQUeOHNHKlSs1Z84cXXvttQPZJwAAAGJE\n2AF09erV+vd//3dNmzYteOzKK6/Uww8/rHvuuUff/OY3B6I/AAAAxJiwnwP63nvvadiwYb2Od3R0\nyO/392tTAAAAiF1hB9Brr71W3/ve97R//369++67amlp0e7du7Vs2TLdcMMNA9kjAAAAYkjYl+CX\nL1+uZcuW6c4771R3d7ekD58DOnfuXH3rW98asAYBAAAQW8IOoC6XSz/60Y/03nvv6a9//aucTqc+\n/elPKzExcSD7AwAAQIw5r8+CP3nypH72s5/pZz/7mVJSUvTSSy/pT3/600D1BgAAgBgUdgA9evSo\nrrvuOu3evVt1dXVqa2vTb3/7W33961/XgQMHBrJHAAAAxJCwA+jq1as1b948Pffcc0pISJAkrVq1\nSnPnztWaNWsGrEEAAADElrAD6JEjR1RYWNjr+M0336y//OUv/doUAAAAYlfYAXTEiBE6fvx4r+NH\njhxRSkpKvzYFAACA2BV2AL311lv14IMP6r//+78lSW+++aY2b96sFStW6Oabbx6wBgEAABBbwn4M\n01133aXhw4frkUcekd/v1z333KPU1FSVlZVp3rx5A9kjAAAAYkjYO6A7d+5UQUGBXnnlFTU1Neng\nwYPat2+fvvnNb8pms110IydOnFBpaammTp2qGTNm6Nlnn5Uktba2atGiRcrOztasWbO0ZcuW4PdY\nlqXKykpNnz49+Ln0XV1dwfW6ujrl5eUpKytLpaWl8nq9wbWjR4+quLhYWVlZKioq0qFDh4JrfdUE\nAADAxQk7gD744INqbm6W9OFD6S+55JJ+a8KyLN19992aMGGCfve732nDhg36yU9+oqamJi1fvlwu\nl0v79+/X2rVrtWbNmmBY3Lx5s3bv3q1t27Zpx44dampq0saNGyVJx44dU0VFhaqqqtTQ0KDU1FSV\nl5dLkgKBgMrKyjRnzhwdPHhQc+fO1cKFC+Xz+SSpz5oAAAC4OGEH0Msuu0y/+c1vBqSJP/zhD/rn\nP/+pJUuWKCEhQZ/5zGf03HPP6WMf+5h27dqlxYsXKzExUVdccYXy8/O1detWSVJtba3mzZunMWPG\naPTo0SotLdULL7wgSdq+fbvy8vKUmZkph8OhJUuWaO/evfJ6vWpoaJDdbldJSYkSEhJUXFys1NRU\n7dmzRz6fr8+aAAAAuDhh/xvQYcOG6dFHH9VPf/pTfepTn5LD4QhZf+655y64iSNHjugzn/mMHnvs\nMW3fvl1JSUkqKyvT5z73OcXHx2vcuHHBc9PT01VfXy9JcrvdmjhxYsiax+ORZVlyu92aMmVKcC05\nOVkjRoyQx+ORx+NRRkZGSA/p6elyu90aP358nzXDYbPZZD+vz5g6f3b7xf+zh2gUFxdb76tnTrE6\nr1jAjAYH5hT9mNHgEC1zCjuAXnbZZbrssssGpInW1lb97ne/0/Tp0/XKK6/o9ddf14IFC/Tkk0
/2\nCroOh0Pt7e2SJL/fH7LudDrV3d2tjo6OXms9636/X21tbXI6nWd83ba2tj5rhmPUqOH98u9ih6KU\nlKRItzAgRo4cHukWcA7MaHBgTtGPGQ0OkZ5TnwH0qquu0s6dO5WSkqJ77rlH0of/tnLChAkaNmxY\nvzUxbNgwjRgxQqWlpZKkqVOn6rrrrtPatWsVCARCzm1vb5fL5ZL0YTD86Lrf71d8fLwSExPPGBr9\nfr9cLpecTmevtZ7XdTqdfdYMx8mTPnZAL1BLy+lIt9Cv7HabRo4crlOnfOrutiLdDs6AGQ0OzCn6\nMaPBweSc+tpU6jOAvvfee7Ks0OZKSkpUW1sbcon6YqWnp6urq0tdXV2Ki4uTJHV1denSSy9VY2Oj\njh8/rrFjx0qSPB5P8LJ7RkaGPB6PMjMzg2sTJkwIWevR0tKi1tZWZWRkyOfzadOmTSE9eDwe5efn\nKy0tTZ2dnWetGQ7LsvSRm/FxHrq6YvOXVne3FbPvLVYwo8GBOUU/ZjQ4RHpO571P938DaX+45ppr\n5HA49JOf/EQffPCBmpqa9Otf/1pf+cpXlJeXp8rKSvn9fh0+fFh1dXUqKCiQJBUWFmrDhg06ceKE\nvF6v1q9fr6KiIklSfn6+6uvr1djYqEAgoKqqKs2YMUPJycnKyclRR0eHqqur1dnZqZqaGnm9XuXm\n5iopKanPmgAAALg4Yf8b0IHkcDhUXV2tH/zgB/rCF76gpKQkff/731dWVpZWrlypiooKzZw5Uy6X\nS0uXLg3ueJaUlMjr9aq4uFidnZ0qKCjQ/PnzJUmTJ0/WypUrtWzZMjU3N2vatGlavXq1pA8v+T/1\n1FNasWKFqqqqlJaWpnXr1gUvs/dVEwAAABfHZvWxpTlp0iT99re/1ahRo4LHpkyZom3btvXrJfhY\n09z8/oDXiIuzad6qlwa8jmkbH5gd6Rb6VVycTSkpSWppOc0lqSjFjAYH5hT9mNHgYHJOo0ef/Znx\n59wBra2t1fDh/+9Oqe7ubtXV1SklJSXkPD4PHgAAAOHoM4COHTu21806o0aN6vXRlDabjQAKAACA\nsPQZQF9++WVTfQAAAGCIGOCnVQIAAAChCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCK\nAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAA\nowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAA\nADCKAAqTmPpIAAATfUlEQVQAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoA\nAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAAowig\nAAAAMIoACgAAAKMIoAAAADCKAAoAAACjoi6Aer1e5eTk6JVXXpEktba2atGiRcrOztasWbO0ZcuW\n4LmWZamyslLTp0/XlVdeqYcfflhdXV3B9bq6OuXl5SkrK0ulpaXyer3BtaNHj6q4uFhZWVkqKirS\noUOHgmt91QQAAMDFiboAumzZMp06dSr49fLly+VyubR//36tXbtWa9asCYbFzZs3a/fu3dq2bZt2\n7NihpqYmbdy4UZJ07NgxVVRUqKqqSg0NDUpNTVV5ebkkKRAIqKysTHPmzNHBgwc1d+5cLVy4UD6f\n75w1AQAAcHGiKoD+/Oc/l9Pp1Cc+8QlJks/n065du7R48WIlJibqiiuuUH5+vrZu3SpJqq2t1bx5\n8zRmzBiNHj1apaWleuGFFyRJ27dvV15enjIzM+VwOLRkyRLt3btXXq9XDQ0NstvtKikpUUJCgoqL\ni5Wamqo9e/acsyYAAAAuTnykG+jh8Xj09NNP65e//KXmzJ
kjSXr77bcVHx+vcePGBc9LT09XfX29\nJMntdmvixIkhax6PR5Zlye12a8qUKcG15ORkjRgxQh6PRx6PRxkZGSH109PT5Xa7NX78+D5rhsNm\ns8k+wNHebrcNbIEIiYuLrffVM6dYnVcsYEaDA3OKfsxocIiWOUVFAP3ggw/03e9+V8uWLdPIkSOD\nx9va2uRwOELOdTgcam9vlyT5/f6QdafTqe7ubnV0dPRa61n3+/1qa2uT0+k84+ueq2Y4Ro0aLpuN\nH8ALkZKSFOkWBsTIkcMj3QLOgRkNDswp+jGjwSHSc4qKAPqf//mfmjx5smbOnBly3Ol0KhAIhBxr\nb2+Xy+WS9GEw/Oi63+9XfHy8EhMTzxga/X6/XC6XnE5nr7We1z1XzXCcPOljB/QCtbScjnQL/cpu\nt2nkyOE6dcqn7m4r0u3gDJjR4MCcoh8zGhxMzqmvTaWoCKA7duxQc3OzduzYIUk6ffq0vvOd72jB\nggXq7OzU8ePHNXbsWEkfXqrvueyekZEhj8ejzMzM4NqECRNC1nq0tLSotbVVGRkZ8vl82rRpU0gP\nHo9H+fn5SktL67NmOCzL0kduxsd56OqKzV9a3d1WzL63WMGMBgfmFP2Y0eAQ6TlFxU1IO3fu1Kuv\nvqrGxkY1NjZq7Nixqqqq0qJFi5SXl6fKykr5/X4dPnxYdXV1KigokCQVFhZqw4YNOnHihLxer9av\nX6+ioiJJUn5+vurr69XY2KhAIKCqqirNmDFDycnJysnJUUdHh6qrq9XZ2amamhp5vV7l5uYqKSmp\nz5oAAAC4OFERQPuycuVKffDBB5o5c6YWL16spUuXBnc8S0pKNHv2bBUXF+trX/uapk6dqvnz50uS\nJk+erJUrV2rZsmXKycnRP//5T61evVqSNGzYMD311FN68cUXddVVV2nTpk1at25d8DJ7XzUBAABw\ncWyWZbFP3s+am98f8BpxcTbNW/XSgNcxbeMDsyPdQr+Ki7MpJSVJLS2nuSQVpZjR4MCcoh8zGhxM\nzmn06EvOuhb1O6AAAACILQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRUfEgeqDHHY+8HOkW+t32\nyqJItwAAQFRhBxQAAABGEUABAABgFAEUAAAARhFAAQAAYBQBFAAAAEYRQAEAAGAUARQAAABGEUAB\nAABgFAEUAAAARhFAAQAAYBQBFAAAAEYRQAEAAGAUARQAAABGEUABAABgFAEUAAAARhFAAQAAYBQB\nFAAAAEYRQAEAAGAUARQAAABGEUABAABgFAEUAAAARhFAAQAAYBQBFAAAAEYRQAEAAGAUARQAAABG\nEUABAABgFAEUAAAARhFAAQAAYBQBFAAAAEYRQAEAAGAUARQAAABGEUABAABgFAEUAAAARhFAAQAA\nYBQBFAAAAEYRQAEAAGAUARQAAABGEUABAABgFAEUAAAARhFAAQAAYFR8pBvo0djYqEcffVRut1vJ\nyclasGCBbrnlFrW2tup73/ueGhoadMkll2jRokX6+te/LkmyLEtVVVXasmWLurq6VFRUpPLycsXF\nxUmS6urq9KMf/UgnT57U1VdfrVWrVik1NVWSdPToUT344IN66623lJaWpoceekhZWVmS1GdN4HwV\n3Fcb6Rb63cYHZke6BQDAIBYVO6Ctra26++679Y1vfEMHDx7U448/rqqqKu3fv1/Lly+Xy+XS/v37\ntXbtWq1Zs0aHDh2SJG3evFm7d+/Wtm3btGPHDjU1NWnjxo2SpGPHjqmiokJVVVVqaGhQamqqysvL\nJUmBQEBlZWWaM2eODh48qLlz52rhwoXy+XyS1GdNAAAAXJyoCKDHjx/XzJkzVVBQILvdrs9//vO6\n+uqr1dTUpF27dmnx4sVKTEzUFVdcofz8fG3dulWSVFtbq3nz5mnMmDEaPXq0SktL9cILL0iStm/f\nrry8PGVmZsrhcGjJki
Xau3evvF6vGhoaZLfbVVJSooSEBBUXFys1NVV79uyRz+frsyYAAAAuTlRc\ngp88ebIee+yx4Netra1qbGzU5z73OcXHx2vcuHHBtfT0dNXX10uS3G63Jk6cGLLm8XhkWZbcbrem\nTJkSXEtOTtaIESPk8Xjk8XiUkZER0kN6errcbrfGjx/fZ81w2Gw22Qc42tvttoEtAPQhLi52/vz1\n/CzxMxXdmFP0Y0aDQ7TMKSoC6Ee9//77KisrC+6CPvvssyHrDodD7e3tkiS/3y+HwxFcczqd6u7u\nVkdHR6+1nnW/36+2tjY5nc4zvm5bW1uv7/tozXCMGjVcNhs/gIhdKSlJkW6h340cOTzSLSAMzCn6\nMaPBIdJziqoA+re//U1lZWUaN26c/uM//kN/+ctfFAgEQs5pb2+Xy+WS9GEw/Oi63+9XfHy8EhMT\nzxga/X6/XC6XnE5nr7We13U6nX3WDMfJkz52QBHTWlpOR7qFfmO32zRy5HCdOuVTd7cV6XZwFswp\n+jGjwcHknPrarIiaAHrkyBEtWLBAhYWFuv/++2W325WWlqbOzk4dP35cY8eOlSR5PJ7gZfeMjAx5\nPB5lZmYG1yZMmBCy1qOlpUWtra3KyMiQz+fTpk2bQup7PB7l5+efs2Y4LMtSV9eF/28BRLuurtj7\ny6W724rJ9xVrmFP0Y0aDQ6TnFBU3IXm9Xi1YsEDz589XeXm57P//9mFSUpLy8vJUWVkpv9+vw4cP\nq66uTgUFBZKkwsJCbdiwQSdOnJDX69X69etVVFQkScrPz1d9fb0aGxsVCARUVVWlGTNmKDk5WTk5\nOero6FB1dbU6OztVU1Mjr9er3Nzcc9YEAADAxYmKHdCamhq1tLRo3bp1WrduXfD4N77xDa1cuVIV\nFRWaOXOmXC6Xli5dGtzxLCkpkdfrVXFxsTo7O1VQUKD58+dL+vDGppUrV2rZsmVqbm7WtGnTtHr1\naknSsGHD9NRTT2nFihWqqqpSWlqa1q1bF7zM3ldNAAAAXBybZVnsk/ez5ub3B7xGXJxN81a9NOB1\ngDOJpQfRx8XZlJKSpJaW01w2jGLMKfoxo8HB5JxGj77krGtRcQkeAAAAQwcBFAAAAEYRQAEAAGAU\nARQAAABGEUABAABgFAEUAAAARhFAAQAAYBQBFAAAAEYRQAEAAGAUARQAAABGRcVnwQMYXO545OVI\nt9CvtlcWRboFABhS2AEFAACAUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACA\nUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRfBY8gCGv4L7aSLfQ7zY+MDvSLQDAWbEDCgAAAKMI\noAAAADCKAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKN4DigAxKA7Hnk50i30u+2VRZFu\nAUA/YQcUAAAARhFAAQAAYBQBFAAAAEYRQAEAAGAUARQAAABGEUABAABgFI9hAgAMCgX31Ua6hX63\n8YHZkW4B58AjzQYGO6AAAAAwigAKAAAAowigAAAAMIoACgAAAKO4CQkAgAiJtRtcouHmFgwO7IAC\nAADAKAIoAAAAjCKAnsXRo0dVXFysrKwsFRUV6dChQ5FuCQAAICYQQM8gEAiorKxMc+bM0cGDBzV3\n7lwtXLhQPp8v0q0BAAAMetyEdAYNDQ2y2+0qKSmRJBUXF+uZZ57Rnj17dP3110e4OwAAolMsfloV\nBgY7oGfg8XiUkZERciw9PV1utztCHQEAAMQOdkDPoK2tTU6nM+SYw+FQe3t7WN9vs9lkH+Bob7fb\nBrYAAACIWZHOEQTQM3A6nb3CZnt7u1wuV1jfn5qaNBBt9cLz1gAAwIUYOXJ4ROtzCf4MJkyYII/H\nE3LM4/Fo4sSJEeoIAAAgdhBAzyAnJ0cdHR2qrq5WZ2enampq5PV6lZubG+nWAAAABj2b
ZVlWpJuI\nRseOHdOKFSv05ptvKi0tTStWrFBWVlak2wIAABj0CKAAAAAwikvwAAAAMIoACgAAAKMIoAAAADCK\nAAoAAACjCKBR7ujRoyouLlZWVpaKiop06NChM55XV1envLw8ZWVlqbS0VF6v13CnQ1e4M/rlL3+p\nL3/5y5o6dapuuukmNTY2Gu50aAt3Tj0OHDigSZMmyefzGeoQ4c6osbFRN954o6ZMmaKCggIdOHDA\ncKdDW7hz2rJli/Ly8pSdna1bbrlFr7/+uuFOcfjw4T4fIRnR7GAharW3t1tf/OIXrc2bN1sdHR3W\nli1brOnTp1unT58OOe+NN96wpk6dah06dMjy+/3W9773PWvBggUR6npoCXdGBw4csK6++mrr6NGj\nVldXl/Vf//VfVnZ2ttXS0hKhzoeWcOfU49SpU9asWbOsz372s2c9B/0r3BmdOHHCmjZtmrVz506r\nu7vb2r59u5WdnW35/f4IdT60nM/fS1dddZXldrutrq4ua/369dbs2bMj1PXQ093dbW3ZssXKzs62\nrrrqqjOeE+nswA5oFGtoaJDdbldJSYkSEhJUXFys1NRU7dmzJ+S87du3Ky8vT5mZmXI4HFqyZIn2\n7t3LLqgB4c7oxIkT+td//VdNnjxZdrtdN954o+Li4vTWW29FqPOhJdw59VixYoWuv/56w10ObeHO\nqLa2Vl/4whd03XXXyWazKT8/X88884zsdv46MyHcOb399tvq7u5WV1eXLMuS3W6Xw+GIUNdDzxNP\nPKFnn31WZWVlZz0n0tmBn9go5vF4lJGREXIsPT1dbrc75Jjb7Q75mNDk5GSNGDGi18eJov+FO6Mb\nbrhBd955Z/DrV199VT6fr9f3YmCEOydJ2rZtm9577z3deuutptqDwp/RkSNH9LGPfUyLFi3S1Vdf\nrZtvvlldXV0aNmyYyXaHrHDnlJubq/Hjx+trX/uaLr/8cq1fv15r1qwx2eqQdtNNN6m2tlaXX375\nWc+JdHYggEaxtrY2OZ3OkGMOh0Pt7e0hx/x+f6//Z+l0OuX3+we8x6Eu3Bl91FtvvaXFixdr8eLF\nSklJGegWofDndPz4cT3++OP64Q9/aLI9KPwZtba2asuWLbr11lu1b98+FRYW6q677lJra6vJdoes\ncOcUCAQ0ceJE1dTU6LXXXtO8efN0zz339Pm7Ef1nzJgxstlsfZ4T6exAAI1iTqez1w9re3u7XC5X\nyLGzhdL/ex76X7gz6rFv3z7deuutuu2223TXXXeZaBEKb07d3d26//779e1vf1sf+9jHTLc45IX7\nszRs2DDNmDFDubm5SkhI0G233SaXy6WmpiaT7Q5Z4c7pJz/5iT7+8Y/r8ssvV2JiohYtWqTOzk7t\n37/fZLvoQ6SzAwE0ik2YMKHXVrjH4wnZMpekjIyMkPNaWlrU2trK5V0Dwp2RJD3//PNavHixKioq\ndPfdd5tqEQpvTidOnNAf/vAHrVixQtOmTVNhYaEkaebMmTyxwIBwf5bS09PV0dERcqy7u1sWnypt\nRLhzOn78eMicbDab4uLiFBcXZ6RPnFukswMBNIrl5OSoo6ND1dXV6uzsVE1Njbxeb69HKuTn56u+\nvl6NjY0KBAKqqqrSjBkzlJycHKHOh45wZ3TgwAE99NBDevLJJ5Wfnx+hboeucOY0duxYHT58WI2N\njWpsbNS2bdskSXv27NG0adMi1fqQEe7PUlFRkfbt26fdu3eru7tb1dXVCgQCuvrqqyPU+dAS7pxm\nzZqlmpoaHTlyRB988IGefvppdXV1KTs7O0Kd4/+KeHYwdr89Lsgbb7xh3XzzzVZWVpZVVFRkvfba\na5ZlWdby5cut5cuXB8978cUXrS9/+cvWlClTrDvvvNPyer2RannICWdG8+fPtyZNmmRlZWWF/GfP\nnj2RbH1ICfdnqcff/vY3HsNkWLgz2rt3r1VUVGRl
ZWVZN954o3Xo0KFItTwkhTOn7u5ua/369daX\nvvQlKzs727r99tutN998M5JtD0kNDQ0hj2GKpuxgsyyuWwAAAMAcLsEDAADAKAIoAAAAjCKAAgAA\nwCgCKAAAAIwigAIAAMAoAigAAACMIoACAADAKAIoAAAAjCKAAgAAwKj/D5C88KhfQP6gAAAAAElF\nTkSuQmCC\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x7fe9990253c8>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Distribution of the averaged test-set predictions; label the figure\n",
    "# and suppress the bare AxesSubplot repr with the trailing semicolon.\n",
    "ax = pd.DataFrame(features_test).plot.hist()\n",
    "ax.set_xlabel('Mean out-of-fold prediction')\n",
    "ax.set_title('Test-set OOF prediction distribution');"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
