{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Feature: Out-Of-Fold Predictions from a Multi-Layer Perceptron (+Magic Inputs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "In addition to the MLP architecture, we'll append some of the leaky features to the intermediate feature layer."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<img src=\"assets/mlp-with-magic.png\" alt=\"Network Architecture\" style=\"height: 900px;\" />"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Imports"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This utility package imports `numpy`, `pandas`, `matplotlib` and a helper `kg` module into the root namespace."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from pygoose import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import gc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "from sklearn.metrics import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "from keras import backend as K\n",
    "from keras.models import Model, Sequential\n",
    "from keras.layers import *\n",
    "from keras.optimizers import *\n",
    "from keras.callbacks import EarlyStopping, ModelCheckpoint"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Config"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Automatically discover the paths to various data folders and compose the project structure."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "project = kg.Project.discover()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Identifier for storing these features on disk and referring to them later."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "feature_list_id = 'oofp_nn_mlp_with_magic'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Seed NumPy so subsequent runs are repeatable. (Note: this alone does not make NN training fully deterministic — the TensorFlow backend would also need to be seeded.)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "RANDOM_SEED = 42"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "np.random.seed(RANDOM_SEED)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Read data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Word embedding lookup matrix."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "embedding_matrix = kg.io.load(project.aux_dir + 'fasttext_vocab_embedding_matrix.pickle')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Padded sequences of word indices for every question."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "X_train_q1 = kg.io.load(project.preprocessed_data_dir + 'sequences_q1_fasttext_train.pickle')\n",
    "X_train_q2 = kg.io.load(project.preprocessed_data_dir + 'sequences_q2_fasttext_train.pickle')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "X_test_q1 = kg.io.load(project.preprocessed_data_dir + 'sequences_q1_fasttext_test.pickle')\n",
    "X_test_q2 = kg.io.load(project.preprocessed_data_dir + 'sequences_q2_fasttext_test.pickle')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "y_train = kg.io.load(project.features_dir + 'y_train.pickle')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Magic features."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "magic_feature_lists = [\n",
    "    'magic_frequencies',\n",
    "    'magic_cooccurrence_matrix',\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "X_train_magic, X_test_magic, _ = project.load_feature_lists(magic_feature_lists)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "X_train_magic = X_train_magic.values\n",
    "X_test_magic = X_test_magic.values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "scaler = StandardScaler()\n",
    "scaler.fit(np.vstack([X_train_magic, X_test_magic]))\n",
    "X_train_magic = scaler.transform(X_train_magic)\n",
    "X_test_magic = scaler.transform(X_test_magic)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Word embedding properties."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "EMBEDDING_DIM = embedding_matrix.shape[-1]\n",
    "VOCAB_LENGTH = embedding_matrix.shape[0]\n",
    "MAX_SEQUENCE_LENGTH = X_train_q1.shape[-1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "300 101564 30\n"
     ]
    }
   ],
   "source": [
    "print(EMBEDDING_DIM, VOCAB_LENGTH, MAX_SEQUENCE_LENGTH)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Define models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def create_model_question_branch():\n",
    "    input_q = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n",
    "    \n",
    "    embedding_q = Embedding(\n",
    "        VOCAB_LENGTH,\n",
    "        EMBEDDING_DIM,\n",
    "        weights=[embedding_matrix],\n",
    "        input_length=MAX_SEQUENCE_LENGTH,\n",
    "        trainable=False,\n",
    "    )(input_q)\n",
    "\n",
    "    timedist_q = TimeDistributed(Dense(\n",
    "        EMBEDDING_DIM,\n",
    "        activation='relu',\n",
    "    ))(embedding_q)\n",
    "\n",
    "    lambda_q = Lambda(\n",
    "        lambda x: K.max(x, axis=1),\n",
    "        output_shape=(EMBEDDING_DIM, )\n",
    "    )(timedist_q)\n",
    "    \n",
    "    output_q = lambda_q\n",
    "    return input_q, output_q"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def create_model(params):\n",
    "    q1_input, q1_output = create_model_question_branch()\n",
    "    q2_input, q2_output = create_model_question_branch()\n",
    "    magic_input = Input(shape=(X_train_magic.shape[-1], ))\n",
    "    \n",
    "    merged_inputs = concatenate([q1_output, q2_output, magic_input])\n",
    "\n",
    "    dense_1 = Dense(params['num_dense_1'])(merged_inputs)\n",
    "    bn_1 = BatchNormalization()(dense_1)\n",
    "    relu_1 = Activation('relu')(bn_1)\n",
    "\n",
    "    dense_2 = Dense(params['num_dense_2'])(relu_1)\n",
    "    bn_2 = BatchNormalization()(dense_2)\n",
    "    relu_2 = Activation('relu')(bn_2)\n",
    "\n",
    "    dense_3 = Dense(params['num_dense_3'])(relu_2)\n",
    "    bn_3 = BatchNormalization()(dense_3)\n",
    "    relu_3 = Activation('relu')(bn_3)\n",
    "\n",
    "    dense_4 = Dense(params['num_dense_4'])(relu_3)\n",
    "    bn_4 = BatchNormalization()(dense_4)\n",
    "    relu_4 = Activation('relu')(bn_4)\n",
    "\n",
    "    bn_final = BatchNormalization()(relu_4)\n",
    "    output = Dense(1, activation='sigmoid')(bn_final)\n",
    "    \n",
    "    model = Model(\n",
    "        inputs=[q1_input, q2_input, magic_input],\n",
    "        outputs=output,\n",
    "    )\n",
    "\n",
    "    model.compile(\n",
    "        loss='binary_crossentropy', \n",
    "        optimizer=Adam(lr=0.01),\n",
    "        metrics=['accuracy']\n",
    "    )\n",
    "\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def predict(model, X_q1, X_q2, X_magic):\n",
    "    \"\"\"\n",
    "    Mirror the pairs, compute two separate predictions, and average them.\n",
    "    \"\"\"\n",
    "    \n",
    "    y1 = model.predict([X_q1, X_q2, X_magic], batch_size=1024, verbose=1).reshape(-1)   \n",
    "    y2 = model.predict([X_q2, X_q1, X_magic], batch_size=1024, verbose=1).reshape(-1)    \n",
    "    return (y1 + y2) / 2"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Partition the data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "NUM_FOLDS = 5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "kfold = StratifiedKFold(\n",
    "    n_splits=NUM_FOLDS,\n",
    "    shuffle=True,\n",
    "    random_state=RANDOM_SEED\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Create placeholders for out-of-fold predictions."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "y_train_oofp = np.zeros_like(y_train, dtype='float64')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "y_test_oofp = np.zeros((len(X_test_q1), NUM_FOLDS))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Define hyperparameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "BATCH_SIZE = 2048"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "MAX_EPOCHS = 200"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "model_params = {\n",
    "    'num_dense_1': 400,\n",
    "    'num_dense_2': 200,\n",
    "    'num_dense_3': 400,\n",
    "    'num_dense_4': 100,\n",
    "}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The path where the best weights of the current model will be saved."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "model_checkpoint_path = project.temp_dir + 'fold-checkpoint-' + feature_list_id + '.h5'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Fit the folds and compute out-of-fold predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Fitting fold 1 of 5\n",
      "\n",
      "Train on 646862 samples, validate on 161718 samples\n",
      "Epoch 1/200\n",
      "643072/646862 [============================>.] - ETA: 0s - loss: 0.3390 - acc: 0.8494Epoch 00000: val_loss improved from inf to 0.40987, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646862/646862 [==============================] - 17s - loss: 0.3388 - acc: 0.8495 - val_loss: 0.4099 - val_acc: 0.8459\n",
      "Epoch 2/200\n",
      "643072/646862 [============================>.] - ETA: 0s - loss: 0.2794 - acc: 0.8741Epoch 00001: val_loss improved from 0.40987 to 0.34372, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646862/646862 [==============================] - 15s - loss: 0.2794 - acc: 0.8742 - val_loss: 0.3437 - val_acc: 0.8561\n",
      "Epoch 3/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2526 - acc: 0.8874Epoch 00002: val_loss improved from 0.34372 to 0.30076, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646862/646862 [==============================] - 15s - loss: 0.2526 - acc: 0.8875 - val_loss: 0.3008 - val_acc: 0.8641\n",
      "Epoch 4/200\n",
      "643072/646862 [============================>.] - ETA: 0s - loss: 0.2304 - acc: 0.8984Epoch 00003: val_loss improved from 0.30076 to 0.29949, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646862/646862 [==============================] - 16s - loss: 0.2305 - acc: 0.8984 - val_loss: 0.2995 - val_acc: 0.8694\n",
      "Epoch 5/200\n",
      "643072/646862 [============================>.] - ETA: 0s - loss: 0.2115 - acc: 0.9075Epoch 00004: val_loss did not improve\n",
      "646862/646862 [==============================] - 15s - loss: 0.2115 - acc: 0.9075 - val_loss: 0.3067 - val_acc: 0.8655\n",
      "Epoch 6/200\n",
      "643072/646862 [============================>.] - ETA: 0s - loss: 0.1938 - acc: 0.9160Epoch 00005: val_loss did not improve\n",
      "646862/646862 [==============================] - 14s - loss: 0.1938 - acc: 0.9159 - val_loss: 0.3285 - val_acc: 0.8687\n",
      "Epoch 7/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.1772 - acc: 0.9241Epoch 00006: val_loss did not improve\n",
      "646862/646862 [==============================] - 15s - loss: 0.1773 - acc: 0.9241 - val_loss: 0.3963 - val_acc: 0.8235\n",
      "Epoch 8/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.1625 - acc: 0.9306Epoch 00007: val_loss did not improve\n",
      "646862/646862 [==============================] - 15s - loss: 0.1625 - acc: 0.9306 - val_loss: 0.3367 - val_acc: 0.8573\n",
      "Epoch 00007: early stopping\n",
      "2341888/2345796 [============================>.] - ETA: 0s\n",
      "Fitting fold 2 of 5\n",
      "\n",
      "Train on 646862 samples, validate on 161718 samples\n",
      "Epoch 1/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.3408 - acc: 0.8499Epoch 00000: val_loss improved from inf to 0.47372, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646862/646862 [==============================] - 17s - loss: 0.3407 - acc: 0.8499 - val_loss: 0.4737 - val_acc: 0.8016\n",
      "Epoch 2/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2794 - acc: 0.8746Epoch 00001: val_loss improved from 0.47372 to 0.41548, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646862/646862 [==============================] - 16s - loss: 0.2794 - acc: 0.8746 - val_loss: 0.4155 - val_acc: 0.8243\n",
      "Epoch 3/200\n",
      "643072/646862 [============================>.] - ETA: 0s - loss: 0.2532 - acc: 0.8874Epoch 00002: val_loss improved from 0.41548 to 0.29717, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646862/646862 [==============================] - 16s - loss: 0.2532 - acc: 0.8874 - val_loss: 0.2972 - val_acc: 0.8668\n",
      "Epoch 4/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.2313 - acc: 0.8980Epoch 00003: val_loss did not improve\n",
      "646862/646862 [==============================] - 14s - loss: 0.2313 - acc: 0.8980 - val_loss: 0.3021 - val_acc: 0.8680\n",
      "Epoch 5/200\n",
      "643072/646862 [============================>.] - ETA: 0s - loss: 0.2129 - acc: 0.9071Epoch 00004: val_loss did not improve\n",
      "646862/646862 [==============================] - 14s - loss: 0.2128 - acc: 0.9071 - val_loss: 0.3781 - val_acc: 0.8348\n",
      "Epoch 6/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.1948 - acc: 0.9158Epoch 00005: val_loss did not improve\n",
      "646862/646862 [==============================] - 15s - loss: 0.1949 - acc: 0.9158 - val_loss: 0.3030 - val_acc: 0.8684\n",
      "Epoch 7/200\n",
      "645120/646862 [============================>.] - ETA: 0s - loss: 0.1780 - acc: 0.9232Epoch 00006: val_loss did not improve\n",
      "646862/646862 [==============================] - 14s - loss: 0.1780 - acc: 0.9232 - val_loss: 0.3978 - val_acc: 0.8614\n",
      "Epoch 00006: early stopping\n",
      "2344960/2345796 [============================>.] - ETA: 0s\n",
      "Fitting fold 3 of 5\n",
      "\n",
      "Train on 646864 samples, validate on 161716 samples\n",
      "Epoch 1/200\n",
      "643072/646864 [============================>.] - ETA: 0s - loss: 0.3405 - acc: 0.8490Epoch 00000: val_loss improved from inf to 0.37872, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646864/646864 [==============================] - 16s - loss: 0.3403 - acc: 0.8491 - val_loss: 0.3787 - val_acc: 0.8446\n",
      "Epoch 2/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2803 - acc: 0.8739Epoch 00001: val_loss improved from 0.37872 to 0.30225, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646864/646864 [==============================] - 15s - loss: 0.2803 - acc: 0.8739 - val_loss: 0.3023 - val_acc: 0.8646\n",
      "Epoch 3/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2531 - acc: 0.8875Epoch 00002: val_loss did not improve\n",
      "646864/646864 [==============================] - 15s - loss: 0.2531 - acc: 0.8875 - val_loss: 0.3244 - val_acc: 0.8609\n",
      "Epoch 4/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2316 - acc: 0.8978Epoch 00003: val_loss did not improve\n",
      "646864/646864 [==============================] - 15s - loss: 0.2317 - acc: 0.8977 - val_loss: 0.3761 - val_acc: 0.8505\n",
      "Epoch 5/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.2123 - acc: 0.9071Epoch 00004: val_loss did not improve\n",
      "646864/646864 [==============================] - 14s - loss: 0.2123 - acc: 0.9071 - val_loss: 0.3054 - val_acc: 0.8679\n",
      "Epoch 6/200\n",
      "645120/646864 [============================>.] - ETA: 0s - loss: 0.1937 - acc: 0.9157Epoch 00005: val_loss did not improve\n",
      "646864/646864 [==============================] - 15s - loss: 0.1936 - acc: 0.9157 - val_loss: 0.4158 - val_acc: 0.8132\n",
      "Epoch 00005: early stopping\n",
      "2340864/2345796 [============================>.] - ETA: 0s\n",
      "Fitting fold 4 of 5\n",
      "\n",
      "Train on 646866 samples, validate on 161714 samples\n",
      "Epoch 1/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.3403 - acc: 0.8487Epoch 00000: val_loss improved from inf to 0.37181, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646866/646866 [==============================] - 17s - loss: 0.3402 - acc: 0.8488 - val_loss: 0.3718 - val_acc: 0.8467\n",
      "Epoch 2/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2818 - acc: 0.8736Epoch 00001: val_loss improved from 0.37181 to 0.35005, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646866/646866 [==============================] - 16s - loss: 0.2818 - acc: 0.8737 - val_loss: 0.3501 - val_acc: 0.8523\n",
      "Epoch 3/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2547 - acc: 0.8865Epoch 00002: val_loss improved from 0.35005 to 0.31893, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646866/646866 [==============================] - 16s - loss: 0.2547 - acc: 0.8865 - val_loss: 0.3189 - val_acc: 0.8567\n",
      "Epoch 4/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2336 - acc: 0.8970Epoch 00003: val_loss did not improve\n",
      "646866/646866 [==============================] - 15s - loss: 0.2336 - acc: 0.8970 - val_loss: 0.3253 - val_acc: 0.8576\n",
      "Epoch 5/200\n",
      "643072/646866 [============================>.] - ETA: 0s - loss: 0.2147 - acc: 0.9060Epoch 00004: val_loss improved from 0.31893 to 0.31360, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646866/646866 [==============================] - 16s - loss: 0.2147 - acc: 0.9060 - val_loss: 0.3136 - val_acc: 0.8613\n",
      "Epoch 6/200\n",
      "643072/646866 [============================>.] - ETA: 0s - loss: 0.1969 - acc: 0.9143Epoch 00005: val_loss did not improve\n",
      "646866/646866 [==============================] - 14s - loss: 0.1969 - acc: 0.9143 - val_loss: 0.3318 - val_acc: 0.8528\n",
      "Epoch 7/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.1807 - acc: 0.9219Epoch 00006: val_loss did not improve\n",
      "646866/646866 [==============================] - 15s - loss: 0.1807 - acc: 0.9219 - val_loss: 0.3349 - val_acc: 0.8568\n",
      "Epoch 8/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.1653 - acc: 0.9290Epoch 00007: val_loss did not improve\n",
      "646866/646866 [==============================] - 15s - loss: 0.1653 - acc: 0.9290 - val_loss: 0.3466 - val_acc: 0.8613\n",
      "Epoch 9/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.1509 - acc: 0.9358Epoch 00008: val_loss did not improve\n",
      "646866/646866 [==============================] - 14s - loss: 0.1509 - acc: 0.9358 - val_loss: 0.3473 - val_acc: 0.8648\n",
      "Epoch 00008: early stopping\n",
      "2341888/2345796 [============================>.] - ETA: 0s\n",
      "Fitting fold 5 of 5\n",
      "\n",
      "Train on 646866 samples, validate on 161714 samples\n",
      "Epoch 1/200\n",
      "643072/646866 [============================>.] - ETA: 0s - loss: 0.3346 - acc: 0.8508Epoch 00000: val_loss improved from inf to 0.41002, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646866/646866 [==============================] - 17s - loss: 0.3344 - acc: 0.8508 - val_loss: 0.4100 - val_acc: 0.8383\n",
      "Epoch 2/200\n",
      "643072/646866 [============================>.] - ETA: 0s - loss: 0.2783 - acc: 0.8744Epoch 00001: val_loss improved from 0.41002 to 0.30778, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646866/646866 [==============================] - 16s - loss: 0.2783 - acc: 0.8744 - val_loss: 0.3078 - val_acc: 0.8651\n",
      "Epoch 3/200\n",
      "643072/646866 [============================>.] - ETA: 0s - loss: 0.2515 - acc: 0.8882Epoch 00002: val_loss did not improve\n",
      "646866/646866 [==============================] - 15s - loss: 0.2515 - acc: 0.8882 - val_loss: 0.3671 - val_acc: 0.8534\n",
      "Epoch 4/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.2302 - acc: 0.8986Epoch 00003: val_loss did not improve\n",
      "646866/646866 [==============================] - 14s - loss: 0.2303 - acc: 0.8985 - val_loss: 0.3649 - val_acc: 0.8574\n",
      "Epoch 5/200\n",
      "643072/646866 [============================>.] - ETA: 0s - loss: 0.2113 - acc: 0.9077Epoch 00004: val_loss improved from 0.30778 to 0.30353, saving model to /home/yuriyguts/Projects/kaggle-quora-question-pairs/data/tmp/fold-checkpoint-oofp_nn_mlp_with_magic.h5\n",
      "646866/646866 [==============================] - 15s - loss: 0.2113 - acc: 0.9077 - val_loss: 0.3035 - val_acc: 0.8638\n",
      "Epoch 6/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.1932 - acc: 0.9167Epoch 00005: val_loss did not improve\n",
      "646866/646866 [==============================] - 14s - loss: 0.1932 - acc: 0.9166 - val_loss: 0.3573 - val_acc: 0.8384\n",
      "Epoch 7/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.1755 - acc: 0.9246Epoch 00006: val_loss did not improve\n",
      "646866/646866 [==============================] - 14s - loss: 0.1756 - acc: 0.9246 - val_loss: 0.3867 - val_acc: 0.8227\n",
      "Epoch 8/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.1603 - acc: 0.9311- Epoch 00007: val_loss did not improve\n",
      "646866/646866 [==============================] - 15s - loss: 0.1603 - acc: 0.9311 - val_loss: 0.3439 - val_acc: 0.8549\n",
      "Epoch 9/200\n",
      "645120/646866 [============================>.] - ETA: 0s - loss: 0.1450 - acc: 0.9386Epoch 00008: val_loss did not improve\n",
      "646866/646866 [==============================] - 14s - loss: 0.1450 - acc: 0.9386 - val_loss: 0.3498 - val_acc: 0.8582\n",
      "Epoch 00008: early stopping\n",
      "80857/80857 [==============================] - 0s     \n",
      "2340864/2345796 [============================>.] - ETA: 0sCPU times: user 7min 23s, sys: 37.5 s, total: 8min 1s\n",
      "Wall time: 13min 45s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "# Iterate through folds.\n",
    "for fold_num, (ix_train, ix_val) in enumerate(kfold.split(X_train_q1, y_train)):\n",
    "    \n",
    "    # Augment the training set by mirroring the pairs.\n",
    "    X_fold_train_q1 = np.vstack([X_train_q1[ix_train], X_train_q2[ix_train]])\n",
    "    X_fold_train_q2 = np.vstack([X_train_q2[ix_train], X_train_q1[ix_train]])\n",
    "    X_fold_train_magic = np.vstack([X_train_magic[ix_train], X_train_magic[ix_train]])\n",
    "\n",
    "    X_fold_val_q1 = np.vstack([X_train_q1[ix_val], X_train_q2[ix_val]])\n",
    "    X_fold_val_q2 = np.vstack([X_train_q2[ix_val], X_train_q1[ix_val]])\n",
    "    X_fold_val_magic = np.vstack([X_train_magic[ix_val], X_train_magic[ix_val]])\n",
    "\n",
    "    # Ground truth should also be \"mirrored\".\n",
    "    y_fold_train = np.concatenate([y_train[ix_train], y_train[ix_train]])\n",
    "    y_fold_val = np.concatenate([y_train[ix_val], y_train[ix_val]])\n",
    "    \n",
    "    print()\n",
    "    print(f'Fitting fold {fold_num + 1} of {kfold.n_splits}')\n",
    "    print()\n",
    "    \n",
    "    # Compile a new model.\n",
    "    model = create_model(model_params)\n",
    "\n",
    "    # Train.\n",
    "    model.fit(\n",
    "        [X_fold_train_q1, X_fold_train_q2, X_fold_train_magic], y_fold_train,\n",
    "        validation_data=([X_fold_val_q1, X_fold_val_q2, X_fold_val_magic], y_fold_val),\n",
    "\n",
    "        batch_size=BATCH_SIZE,\n",
    "        epochs=MAX_EPOCHS,\n",
    "        verbose=1,\n",
    "        \n",
    "        callbacks=[\n",
    "            # Stop training when the validation loss stops improving.\n",
    "            EarlyStopping(\n",
    "                monitor='val_loss',\n",
    "                min_delta=0.001,\n",
    "                patience=3,\n",
    "                verbose=1,\n",
    "                mode='auto',\n",
    "            ),\n",
    "            # Save the weights of the best epoch.\n",
    "            ModelCheckpoint(\n",
    "                model_checkpoint_path,\n",
    "                monitor='val_loss',\n",
    "                save_best_only=True,\n",
    "                verbose=2,\n",
    "            ),\n",
    "        ],\n",
    "    )\n",
    "        \n",
    "    # Restore the best epoch.\n",
    "    model.load_weights(model_checkpoint_path)\n",
    "    \n",
    "    # Compute out-of-fold predictions.\n",
    "    y_train_oofp[ix_val] = predict(model, X_train_q1[ix_val], X_train_q2[ix_val], X_train_magic[ix_val])\n",
    "    y_test_oofp[:, fold_num] = predict(model, X_test_q1, X_test_q2, X_test_magic)\n",
    "    \n",
    "    # Clear GPU memory.\n",
    "    K.clear_session()\n",
    "    del X_fold_train_q1, X_fold_train_q2, X_fold_train_magic\n",
    "    del X_fold_val_q1, X_fold_val_q2, X_fold_val_magic\n",
    "    del model\n",
    "    gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CV score: 0.292694363155\n"
     ]
    }
   ],
   "source": [
    "cv_score = log_loss(y_train, y_train_oofp)\n",
    "print('CV score:', cv_score)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Save features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "feature_names = [feature_list_id]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "features_train = y_train_oofp.reshape((-1, 1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "features_test = np.mean(y_test_oofp, axis=1).reshape((-1, 1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "project.save_features(features_train, features_test, feature_names, feature_list_id)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Explore"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<matplotlib.axes._subplots.AxesSubplot at 0x7fa076daa518>"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAqAAAAGoCAYAAACZh1c1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3X90lPWZ9/HPTBIyM+BCQkCLpTFMdEGFBAJIhAKSWk81\nIQrpUaPI4qIJoqxWUFOK4AaLPZLsSu0iesAjgVOVuBKI6FJUEBdSiYhUEB5rxh9bFmWMRJlMfpDc\nzx8+zOM0EEaSfGcyeb/O8fTkvu6Z75VeJH783tz32CzLsgQAAAAYYg93AwAAAOhZCKAAAAAwigAK\nAAAAowigAAAAMIoACgAAAKMIoAAAADAqNtwNRKNjx77t8jVsNpv69++tr77yiSdpRS7mFPmYUffA\nnCIfM+oeTM5pwIDzzlhjB7Sbstu/+0NkZ4IRjTlFPmbUPTCnyMeMuodImRN/TAAAAGAUARQAAABG\nEUABAABgFAEUAAAARhFAAQAAYBQBFAAAAEYRQAEAAGAUARQAAABGEUABAABgFAEUAAAARvFZ8AAA\nAGFy+2NvGFlnzUNTftD5/+f/HNLjj/9WHk+Nfvzjn2j+/CJdfvnwTuuHHVAAAAAENDY26sEHf6Vr\nr52q117brry8G/XQQ79SfX19p61BAAUAAEDA3r3VstlsuuGGPMXGxio7O1eJiYnavfu/O20NAigA\nAAACPvvsE1100ZCgYz/5SbI+++yTTluDAAoAAIAAv98vh8MRdCw+3qGGhoZOW4MACgAAgACHw6HG\nxsagY42NDXI6nZ22BgEUAAAAAcnJKfrss0+Djn322adKSRlyhlf8cDyGqRvLub8i3C10uh/6mAgA\nANC5MjLGqLm5SeXlz+v66/P02muvqLa2VmPHZnbaGuyAAgAAIKBXr15avnyFtm3bql/8YopeeukF\nPfZYaadegmcHFAAAIEwi9cpfaurFeuqpNV32/uyAAgAAwCgCKAAAAIwigAIAAMAoAigAAACMCksA\n3b9/vyZMmBD4+ujRo7rrrrt0xRVXaPz48SouLlZTU5MkybIslZSUaNy4cRozZoyWLl2qlpaWwGsr\nKyuVlZWl9PR0FRQUyOv1BmoHDx5UXl6e0tPTlZubq3379gVqdXV1mjt3rjIyMjR58mRt2LAhUDvb\nmgAAADh3RgOoZVkqLy/X7bffrubm5sDxBQsW6IILLtBbb72ljRs36i9/+Yv+8Ic/SJLWr1+v7du3\na9OmTdqyZYv27t2rNWu+uyvr0KFDWrx4sUpLS1VVVaWkpCQVFRVJkhobG1VYWKhp06Zpz549mjFj\nhubMmSOfzydJWrRokVwul3bt2qUVK1Zo+fLlgYDa3poAAADoGKMB9KmnntLatWtVWFgYONbU1CSn\n06k5c+YoPj5eAwYMUE5Ojt577z1JUkVFhWbOnKmBAwdqwIABKigo0MsvvyxJ2rx5s7KyspSWliaH\nw6H58+dr586d8nq9qqqqkt1uV35+vuLi4pSXl6ekpCTt2LFDPp9P27Zt07x58xQfH68RI0YoOztb\nGzduPOuaAAAA6BijzwGdPn26CgsL9c477wSO9erVS08//XTQeW+++aaGDh0qSaqpqVFqamqglpKS\nIo/HI8uyVFNTo5EjRwZqCQkJ6tu3rzwejzwej9xud9D7pqSkqKamRhdddJFiY2M1ePDgoNrWrVvP\nuqbNZjvr92mz2WTv4mhvt5+9j+4oJia6vq9Tc4rWeUUDZtQ9MKfIx4y6h0iZk9EAOnDgwHbrlmXp\n0UcfVU1NjR5//HFJkt/vl8PhCJzjdDrV2tqqpqamNrVTdb/fr/r6+jZP7Hc4HGpoaFB9fX2b152q\nnW3N+Pj4s36f/fv3Dimooq3ExD7hbqFL9OvXO9wt4CyYUffAnCIfM+oewj2niPkkpIaGBj3wwAM6\nfPiwysrK1L9/f0nfBcPGxsbAeX6/X7
GxsYqPjw8Kjd+vu1wuOZ3ONrWGhoZA7fvv+f3a2dYMxVdf\n+dgBPUe1tSfC3UKnsttt6tevt44f96m11Qp3OzgNZtQ9MKfIx4y6B5Nzam9TKSIC6PHjxzV79my5\nXC698MIL6tevX6Dmdrvl8XiUlpYmSfJ4PBoyZEhQ7ZTa2lrV1dXJ7XbL5/Np3bp1Qet4PB5lZ2cr\nOTlZzc3NOnLkiAYNGhSonbrs3t6aobAsS9w0f25aWqLzl1ZrqxW131u0YEbdA3OKfMyoewj3nML+\nHFDLsnTPPfcoKSlJq1evDgqfkjR16lStXr1aR48eldfr1apVq5SbmytJys7O1tatW1VdXa3GxkaV\nlpZq4sSJSkhIUGZmppqamlRWVqbm5maVl5fL6/VqwoQJ6tOnj7KyslRSUiK/36/9+/ersrJSOTk5\nZ10TAAAAHRP2HdD33ntP77zzjuLj4zV27NjA8UsvvVTr169Xfn6+vF6v8vLy1NzcrJycHM2aNUuS\nNGzYMBUXF2vhwoU6duyYRo8erWXLlkn67uamZ555RkuWLFFpaamSk5O1cuXKwGX24uJiLV68WJMm\nTZLL5dKCBQsCO57trQkAAICOsVmWxT55Jzt27NsuXyMmxqaZj77e5euYtuahKeFuoVPFxNiUmNhH\ntbUnuCQVoZhR98CcIh8z6h5MzmnAgPPOWAv7JXgAAAD0LARQAAAAGEUABQAAgFEEUAAAABhFAAUA\nAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQ\nAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhF\nAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACA\nUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAA\nABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAF\nAACAUQRQAAAAGBWWALp//35NmDAh8HVdXZ3mzp2rjIwMTZ48WRs2bAjULMtSSUmJxo0bpzFjxmjp\n0qVqaWkJ1CsrK5WVlaX09HQVFBTI6/UGagcPHlReXp7S09OVm5urffv2dcqaAAAAOHdGA6hlWSov\nL9ftt9+u5ubmwPFFixbJ5XJp165dWrFihZYvXx4Ii+vXr9f27du1adMmbdmyRXv37tWaNWskSYcO\nHdLixYtVWlqqqqoqJSUlqaioSJLU2NiowsJCTZs2TXv27NGMGTM0Z84c+Xy+Dq0JAACAjjEaQJ96\n6imtXbtWhYWFgWM+n0/btm3TvHnzFB8frxEjRig7O1sbN26UJFVUVGjmzJkaOHCgBgwYoIKCAr38\n8suSpM2bNysrK0tpaWlyOByaP3++du7cKa/Xq6qqKtntduXn5ysuLk55eXlKSkrSjh07OrQmAAAA\nOibW5GLTp09XYWGh3nnnncCxTz/9VLGxsRo8eHDgWEpKirZu3SpJqqmpUWpqalDN4/HIsizV1NRo\n5MiRgVpCQoL69u0rj8cjj8cjt9sdtH5KSopqamp00UUXnfOaNpvtrN+nzWaTvYujvd1+9j66o5iY\n6Pq+Ts0pWucVDZhR98CcIh8z6h4iZU5GA+jAgQPbHKuvr5fD4Qg65nA41NDQIEny+/1BdafTqdbW\nVjU1NbWpnar7/X7V19fL6XSe9n07smZ8fPxZv8/+/XuHFFTRVmJin3C30CX69esd7hZwFsyoe2BO\nkY8ZdQ/hnpPRAHo6TqdTjY2NQccaGhrkcrkkfRcMv1/3+/2KjY1VfHx8UGj8ft3lcsnpdLapnXrf\njq
wZiq++8rEDeo5qa0+Eu4VOZbfb1K9fbx0/7lNrqxXudnAazKh7YE6Rjxl1Dybn1N6mUtgDaHJy\nspqbm3XkyBENGjRIkuTxeAKXwN1utzwej9LS0gK1IUOGBNVOqa2tVV1dndxut3w+n9atWxe0lsfj\nUXZ2dofWDIVlWeKm+XPT0hKdv7RaW62o/d6iBTPqHphT5GNG3UO45xT254D26dNHWVlZKikpkd/v\n1/79+1VZWamcnBxJ0tSpU7V69WodPXpUXq9Xq1atUm5uriQpOztbW7duVXV1tRobG1VaWqqJEycq\nISFBmZmZampqUllZmZqbm1VeXi6v16sJEyZ0aE0AAAB0TNh3QCWpuLhYixcv1qRJk+RyubRgwYLA\n7mN+fr68Xq/y8vLU3NysnJwczZo1S5I0bNgwFRcXa+HChTp27JhGjx6tZcuWSZJ69eqlZ555RkuW\nLFFpaamSk5O1cuXKwGX2c10TAAAAHWOzLIt98k527Ni3Xb5GTIxNMx99vcvXMW3NQ1PC3UKniomx\nKTGxj2prT3BJKkIxo+6BOUU+ZtQ9mJzTgAHnnbEW9kvwAAAA6FkIoAAAADCKAAoAAACjCKAAAAAw\nigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAA\nAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAA\nAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoA\nCgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACj\nCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAAowigAAAA\nMIoACgAAAKMiJoDu3btX06ZN06hRo3TNNddo8+bNkqS6ujrNnTtXGRkZmjx5sjZs2BB4jWVZKikp\n0bhx4zRmzBgtXbpULS0tgXplZaWysrKUnp6ugoICeb3eQO3gwYPKy8tTenq6cnNztW/fvkCtvTUB\nAADQMRERQFtaWjR37lzdeeed2rt3rx599FE99NBD+p//+R8tWrRILpdLu3bt0ooVK7R8+fJAWFy/\nfr22b9+uTZs2acuWLdq7d6/WrFkjSTp06JAWL16s0tJSVVVVKSkpSUVFRZKkxsZGFRYWatq0adqz\nZ49mzJihOXPmyOfzSVK7awIAAKBjIiKAfvPNN6qtrVVLS4ssy5LNZlNcXJxiYmK0bds2zZs3T/Hx\n8RoxYoSys7O1ceNGSVJFRYVmzpypgQMHasCAASooKNDLL78sSdq8ebOysrKUlpYmh8Oh+fPna+fO\nnfJ6vaqqqpLdbld+fr7i4uKUl5enpKQk7dixQz6fr901AQAA0DEREUATEhKUn5+vX/3qV7rssst0\nyy23aNGiRfr6668VGxurwYMHB85NSUlRTU2NJKmmpkapqalBNY/HI8uy2tQSEhLUt29feTweeTwe\nud3uoB5Ove+nn37a7poAAADomNhwNyBJra2tcjgceuKJJzRlyhTt2rVL999/v1auXCmHwxF0rsPh\nUENDgyTJ7/cH1Z1Op1pbW9XU1NSmdqru9/tVX18vp9N52vetr69vd81Q2Gw22bs42tvttq5dIExi\nYqLr+zo1p2idVzRgRt0Dc4p8zKh7iJQ5RUQA3bp1q/bv368HH3xQkjR58mRNnjxZv//979XY2Bh0\nbkNDg1wul6TvguH3636/X7GxsYqPjz9taPT7/XK5XHI6nW1qp97X6XS2u2Yo+vfvLZuNH8BzkZjY\nJ9wtdIl+/XqHuwWcBTPqHphT5GNG3UO45xQRAfR///d/1dTUFHQs
NjZWl112md59910dOXJEgwYN\nkiR5PJ7ApXW32y2Px6O0tLRAbciQIUG1U2pra1VXVye32y2fz6d169YFrefxeJSdna3k5GQ1Nzef\ncc1QfPWVjx3Qc1RbeyLcLXQqu92mfv166/hxn1pbrXC3g9NgRt0Dc4p8zKh7MDmn9jaVIiKAXnnl\nlSopKdFLL70UuDP9T3/6k5577jn97W9/U0lJiZYuXaqPPvpIlZWVevrppyVJU6dO1erVqzVu3DjF\nxsZq1apVys3NlSRlZ2fr1ltv1fTp0zV8+HCVlpZq4sSJSkhIUGZmppqamlRWVqabbrpJFRUV8nq9\nmjBhglwul7Kyss64Zigsy9L3ngaFH6ClJTp/abW2WlH7vUULZtQ9MKfIx4y6h3DPyWZZVkT8KXnj\njTf0xBNP6PPPP9egQYP0L//yL7r66qt1/PhxLV68WLt375bL5dLdd9+tvLw8Sd89vmnFihV66aWX\n1NzcrJycHBUVFSkmJkaStGXLFj3xxBM6duyYRo8erWXLlql///6SvntM05IlS3T48GElJydryZIl\nSk9Pl6R21wzFsWPfdvL/O23FxNg089HXu3wd09Y8NCXcLXSqmBibEhP7qLb2BL+QIxQz6h6YU+Rj\nRt2DyTkNGHDeGWsRE0CjCQH03BFAYRoz6h6YU+RjRt1DpATQiHgMEwAAAHoOAigAAACMIoACAADA\nKAIoAAAAjCKAAgAAwCgCKAAAAIwigAIAAMAoAigAAACMIoACAADAqJADaG5urlavXq2jR492ZT8A\nAACIciEH0F/+8pfatm2bsrKyNGPGDL3wwguqq6vryt4AAAAQhUIOoLfeeqv++Mc/auvWrZo4caJe\neOEF/fSnP9Vdd92lV199VU1NTV3ZJwAAAKJE7A99wYUXXqg77rhDU6dO1fPPP681a9bojTfeUJ8+\nfXT99dfrnnvuUd++fbuiVwAAAESBH3QTktfr1bp165Sfn6+rrrpKO3bs0L333qu33npLzz33nA4e\nPKjCwsKu6hUAAABRIOQd0Ntuu03vvvuuLrjgAmVnZ6u4uFhutztQHzhwoG677TYtXLiwSxoFAABA\ndAg5gLrdbt17770aNWrUGc8ZO3asNm7c2CmNAQAAIDqFfAl+8eLFOnbsmN58883AsUWLFmnbtm2B\nrxMTEzV48ODO7RAAAABRJeQA+uyzz6qoqEjHjx8PHPuHf/gHPfjgg3r++ee7pDkAAABEn5ADaFlZ\nmUpKSnTDDTcEji1YsEC/+93vtHr16i5pDgAAANEn5AD69ddfKzk5uc3x1NRUffnll53aFAAAAKJX\nyAE0LS1Nq1evVktLS+CYZVlau3atLr300i5pDgAAANEn5LvgH3roIf3TP/2T3n77bQ0bNkySdPjw\nYTU1Nenpp5/usgYBAAAQXUIOoEOHDtWrr76qLVu26OOPP1ZcXJwmTZqknJwc9enTpyt7BAAAQBT5\nQR/FmZCQoFtuuaWregEAAEAPEHIA/eyzz7R8+XJ98MEHam5ulmVZQfW3336705sDAABA9Ak5gBYV\nFam2tlazZs3ikjsAAADOWcgB9C9/+YvKy8t1ySWXdGU/AAAAiHIhP4Zp0KBBOnHiRFf2AgAAgB4g\n5B3Q+++/X4888ojuvvtuJScnKy4uLqiekpLS6c0BAAAg+oQcQO+5556g/5Ukm80my7Jks9n04Ycf\ndn53AAAAiDohB9DXX3+9K/sAAABADxFyAL3wwgslSV988YU8Ho/S09N14sQJJSUldVlzAAAAiD4h\n34RUX1+ve++9V5MmTdLtt9+uY8eO6eGHH1Z+fr5qa2u7skcAAABEkZAD6OOPP64vvvhCr776quLj\n4yV9d2NSY2Ojfvvb33ZZgwAAAIguIQfQ119/XUVFRUF3u7vdbj3yyCPauXNnlzQHAACA6BNyAD1x\n4sRpPwHJbrfr5MmTndoUAAAA
olfIAXTChAl66qmn1NLSEjj29ddf6/HHH9f48eO7pDkAAABEn5AD\n6G9+8xt98sknyszMVENDg2bPnq2rrrpKdXV1WrhwYVf2CAAAgCgS8mOYBg4cqBdffFG7d+9WTU2N\nTp48KbfbrfHjx8tms3VljwAAAIgiIQfQUzIzM5WZmdkVvQAAAKAHCDmADh06tN2dTj6KEwAAAKEI\nOYA+88wzQV+3tLTos88+U1lZme67775ObwwAAADRKeQA+tOf/vS0x1NTU1VSUqJrr72205oCAABA\n9Ar5Lvgz+dGPfqSPPvqoM3oBAABADxDyDujbb7/d5tiJEye0fv16DR06tFObAgAAQPQKOYDOnj27\nzbG4uDgNHz5c//qv/9qpTQEAACB6hRxADx061JV9AAAAoIcIOYB6PJ6Q3zQlJeWcmgEAAED0CzmA\n/uIXvwg8B9SyLElq81xQy7Jks9l4JigAAADOKOQA+vvf/16lpaVasGCBMjIyFBcXpwMHDqi4uFjT\npk3T1Vdf3ZV9AgAAIEqE/BimZcuWqbi4WFOmTFHfvn3lcrk0ZswYLV26VGvWrNGFF14Y+OdcHD16\nVAUFBRo1apQmTpyotWvXSpLq6uo0d+5cZWRkaPLkydqwYUPgNZZlqaSkROPGjQv00tLSEqhXVlYq\nKytL6enpKigokNfrDdQOHjyovLw8paenKzc3V/v27QvU2lsTAAAAHRNyAP3mm2/Uq1evNsebmprk\n9/s71IRlWbrrrrs0ZMgQ/fnPf9bq1av15JNPau/evVq0aJFcLpd27dqlFStWaPny5YGwuH79em3f\nvl2bNm3Sli1btHfvXq1Zs0bSdzdNLV68WKWlpaqqqlJSUpKKiookSY2NjSosLNS0adO0Z88ezZgx\nQ3PmzJHP55OkdtcEAABAx4QcQK+++mr9+te/1q5du/T111+rtrZW27dv18KFC3X99dd3qIn3339f\nX375pebPn6+4uDhdfPHFev7553X++edr27ZtmjdvnuLj4zVixAhlZ2dr48aNkqSKigrNnDlTAwcO\n1IABA1RQUKCXX35ZkrR582ZlZWUpLS1NDodD8+fP186dO+X1elVVVSW73a78/HzFxcUpLy9PSUlJ\n2rFjh3w+X7trAgAAoGNCDqCLFi3SxRdfrDvuuENXXnmlxo8fr3nz5ulnP/uZHnjggQ41ceDAAV18\n8cV6/PHHNX78eF1zzTV6//33VVdXp9jYWA0ePDhwbkpKimpqaiRJNTU1Sk1NDap5PB5ZltWmlpCQ\noL59+8rj8cjj8cjtdgf1cOp9P/3003bXBAAAQMeEfBOSy+XSv/3bv+mbb77RJ598IqfTqZ/85CeK\nj4/vcBN1dXX685//rHHjxunNN9/UBx98oNmzZ+vpp5+Ww+EIOtfhcKihoUGS5Pf7g+pOp1Otra2B\nvxbw9691Op3y+/2qr6+X0+k87fvW19e3u2YobDab7B3+kNP22e22s5/UDcXERNf3dWpO0TqvaMCM\nugfmFPmYUfcQKXMKOYBK0ldffaUNGzbok08+0YIFC/T6668rNTVVl1xySYea6NWrl/r27auCggJJ\n0qhRo3TNNddoxYoVamxsDDq3oaFBLpdL0nfB8Pt1v9+v2NhYxcfHnzY0+v1+uVwuOZ3ONrVT7+t0\nOttdMxT9+/du84gqhCYxsU+4W+gS/fr1DncLOAtm1D0wp8jHjLqHcM8p5AB68OBB3XbbbUpNTdUH\nH3yguXPn6r//+79VVFSkp556SpmZmefcREpKilpaWtTS0qKYmBhJUktLiy699FJVV1fryJEjGjRo\nkKTvHoh/6tK62+2Wx+NRWlpaoDZkyJCg2im1tbWqq6uT2+2Wz+fTunXrgnrweDzKzs5WcnKympub\nz7hmKL76yscO6DmqrT0R7hY6ld1uU79+vXX8uE+trVa428FpMKPugTlFPmbUPZicU3ubSiEH0G
XL\nlmnmzJm65557NHLkSEnSo48+qoSEBC1fvlwvvfTSOTc4fvx4ORwOPfnkk5o7d67279+vP/3pT3r2\n2Wf1t7/9TSUlJVq6dKk++ugjVVZW6umnn5YkTZ06VatXr9a4ceMUGxurVatWKTc3V5KUnZ2tW2+9\nVdOnT9fw4cNVWlqqiRMnKiEhQZmZmWpqalJZWZluuukmVVRUyOv1asKECXK5XMrKyjrjmqGwLEvf\nexoUfoCWluj8pdXaakXt9xYtmFH3wJwiHzPqHsI9p5D36Q4cOKCpU6e2OX7jjTfq448/7lATDodD\nZWVl2r9/v6688krNnz9fv/nNb5Senq7i4mKdPHlSkyZN0rx587RgwYLAjmd+fr6mTJmivLw8XXfd\ndRo1apRmzZolSRo2bJiKi4u1cOFCZWZm6ssvv9SyZcskfXfJ/5lnntErr7yisWPHat26dVq5cmXg\nMnt7awIAAKBjQt4B7du3r44cOaLk5OSg4wcOHFBiYmKHG0lOTtbq1avbHO/Xr5+eeOKJ074mJiZG\n9913n+67777T1q+99lpde+21p60NHTpUzz///Glr7a0JAACAjgl5B/Tmm2/Www8/rP/6r/+SJB0+\nfFjr16/XkiVLdOONN3ZZgwAAAIguIe+A3nnnnerdu7cee+wx+f1+3X333UpKSlJhYaFmzpzZlT0C\nAAAgioQcQF977TXl5OTolltuUX19vVpaWnTeeed1ZW8AAACIQiFfgn/44Yd17NgxSd89lJ7wCQAA\ngHMRcgC9/PLL9dZbb3VlLwAAAOgBQr4E36tXL/3ud7/TH/7wB/34xz9u83GVZ7qjHAAAAPi+kAPo\n5Zdfrssvv7wrewEAAEAP0G4AHTt2rF577TUlJibq7rvvliQdOnRIQ4YMUa9evYw0CAAAgOjS7t8B\n/eabb2RZwR/TlJ+fry+++KJLmwIAAED0CvkmpFP+PpACAAAAP8QPDqAAAABARxBAAQAAYNRZ74Kv\nqKhQ7969A1+3traqsrJSiYmJQefxefAAAAAIRbsBdNCgQVq3bl3Qsf79+2vDhg1Bx2w2GwEUAAAA\nIWk3gL7xxhum+gAAAEAPwd8BBQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUA\nBQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBR\nBFAAAAAYFRvuBoDvu/2xN8LdQqfbXJIb7hYAAIgo7IACAADAKAIoAAAAjCKAAgAAwCgCKAAAAIwi\ngAIAAMD97VssAAATUUlEQVQoAigAAACMIoACAADAKAIoAAAAjCKAAgAAwCgCKAAAAIwigAIAAMAo\nAigAAACMIoACAADAKAIoAAAAjCKAAgAAwCgCKAAAAIwigAIAAMAoAigAAACMirgA6vV6lZmZqTff\nfFOSVFdXp7lz5yojI0OTJ0/Whg0bAudalqWSkhKNGzdOY8aM0dKlS9XS0hKoV1ZWKisrS+np6Soo\nKJDX6w3UDh48qLy8PKWnpys3N1f79u0L1NpbEwAAAB0TcQF04cKFOn78eODrRYsWyeVyadeuXVqx\nYoWWL18eCIvr16/X9u3btWnTJm3ZskV79+7VmjVrJEmHDh3S4sWLVVpaqqqqKiUlJamoqEiS1NjY\nqMLCQk2bNk179uzRjBkzNGfOHPl8vrOuCQAAgI6JqAD6xz/+UU6nUz/60Y8kST6fT9u2bdO8efMU\nHx+vESNGKDs7Wxs3bpQkVVRUaObMmRo4cKAGDBiggoICvfzyy5KkzZs3KysrS2lpaXI4HJo/f752\n7twpr9erqqoq2e125efnKy4uTnl5eUpKStKOHTvOuiYAAAA6JjbcDZzi8Xj07LPP6sUXX9S0adMk\nSZ9++qliY2M1ePDgwHkpKSnaunWrJKmmpkapqalBNY/HI8
uyVFNTo5EjRwZqCQkJ6tu3rzwejzwe\nj9xud9D6KSkpqqmp0UUXXdTumqGw2Wyyd3G0t9ttXbsAOhXzilynZsOMIhtzinzMqHuIlDlFRAA9\nefKkHnjgAS1cuFD9+vULHK+vr5fD4Qg61+FwqKGhQZLk9/uD6k6nU62trWpqampTO1X3+/2qr6+X\n0+k87fuebc1Q9O/fWzYbP4D4//r16x3uFnAWzKh7YE6Rjxl1D+GeU0QE0P/4j//QsGHDNGnSpKDj\nTqdTjY2NQccaGhrkcrkkfRcMv1/3+/2KjY1VfHz8aUOj3++Xy+WS0+lsUzv1vmdbMxRffeVjBxRB\njh/3qbXVCncbOA273aZ+/XozowjHnCIfM+oeTM4pMbHPGWsREUC3bNmiY8eOacuWLZKkEydO6Fe/\n+pVmz56t5uZmHTlyRIMGDZL03aX6U5fd3W63PB6P0tLSArUhQ4YE1U6pra1VXV2d3G63fD6f1q1b\nF9SDx+NRdna2kpOT210zFJZl6Xs34wNqbbXU0sIv5EjGjLoH5hT5mFH3EO45RcRNSK+99preffdd\nVVdXq7q6WoMGDVJpaanmzp2rrKwslZSUyO/3a//+/aqsrFROTo4kaerUqVq9erWOHj0qr9erVatW\nKTc3V5KUnZ2trVu3qrq6Wo2NjSotLdXEiROVkJCgzMxMNTU1qaysTM3NzSovL5fX69WECRPUp0+f\ndtcEAABAx0REAG1PcXGxTp48qUmTJmnevHlasGBBYMczPz9fU6ZMUV5enq677jqNGjVKs2bNkiQN\nGzZMxcXFWrhwoTIzM/Xll19q2bJlkqRevXrpmWee0SuvvKKxY8dq3bp1WrlyZeAye3trAgAAoGNs\nlmWxT97Jjh37tsvXiImxaeajr3f5Oui4zSW5qq09wSWpCBUTY1NiYh9mFOGYU+RjRt2DyTkNGHDe\nGWsRvwMKAACA6EIABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEE\nUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAY\nRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAA\ngFEEUAAAABhFAAUAAIBRBFAAAAAYRQAFAACAUQRQAAAAGEUABQAAgFEEUAAAABhFAAUAAIBRBFAA\nAAAYFRvuBoBol3N/Rbhb6HRrHpoS7hYAAN0YO6AAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAA\nADCKAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAK\nAAAAoyImgFZXV+uXv/ylMjIy9LOf/UzPP/+8JKmurk5z585VRkaGJk+erA0bNgReY1mWSkpKNG7c\nOI0ZM0ZLly5VS0tLoF5ZWamsrCylp6eroKBAXq83UDt48KDy8vKUnp6u3Nxc7du3L1Brb00AAAB0\nTEQE0Lq6Ot1111267bbbtGfPHj3xxBMqLS3Vrl27tGjRIrlcLu3atUsrVqzQ8uXLA2Fx/fr12r59\nuzZt2qQtW7Zo7969WrNmjSTp0KFDWrx4sUpLS1VVVaWkpCQVFRVJkhobG1VYWKhp06Zpz549mjFj\nhubMmSOfzydJ7a4JAACAjomIAHrkyBFNmjRJOTk5stvtuuyyy3TFFVdo79692rZtm+bNm6f4+HiN\nGDFC2dnZ2rhxoySpoqJCM2fO1MCBAzVgwAAVFBTo5ZdfliRt3rxZWVlZSktLk8Ph0Pz587Vz5055\nvV5VVVXJbrcrPz9fcXFxysvLU1JSknbs2CGfz9fumgAAAOiY2HA3IEnDhg3T448/Hvi6rq5O1dXV\n+sd//EfFxsZq8ODBgV
pKSoq2bt0qSaqpqVFqampQzePxyLIs1dTUaOTIkYFaQkKC+vbtK4/HI4/H\nI7fbHdRDSkqKampqdNFFF7W7ZihsNpvsXRzt7XZb1y4AtCMmJnr+/J36WeJnKrIxp8jHjLqHSJlT\nRATQ7/v2229VWFgY2AVdu3ZtUN3hcKihoUGS5Pf75XA4AjWn06nW1lY1NTW1qZ2q+/1+1dfXy+l0\nnvZ96+vr27zu+2uGon//3rLZ+AFE9EpM7BPuFjpdv369w90CQsCcIh8z6h7CPaeICqCff/65CgsL\nNXjwYP37v/+7Pv74YzU2Ngad09DQIJfLJem7YPj9ut/vV2xsrOLj408bGv1+v1wul5xOZ5vaqfd1\nOp3trhmKr77ysQOKqFZbeyLcLXQau92mfv166/hxn1pbrXC3gzNgTpGPGXUPJufU3mZFxATQAwcO\naPbs2Zo6daoefPBB2e12JScnq7m5WUeOHNGgQYMkSR6PJ3DZ3e12y+PxKC0tLVAbMmRIUO2U2tpa\n1dXVye12y+fzad26dUHrezweZWdnn3XNUFiWpe/djA9EnZaW6PuXS2urFZXfV7RhTpGPGXUP4Z5T\nRNyE5PV6NXv2bM2aNUtFRUWy/7/twz59+igrK0slJSXy+/3av3+/KisrlZOTI0maOnWqVq9eraNH\nj8rr9WrVqlXKzc2VJGVnZ2vr1q2qrq5WY2OjSktLNXHiRCUkJCgzM1NNTU0qKytTc3OzysvL5fV6\nNWHChLOuCQAAgI6JiB3Q8vJy1dbWauXKlVq5cmXg+G233abi4mItXrxYkyZNksvl0oIFCwI7nvn5\n+fJ6vcrLy1Nzc7NycnI0a9YsSd/d2FRcXKyFCxfq2LFjGj16tJYtWyZJ6tWrl5555hktWbJEpaWl\nSk5O1sqVKwOX2dtbEwAAAB1jsyyLffJOduzYt12+RkyMTTMffb3L1wFOZ81DU8LdQqeJibEpMbGP\namtPcNkwgjGnyMeMugeTcxow4Lwz1iLiEjwAAAB6DgIoAAAAjCKAAgAAwCgCKAAAAIwigAIAAMAo\nAigAAACMIoACAADAKAIoAAAAjIqIT0IC0L3c/tgb4W6hU20uyQ13CwDQo7ADCgAAAKMIoAAAADCK\nAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAA\nowigAAAAMIoACgAAAKMIoAAAADCKAAoAAACjCKAAAAAwigAKAAAAowigAAAAMIoACgAAAKNiw90A\nAIRbzv0V4W6h0615aEq4WwCAM2IHFAAAAEYRQAEAAGAUARQAAABGEUABAABgFAEUAAAARhFAAQAA\nYBQBFAAAAEYRQAEAAGAUARQAAABGEUABAABgFB/FCQBR6PbH3gh3C51uc0luuFsA0EnYAQUAAIBR\nBFAAAAAYxSV4AACAM+Cvs3QNdkABAABgFAEUAAAARhFAAQAAYBQBFAAAAEZxExIAoFvIub8i3C10\nujUPTQl3C0BYsAMKAAAAowigZ3Dw4EHl5eUpPT1dubm52rdvX7hbAgAAiAoE0NNobGxUYWGhpk2b\npj179mjGjBmaM2eOfD5fuFsDAADo9vg7oKdRVVUlu92u/Px8SVJeXp6ee+457dixQ9dee22YuwMA\nRItoe8h5JDzgHN0DAfQ0PB6P3G530LGUlBTV1NSEqSMAACJfNN4ohq5BAD2N+vp6OZ3OoGMOh0MN\nDQ0hvd5ms8nexX+5wW63de0CAAAgaoU7RxBAT8PpdLYJmw0NDXK5XCG9PimpT1e01QaXOgAAwLno\n1693WNfnJqTTGDJkiDweT9Axj8ej1NTUMHUEAAAQPQigp5GZmammpiaVlZWpublZ5eXl8nq9mjBh\nQrhbAwAA6PZslmVZ4W4iEh06dEhLlizR4cOHlZycrCVLlig9PT3cbQEAAHR7BFAAAAAY
xSV4AAAA\nGEUABQAAgFEEUAAAABhFAAUAAIBRBNAId/DgQeXl5Sk9PV25ubnat2/fac+rrKxUVlaW0tPTVVBQ\nIK/Xa7jTnivUGb344ov6+c9/rlGjRmn69Omqrq423GnPFuqcTtm9e7eGDh0qn89nqEOEOqPq6mrd\ncMMNGjlypHJycrR7927DnfZsoc5pw4YNysrKUkZGhm666SZ98MEHhjvF/v37232EZFizg4WI1dDQ\nYP30pz+11q9fbzU1NVkbNmywxo0bZ504cSLovA8//NAaNWqUtW/fPsvv91u//vWvrdmzZ4ep654l\n1Bnt3r3buuKKK6yDBw9aLS0t1n/+539aGRkZVm1tbZg671lCndMpx48ftyZPnmxdcsklZzwHnSvU\nGR09etQaPXq09dprr1mtra3W5s2brYyMDMvv94ep857lh/x7aezYsVZNTY3V0tJirVq1ypoyZUqY\nuu55WltbrQ0bNlgZGRnW2LFjT3tOuLMDO6ARrKqqSna7Xfn5+YqLi1NeXp6SkpK0Y8eOoPM2b96s\nrKwspaWlyeFwaP78+dq5cye7oAaEOqOjR4/qn//5nzVs2DDZ7XbdcMMNiomJ0V//+tcwdd6zhDqn\nU5YsWaJrr73WcJc9W6gzqqio0JVXXqlrrrlGNptN2dnZeu6552S3868zE0Kd06effqrW1la1tLTI\nsizZ7XY5HI4wdd3zPPXUU1q7dq0KCwvPeE64swM/sRHM4/HI7XYHHUtJSVFNTU3QsZqamqCPCU1I\nSFDfvn3bfJwoOl+oM7r++ut1xx13BL5+99135fP52rwWXSPUOUnSpk2b9M033+jmm2821R4U+owO\nHDig888/X3PnztUVV1yhG2+8US0tLerVq5fJdnusUOc0YcIEXXTRRbruuus0fPhwrVq1SsuXLzfZ\nao82ffp0VVRUaPjw4Wc8J9zZgQAawerr6+V0OoOOORwONTQ0BB3z+/1t/svS6XTK7/d3eY89Xagz\n+r6//vWvmjdvnubNm6fExMSubhEKfU5HjhzRE088od/+9rcm24NCn1FdXZ02bNigm2++WW+//bam\nTp2qO++8U3V1dSbb7bFCnVNjY6NSU1NVXl6u9957TzNnztTdd9/d7u9GdJ6BAwfKZrO1e064swMB\nNII5nc42P6wNDQ1yuVxBx84USv/+PHS+UGd0yttvv62bb75Zt9xyi+68804TLUKhzam1tVUPPvig\n7rvvPp1//vmmW+zxQv1Z6tWrlyZOnKgJEyYoLi5Ot9xyi1wul/bu3Wuy3R4r1Dk9+eSTuuCCCzR8\n+HDFx8dr7ty5am5u1q5du0y2i3aEOzsQQCPYkCFD2myFezyeoC1zSXK73UHn1dbWqq6ujsu7BoQ6\nI0l66aWXNG/ePC1evFh33XWXqRah0OZ09OhRvf/++1qyZIlGjx6tqVOnSpImTZrEEwsMCPVnKSUl\nRU1NTUHHWltbZfGp0kaEOqcjR44EzclmsykmJkYxMTFG+sTZhTs7EEAjWGZmppqamlRWVqbm5maV\nl5fL6/W2eaRCdna2tm7dqurqajU2Nqq0tFQTJ05UQkJCmDrvOUKd0e7du/XII4/o6aefVnZ2dpi6\n7blCmdOgQYO0f/9+VVdXq7q6Wps2bZIk7dixQ6NHjw5X6z1GqD9Lubm5evvtt7V9+3a1traqrKxM\njY2NuuKKK8LUec8S6pwmT56s8vJyHThwQCdPntSzzz6rlpYWZWRkhKlz/L2wZwdj99vjnHz44YfW\njTfeaKWnp1u5ubnWe++9Z1mWZS1atMhatGhR4LxXXnnF+vnPf26NHDnSuuOOOyyv1xuulnucUGY0\na9Ysa+jQoVZ6enrQPzt27Ahn6z1KqD9Lp3z++ec8hsmwUGe0c+dOKzc310pPT7duuOEGa9++feFq\nuUcKZU6tra3WqlWrrKuuusrKyMiwbr31Vuvw4cPh
bLtHqqqqCnoMUyRlB5tlcd0CAAAA5nAJHgAA\nAEYRQAEAAGAUARQAAABGEUABAABgFAEUAAAARhFAAQAAYBQBFAAAAEYRQAEAAGAUARQAAABG/V9D\n2LYHfcbrggAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x7f9ff859d438>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Sanity check: distribution of the out-of-fold MLP predictions on the test set.\n",
    "ax = pd.DataFrame(features_test).plot.hist()\n",
    "ax.set(title='Out-of-fold MLP predictions (test set)', xlabel='Prediction value');"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
