{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/kain/Workstation/PyEnv/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n",
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import os\n",
    "import gc\n",
    "import numpy as np \n",
    "import pandas as pd \n",
    "from sklearn.model_selection import KFold\n",
    "from sklearn import preprocessing\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from keras.callbacks import Callback\n",
    "from keras.callbacks import EarlyStopping, ModelCheckpoint\n",
    "import tensorflow as tf\n",
    "import keras as ks\n",
    "from keras import backend as K\n",
    "from keras.models import load_model\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Level-1 out-of-fold predictions (meta-features) for stacking;\n",
     "# train/test files must come from the same feature-generation run (v0.1.2).\n",
     "train_stacked = pd.read_csv('../oofs/kain-train-features-v0.1.2.csv', index_col=0)\n",
     "test_stacked = pd.read_csv('../oofs/kain-test-features-v0.1.2.csv', index_col=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>LGB_cat_0_7956629719199435_csv</th>\n",
       "      <th>LGB_cat_0_796965615822933_csv</th>\n",
       "      <th>LGB_cat_0_7971758465559547_csv</th>\n",
       "      <th>LGB_cat_0_7971922012487902_csv</th>\n",
       "      <th>LGB_cat_0_7975664475835779_csv</th>\n",
       "      <th>LGB_cat_0_7982459487910958_csv</th>\n",
       "      <th>LGB_cat_0_7985379016892897_csv</th>\n",
       "      <th>LGB_cat_0_7986254088147751_csv</th>\n",
       "      <th>LGB_cat_0_7987384223756171_csv</th>\n",
       "      <th>LGB_cat_0_7988757981190066_csv</th>\n",
       "      <th>...</th>\n",
       "      <th>TARGET.72</th>\n",
       "      <th>TARGET.73</th>\n",
       "      <th>TARGET.74</th>\n",
       "      <th>TARGET.75</th>\n",
       "      <th>TARGET.76</th>\n",
       "      <th>TARGET.77</th>\n",
       "      <th>TARGET.78</th>\n",
       "      <th>TARGET.79</th>\n",
       "      <th>TARGET.80</th>\n",
       "      <th>TARGET.81</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.263165</td>\n",
       "      <td>0.210463</td>\n",
       "      <td>0.254966</td>\n",
       "      <td>0.272226</td>\n",
       "      <td>0.242135</td>\n",
       "      <td>0.217145</td>\n",
       "      <td>0.170371</td>\n",
       "      <td>0.194649</td>\n",
       "      <td>0.213564</td>\n",
       "      <td>0.166478</td>\n",
       "      <td>...</td>\n",
       "      <td>0.673457</td>\n",
       "      <td>0.577123</td>\n",
       "      <td>0.615324</td>\n",
       "      <td>0.409131</td>\n",
       "      <td>0.339133</td>\n",
       "      <td>0.310050</td>\n",
       "      <td>0.292603</td>\n",
       "      <td>0.274747</td>\n",
       "      <td>0.229829</td>\n",
       "      <td>0.377313</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.028534</td>\n",
       "      <td>0.022802</td>\n",
       "      <td>0.021364</td>\n",
       "      <td>0.027252</td>\n",
       "      <td>0.025308</td>\n",
       "      <td>0.024883</td>\n",
       "      <td>0.021984</td>\n",
       "      <td>0.021119</td>\n",
       "      <td>0.023010</td>\n",
       "      <td>0.023200</td>\n",
       "      <td>...</td>\n",
       "      <td>0.103917</td>\n",
       "      <td>0.039187</td>\n",
       "      <td>0.074826</td>\n",
       "      <td>0.037902</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.023711</td>\n",
       "      <td>0.018794</td>\n",
       "      <td>0.009405</td>\n",
       "      <td>0.019214</td>\n",
       "      <td>0.017564</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.044988</td>\n",
       "      <td>0.035597</td>\n",
       "      <td>0.037636</td>\n",
       "      <td>0.041098</td>\n",
       "      <td>0.039781</td>\n",
       "      <td>0.038767</td>\n",
       "      <td>0.047803</td>\n",
       "      <td>0.032391</td>\n",
       "      <td>0.034830</td>\n",
       "      <td>0.045556</td>\n",
       "      <td>...</td>\n",
       "      <td>0.098294</td>\n",
       "      <td>0.068005</td>\n",
       "      <td>0.105474</td>\n",
       "      <td>0.038344</td>\n",
       "      <td>0.055681</td>\n",
       "      <td>0.040151</td>\n",
       "      <td>0.030012</td>\n",
       "      <td>0.048608</td>\n",
       "      <td>0.041607</td>\n",
       "      <td>0.024725</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0.028133</td>\n",
       "      <td>0.023076</td>\n",
       "      <td>0.025890</td>\n",
       "      <td>0.025638</td>\n",
       "      <td>0.025989</td>\n",
       "      <td>0.026900</td>\n",
       "      <td>0.027623</td>\n",
       "      <td>0.021785</td>\n",
       "      <td>0.027436</td>\n",
       "      <td>0.025201</td>\n",
       "      <td>...</td>\n",
       "      <td>0.035021</td>\n",
       "      <td>0.080346</td>\n",
       "      <td>0.060589</td>\n",
       "      <td>0.016141</td>\n",
       "      <td>0.048706</td>\n",
       "      <td>0.051309</td>\n",
       "      <td>0.037157</td>\n",
       "      <td>0.041925</td>\n",
       "      <td>0.030234</td>\n",
       "      <td>0.035943</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.061272</td>\n",
       "      <td>0.049731</td>\n",
       "      <td>0.047512</td>\n",
       "      <td>0.059441</td>\n",
       "      <td>0.059825</td>\n",
       "      <td>0.053356</td>\n",
       "      <td>0.059691</td>\n",
       "      <td>0.043630</td>\n",
       "      <td>0.050947</td>\n",
       "      <td>0.054172</td>\n",
       "      <td>...</td>\n",
       "      <td>0.129976</td>\n",
       "      <td>0.115208</td>\n",
       "      <td>0.147167</td>\n",
       "      <td>0.083344</td>\n",
       "      <td>0.075375</td>\n",
       "      <td>0.100184</td>\n",
       "      <td>0.088149</td>\n",
       "      <td>0.061036</td>\n",
       "      <td>0.056570</td>\n",
       "      <td>0.059047</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5 rows × 154 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "   LGB_cat_0_7956629719199435_csv  LGB_cat_0_796965615822933_csv  \\\n",
       "0                        0.263165                       0.210463   \n",
       "1                        0.028534                       0.022802   \n",
       "2                        0.044988                       0.035597   \n",
       "3                        0.028133                       0.023076   \n",
       "4                        0.061272                       0.049731   \n",
       "\n",
       "   LGB_cat_0_7971758465559547_csv  LGB_cat_0_7971922012487902_csv  \\\n",
       "0                        0.254966                        0.272226   \n",
       "1                        0.021364                        0.027252   \n",
       "2                        0.037636                        0.041098   \n",
       "3                        0.025890                        0.025638   \n",
       "4                        0.047512                        0.059441   \n",
       "\n",
       "   LGB_cat_0_7975664475835779_csv  LGB_cat_0_7982459487910958_csv  \\\n",
       "0                        0.242135                        0.217145   \n",
       "1                        0.025308                        0.024883   \n",
       "2                        0.039781                        0.038767   \n",
       "3                        0.025989                        0.026900   \n",
       "4                        0.059825                        0.053356   \n",
       "\n",
       "   LGB_cat_0_7985379016892897_csv  LGB_cat_0_7986254088147751_csv  \\\n",
       "0                        0.170371                        0.194649   \n",
       "1                        0.021984                        0.021119   \n",
       "2                        0.047803                        0.032391   \n",
       "3                        0.027623                        0.021785   \n",
       "4                        0.059691                        0.043630   \n",
       "\n",
       "   LGB_cat_0_7987384223756171_csv  LGB_cat_0_7988757981190066_csv    ...      \\\n",
       "0                        0.213564                        0.166478    ...       \n",
       "1                        0.023010                        0.023200    ...       \n",
       "2                        0.034830                        0.045556    ...       \n",
       "3                        0.027436                        0.025201    ...       \n",
       "4                        0.050947                        0.054172    ...       \n",
       "\n",
       "   TARGET.72  TARGET.73  TARGET.74  TARGET.75  TARGET.76  TARGET.77  \\\n",
       "0   0.673457   0.577123   0.615324   0.409131   0.339133   0.310050   \n",
       "1   0.103917   0.039187   0.074826   0.037902   0.000000   0.023711   \n",
       "2   0.098294   0.068005   0.105474   0.038344   0.055681   0.040151   \n",
       "3   0.035021   0.080346   0.060589   0.016141   0.048706   0.051309   \n",
       "4   0.129976   0.115208   0.147167   0.083344   0.075375   0.100184   \n",
       "\n",
       "   TARGET.78  TARGET.79  TARGET.80  TARGET.81  \n",
       "0   0.292603   0.274747   0.229829   0.377313  \n",
       "1   0.018794   0.009405   0.019214   0.017564  \n",
       "2   0.030012   0.048608   0.041607   0.024725  \n",
       "3   0.037157   0.041925   0.030234   0.035943  \n",
       "4   0.088149   0.061036   0.056570   0.059047  \n",
       "\n",
       "[5 rows x 154 columns]"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Quick sanity check: preview the first rows of the stacked meta-features\n",
     "train_stacked.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Original competition training data: only needed here for the TARGET\n",
     "# labels and the train row count (to split stacked features later).\n",
     "train = pd.read_csv('../../data/application_train.csv')\n",
     "y = train['TARGET']\n",
     "\n",
     "n_train = train.shape[0]  # number of training rows"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Column indices (into train_stacked / test_stacked) of the OOF\n",
     "# meta-features kept after feature selection. In this example 60 OOFs\n",
     "# are selected; the best run used about 80.\n",
     "selected_features = [2,\n",
     " 3,\n",
     " 6,\n",
     " 9,\n",
     " 10,\n",
     " 11,\n",
     " 13,\n",
     " 14,\n",
     " 15,\n",
     " 16,\n",
     " 17,\n",
     " 19,\n",
     " 20,\n",
     " 21,\n",
     " 22,\n",
     " 24,\n",
     " 25,\n",
     " 26,\n",
     " 27,\n",
     " 28,\n",
     " 32,\n",
     " 34,\n",
     " 36,\n",
     " 38,\n",
     " 39,\n",
     " 40,\n",
     " 41,\n",
     " 42,\n",
     " 43,\n",
     " 44,\n",
     " 47,\n",
     " 48,\n",
     " 49,\n",
     " 50,\n",
     " 52,\n",
     " 60,\n",
     " 61,\n",
     " 62,\n",
     " 64,\n",
     " 66,\n",
     " 70,\n",
     " 71,\n",
     " 72,\n",
     " 73,\n",
     " 74,\n",
     " 75,\n",
     " 82,\n",
     " 85,\n",
     " 89,\n",
     " 90,\n",
     " 91,\n",
     " 94,\n",
     " 95,\n",
     " 96,\n",
     " 102,\n",
     " 103,\n",
     " 104,\n",
     " 105,\n",
     " 106,\n",
     " 108]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Keep only the selected meta-feature columns; .values yields the raw\n",
     "# numpy arrays that Keras expects as model input.\n",
     "train_features = train_stacked.iloc[:, selected_features].values\n",
     "test_features = test_stacked.iloc[:, selected_features].values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "class RocAucEvaluation(Callback):\n",
    "    def __init__(self, validation_data=(), interval=1):\n",
    "        super(Callback, self).__init__()\n",
    "        self.aucs = []\n",
    "        self.interval = interval\n",
    "        self.X_val, self.y_val = validation_data\n",
    "\n",
    "    def on_epoch_end(self, epoch, logs={}):\n",
    "        if epoch % self.interval == 0:\n",
    "            y_pred = self.model.predict(self.X_val, verbose=0)\n",
    "            self.aucs.append(roc_auc_score(self.y_val, y_pred))\n",
    "            score = roc_auc_score(self.y_val, y_pred)\n",
    "            print(\"\\n ROC-AUC - epoch: {:d} - score: {:.6f}\".format(epoch+1, score))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "from contextlib import contextmanager\n",
    "@contextmanager\n",
    "def timer(name):\n",
    "    t0 = time.time()\n",
    "    yield\n",
    "    print('[{' + name + '}] done in {' + str(round(time.time() - t0, 3)) + '} s')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Modeling Stage\n",
      "TRAIN:  [     0      1      2 ... 307505 307506 307508] TEST:  [     9     16     25 ... 307507 307509 307510]\n",
      "(246008, 60) (61503, 60) (48744, 60)\n",
      "Train on 246008 samples, validate on 61503 samples\n",
      "Epoch 1/10\n",
      "246008/246008 [==============================] - 2s 6us/step - loss: 0.2425 - binary_crossentropy: 0.2425 - val_loss: 0.2324 - val_binary_crossentropy: 0.2324\n",
      "\n",
      " ROC-AUC - epoch: 1 - score: 0.801987\n",
      "\n",
      "Epoch 00001: val_loss improved from -inf to 0.23243, saving model to best_model.hdf5\n",
      "Epoch 2/10\n",
      "246008/246008 [==============================] - 1s 6us/step - loss: 0.2387 - binary_crossentropy: 0.2387 - val_loss: 0.2363 - val_binary_crossentropy: 0.2363\n",
      "\n",
      " ROC-AUC - epoch: 2 - score: 0.802078\n",
      "\n",
      "Epoch 00002: val_loss improved from 0.23243 to 0.23625, saving model to best_model.hdf5\n",
      "Epoch 3/10\n",
      "246008/246008 [==============================] - 1s 6us/step - loss: 0.2390 - binary_crossentropy: 0.2390 - val_loss: 0.2349 - val_binary_crossentropy: 0.2349\n",
      "\n",
      " ROC-AUC - epoch: 3 - score: 0.801785\n",
      "\n",
      "Epoch 00003: val_loss did not improve from 0.23625\n",
      "[{pass 1}] done in {6.302} s\n",
      "Train on 246008 samples, validate on 61503 samples\n",
      "Epoch 1/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2363 - binary_crossentropy: 0.2363 - val_loss: 0.2315 - val_binary_crossentropy: 0.2315\n",
      "\n",
      " ROC-AUC - epoch: 1 - score: 0.802483\n",
      "\n",
      "Epoch 00001: val_loss did not improve from 0.23625\n",
      "Epoch 2/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2357 - binary_crossentropy: 0.2357 - val_loss: 0.2341 - val_binary_crossentropy: 0.2341\n",
      "\n",
      " ROC-AUC - epoch: 2 - score: 0.802677\n",
      "\n",
      "Epoch 00002: val_loss did not improve from 0.23625\n",
      "Epoch 3/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2359 - binary_crossentropy: 0.2359 - val_loss: 0.2335 - val_binary_crossentropy: 0.2335\n",
      "\n",
      " ROC-AUC - epoch: 3 - score: 0.802578\n",
      "\n",
      "Epoch 00003: val_loss did not improve from 0.23625\n",
      "[{pass 2}] done in {4.608} s\n",
      "Train on 246008 samples, validate on 61503 samples\n",
      "Epoch 1/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2356 - binary_crossentropy: 0.2356 - val_loss: 0.2326 - val_binary_crossentropy: 0.2326\n",
      "\n",
      " ROC-AUC - epoch: 1 - score: 0.802648\n",
      "\n",
      "Epoch 00001: val_loss did not improve from 0.23625\n",
      "Epoch 2/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2349 - binary_crossentropy: 0.2349 - val_loss: 0.2323 - val_binary_crossentropy: 0.2323\n",
      "\n",
      " ROC-AUC - epoch: 2 - score: 0.802659\n",
      "\n",
      "Epoch 00002: val_loss did not improve from 0.23625\n",
      "Epoch 3/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2351 - binary_crossentropy: 0.2351 - val_loss: 0.2321 - val_binary_crossentropy: 0.2321\n",
      "\n",
      " ROC-AUC - epoch: 3 - score: 0.802654\n",
      "\n",
      "Epoch 00003: val_loss did not improve from 0.23625\n",
      "Epoch 4/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2356 - binary_crossentropy: 0.2356 - val_loss: 0.2312 - val_binary_crossentropy: 0.2312\n",
      "\n",
      " ROC-AUC - epoch: 4 - score: 0.802684\n",
      "\n",
      "Epoch 00004: val_loss did not improve from 0.23625\n",
      "Epoch 5/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2346 - binary_crossentropy: 0.2346 - val_loss: 0.2316 - val_binary_crossentropy: 0.2316\n",
      "\n",
      " ROC-AUC - epoch: 5 - score: 0.802577\n",
      "\n",
      "Epoch 00005: val_loss did not improve from 0.23625\n",
      "Epoch 6/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2348 - binary_crossentropy: 0.2348 - val_loss: 0.2314 - val_binary_crossentropy: 0.2314\n",
      "\n",
      " ROC-AUC - epoch: 6 - score: 0.802701\n",
      "\n",
      "Epoch 00006: val_loss did not improve from 0.23625\n",
      "[{pass 3}] done in {8.601} s\n",
      "Train on 246008 samples, validate on 61503 samples\n",
      "Epoch 1/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2348 - binary_crossentropy: 0.2348 - val_loss: 0.2311 - val_binary_crossentropy: 0.2311\n",
      "\n",
      " ROC-AUC - epoch: 1 - score: 0.802803\n",
      "\n",
      "Epoch 00001: val_loss did not improve from 0.23625\n",
      "Epoch 2/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2346 - binary_crossentropy: 0.2346 - val_loss: 0.2320 - val_binary_crossentropy: 0.2320\n",
      "\n",
      " ROC-AUC - epoch: 2 - score: 0.802734\n",
      "\n",
      "Epoch 00002: val_loss did not improve from 0.23625\n",
      "Epoch 3/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2343 - binary_crossentropy: 0.2343 - val_loss: 0.2312 - val_binary_crossentropy: 0.2312\n",
      "\n",
      " ROC-AUC - epoch: 3 - score: 0.802801\n",
      "\n",
      "Epoch 00003: val_loss did not improve from 0.23625\n",
      "[{pass 4}] done in {4.329} s\n",
      "Train on 246008 samples, validate on 61503 samples\n",
      "Epoch 1/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2342 - binary_crossentropy: 0.2342 - val_loss: 0.2311 - val_binary_crossentropy: 0.2311\n",
      "\n",
      " ROC-AUC - epoch: 1 - score: 0.802832\n",
      "\n",
      "Epoch 00001: val_loss did not improve from 0.23625\n",
      "Epoch 2/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2342 - binary_crossentropy: 0.2342 - val_loss: 0.2316 - val_binary_crossentropy: 0.2316\n",
      "\n",
      " ROC-AUC - epoch: 2 - score: 0.802722\n",
      "\n",
      "Epoch 00002: val_loss did not improve from 0.23625\n",
      "Epoch 3/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2341 - binary_crossentropy: 0.2341 - val_loss: 0.2314 - val_binary_crossentropy: 0.2314\n",
      "\n",
      " ROC-AUC - epoch: 3 - score: 0.802897\n",
      "\n",
      "Epoch 00003: val_loss did not improve from 0.23625\n",
      "[{pass 5}] done in {4.449} s\n",
      "0.8028972132022534\n",
      "[{fit_predict}] done in {29.937} s\n",
      "(246008, 60) (61503, 60) (48744, 60)\n",
      "Train on 246008 samples, validate on 61503 samples\n",
      "Epoch 1/10\n",
      "246008/246008 [==============================] - 2s 6us/step - loss: 0.2423 - binary_crossentropy: 0.2423 - val_loss: 0.2361 - val_binary_crossentropy: 0.2361\n",
      "\n",
      " ROC-AUC - epoch: 1 - score: 0.801745\n",
      "\n",
      "Epoch 00001: val_loss improved from -inf to 0.23612, saving model to best_model.hdf5\n",
      "Epoch 2/10\n",
      "246008/246008 [==============================] - 1s 6us/step - loss: 0.2386 - binary_crossentropy: 0.2386 - val_loss: 0.2328 - val_binary_crossentropy: 0.2328\n",
      "\n",
      " ROC-AUC - epoch: 2 - score: 0.801974\n",
      "\n",
      "Epoch 00002: val_loss did not improve from 0.23612\n",
      "Epoch 3/10\n",
      "246008/246008 [==============================] - 1s 6us/step - loss: 0.2389 - binary_crossentropy: 0.2389 - val_loss: 0.2327 - val_binary_crossentropy: 0.2327\n",
      "\n",
      " ROC-AUC - epoch: 3 - score: 0.801125\n",
      "\n",
      "Epoch 00003: val_loss did not improve from 0.23612\n",
      "Epoch 4/10\n",
      "246008/246008 [==============================] - 1s 6us/step - loss: 0.2386 - binary_crossentropy: 0.2386 - val_loss: 0.2345 - val_binary_crossentropy: 0.2345\n",
      "\n",
      " ROC-AUC - epoch: 4 - score: 0.801996\n",
      "\n",
      "Epoch 00004: val_loss did not improve from 0.23612\n",
      "Epoch 5/10\n",
      "246008/246008 [==============================] - 1s 6us/step - loss: 0.2379 - binary_crossentropy: 0.2379 - val_loss: 0.2332 - val_binary_crossentropy: 0.2332\n",
      "\n",
      " ROC-AUC - epoch: 5 - score: 0.802218\n",
      "\n",
      "Epoch 00005: val_loss did not improve from 0.23612\n",
      "[{pass 1}] done in {10.258} s\n",
      "Train on 246008 samples, validate on 61503 samples\n",
      "Epoch 1/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2365 - binary_crossentropy: 0.2365 - val_loss: 0.2327 - val_binary_crossentropy: 0.2327\n",
      "\n",
      " ROC-AUC - epoch: 1 - score: 0.802442\n",
      "\n",
      "Epoch 00001: val_loss did not improve from 0.23612\n",
      "Epoch 2/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2354 - binary_crossentropy: 0.2354 - val_loss: 0.2328 - val_binary_crossentropy: 0.2328\n",
      "\n",
      " ROC-AUC - epoch: 2 - score: 0.802481\n",
      "\n",
      "Epoch 00002: val_loss did not improve from 0.23612\n",
      "Epoch 3/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2360 - binary_crossentropy: 0.2360 - val_loss: 0.2323 - val_binary_crossentropy: 0.2323\n",
      "\n",
      " ROC-AUC - epoch: 3 - score: 0.802469\n",
      "\n",
      "Epoch 00003: val_loss did not improve from 0.23612\n",
      "Epoch 4/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2354 - binary_crossentropy: 0.2354 - val_loss: 0.2318 - val_binary_crossentropy: 0.2318\n",
      "\n",
      " ROC-AUC - epoch: 4 - score: 0.802274\n",
      "\n",
      "Epoch 00004: val_loss did not improve from 0.23612\n",
      "Epoch 5/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2352 - binary_crossentropy: 0.2352 - val_loss: 0.2317 - val_binary_crossentropy: 0.2317\n",
      "\n",
      " ROC-AUC - epoch: 5 - score: 0.802473\n",
      "\n",
      "Epoch 00005: val_loss did not improve from 0.23612\n",
      "Epoch 6/10\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2357 - binary_crossentropy: 0.2357 - val_loss: 0.2317 - val_binary_crossentropy: 0.2317\n",
      "\n",
      " ROC-AUC - epoch: 6 - score: 0.801996\n",
      "\n",
      "Epoch 00006: val_loss did not improve from 0.23612\n",
      "Epoch 7/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2356 - binary_crossentropy: 0.2356 - val_loss: 0.2340 - val_binary_crossentropy: 0.2340\n",
      "\n",
      " ROC-AUC - epoch: 7 - score: 0.802732\n",
      "\n",
      "Epoch 00007: val_loss did not improve from 0.23612\n",
      "[{pass 2}] done in {10.695} s\n",
      "Train on 246008 samples, validate on 61503 samples\n",
      "Epoch 1/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2346 - binary_crossentropy: 0.2346 - val_loss: 0.2311 - val_binary_crossentropy: 0.2311\n",
      "\n",
      " ROC-AUC - epoch: 1 - score: 0.802694\n",
      "\n",
      "Epoch 00001: val_loss did not improve from 0.23612\n",
      "Epoch 2/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2347 - binary_crossentropy: 0.2347 - val_loss: 0.2312 - val_binary_crossentropy: 0.2312\n",
      "\n",
      " ROC-AUC - epoch: 2 - score: 0.802306\n",
      "\n",
      "Epoch 00002: val_loss did not improve from 0.23612\n",
      "Epoch 3/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2347 - binary_crossentropy: 0.2347 - val_loss: 0.2324 - val_binary_crossentropy: 0.2324\n",
      "\n",
      " ROC-AUC - epoch: 3 - score: 0.802822\n",
      "\n",
      "Epoch 00003: val_loss did not improve from 0.23612\n",
      "[{pass 3}] done in {4.373} s\n",
      "Train on 246008 samples, validate on 61503 samples\n",
      "Epoch 1/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2347 - binary_crossentropy: 0.2347 - val_loss: 0.2315 - val_binary_crossentropy: 0.2315\n",
      "\n",
      " ROC-AUC - epoch: 1 - score: 0.802703\n",
      "\n",
      "Epoch 00001: val_loss did not improve from 0.23612\n",
      "Epoch 2/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2341 - binary_crossentropy: 0.2341 - val_loss: 0.2321 - val_binary_crossentropy: 0.2321\n",
      "\n",
      " ROC-AUC - epoch: 2 - score: 0.802835\n",
      "\n",
      "Epoch 00002: val_loss did not improve from 0.23612\n",
      "Epoch 3/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2348 - binary_crossentropy: 0.2348 - val_loss: 0.2311 - val_binary_crossentropy: 0.2311\n",
      "\n",
      " ROC-AUC - epoch: 3 - score: 0.802810\n",
      "\n",
      "Epoch 00003: val_loss did not improve from 0.23612\n",
      "Epoch 4/10\n",
      "246008/246008 [==============================] - 1s 3us/step - loss: 0.2341 - binary_crossentropy: 0.2341 - val_loss: 0.2314 - val_binary_crossentropy: 0.2314\n",
      "\n",
      " ROC-AUC - epoch: 4 - score: 0.802670\n",
      "\n",
      "Epoch 00004: val_loss did not improve from 0.23612\n",
      "Epoch 5/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2341 - binary_crossentropy: 0.2341 - val_loss: 0.2310 - val_binary_crossentropy: 0.2310\n",
      "\n",
      " ROC-AUC - epoch: 5 - score: 0.802957\n",
      "\n",
      "Epoch 00005: val_loss did not improve from 0.23612\n",
      "Epoch 6/10\n",
      "246008/246008 [==============================] - 1s 4us/step - loss: 0.2341 - binary_crossentropy: 0.2341 - val_loss: 0.2311 - val_binary_crossentropy: 0.2311\n",
      "\n",
      " ROC-AUC - epoch: 6 - score: 0.802886\n",
      "\n",
      "Epoch 00006: val_loss did not improve from 0.23612\n",
      "Epoch 7/10\n",
      " 21504/246008 [=>............................] - ETA: 0s - loss: 0.2255 - binary_crossentropy: 0.2255"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-18-f4331d39f7e1>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     65\u001b[0m                             model.fit(x=x_train, y=train['TARGET'].iloc[train_index].values, batch_size=batch_size+(batch_size*(2*i)), epochs=epochs, \n\u001b[1;32m     66\u001b[0m                                 \u001b[0mvalidation_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx_valid\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtrain\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'TARGET'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miloc\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mvalid_index\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mra_val\u001b[0m\u001b[0;34m,\u001b[0m  \u001b[0mearly_stop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcheck_point\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 67\u001b[0;31m                                       shuffle=True, class_weight={0:1, 1:1})\n\u001b[0m\u001b[1;32m     68\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     69\u001b[0m                     \u001b[0;31m#model = load_model(file_path)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workstation/PyEnv/lib/python3.5/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)\u001b[0m\n\u001b[1;32m   1703\u001b[0m                               \u001b[0minitial_epoch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minitial_epoch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1704\u001b[0m                               \u001b[0msteps_per_epoch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msteps_per_epoch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1705\u001b[0;31m                               validation_steps=validation_steps)\n\u001b[0m\u001b[1;32m   1706\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1707\u001b[0m     def evaluate(self, x=None, y=None,\n",
      "\u001b[0;32m~/Workstation/PyEnv/lib/python3.5/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36m_fit_loop\u001b[0;34m(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)\u001b[0m\n\u001b[1;32m   1234\u001b[0m                         \u001b[0mins_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mins_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtoarray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1235\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1236\u001b[0;31m                     \u001b[0mouts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mins_batch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1237\u001b[0m                     \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mouts\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1238\u001b[0m                         \u001b[0mouts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mouts\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workstation/PyEnv/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m   2480\u001b[0m         \u001b[0msession\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_session\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2481\u001b[0m         updated = session.run(fetches=fetches, feed_dict=feed_dict,\n\u001b[0;32m-> 2482\u001b[0;31m                               **self.session_kwargs)\n\u001b[0m\u001b[1;32m   2483\u001b[0m         \u001b[0;32mreturn\u001b[0m \u001b[0mupdated\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2484\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workstation/PyEnv/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m    903\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    904\u001b[0m       result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 905\u001b[0;31m                          run_metadata_ptr)\n\u001b[0m\u001b[1;32m    906\u001b[0m       \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    907\u001b[0m         \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workstation/PyEnv/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m   1138\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1139\u001b[0m       results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1140\u001b[0;31m                              feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m   1141\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1142\u001b[0m       \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workstation/PyEnv/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m   1319\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1320\u001b[0m       return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1321\u001b[0;31m                            run_metadata)\n\u001b[0m\u001b[1;32m   1322\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1323\u001b[0m       \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workstation/PyEnv/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m   1325\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1326\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1327\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1328\u001b[0m     \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1329\u001b[0m       \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workstation/PyEnv/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m   1310\u001b[0m       \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1311\u001b[0m       return self._call_tf_sessionrun(\n\u001b[0;32m-> 1312\u001b[0;31m           options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m   1313\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1314\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Workstation/PyEnv/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m   1418\u001b[0m         return tf_session.TF_Run(\n\u001b[1;32m   1419\u001b[0m             \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1420\u001b[0;31m             status, run_metadata)\n\u001b[0m\u001b[1;32m   1421\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1422\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_call_tf_sessionprun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# 5-fold CV with a bagged Keras MLP on the stacked OOF features.\n",
    "aucs = []\n",
    "test_set = []\n",
    "validation_set = []\n",
    "print(\"\\nModeling Stage\")\n",
    "\n",
    "kf = KFold(n_splits=5, random_state=1002, shuffle=True)\n",
    "kf.get_n_splits(train_features)\n",
    "\n",
    "n_bagged = 6  # number of bagged networks per fold\n",
    "\n",
    "for train_index, valid_index in kf.split(train_features):\n",
    "    print(\"TRAIN: \", train_index, \"TEST: \", valid_index)\n",
    "\n",
    "    X = train_features\n",
    "    y_ = y.values\n",
    "    x_train, x_valid = X[train_index], X[valid_index]\n",
    "    y_train, y_valid = y_[train_index], y_[valid_index]\n",
    "\n",
    "    # Fit the scaler on the training fold only to avoid leakage into validation/test.\n",
    "    scaler = preprocessing.StandardScaler()\n",
    "    scaler.fit(x_train)\n",
    "    x_train = scaler.transform(x_train)\n",
    "    x_valid = scaler.transform(x_valid)\n",
    "    x_test = scaler.transform(test_features)\n",
    "\n",
    "    oof_baggs = np.zeros([n_bagged, x_valid.shape[0]])\n",
    "    preds_baggs = np.zeros([n_bagged, test_features.shape[0]])\n",
    "\n",
    "    # FIX: was range(1, n_bagged); that left row 0 of oof_baggs/preds_baggs all-zero\n",
    "    # (polluting the rank-average blend) and trained one fewer model than n_bagged.\n",
    "    for _it in range(n_bagged):\n",
    "\n",
    "        print(x_train.shape, x_valid.shape, x_test.shape)\n",
    "\n",
    "        file_path = \"best_model.hdf5\"\n",
    "        # FIX: val_loss must be minimised -- mode='max' made the checkpoint keep the WORST epoch.\n",
    "        check_point = ModelCheckpoint(file_path, monitor=\"val_loss\", verbose=1,\n",
    "                                      save_best_only=True, save_weights_only=False, mode='min', period=1)\n",
    "        ra_val = RocAucEvaluation(validation_data=(x_valid, y_valid), interval=1)\n",
    "        early_stop = EarlyStopping(monitor=\"val_loss\", mode=\"min\", patience=2)\n",
    "        gc.collect()\n",
    "\n",
    "        config = tf.ConfigProto(\n",
    "            intra_op_parallelism_threads=4, use_per_session_threads=4, inter_op_parallelism_threads=6)\n",
    "        # Fresh graph + session per bag so the previous model's graph is fully released.\n",
    "        with tf.Session(graph=tf.Graph(), config=config) as sess, timer('fit_predict'):\n",
    "            ks.backend.set_session(sess)\n",
    "            # Small MLP: sigmoid(128) -> relu(64) -> sigmoid(1); the per-bag seed (_it)\n",
    "            # gives each bagged model a different weight initialisation.\n",
    "            model_in = ks.Input(shape=(x_train.shape[1],), dtype='float32', sparse=False)\n",
    "            out = ks.layers.Dense(2 ** 7, activation='sigmoid', kernel_initializer=\n",
    "                ks.initializers.RandomNormal(mean=0.00, stddev=0.08, seed=_it))(model_in)\n",
    "            out = ks.layers.Dropout(0.4)(out)\n",
    "            out = ks.layers.Dense(2 ** 6, activation='relu', kernel_initializer=\n",
    "                ks.initializers.RandomNormal(mean=0.00, stddev=0.05, seed=_it))(out)\n",
    "            out = ks.layers.Dropout(0.4)(out)\n",
    "            out = ks.layers.Dense(1, activation='sigmoid', kernel_initializer=\n",
    "                ks.initializers.RandomNormal(mean=0.00, stddev=0.05, seed=_it))(out)\n",
    "            model = ks.models.Model(model_in, out)\n",
    "            model.compile(loss='binary_crossentropy',\n",
    "                          optimizer=ks.optimizers.Adam(lr=8e-3), metrics=['binary_crossentropy'])\n",
    "            batch_size = 2 ** 9\n",
    "            epochs = 10\n",
    "            nrounds = 5\n",
    "            # Re-fit in passes with a growing batch size (512, 1536, 2560, ...).\n",
    "            for i in range(nrounds):\n",
    "                with timer('pass ' + str(i + 1)):\n",
    "                    model.fit(x=x_train, y=train['TARGET'].iloc[train_index].values, batch_size=batch_size+(batch_size*(2*i)), epochs=epochs,\n",
    "                              validation_data=(x_valid, train['TARGET'].iloc[valid_index].values), callbacks=[ra_val, early_stop, check_point],\n",
    "                              shuffle=True, class_weight={0: 1, 1: 1})\n",
    "\n",
    "            y_pred = model.predict(x_valid).reshape(-1, 1)\n",
    "            print(roc_auc_score(y_true=train['TARGET'].iloc[valid_index].values, y_score=y_pred))\n",
    "\n",
    "            oof_baggs[_it, :] = model.predict(x_valid)[:, 0]\n",
    "            preds_baggs[_it, :] = model.predict(x_test)[:, 0]\n",
    "\n",
    "            del model\n",
    "            gc.collect()\n",
    "\n",
    "    # Rank-average the bagged predictions within the fold (after .T: one column per bag).\n",
    "    val_preds = pd.DataFrame(oof_baggs).T\n",
    "    test_preds = pd.DataFrame(preds_baggs).T\n",
    "\n",
    "    fold_blend = val_preds.rank(axis=0, method='min').mul(val_preds.shape[1] * [1 / val_preds.shape[1]]).sum(1) / val_preds.shape[0]\n",
    "    fold_auc = roc_auc_score(y_valid, fold_blend)  # compute once; reuse for the print and the running list\n",
    "    print('Fold AUC :', fold_auc)\n",
    "    aucs.append(fold_auc)\n",
    "    test_set.append(test_preds.rank(axis=0, method='min').mul(test_preds.shape[1] * [1 / test_preds.shape[1]]).sum(1) / test_preds.shape[0])\n",
    "    gc.collect()\n",
    "\n",
    "print('AVERAGED AUC :', np.mean(aucs))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stack the per-fold test prediction Series: after .T, rows are test samples and each column is one fold.\n",
    "test_preds = pd.DataFrame(test_set).T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 290,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(48744, 5)"
      ]
     },
     "execution_count": 290,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: expect (n_test_rows, n_folds).\n",
    "test_preds.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 291,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>SK_ID_CURR</th>\n",
       "      <th>TARGET</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>100001</td>\n",
       "      <td>0.331692</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>100005</td>\n",
       "      <td>0.821771</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>100013</td>\n",
       "      <td>0.375628</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>100028</td>\n",
       "      <td>0.366006</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>100038</td>\n",
       "      <td>0.785926</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   SK_ID_CURR    TARGET\n",
       "0      100001  0.331692\n",
       "1      100005  0.821771\n",
       "2      100013  0.375628\n",
       "3      100028  0.366006\n",
       "4      100038  0.785926"
      ]
     },
     "execution_count": 291,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Rank-average the per-fold test predictions into the final score,\n",
    "# drop it into the sample submission, and write the file.\n",
    "y_hat = test_preds.rank(axis=0, method='min').mul(test_preds.shape[1] * [1 / test_preds.shape[1]]).sum(1) / test_preds.shape[0]\n",
    "\n",
    "sampl_sub = pd.read_csv('../data/sample_submission.csv')\n",
    "sampl_sub['TARGET'] = y_hat.values\n",
    "sampl_sub.to_csv(\"ann-stack-submission.csv\", index=False)\n",
    "\n",
    "sampl_sub.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
