{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [],
   "source": [
    "# First ensemble with NSL-KDD\n",
    "# Parameters\n",
    "# Few parameters are not fully implemented yet\n",
    "\n",
    "#----------------------------------------------\n",
    "# 0 for not using it as base learner\n",
    "# 1 for using it as base learner\n",
    "# not implemented but in the code in someparts\n",
    "use_model_ada = 1 \n",
    "use_model_dnn = 1 \n",
    "use_model_mlp = 1 \n",
    "use_model_lgbm = 1 \n",
    "use_model_rf = 1 \n",
    "use_model_svm = 1\n",
    "use_model_knn = 1 \n",
    "#----------------------------------------------\n",
    "# 0 for training the model\n",
    "# 1 for using the saved version of the model\n",
    "\n",
    "# load_model_ada = 0 \n",
    "# load_model_dnn = 0 \n",
    "# load_model_mlp = 0 \n",
    "# load_model_lgbm = 0 \n",
    "# load_model_rf = 0 \n",
    "# load_model_svm = 0\n",
    "# load_model_knn = 0 \n",
    "#----------------------------------------------\n",
    "# not implemented but in the code in someparts\n",
    "load_model_ada = 1\n",
    "load_model_dnn = 1 \n",
    "load_model_mlp = 1 \n",
    "load_model_lgbm = 1 \n",
    "load_model_rf = 1                               \n",
    "load_model_svm = 1\n",
    "load_model_knn = 1 \n",
    "#----------------------------------------------\n",
    "\n",
    "# Implemented\n",
    "#----------------------------------------------\n",
    "feature_selection_bit = 0 # OFF\n",
    "# feature_selection_bit = 1# On\n",
    "pick_prob = 1 # set equal one to choose the dataset with probabilities, set to 0 to choose one with the classes.\n",
    "# pick_prob = 0\n",
    "generate_feature_importance = 0 # Generate Shap graphs\n",
    "\n",
    "\n",
    "# column_features = [\n",
    "#                     # 'dnn',\n",
    "#                 #    'rf',\n",
    "#                    'lgbm',\n",
    "#                 #    'ada',\n",
    "#                    'knn',\n",
    "#                    'mlp',\n",
    "#                    'svm',\n",
    "#                 #    'cat',\n",
    "#                 #    'xgb',\n",
    "#                    'lr',\n",
    "#                    'dt',\n",
    "#                    'label']\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Specify the name of the output text file\n",
    "if feature_selection_bit == 0:\n",
    "\n",
    "    if pick_prob == 0:\n",
    "        output_file_name = \"ensemble_level_01_all_features_classes.txt\"\n",
    "        with open(output_file_name, \"w\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "        with open(output_file_name, \"a\") as f: print('----ensemble_level_01_all_features_classes--', file = f)\n",
    "\n",
    "    elif pick_prob == 1:\n",
    "        output_file_name = \"ensemble_level_01_all_features_probabilites.txt\"\n",
    "        with open(output_file_name, \"w\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "        with open(output_file_name, \"a\") as f: print('----ensemble_level_01_all_features_probabilites--', file = f)\n",
    "\n",
    "elif feature_selection_bit == 1:\n",
    "    if pick_prob == 0:\n",
    "        output_file_name = \"ensemble_level_01_feature_selection_classes.txt\"\n",
    "        with open(output_file_name, \"w\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "        with open(output_file_name, \"a\") as f: print('----ensemble_level_01_feature_selection_classes--', file = f)\n",
    "    elif pick_prob == 1:\n",
    "        output_file_name = \"ensemble_level_01_feature_selection_probabilites.txt\"\n",
    "        with open(output_file_name, \"w\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "        with open(output_file_name, \"a\") as f: print('----ensemble_level_01_feature_selection_probabilites--', file = f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [],
   "source": [
    "#!/usr/bin/env python\n",
    "# coding: utf-8\n",
    "\n",
    "# In[1]:\n",
    "# importing required libraries\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import pickle # saving and loading trained model\n",
    "from os import path\n",
    "\n",
    "\n",
    "# importing required libraries for normalizing data\n",
    "from sklearn import preprocessing\n",
    "from sklearn.preprocessing import (StandardScaler, OrdinalEncoder,LabelEncoder, MinMaxScaler, OneHotEncoder)\n",
    "from sklearn.preprocessing import Normalizer, MaxAbsScaler , RobustScaler, PowerTransformer\n",
    "\n",
    "# importing library for plotting\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "\n",
    "from sklearn import metrics\n",
    "from sklearn.metrics import accuracy_score # for calculating accuracy of model\n",
    "from sklearn.model_selection import train_test_split # for splitting the dataset for training and testing\n",
    "from sklearn.metrics import classification_report # for generating a classification report of model\n",
    "\n",
    "from sklearn.metrics import precision_score\n",
    "from sklearn.metrics import recall_score\n",
    "from sklearn.metrics import f1_score\n",
    "\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from sklearn.metrics import roc_curve, auc\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow.keras.utils import to_categorical\n",
    "\n",
    "from keras.layers import Dense # importing dense layer\n",
    "\n",
    "from keras.layers import Input\n",
    "from keras.models import Model\n",
    "# representation of model layers\n",
    "#from keras.utils import plot_model\n",
    "from sklearn.metrics import confusion_matrix\n",
    "import shap\n",
    "import time\n",
    "\n",
    "from sklearn.metrics import accuracy_score\n",
    "from sklearn.metrics import precision_score\n",
    "from sklearn.metrics import recall_score\n",
    "from sklearn.metrics import f1_score\n",
    "from sklearn.metrics import balanced_accuracy_score\n",
    "from sklearn.metrics import matthews_corrcoef\n",
    "from sklearn.metrics import roc_auc_score\n",
    "\n",
    "import joblib\n",
    "from sklearn.model_selection import train_test_split\n",
    "import sklearn\n",
    "from tabulate import tabulate\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [],
   "source": [
    "#!/usr/bin/env python\n",
    "# coding: utf-8\n",
    "\n",
    "# In[1]:\n",
    "# importing required libraries\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import pickle # saving and loading trained model\n",
    "from os import path\n",
    "\n",
    "\n",
    "# importing required libraries for normalizing data\n",
    "from sklearn import preprocessing\n",
    "from sklearn.preprocessing import (StandardScaler, OrdinalEncoder,LabelEncoder, MinMaxScaler, OneHotEncoder)\n",
    "from sklearn.preprocessing import Normalizer, MaxAbsScaler , RobustScaler, PowerTransformer\n",
    "\n",
    "# importing library for plotting\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "\n",
    "from sklearn import metrics\n",
    "from sklearn.metrics import accuracy_score # for calculating accuracy of model\n",
    "from sklearn.model_selection import train_test_split # for splitting the dataset for training and testing\n",
    "from sklearn.metrics import classification_report # for generating a classification report of model\n",
    "\n",
    "from sklearn.metrics import precision_score\n",
    "from sklearn.metrics import recall_score\n",
    "from sklearn.metrics import f1_score\n",
    "\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from sklearn.metrics import roc_curve, auc\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow.keras.utils import to_categorical\n",
    "\n",
    "from keras.layers import Dense # importing dense layer\n",
    "\n",
    "from keras.layers import Input\n",
    "from keras.models import Model\n",
    "# representation of model layers\n",
    "#from keras.utils import plot_model\n",
    "from sklearn.metrics import confusion_matrix\n",
    "import shap\n",
    "\n",
    "import time\n",
    "start_program = time.time()\n",
    "\n",
    "from sklearn.metrics import accuracy_score\n",
    "from sklearn.metrics import precision_score\n",
    "from sklearn.metrics import recall_score\n",
    "from sklearn.metrics import f1_score\n",
    "from sklearn.metrics import balanced_accuracy_score\n",
    "from sklearn.metrics import matthews_corrcoef\n",
    "from sklearn.metrics import roc_auc_score\n",
    "\n",
    "import joblib\n",
    "from sklearn.model_selection import train_test_split\n",
    "import sklearn\n",
    "from tabulate import tabulate\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "def confusion_metrics (name_model,predictions,true_labels):\n",
    "\n",
    "    name = name_model\n",
    "    pred_label = predictions\n",
    "    y_test_01 = true_labels \n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print(name, file = f)\n",
    "\n",
    "\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('CONFUSION MATRIX')\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n",
    "    # pred_label = label[ypred]\n",
    "\n",
    "    confusion_matrix = pd.crosstab(y_test_01, pred_label,rownames=['Actual ALERT'],colnames = ['Predicted ALERT'], dropna=False).sort_index(axis=0).sort_index(axis=1)\n",
    "    all_unique_values = sorted(set(pred_label) | set(y_test_01))\n",
    "    z = np.zeros((len(all_unique_values), len(all_unique_values)))\n",
    "    rows, cols = confusion_matrix.shape\n",
    "    z[:rows, :cols] = confusion_matrix\n",
    "    confusion_matrix  = pd.DataFrame(z, columns=all_unique_values, index=all_unique_values)\n",
    "    # confusion_matrix.to_csv('Ensemble_conf_matrix.csv')\n",
    "    # with open(output_file_name, \"a\") as f:print(confusion_matrix,file=f)\n",
    "    print(confusion_matrix)\n",
    "    with open(output_file_name, \"a\") as f: print('Confusion Matrix', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print(confusion_matrix, file = f)\n",
    "\n",
    "\n",
    "    FP = confusion_matrix.sum(axis=0) - np.diag(confusion_matrix)\n",
    "    FN = confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)\n",
    "    TP = np.diag(confusion_matrix)\n",
    "    TN = confusion_matrix.values.sum() - (FP + FN + TP)\n",
    "    TP_total = sum(TP)\n",
    "    TN_total = sum(TN)\n",
    "    FP_total = sum(FP)\n",
    "    FN_total = sum(FN)\n",
    "\n",
    "    TP_total = np.array(TP_total,dtype=np.float64)\n",
    "    TN_total = np.array(TN_total,dtype=np.float64)\n",
    "    FP_total = np.array(FP_total,dtype=np.float64)\n",
    "    FN_total = np.array(FN_total,dtype=np.float64)\n",
    "\n",
    "\n",
    "\n",
    "    #----------------------------------------------------------------#----------------------------------------------------------------\n",
    "\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('METRICS')\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n",
    "    Acc = accuracy_score(y_test_01, pred_label)\n",
    "    Precision = precision_score(y_test_01, pred_label, average='macro')\n",
    "    Recall = recall_score(y_test_01, pred_label, average='macro')\n",
    "    F1 =  f1_score(y_test_01, pred_label, average='macro')\n",
    "    BACC = balanced_accuracy_score(y_test_01, pred_label)\n",
    "    MCC = matthews_corrcoef(y_test_01, pred_label)\n",
    "\n",
    "\n",
    "    # voting_acc_01 = Acc\n",
    "    # voting_pre_01 = Precision\n",
    "    # weighed_avg_rec_01 = Recall\n",
    "    # weighed_avg_f1_01 = F1\n",
    "    # weighed_avg_bacc_01 = BACC\n",
    "    # weighed_avg_mcc_01 = MCC\n",
    "    # with open(output_file_name, \"a\") as f:print('Accuracy total: ', Acc,file=f)\n",
    "    print('Accuracy total: ', Acc)\n",
    "    print('Precision total: ', Precision )\n",
    "    print('Recall total: ', Recall )\n",
    "    print('F1 total: ', F1 )\n",
    "    print('BACC total: ', BACC)\n",
    "    print('MCC total: ', MCC)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('Accuracy total: ', Acc, file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('Precision total: ', Precision, file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('Recall total: ', Recall , file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('F1 total: ', F1, file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('BACC total: ', BACC , file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('MCC total: ', MCC, file = f)\n",
    "\n",
    "    return Acc, Precision, Recall, F1, BACC, MCC\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "df_level_00_1=pd.read_csv('base_models_prob_feature_selection.csv')\n",
    "df_level_00_0=pd.read_csv('base_models_class_feature_selection.csv')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>dnn</th>\n",
       "      <th>rf</th>\n",
       "      <th>lgbm</th>\n",
       "      <th>ada</th>\n",
       "      <th>knn</th>\n",
       "      <th>mlp</th>\n",
       "      <th>svm</th>\n",
       "      <th>cat</th>\n",
       "      <th>xgb</th>\n",
       "      <th>lr</th>\n",
       "      <th>dt</th>\n",
       "      <th>label</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.993925</td>\n",
       "      <td>0.953462</td>\n",
       "      <td>0.999990</td>\n",
       "      <td>0.241649</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.872094</td>\n",
       "      <td>0.996421</td>\n",
       "      <td>0.993838</td>\n",
       "      <td>0.999991</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.968751</td>\n",
       "      <td>0.755381</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.229428</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000205</td>\n",
       "      <td>0.972762</td>\n",
       "      <td>0.987883</td>\n",
       "      <td>0.974823</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.980010</td>\n",
       "      <td>0.931826</td>\n",
       "      <td>0.999936</td>\n",
       "      <td>0.247061</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.999999</td>\n",
       "      <td>0.125468</td>\n",
       "      <td>0.990564</td>\n",
       "      <td>0.979741</td>\n",
       "      <td>0.996046</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0.698289</td>\n",
       "      <td>0.847230</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.242380</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.028396</td>\n",
       "      <td>0.988092</td>\n",
       "      <td>0.993120</td>\n",
       "      <td>0.966385</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.991085</td>\n",
       "      <td>0.966415</td>\n",
       "      <td>0.999992</td>\n",
       "      <td>0.255779</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.916044</td>\n",
       "      <td>0.996865</td>\n",
       "      <td>0.994128</td>\n",
       "      <td>0.999776</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44551</th>\n",
       "      <td>0.999963</td>\n",
       "      <td>0.883479</td>\n",
       "      <td>0.999994</td>\n",
       "      <td>0.259007</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.968749</td>\n",
       "      <td>0.994279</td>\n",
       "      <td>0.996916</td>\n",
       "      <td>0.999781</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44552</th>\n",
       "      <td>0.983328</td>\n",
       "      <td>0.983763</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.316873</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.009048</td>\n",
       "      <td>0.994402</td>\n",
       "      <td>0.997916</td>\n",
       "      <td>0.999954</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44553</th>\n",
       "      <td>0.984077</td>\n",
       "      <td>0.767309</td>\n",
       "      <td>0.999807</td>\n",
       "      <td>0.254683</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.944438</td>\n",
       "      <td>0.983802</td>\n",
       "      <td>0.976063</td>\n",
       "      <td>0.956723</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44554</th>\n",
       "      <td>0.343180</td>\n",
       "      <td>0.559281</td>\n",
       "      <td>0.999908</td>\n",
       "      <td>0.292847</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000832</td>\n",
       "      <td>0.962144</td>\n",
       "      <td>0.976610</td>\n",
       "      <td>0.999991</td>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44555</th>\n",
       "      <td>0.999987</td>\n",
       "      <td>0.657648</td>\n",
       "      <td>0.999899</td>\n",
       "      <td>0.237328</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.997933</td>\n",
       "      <td>0.977548</td>\n",
       "      <td>0.988660</td>\n",
       "      <td>0.999815</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>44556 rows × 12 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "            dnn        rf      lgbm       ada  knn       mlp       svm  \\\n",
       "0      0.993925  0.953462  0.999990  0.241649  1.0  1.000000  0.872094   \n",
       "1      0.968751  0.755381  1.000000  0.229428  1.0  1.000000  0.000205   \n",
       "2      0.980010  0.931826  0.999936  0.247061  1.0  0.999999  0.125468   \n",
       "3      0.698289  0.847230  1.000000  0.242380  1.0  1.000000  0.028396   \n",
       "4      0.991085  0.966415  0.999992  0.255779  1.0  1.000000  0.916044   \n",
       "...         ...       ...       ...       ...  ...       ...       ...   \n",
       "44551  0.999963  0.883479  0.999994  0.259007  1.0  1.000000  0.968749   \n",
       "44552  0.983328  0.983763  1.000000  0.316873  1.0  1.000000  0.009048   \n",
       "44553  0.984077  0.767309  0.999807  0.254683  1.0  1.000000  0.944438   \n",
       "44554  0.343180  0.559281  0.999908  0.292847  1.0  1.000000  0.000832   \n",
       "44555  0.999987  0.657648  0.999899  0.237328  1.0  1.000000  0.997933   \n",
       "\n",
       "            cat       xgb        lr   dt  label  \n",
       "0      0.996421  0.993838  0.999991  1.0    0.0  \n",
       "1      0.972762  0.987883  0.974823  1.0    1.0  \n",
       "2      0.990564  0.979741  0.996046  1.0    0.0  \n",
       "3      0.988092  0.993120  0.966385  1.0    1.0  \n",
       "4      0.996865  0.994128  0.999776  1.0    0.0  \n",
       "...         ...       ...       ...  ...    ...  \n",
       "44551  0.994279  0.996916  0.999781  1.0    0.0  \n",
       "44552  0.994402  0.997916  0.999954  1.0    1.0  \n",
       "44553  0.983802  0.976063  0.956723  1.0    0.0  \n",
       "44554  0.962144  0.976610  0.999991  1.0    2.0  \n",
       "44555  0.977548  0.988660  0.999815  1.0    0.0  \n",
       "\n",
       "[44556 rows x 12 columns]"
      ]
     },
     "execution_count": 81,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_level_00_1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {},
   "outputs": [],
   "source": [
    "y1 = df_level_00_1.pop('label')\n",
    "X1 = df_level_00_1\n",
    "df_level_00_1 = X1.assign(label = y1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [],
   "source": [
    "y0 = df_level_00_0.pop('label')\n",
    "X0 = df_level_00_0\n",
    "df_level_00_0 = X0.assign(label = y0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "metadata": {},
   "outputs": [],
   "source": [
    "if feature_selection_bit == 1:\n",
    "\n",
    "    from sklearn.feature_selection import mutual_info_classif\n",
    "    %matplotlib inline\n",
    "\n",
    "    # Compute information gain using mutual information\n",
    "    importances0 = mutual_info_classif(X0, y0)\n",
    "    importances1 = mutual_info_classif(X1, y1)\n",
    "\n",
    "\n",
    "    feat_importances0 = pd.Series(importances0, df_level_00_0.columns[0:len(df_level_00_0.columns)-1])\n",
    "    feat_importances1= pd.Series(importances1, df_level_00_1.columns[0:len(df_level_00_1.columns)-1])\n",
    "\n",
    "    # feat_importances.plot(kind='barh', color = 'teal')\n",
    "        \n",
    "    feat_importances_sorted0 = feat_importances0.sort_values( ascending=False)\n",
    "    feat_importances_sorted1 = feat_importances1.sort_values( ascending=False)\n",
    "\n",
    "\n",
    "    # Print or use the sorted DataFrame\n",
    "    print(feat_importances_sorted0)\n",
    "    print(feat_importances_sorted1)\n",
    "\n",
    "    # feat_importances_sorted.plot(kind='barh', color = 'teal')\n",
    "    # feat_importances_sorted\n",
    "    top_features0 = feat_importances_sorted0.nlargest(5)\n",
    "    top_features1 = feat_importances_sorted1.nlargest(5)\n",
    "\n",
    "    top_feature_names0 = top_features0.index.tolist()\n",
    "    top_feature_names1 = top_features1.index.tolist()\n",
    "\n",
    "\n",
    "    print(\"Top 5 feature names:\")\n",
    "    print(top_feature_names0)\n",
    "    print(top_feature_names1)\n",
    "\n",
    "    column_features0 = top_feature_names0\n",
    "    column_features1 = top_feature_names1\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Assuming df is your DataFrame\n",
    "# if feature_selection_bit == 1:\n",
    "#     df_level_00_1=pd.read_csv('base_models_prob_feature_selection.csv',names=column_features)\n",
    "#     df_level_00_0=pd.read_csv('base_models_class_feature_selection.csv',names=column_features)\n",
    "\n",
    "# if feature_selection_bit == 0:\n",
    "\n",
    "#     df_level_00_1=pd.read_csv('base_models_prob_feature_selection.csv')\n",
    "#     df_level_00_0=pd.read_csv('base_models_class_feature_selection.csv')\n",
    "\n",
    "# df_level_00_1=pd.read_csv('base_models_prob_feature_selection.csv')\n",
    "# df_level_00_0=pd.read_csv('base_models_class_feature_selection.csv')\n",
    "\n",
    "if feature_selection_bit == 1:\n",
    "    df_level_00_0 = df_level_00_0[column_features0]\n",
    "    df_level_00_1 = df_level_00_1[column_features1]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>dnn</th>\n",
       "      <th>rf</th>\n",
       "      <th>lgbm</th>\n",
       "      <th>ada</th>\n",
       "      <th>knn</th>\n",
       "      <th>mlp</th>\n",
       "      <th>svm</th>\n",
       "      <th>cat</th>\n",
       "      <th>xgb</th>\n",
       "      <th>lr</th>\n",
       "      <th>dt</th>\n",
       "      <th>label</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.993925</td>\n",
       "      <td>0.953462</td>\n",
       "      <td>0.999990</td>\n",
       "      <td>0.241649</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.872094</td>\n",
       "      <td>0.996421</td>\n",
       "      <td>0.993838</td>\n",
       "      <td>0.999991</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.968751</td>\n",
       "      <td>0.755381</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.229428</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000205</td>\n",
       "      <td>0.972762</td>\n",
       "      <td>0.987883</td>\n",
       "      <td>0.974823</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.980010</td>\n",
       "      <td>0.931826</td>\n",
       "      <td>0.999936</td>\n",
       "      <td>0.247061</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.999999</td>\n",
       "      <td>0.125468</td>\n",
       "      <td>0.990564</td>\n",
       "      <td>0.979741</td>\n",
       "      <td>0.996046</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0.698289</td>\n",
       "      <td>0.847230</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.242380</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.028396</td>\n",
       "      <td>0.988092</td>\n",
       "      <td>0.993120</td>\n",
       "      <td>0.966385</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.991085</td>\n",
       "      <td>0.966415</td>\n",
       "      <td>0.999992</td>\n",
       "      <td>0.255779</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.916044</td>\n",
       "      <td>0.996865</td>\n",
       "      <td>0.994128</td>\n",
       "      <td>0.999776</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44551</th>\n",
       "      <td>0.999963</td>\n",
       "      <td>0.883479</td>\n",
       "      <td>0.999994</td>\n",
       "      <td>0.259007</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.968749</td>\n",
       "      <td>0.994279</td>\n",
       "      <td>0.996916</td>\n",
       "      <td>0.999781</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44552</th>\n",
       "      <td>0.983328</td>\n",
       "      <td>0.983763</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.316873</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.009048</td>\n",
       "      <td>0.994402</td>\n",
       "      <td>0.997916</td>\n",
       "      <td>0.999954</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44553</th>\n",
       "      <td>0.984077</td>\n",
       "      <td>0.767309</td>\n",
       "      <td>0.999807</td>\n",
       "      <td>0.254683</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.944438</td>\n",
       "      <td>0.983802</td>\n",
       "      <td>0.976063</td>\n",
       "      <td>0.956723</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44554</th>\n",
       "      <td>0.343180</td>\n",
       "      <td>0.559281</td>\n",
       "      <td>0.999908</td>\n",
       "      <td>0.292847</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000832</td>\n",
       "      <td>0.962144</td>\n",
       "      <td>0.976610</td>\n",
       "      <td>0.999991</td>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44555</th>\n",
       "      <td>0.999987</td>\n",
       "      <td>0.657648</td>\n",
       "      <td>0.999899</td>\n",
       "      <td>0.237328</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.997933</td>\n",
       "      <td>0.977548</td>\n",
       "      <td>0.988660</td>\n",
       "      <td>0.999815</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>44556 rows × 12 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "            dnn        rf      lgbm       ada  knn       mlp       svm  \\\n",
       "0      0.993925  0.953462  0.999990  0.241649  1.0  1.000000  0.872094   \n",
       "1      0.968751  0.755381  1.000000  0.229428  1.0  1.000000  0.000205   \n",
       "2      0.980010  0.931826  0.999936  0.247061  1.0  0.999999  0.125468   \n",
       "3      0.698289  0.847230  1.000000  0.242380  1.0  1.000000  0.028396   \n",
       "4      0.991085  0.966415  0.999992  0.255779  1.0  1.000000  0.916044   \n",
       "...         ...       ...       ...       ...  ...       ...       ...   \n",
       "44551  0.999963  0.883479  0.999994  0.259007  1.0  1.000000  0.968749   \n",
       "44552  0.983328  0.983763  1.000000  0.316873  1.0  1.000000  0.009048   \n",
       "44553  0.984077  0.767309  0.999807  0.254683  1.0  1.000000  0.944438   \n",
       "44554  0.343180  0.559281  0.999908  0.292847  1.0  1.000000  0.000832   \n",
       "44555  0.999987  0.657648  0.999899  0.237328  1.0  1.000000  0.997933   \n",
       "\n",
       "            cat       xgb        lr   dt  label  \n",
       "0      0.996421  0.993838  0.999991  1.0    0.0  \n",
       "1      0.972762  0.987883  0.974823  1.0    1.0  \n",
       "2      0.990564  0.979741  0.996046  1.0    0.0  \n",
       "3      0.988092  0.993120  0.966385  1.0    1.0  \n",
       "4      0.996865  0.994128  0.999776  1.0    0.0  \n",
       "...         ...       ...       ...  ...    ...  \n",
       "44551  0.994279  0.996916  0.999781  1.0    0.0  \n",
       "44552  0.994402  0.997916  0.999954  1.0    1.0  \n",
       "44553  0.983802  0.976063  0.956723  1.0    0.0  \n",
       "44554  0.962144  0.976610  0.999991  1.0    2.0  \n",
       "44555  0.977548  0.988660  0.999815  1.0    0.0  \n",
       "\n",
       "[44556 rows x 12 columns]"
      ]
     },
     "execution_count": 86,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_level_00_1\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>dnn</th>\n",
       "      <th>rf</th>\n",
       "      <th>lgbm</th>\n",
       "      <th>ada</th>\n",
       "      <th>knn</th>\n",
       "      <th>mlp</th>\n",
       "      <th>svm</th>\n",
       "      <th>cat</th>\n",
       "      <th>xgb</th>\n",
       "      <th>lr</th>\n",
       "      <th>dt</th>\n",
       "      <th>label</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44551</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44552</th>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44553</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44554</th>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44555</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>44556 rows × 12 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "       dnn   rf  lgbm  ada  knn  mlp  svm  cat  xgb   lr   dt  label\n",
       "0      0.0  0.0   0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0    0.0\n",
       "1      1.0  1.0   1.0  2.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0    1.0\n",
       "2      0.0  0.0   0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0    0.0\n",
       "3      1.0  1.0   1.0  2.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0    1.0\n",
       "4      0.0  0.0   0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0    0.0\n",
       "...    ...  ...   ...  ...  ...  ...  ...  ...  ...  ...  ...    ...\n",
       "44551  0.0  0.0   0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0    0.0\n",
       "44552  1.0  1.0   1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0    1.0\n",
       "44553  0.0  0.0   0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0    0.0\n",
       "44554  1.0  2.0   2.0  2.0  2.0  2.0  2.0  2.0  2.0  2.0  2.0    2.0\n",
       "44555  0.0  0.0   0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0    0.0\n",
       "\n",
       "[44556 rows x 12 columns]"
      ]
     },
     "execution_count": 87,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_level_00_0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "metadata": {},
   "outputs": [],
   "source": [
    "# df_level_01"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "metadata": {},
   "outputs": [],
   "source": [
    "if pick_prob == 1:\n",
    "    df_level_01 = df_level_00_1\n",
    "else: \n",
    "    df_level_01 = df_level_00_0\n",
    "\n",
    "df_level_01 = df_level_01.assign(label = y1)\n",
    "\n",
    "y_01 = df_level_01.pop('label') \n",
    "    \n",
    "X_01 = df_level_01\n",
    "df_level_01 = df_level_01.assign(label = y_01)\n",
    "\n",
    "\n",
    "split = 0.7\n",
    "X_train_01,X_test_01, y_train_01, y_test_01 = sklearn.model_selection.train_test_split(X_01, y_01, train_size=split)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "# df_level_02 = pd.read_csv('base_models_class_feature_selection.csv')\n",
    "\n",
    "# df_level_02\n",
    "\n",
    "# y_02 = df_level_02.pop('label')\n",
    "# X_02 = df_level_02\n",
    "# df_level_02 = df_level_02.assign(label = y_01)\n",
    "\n",
    "\n",
    "# split = 0.7\n",
    "# X_train_02,X_test_02, y_train_02, y_test_02 = sklearn.model_selection.train_test_split(X_02, y_02, train_size=split)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Training the stronger model - STACK level 01"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "metadata": {},
   "outputs": [],
   "source": [
    "#----------------------------------------------------------------\n",
    "with open(output_file_name, \"a\") as f: print('Stack model - Strong learner - level 01', file = f)\n",
    "with open(output_file_name, \"a\") as f: print('-------------------------------------------------------', file = f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>dnn</th>\n",
       "      <th>rf</th>\n",
       "      <th>lgbm</th>\n",
       "      <th>ada</th>\n",
       "      <th>knn</th>\n",
       "      <th>mlp</th>\n",
       "      <th>svm</th>\n",
       "      <th>cat</th>\n",
       "      <th>xgb</th>\n",
       "      <th>lr</th>\n",
       "      <th>dt</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>8862</th>\n",
       "      <td>0.976060</td>\n",
       "      <td>0.946073</td>\n",
       "      <td>0.999992</td>\n",
       "      <td>0.257206</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.999801</td>\n",
       "      <td>0.911599</td>\n",
       "      <td>0.996007</td>\n",
       "      <td>0.992883</td>\n",
       "      <td>0.993596</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14341</th>\n",
       "      <td>0.999962</td>\n",
       "      <td>0.919764</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.259007</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.876629</td>\n",
       "      <td>0.993732</td>\n",
       "      <td>0.995437</td>\n",
       "      <td>0.999919</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25787</th>\n",
       "      <td>0.996377</td>\n",
       "      <td>0.994399</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.304599</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.815440</td>\n",
       "      <td>0.997242</td>\n",
       "      <td>0.998120</td>\n",
       "      <td>0.999406</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9267</th>\n",
       "      <td>0.968506</td>\n",
       "      <td>0.983763</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.293912</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.007044</td>\n",
       "      <td>0.995750</td>\n",
       "      <td>0.997724</td>\n",
       "      <td>0.999973</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>32801</th>\n",
       "      <td>0.907357</td>\n",
       "      <td>0.460995</td>\n",
       "      <td>0.997817</td>\n",
       "      <td>0.268321</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.999913</td>\n",
       "      <td>0.703217</td>\n",
       "      <td>0.816847</td>\n",
       "      <td>0.649103</td>\n",
       "      <td>0.914879</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>26242</th>\n",
       "      <td>0.969311</td>\n",
       "      <td>0.814978</td>\n",
       "      <td>0.999980</td>\n",
       "      <td>0.238667</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.469229</td>\n",
       "      <td>0.989234</td>\n",
       "      <td>0.987848</td>\n",
       "      <td>0.999842</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>23205</th>\n",
       "      <td>0.980311</td>\n",
       "      <td>0.994399</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.328195</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000070</td>\n",
       "      <td>0.997407</td>\n",
       "      <td>0.998120</td>\n",
       "      <td>0.999989</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>24266</th>\n",
       "      <td>0.978855</td>\n",
       "      <td>0.716758</td>\n",
       "      <td>0.999814</td>\n",
       "      <td>0.229870</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.986135</td>\n",
       "      <td>0.937242</td>\n",
       "      <td>0.965306</td>\n",
       "      <td>0.999780</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12025</th>\n",
       "      <td>0.282879</td>\n",
       "      <td>0.704289</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.273797</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.131946</td>\n",
       "      <td>0.982226</td>\n",
       "      <td>0.990095</td>\n",
       "      <td>0.968952</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6009</th>\n",
       "      <td>0.976823</td>\n",
       "      <td>0.847230</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.242380</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.819126</td>\n",
       "      <td>0.988092</td>\n",
       "      <td>0.993120</td>\n",
       "      <td>0.989770</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>13367 rows × 11 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "            dnn        rf      lgbm       ada  knn       mlp       svm  \\\n",
       "8862   0.976060  0.946073  0.999992  0.257206  1.0  0.999801  0.911599   \n",
       "14341  0.999962  0.919764  1.000000  0.259007  1.0  1.000000  0.876629   \n",
       "25787  0.996377  0.994399  1.000000  0.304599  1.0  1.000000  0.815440   \n",
       "9267   0.968506  0.983763  1.000000  0.293912  1.0  1.000000  0.007044   \n",
       "32801  0.907357  0.460995  0.997817  0.268321  1.0  0.999913  0.703217   \n",
       "...         ...       ...       ...       ...  ...       ...       ...   \n",
       "26242  0.969311  0.814978  0.999980  0.238667  1.0  1.000000  0.469229   \n",
       "23205  0.980311  0.994399  1.000000  0.328195  1.0  1.000000  0.000070   \n",
       "24266  0.978855  0.716758  0.999814  0.229870  1.0  1.000000  0.986135   \n",
       "12025  0.282879  0.704289  1.000000  0.273797  1.0  1.000000  0.131946   \n",
       "6009   0.976823  0.847230  1.000000  0.242380  1.0  1.000000  0.819126   \n",
       "\n",
       "            cat       xgb        lr   dt  \n",
       "8862   0.996007  0.992883  0.993596  1.0  \n",
       "14341  0.993732  0.995437  0.999919  1.0  \n",
       "25787  0.997242  0.998120  0.999406  1.0  \n",
       "9267   0.995750  0.997724  0.999973  1.0  \n",
       "32801  0.816847  0.649103  0.914879  1.0  \n",
       "...         ...       ...       ...  ...  \n",
       "26242  0.989234  0.987848  0.999842  1.0  \n",
       "23205  0.997407  0.998120  0.999989  1.0  \n",
       "24266  0.937242  0.965306  0.999780  1.0  \n",
       "12025  0.982226  0.990095  0.968952  1.0  \n",
       "6009   0.988092  0.993120  0.989770  1.0  \n",
       "\n",
       "[13367 rows x 11 columns]"
      ]
     },
     "execution_count": 92,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_test_01"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Decision tree"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 93,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6898.0    46.0    18.0    3.0  0.0\n",
      "1.0    43.0  4762.0     9.0    0.0  0.0\n",
      "2.0     9.0    18.0  1231.0    4.0  1.0\n",
      "3.0     2.0     2.0     4.0  307.0  2.0\n",
      "4.0     1.0     0.0     0.0    2.0  5.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9877309792773248\n",
      "Precision total:  0.9100748584210173\n",
      "Recall total:  0.9095392808150187\n",
      "F1 total:  0.9098059910093065\n",
      "BACC total:  0.9095392808150187\n",
      "MCC total:  0.9791856561523012\n",
      "0.25205564498901367\n"
     ]
    }
   ],
   "source": [
    "\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "start = time.time()\n",
    "\n",
    "# Create a Decision Tree Classifier\n",
    "dt_classifier = DecisionTreeClassifier(random_state=42)\n",
    "# Train the classifier on the training data\n",
    "dt_classifier.fit(X_train_01, y_train_01)\n",
    "# Make predictions on the test data\n",
    "preds_dt = dt_classifier.predict(X_test_01)\n",
    "# Evaluate the accuracy of the model\n",
    "preds_dt_prob = dt_classifier.predict_proba(X_test_01)\n",
    "\n",
    "\n",
    "pred_label = preds_dt\n",
    "name = 'dt'\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "globals()[f\"{name}_acc_00\"] = Acc\n",
    "globals()[f\"{name}_pre_00\"] = Precision\n",
    "globals()[f\"{name}_rec_00\"] = Recall\n",
    "globals()[f\"{name}_f1_00\"] = F1\n",
    "globals()[f\"{name}_bacc_00\"] = BACC\n",
    "globals()[f\"{name}_mcc_00\"] = MCC\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "globals()[f\"{name}_time_00\"] = time_taken\n",
    "print(time_taken)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Voting"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "metadata": {},
   "outputs": [],
   "source": [
    "start = time.time()\n",
    "    \n",
    "if pick_prob == 0:\n",
    "    # Voting start\n",
    "\n",
    "    import pandas as pd\n",
    "    from scipy.stats import mode\n",
    "\n",
    "    # Assuming 'df' is your original DataFrame with columns 'dnn', 'rf', 'lgbm', 'ada', 'knn', 'mlp', 'svm', 'label'\n",
    "    df = X_test_01\n",
    "    # Extract predictions columns\n",
    "    \n",
    "    # predictions = df[['dnn', 'rf', 'lgbm', 'ada', 'knn', 'mlp', 'svm','cat','xgb']]\n",
    "        # selected_columns = df.loc[:, ~df.columns.isin(['rf'])]\n",
    "    predictions = df.loc[:, ~df.columns.isin(['label'])] #df[column_features]\n",
    "\n",
    "    # Use the mode function along axis 1 to get the most common prediction for each row\n",
    "    ensemble_predictions, _ = mode(predictions.values, axis=1)\n",
    "\n",
    "    # Add the ensemble predictions to the DataFrame\n",
    "    df['ensemble'] = ensemble_predictions.astype(int)\n",
    "\n",
    "    # Display the DataFrame with ensemble predictions\n",
    "    print(df)\n",
    "\n",
    "    pred_label = df ['ensemble'].values\n",
    "    df.pop('ensemble')\n",
    "\n",
    "    #testing metrics def\n",
    "    name = 'voting'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "\n",
    "    globals()[f\"{name}_acc_01\"] = Acc\n",
    "    globals()[f\"{name}_pre_01\"] = Precision\n",
    "    globals()[f\"{name}_rec_01\"] = Recall\n",
    "    globals()[f\"{name}_f1_01\"] = F1\n",
    "    globals()[f\"{name}_bacc_01\"] = BACC\n",
    "    globals()[f\"{name}_mcc_01\"] = MCC\n",
    "    globals()[f\"{name}_time_01\"] = time_taken\n",
    "   \n",
    "else:\n",
    "    name = 'voting'\n",
    "    globals()[f\"{name}_acc_01\"] = 0\n",
    "    globals()[f\"{name}_pre_01\"] = 0\n",
    "    globals()[f\"{name}_rec_01\"] = 0\n",
    "    globals()[f\"{name}_f1_01\"] = 0\n",
    "    globals()[f\"{name}_bacc_01\"] = 0\n",
    "    globals()[f\"{name}_mcc_01\"] = 0\n",
    "    globals()[f\"{name}_time_01\"] = 9999\n",
    "   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 95,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "voting_acc_01\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Average"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "            dnn        rf      lgbm       ada  knn       mlp       svm  \\\n",
      "8862   0.976060  0.946073  0.999992  0.257206  1.0  0.999801  0.911599   \n",
      "14341  0.999962  0.919764  1.000000  0.259007  1.0  1.000000  0.876629   \n",
      "25787  0.996377  0.994399  1.000000  0.304599  1.0  1.000000  0.815440   \n",
      "9267   0.968506  0.983763  1.000000  0.293912  1.0  1.000000  0.007044   \n",
      "32801  0.907357  0.460995  0.997817  0.268321  1.0  0.999913  0.703217   \n",
      "...         ...       ...       ...       ...  ...       ...       ...   \n",
      "26242  0.969311  0.814978  0.999980  0.238667  1.0  1.000000  0.469229   \n",
      "23205  0.980311  0.994399  1.000000  0.328195  1.0  1.000000  0.000070   \n",
      "24266  0.978855  0.716758  0.999814  0.229870  1.0  1.000000  0.986135   \n",
      "12025  0.282879  0.704289  1.000000  0.273797  1.0  1.000000  0.131946   \n",
      "6009   0.976823  0.847230  1.000000  0.242380  1.0  1.000000  0.819126   \n",
      "\n",
      "            cat       xgb        lr   dt  results  \n",
      "8862   0.996007  0.992883  0.993596  1.0        1  \n",
      "14341  0.993732  0.995437  0.999919  1.0        1  \n",
      "25787  0.997242  0.998120  0.999406  1.0        1  \n",
      "9267   0.995750  0.997724  0.999973  1.0        1  \n",
      "32801  0.816847  0.649103  0.914879  1.0        1  \n",
      "...         ...       ...       ...  ...      ...  \n",
      "26242  0.989234  0.987848  0.999842  1.0        1  \n",
      "23205  0.997407  0.998120  0.999989  1.0        1  \n",
      "24266  0.937242  0.965306  0.999780  1.0        1  \n",
      "12025  0.982226  0.990095  0.968952  1.0        1  \n",
      "6009   0.988092  0.993120  0.989770  1.0        1  \n",
      "\n",
      "[13367 rows x 12 columns]\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "      0.0     1.0  2.0  3.0  4.0\n",
      "0.0  41.0  6924.0  0.0  0.0  0.0\n",
      "1.0  11.0  4803.0  0.0  0.0  0.0\n",
      "2.0  26.0  1237.0  0.0  0.0  0.0\n",
      "3.0  35.0   282.0  0.0  0.0  0.0\n",
      "4.0   6.0     2.0  0.0  0.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.36238497793072494\n",
      "Precision total:  0.14141662099622457\n",
      "Recall total:  0.20072031473170945\n",
      "F1 total:  0.10868203448301947\n",
      "BACC total:  0.20072031473170945\n",
      "MCC total:  0.007960181525447982\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n"
     ]
    }
   ],
   "source": [
    "start = time.time()\n",
    "\n",
    "# if pick_prob == 0:\n",
    "if 0 == 0:\n",
    "    # Average start\n",
    "\n",
    "    import pandas as pd\n",
    "    from scipy.stats import mode\n",
    "\n",
    "    # Assuming 'df' is your original DataFrame with columns 'dnn', 'rf', 'lgbm', 'ada', 'knn', 'mlp', 'svm', 'label'\n",
    "    df = X_test_01\n",
    "    predictions = df.loc[:, ~df.columns.isin(['label'])] #df[column_features]\n",
    "\n",
    "   \n",
    "\n",
    "    column_sums = df.sum(axis=1)\n",
    "    row_average = df.mean(axis=1)\n",
    "\n",
    "    # Approximate the result to the closest integer\n",
    "    rounded_average = row_average.round().astype(int)\n",
    "\n",
    "    # print(rounded_average)\n",
    "\n",
    "    df['results'] = rounded_average\n",
    "    print(df)\n",
    " \n",
    "    pred_label = df ['results'].values\n",
    "\n",
    "    # pred_label = df ['ensemble'].values\n",
    "    # df.pop('ensemble')\n",
    "    df.pop('results')\n",
    "\n",
    "    # df.pop('column_sums')\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    name = 'avg'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    \n",
    "    globals()[f\"{name}_acc_01\"] = Acc\n",
    "    globals()[f\"{name}_pre_01\"] = Precision\n",
    "    globals()[f\"{name}_rec_01\"] = Recall\n",
    "    globals()[f\"{name}_f1_01\"] = F1\n",
    "    globals()[f\"{name}_bacc_01\"] = BACC\n",
    "    globals()[f\"{name}_mcc_01\"] = MCC\n",
    "    globals()[f\"{name}_time_01\"] = time_taken\n",
    "\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Weighed Average"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "metadata": {},
   "outputs": [],
   "source": [
    "# column_features\n",
    "# move this up with column_features\n",
    "\n",
    "#important update this as you need to select the important features,\n",
    "#  the left is the least important while the right is the most important\n",
    "# needs automation\n",
    "if pick_prob == 1:\n",
    "    column_features = column_features1\n",
    "else: column_features = column_features0\n",
    "feature_selection_columns_in_order_of_importance = column_features[:-1]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.0, 0.3333333333333333, 0.6666666666666666, 1.0]\n",
      "[0.0, 0.3333333333333333, 0.6666666666666666, 1.0]\n",
      "             rf       ada       xgb       cat\n",
      "8862   0.946073  0.257206  0.992883  0.996007\n",
      "14341  0.919764  0.259007  0.995437  0.993732\n",
      "25787  0.994399  0.304599  0.998120  0.997242\n",
      "9267   0.983763  0.293912  0.997724  0.995750\n",
      "32801  0.460995  0.268321  0.649103  0.816847\n",
      "...         ...       ...       ...       ...\n",
      "26242  0.814978  0.238667  0.987848  0.989234\n",
      "23205  0.994399  0.328195  0.998120  0.997407\n",
      "24266  0.716758  0.229870  0.965306  0.937242\n",
      "12025  0.704289  0.273797  0.990095  0.982226\n",
      "6009   0.847230  0.242380  0.993120  0.988092\n",
      "\n",
      "[13367 rows x 4 columns]\n",
      "8862     0.871832\n",
      "14341    0.871846\n",
      "25787    0.882094\n",
      "9267     0.879435\n",
      "32801    0.669511\n",
      "           ...   \n",
      "26242    0.863677\n",
      "23205    0.886109\n",
      "24266    0.828701\n",
      "12025    0.866777\n",
      "6009     0.865482\n",
      "Length: 13367, dtype: float64\n",
      "8862     1\n",
      "14341    1\n",
      "25787    1\n",
      "9267     1\n",
      "32801    1\n",
      "        ..\n",
      "26242    1\n",
      "23205    1\n",
      "24266    1\n",
      "12025    1\n",
      "6009     1\n",
      "Length: 13367, dtype: int64\n",
      "             rf       ada       xgb       cat  results\n",
      "8862   0.946073  0.257206  0.992883  0.996007        1\n",
      "14341  0.919764  0.259007  0.995437  0.993732        1\n",
      "25787  0.994399  0.304599  0.998120  0.997242        1\n",
      "9267   0.983763  0.293912  0.997724  0.995750        1\n",
      "32801  0.460995  0.268321  0.649103  0.816847        1\n",
      "...         ...       ...       ...       ...      ...\n",
      "26242  0.814978  0.238667  0.987848  0.989234        1\n",
      "23205  0.994399  0.328195  0.998120  0.997407        1\n",
      "24266  0.716758  0.229870  0.965306  0.937242        1\n",
      "12025  0.704289  0.273797  0.990095  0.982226        1\n",
      "6009   0.847230  0.242380  0.993120  0.988092        1\n",
      "\n",
      "[13367 rows x 5 columns]\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "      0.0     1.0  2.0  3.0  4.0\n",
      "0.0  28.0  6937.0  0.0  0.0  0.0\n",
      "1.0  14.0  4800.0  0.0  0.0  0.0\n",
      "2.0  23.0  1240.0  0.0  0.0  0.0\n",
      "3.0  16.0   301.0  0.0  0.0  0.0\n",
      "4.0   3.0     5.0  0.0  0.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.3611880002992444\n",
      "Precision total:  0.1389394965996637\n",
      "Recall total:  0.20022238320810531\n",
      "F1 total:  0.10768381071681518\n",
      "BACC total:  0.20022238320810531\n",
      "MCC total:  0.0004210387409554317\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n"
     ]
    }
   ],
   "source": [
    "start = time.time()\n",
    "\n",
    "# if pick_prob == 0:\n",
    "if 0 == 0:\n",
    "    # Average start\n",
    "\n",
    "    import pandas as pd\n",
    "    from scipy.stats import mode\n",
    "\n",
    "    # Assuming 'df' is your original DataFrame with columns 'dnn', 'rf', 'lgbm', 'ada', 'knn', 'mlp', 'svm', 'label'\n",
    "    # df = X_test_01\n",
    "    df = X_test_01[feature_selection_columns_in_order_of_importance]\n",
    "    # Extract predictions columns\n",
    "    \n",
    "    # predictions = df[['dnn', 'rf', 'lgbm', 'ada', 'knn', 'mlp', 'svm','cat','xgb']]\n",
    "        # selected_columns = df.loc[:, ~df.columns.isin(['rf'])]\n",
    "    predictions = df.loc[:, ~df.columns.isin(['label'])] #df[column_features]\n",
    "\n",
    "    # weight\n",
    "    weights_values = []\n",
    "\n",
    "    # linear weight distribution\n",
    "    for i in range(0,len(~df.columns.isin(['label']))):\n",
    "        weights_values.append(i/(len(~df.columns.isin(['label']))-1))\n",
    "    print(weights_values)\n",
    "    # weights_values = [10,3,2,2.3]\n",
    "    print(weights_values)\n",
    "    print(df)\n",
    "    weighted_average = df.multiply(weights_values).sum(axis=1) / sum(weights_values)\n",
    "    print(weighted_average)\n",
    "    # Approximate the result to the closest integer\n",
    "    rounded_weighted_average = weighted_average.round().astype(int)\n",
    "\n",
    "    print(rounded_weighted_average)\n",
    "\n",
    "    # print(rounded_average)\n",
    "\n",
    "    df['results'] = rounded_weighted_average\n",
    "    print(df)\n",
    " \n",
    "    pred_label = df ['results'].values\n",
    "\n",
    "    # pred_label = df ['ensemble'].values\n",
    "    # df.pop('ensemble')\n",
    "    df.pop('results')\n",
    "\n",
    "    # df.pop('column_sums')\n",
    "\n",
    "    #testing metrics def\n",
    "    name = 'weighed_avg'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    globals()[f\"{name}_acc_01\"] = Acc\n",
    "    globals()[f\"{name}_pre_01\"] = Precision\n",
    "    globals()[f\"{name}_rec_01\"] = Recall\n",
    "    globals()[f\"{name}_f1_01\"] = F1\n",
    "    globals()[f\"{name}_bacc_01\"] = BACC\n",
    "    globals()[f\"{name}_mcc_01\"] = MCC\n",
    "    globals()[f\"{name}_time_01\"] = time_taken\n",
    "    \n",
    "    \n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## bagging  with DT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6923.0    29.0     9.0    4.0  0.0\n",
      "1.0    43.0  4760.0    10.0    1.0  0.0\n",
      "2.0     8.0    12.0  1241.0    2.0  0.0\n",
      "3.0     3.0     0.0     4.0  309.0  1.0\n",
      "4.0     0.0     0.0     1.0    1.0  6.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9904241789481559\n",
      "Precision total:  0.9593308656666544\n",
      "Recall total:  0.9380194258478645\n",
      "F1 total:  0.9479602279983268\n",
      "BACC total:  0.9380194258478645\n",
      "MCC total:  0.9837485468301952\n"
     ]
    }
   ],
   "source": [
    "from sklearn.ensemble import BaggingClassifier\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "\n",
    "start = time.time()\n",
    "base_classifier = DecisionTreeClassifier(random_state=42)\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train_01, y_train_01)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test_01)\n",
    "\n",
    "# Evaluate accuracy\n",
    "# accuracy = accuracy_score(y_test_01, y_pred)\n",
    "# print(f'Accuracy: {accuracy}')\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_dt'\n",
    "pred_label = y_pred\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## bagging  with SVM\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 100,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6665.0   188.0   110.0    2.0  0.0\n",
      "1.0  1036.0  3471.0   243.0   64.0  0.0\n",
      "2.0    38.0     4.0  1151.0   70.0  0.0\n",
      "3.0    10.0     0.0   119.0  188.0  0.0\n",
      "4.0     1.0     0.0     0.0    7.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.8584573950774295\n",
      "Precision total:  0.6169480616565182\n",
      "Recall total:  0.6364663398499582\n",
      "F1 total:  0.6205381710024421\n",
      "BACC total:  0.6364663398499582\n",
      "MCC total:  0.7665685362892327\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n"
     ]
    }
   ],
   "source": [
    "## bagging  with SVM\n",
    "from sklearn.ensemble import BaggingClassifier\n",
    "from sklearn.linear_model import SGDClassifier\n",
    "\n",
    "start = time.time()\n",
    "\n",
    "# Instantiate the SGDClassifier with additional hyperparameters\n",
    "svm_01 = SGDClassifier(\n",
    "    loss='hinge',           # hinge loss for linear SVM\n",
    "    penalty='l2',           # L2 regularization to prevent overfitting\n",
    "    alpha=1e-4,             # Learning rate (small value for fine-grained updates)\n",
    "    max_iter=1000,          # Number of passes over the training data\n",
    "    random_state=42,        # Seed for reproducible results\n",
    "    learning_rate='optimal' # Automatically adjusts the learning rate based on the training data\n",
    ")\n",
    "\n",
    "# # Define the base classifier (Decision Tree in this case)\n",
    "base_classifier = svm_01\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train_01, y_train_01)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test_01)\n",
    "\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_svm'\n",
    "pred_label = y_pred\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## bagging with DNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from tensorflow.keras.models import Sequential\n",
    "# from tensorflow.keras.layers import Dense\n",
    "\n",
    "# #Model Parameters\n",
    "# dropout_rate = 0.2\n",
    "# nodes = 3\n",
    "# out_layer = 5\n",
    "# optimizer='adam'\n",
    "# loss='sparse_categorical_crossentropy'\n",
    "# epochs=100\n",
    "# batch_size=128\n",
    "\n",
    "\n",
    "# num_columns = X_train_01.shape[1]\n",
    "\n",
    "# dnn_01 = tf.keras.Sequential()\n",
    "\n",
    "# # Input layer\n",
    "# dnn_01.add(tf.keras.Input(shape=(num_columns,)))\n",
    "\n",
    "# # Dense layers with dropout\n",
    "# dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "# dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "# dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "# dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "# dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "# dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "# dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "# dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "# dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "# dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "# # Output layer\n",
    "# # dnn_01.add(tf.keras.layers.Dense(out_layer))\n",
    "\n",
    "# dnn_01.add(tf.keras.layers.Dense(out_layer, activation='softmax'))\n",
    "\n",
    "\n",
    "# dnn_01.compile(optimizer=optimizer, loss=loss,metrics=['accuracy'])\n",
    "\n",
    "# base_classifier = dnn_01\n",
    "\n",
    "# # Define the BaggingClassifier\n",
    "# bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# # Train the BaggingClassifier\n",
    "# bagging_classifier.fit(X_train_01, y_train_01)\n",
    "\n",
    "# # Make predictions on the test set\n",
    "# y_pred = bagging_classifier.predict(X_test_01)\n",
    "\n",
    "# # Evaluate accuracy\n",
    "# # accuracy = accuracy_score(y_test_01, y_pred)\n",
    "# # print(f'Accuracy: {accuracy}')\n",
    "\n",
    "# with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "# with open(output_file_name, \"a\") as f: print('Bagging with DNN', file = f)\n",
    "\n",
    "\n",
    "# print('---------------------------------------------------------------------------------')\n",
    "# print('CONFUSION MATRIX')\n",
    "# print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n",
    "# pred_label = y_pred\n",
    "\n",
    "# confusion_matrix = pd.crosstab(y_test_01, pred_label,rownames=['Actual ALERT'],colnames = ['Predicted ALERT'], dropna=False).sort_index(axis=0).sort_index(axis=1)\n",
    "# all_unique_values = sorted(set(pred_label) | set(y_test_01))\n",
    "# z = np.zeros((len(all_unique_values), len(all_unique_values)))\n",
    "# rows, cols = confusion_matrix.shape\n",
    "# z[:rows, :cols] = confusion_matrix\n",
    "# confusion_matrix  = pd.DataFrame(z, columns=all_unique_values, index=all_unique_values)\n",
    "# # confusion_matrix.to_csv('Ensemble_conf_matrix.csv')\n",
    "# # with open(output_file_name, \"a\") as f:print(confusion_matrix,file=f)\n",
    "# print(confusion_matrix)\n",
    "# with open(output_file_name, \"a\") as f: print('Confusion Matrix', file = f)\n",
    "\n",
    "# with open(output_file_name, \"a\") as f: print(confusion_matrix, file = f)\n",
    "\n",
    "\n",
    "# FP = confusion_matrix.sum(axis=0) - np.diag(confusion_matrix)\n",
    "# FN = confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)\n",
    "# TP = np.diag(confusion_matrix)\n",
    "# TN = confusion_matrix.values.sum() - (FP + FN + TP)\n",
    "# TP_total = sum(TP)\n",
    "# TN_total = sum(TN)\n",
    "# FP_total = sum(FP)\n",
    "# FN_total = sum(FN)\n",
    "\n",
    "# TP_total = np.array(TP_total,dtype=np.float64)\n",
    "# TN_total = np.array(TN_total,dtype=np.float64)\n",
    "# FP_total = np.array(FP_total,dtype=np.float64)\n",
    "# FN_total = np.array(FN_total,dtype=np.float64)\n",
    "\n",
    "\n",
    "\n",
    "# #----------------------------------------------------------------#----------------------------------------------------------------\n",
    "\n",
    "# print('---------------------------------------------------------------------------------')\n",
    "# print('METRICS')\n",
    "# print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n",
    "# Acc = accuracy_score(y_test_01, pred_label)\n",
    "# Precision = precision_score(y_test_01, pred_label, average='macro')\n",
    "# Recall = recall_score(y_test_01, pred_label, average='macro')\n",
    "# F1 =  f1_score(y_test_01, pred_label, average='macro')\n",
    "# BACC = balanced_accuracy_score(y_test_01, pred_label)\n",
    "# MCC = matthews_corrcoef(y_test_01, pred_label)\n",
    "\n",
    "\n",
    "# bag_dnn_acc_01 = Acc\n",
    "# bag_dnn_pre_01 = Precision\n",
    "# bag_dnn_rec_01 = Recall\n",
    "# bag_dnn_f1_01 = F1\n",
    "# bag_dnn_bacc_01 = BACC\n",
    "# bag_dnn_mcc_01 = MCC\n",
    "# # with open(output_file_name, \"a\") as f:print('Accuracy total: ', Acc,file=f)\n",
    "# print('Accuracy total: ', Acc)\n",
    "# print('Precision total: ', Precision )\n",
    "# print('Recall total: ', Recall )\n",
    "# print('F1 total: ', F1 )\n",
    "# print('BACC total: ', BACC)\n",
    "# print('MCC total: ', MCC)\n",
    "\n",
    "# with open(output_file_name, \"a\") as f: print('Accuracy total: ', Acc, file = f)\n",
    "# with open(output_file_name, \"a\") as f: print('Precision total: ', Precision, file = f)\n",
    "# with open(output_file_name, \"a\") as f: print('Recall total: ', Recall , file = f)\n",
    "# with open(output_file_name, \"a\") as f: print('F1 total: ', F1, file = f)\n",
    "# with open(output_file_name, \"a\") as f: print('BACC total: ', BACC , file = f)\n",
    "# with open(output_file_name, \"a\") as f: print('MCC total: ', MCC, file = f)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## bagging with MLP"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n",
      "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n",
      "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n",
      "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n",
      "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n",
      "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n",
      "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n",
      "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n",
      "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n",
      "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6787.0   156.0    17.0    5.0  0.0\n",
      "1.0   345.0  4427.0    30.0   12.0  0.0\n",
      "2.0    18.0    13.0  1187.0   45.0  0.0\n",
      "3.0     0.0    11.0    18.0  287.0  1.0\n",
      "4.0     0.0     0.0     0.0    4.0  4.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9495025061719159\n",
      "Precision total:  0.8942548037702851\n",
      "Recall total:  0.8478483413525437\n",
      "F1 total:  0.8635049402797026\n",
      "BACC total:  0.8478483413525437\n",
      "MCC total:  0.914420172516734\n"
     ]
    }
   ],
   "source": [
    "from sklearn.neural_network import MLPClassifier\n",
    "start = time.time()\n",
    "\n",
    "# create MLPClassifier instance\n",
    "mlp_01 = MLPClassifier(hidden_layer_sizes=(100,), max_iter=200, random_state=1)\n",
    "\n",
    "base_classifier = mlp_01\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train_01, y_train_01)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test_01)\n",
    "\n",
    "# Evaluate accuracy\n",
    "# accuracy = accuracy_score(y_test_01, y_pred)\n",
    "# print(f'Accuracy: {accuracy}')\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_mlp'\n",
    "pred_label = y_pred\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## bagging knn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6791.0   137.0    26.0   11.0  0.0\n",
      "1.0   130.0  4661.0    16.0    7.0  0.0\n",
      "2.0     8.0    11.0  1214.0   30.0  0.0\n",
      "3.0     0.0     3.0    15.0  298.0  1.0\n",
      "4.0     0.0     0.0     2.0    6.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9698511259070847\n",
      "Precision total:  0.749789504063456\n",
      "Recall total:  0.7689004441016847\n",
      "F1 total:  0.7588516583596314\n",
      "BACC total:  0.7689004441016847\n",
      "MCC total:  0.9489691177041348\n"
     ]
    }
   ],
   "source": [
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "knn_01=KNeighborsClassifier(n_neighbors = 5)\n",
    "start = time.time()\n",
    "\n",
    "base_classifier = knn_01\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train_01, y_train_01)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test_01)\n",
    "\n",
    "# Evaluate accuracy\n",
    "# accuracy = accuracy_score(y_test_01, y_pred)\n",
    "# print(f'Accuracy: {accuracy}')\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_knn'\n",
    "\n",
    "pred_label = y_pred\n",
    "\n",
    "\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## bagging LogRegression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 104,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining baggin Logistic Regression Model\n",
      "---------------------------------------------------------------------------------\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6643.0   252.0    67.0    3.0  0.0\n",
      "1.0   714.0  3925.0   113.0   62.0  0.0\n",
      "2.0    23.0    76.0  1107.0   56.0  1.0\n",
      "3.0     3.0     3.0    84.0  227.0  0.0\n",
      "4.0     1.0     0.0     0.0    7.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.8904017356175656\n",
      "Precision total:  0.6537503551193142\n",
      "Recall total:  0.6723344039061567\n",
      "F1 total:  0.6615101492062252\n",
      "BACC total:  0.6723344039061567\n",
      "MCC total:  0.8153046244972549\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n"
     ]
    }
   ],
   "source": [
    "from sklearn.linear_model import LogisticRegression\n",
    "start = time.time()\n",
    "\n",
    "#Logistic Regression\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining baggin Logistic Regression Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "logreg_01 = LogisticRegression()\n",
    "\n",
    "\n",
    "base_classifier = logreg_01\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train_01, y_train_01)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test_01)\n",
    "\n",
    "# Evaluate accuracy\n",
    "# accuracy = accuracy_score(y_test_01, y_pred)\n",
    "# print(f'Accuracy: {accuracy}')\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_lr'\n",
    "\n",
    "pred_label = y_pred\n",
    "\n",
    "\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Bagging ADA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "      0.0     1.0   2.0   3.0  4.0\n",
      "0.0  35.0  6911.0   2.0  17.0  0.0\n",
      "1.0  35.0  4765.0  10.0   3.0  1.0\n",
      "2.0  41.0  1161.0  34.0  27.0  0.0\n",
      "3.0   8.0   183.0  35.0  91.0  0.0\n",
      "4.0   0.0     0.0   5.0   0.0  3.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.3686691104959976\n",
      "Precision total:  0.4929724393100468\n",
      "Recall total:  0.33676655154771956\n",
      "F1 total:  0.29893233583199236\n",
      "BACC total:  0.33676655154771956\n",
      "MCC total:  0.07143246700044578\n"
     ]
    }
   ],
   "source": [
    "start = time.time()\n",
    "\n",
    "from sklearn.ensemble import AdaBoostClassifier\n",
    "import time\n",
    "ada = AdaBoostClassifier(n_estimators=50, learning_rate=1.0)\n",
    "\n",
    "base_classifier = ada\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train_01, y_train_01)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test_01)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_ada'\n",
    "\n",
    "pred_label = y_pred\n",
    "\n",
    "\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Bagging CAT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0:\tlearn: 1.3043844\ttotal: 12.7ms\tremaining: 1.25s\n",
      "1:\tlearn: 1.1005793\ttotal: 24.5ms\tremaining: 1.2s\n",
      "2:\tlearn: 0.9529950\ttotal: 35.7ms\tremaining: 1.16s\n",
      "3:\tlearn: 0.8377989\ttotal: 46.5ms\tremaining: 1.12s\n",
      "4:\tlearn: 0.7413180\ttotal: 57.1ms\tremaining: 1.08s\n",
      "5:\tlearn: 0.6639412\ttotal: 66.7ms\tremaining: 1.04s\n",
      "6:\tlearn: 0.6008973\ttotal: 75.9ms\tremaining: 1.01s\n",
      "7:\tlearn: 0.5459315\ttotal: 84.8ms\tremaining: 975ms\n",
      "8:\tlearn: 0.5008889\ttotal: 93.6ms\tremaining: 947ms\n",
      "9:\tlearn: 0.4574999\ttotal: 102ms\tremaining: 915ms\n",
      "10:\tlearn: 0.4188063\ttotal: 110ms\tremaining: 886ms\n",
      "11:\tlearn: 0.3882150\ttotal: 116ms\tremaining: 852ms\n",
      "12:\tlearn: 0.3593815\ttotal: 122ms\tremaining: 820ms\n",
      "13:\tlearn: 0.3351558\ttotal: 128ms\tremaining: 789ms\n",
      "14:\tlearn: 0.3113896\ttotal: 134ms\tremaining: 760ms\n",
      "15:\tlearn: 0.2898745\ttotal: 140ms\tremaining: 735ms\n",
      "16:\tlearn: 0.2720052\ttotal: 146ms\tremaining: 711ms\n",
      "17:\tlearn: 0.2547167\ttotal: 152ms\tremaining: 691ms\n",
      "18:\tlearn: 0.2398275\ttotal: 157ms\tremaining: 670ms\n",
      "19:\tlearn: 0.2249342\ttotal: 163ms\tremaining: 651ms\n",
      "20:\tlearn: 0.2123262\ttotal: 168ms\tremaining: 632ms\n",
      "21:\tlearn: 0.2010012\ttotal: 174ms\tremaining: 615ms\n",
      "22:\tlearn: 0.1904415\ttotal: 179ms\tremaining: 599ms\n",
      "23:\tlearn: 0.1815029\ttotal: 184ms\tremaining: 584ms\n",
      "24:\tlearn: 0.1716324\ttotal: 190ms\tremaining: 569ms\n",
      "25:\tlearn: 0.1633273\ttotal: 195ms\tremaining: 555ms\n",
      "26:\tlearn: 0.1560956\ttotal: 200ms\tremaining: 541ms\n",
      "27:\tlearn: 0.1488765\ttotal: 205ms\tremaining: 528ms\n",
      "28:\tlearn: 0.1423811\ttotal: 211ms\tremaining: 516ms\n",
      "29:\tlearn: 0.1363278\ttotal: 216ms\tremaining: 503ms\n",
      "30:\tlearn: 0.1307151\ttotal: 221ms\tremaining: 491ms\n",
      "31:\tlearn: 0.1250723\ttotal: 226ms\tremaining: 481ms\n",
      "32:\tlearn: 0.1204135\ttotal: 231ms\tremaining: 470ms\n",
      "33:\tlearn: 0.1160044\ttotal: 236ms\tremaining: 459ms\n",
      "34:\tlearn: 0.1127884\ttotal: 241ms\tremaining: 448ms\n",
      "35:\tlearn: 0.1102546\ttotal: 246ms\tremaining: 437ms\n",
      "36:\tlearn: 0.1071736\ttotal: 251ms\tremaining: 428ms\n",
      "37:\tlearn: 0.1043108\ttotal: 256ms\tremaining: 418ms\n",
      "38:\tlearn: 0.1008721\ttotal: 262ms\tremaining: 409ms\n",
      "39:\tlearn: 0.0990869\ttotal: 267ms\tremaining: 400ms\n",
      "40:\tlearn: 0.0961581\ttotal: 272ms\tremaining: 391ms\n",
      "41:\tlearn: 0.0940362\ttotal: 277ms\tremaining: 383ms\n",
      "42:\tlearn: 0.0909578\ttotal: 283ms\tremaining: 375ms\n",
      "43:\tlearn: 0.0885871\ttotal: 288ms\tremaining: 366ms\n",
      "44:\tlearn: 0.0863244\ttotal: 293ms\tremaining: 358ms\n",
      "45:\tlearn: 0.0853068\ttotal: 298ms\tremaining: 350ms\n",
      "46:\tlearn: 0.0839713\ttotal: 303ms\tremaining: 342ms\n",
      "47:\tlearn: 0.0823817\ttotal: 308ms\tremaining: 334ms\n",
      "48:\tlearn: 0.0805116\ttotal: 313ms\tremaining: 326ms\n",
      "49:\tlearn: 0.0790287\ttotal: 318ms\tremaining: 318ms\n",
      "50:\tlearn: 0.0771019\ttotal: 323ms\tremaining: 311ms\n",
      "51:\tlearn: 0.0759832\ttotal: 328ms\tremaining: 303ms\n",
      "52:\tlearn: 0.0742935\ttotal: 333ms\tremaining: 296ms\n",
      "53:\tlearn: 0.0727330\ttotal: 338ms\tremaining: 288ms\n",
      "54:\tlearn: 0.0718861\ttotal: 343ms\tremaining: 281ms\n",
      "55:\tlearn: 0.0704450\ttotal: 348ms\tremaining: 274ms\n",
      "56:\tlearn: 0.0692338\ttotal: 353ms\tremaining: 266ms\n",
      "57:\tlearn: 0.0674496\ttotal: 358ms\tremaining: 260ms\n",
      "58:\tlearn: 0.0664673\ttotal: 363ms\tremaining: 252ms\n",
      "59:\tlearn: 0.0657201\ttotal: 368ms\tremaining: 245ms\n",
      "60:\tlearn: 0.0653070\ttotal: 373ms\tremaining: 238ms\n",
      "61:\tlearn: 0.0640775\ttotal: 378ms\tremaining: 232ms\n",
      "62:\tlearn: 0.0630437\ttotal: 383ms\tremaining: 225ms\n",
      "63:\tlearn: 0.0623948\ttotal: 388ms\tremaining: 218ms\n",
      "64:\tlearn: 0.0613609\ttotal: 393ms\tremaining: 212ms\n",
      "65:\tlearn: 0.0601915\ttotal: 424ms\tremaining: 218ms\n",
      "66:\tlearn: 0.0593727\ttotal: 429ms\tremaining: 211ms\n",
      "67:\tlearn: 0.0583535\ttotal: 434ms\tremaining: 204ms\n",
      "68:\tlearn: 0.0568639\ttotal: 439ms\tremaining: 197ms\n",
      "69:\tlearn: 0.0564195\ttotal: 444ms\tremaining: 190ms\n",
      "70:\tlearn: 0.0559998\ttotal: 449ms\tremaining: 183ms\n",
      "71:\tlearn: 0.0548438\ttotal: 454ms\tremaining: 177ms\n",
      "72:\tlearn: 0.0543087\ttotal: 459ms\tremaining: 170ms\n",
      "73:\tlearn: 0.0540640\ttotal: 464ms\tremaining: 163ms\n",
      "74:\tlearn: 0.0531351\ttotal: 469ms\tremaining: 156ms\n",
      "75:\tlearn: 0.0523135\ttotal: 474ms\tremaining: 150ms\n",
      "76:\tlearn: 0.0512831\ttotal: 480ms\tremaining: 143ms\n",
      "77:\tlearn: 0.0505223\ttotal: 485ms\tremaining: 137ms\n",
      "78:\tlearn: 0.0499903\ttotal: 490ms\tremaining: 130ms\n",
      "79:\tlearn: 0.0493566\ttotal: 495ms\tremaining: 124ms\n",
      "80:\tlearn: 0.0487561\ttotal: 500ms\tremaining: 117ms\n",
      "81:\tlearn: 0.0480308\ttotal: 506ms\tremaining: 111ms\n",
      "82:\tlearn: 0.0473638\ttotal: 511ms\tremaining: 105ms\n",
      "83:\tlearn: 0.0465331\ttotal: 516ms\tremaining: 98.3ms\n",
      "84:\tlearn: 0.0459670\ttotal: 521ms\tremaining: 92ms\n",
      "85:\tlearn: 0.0452995\ttotal: 527ms\tremaining: 85.7ms\n",
      "86:\tlearn: 0.0448396\ttotal: 532ms\tremaining: 79.5ms\n",
      "87:\tlearn: 0.0446056\ttotal: 537ms\tremaining: 73.2ms\n",
      "88:\tlearn: 0.0443289\ttotal: 541ms\tremaining: 66.9ms\n",
      "89:\tlearn: 0.0440929\ttotal: 546ms\tremaining: 60.7ms\n",
      "90:\tlearn: 0.0436750\ttotal: 551ms\tremaining: 54.5ms\n",
      "91:\tlearn: 0.0434345\ttotal: 557ms\tremaining: 48.4ms\n",
      "92:\tlearn: 0.0430665\ttotal: 562ms\tremaining: 42.3ms\n",
      "93:\tlearn: 0.0419718\ttotal: 568ms\tremaining: 36.2ms\n",
      "94:\tlearn: 0.0415296\ttotal: 573ms\tremaining: 30.2ms\n",
      "95:\tlearn: 0.0412973\ttotal: 578ms\tremaining: 24.1ms\n",
      "96:\tlearn: 0.0410471\ttotal: 583ms\tremaining: 18ms\n",
      "97:\tlearn: 0.0406724\ttotal: 588ms\tremaining: 12ms\n",
      "98:\tlearn: 0.0403432\ttotal: 593ms\tremaining: 5.99ms\n",
      "99:\tlearn: 0.0400663\ttotal: 598ms\tremaining: 0us\n",
      "0:\tlearn: 1.2982412\ttotal: 20.3ms\tremaining: 2.01s\n",
      "1:\tlearn: 1.1006993\ttotal: 33ms\tremaining: 1.61s\n",
      "2:\tlearn: 0.9507999\ttotal: 46.3ms\tremaining: 1.5s\n",
      "3:\tlearn: 0.8344172\ttotal: 58.7ms\tremaining: 1.41s\n",
      "4:\tlearn: 0.7389095\ttotal: 70.7ms\tremaining: 1.34s\n",
      "5:\tlearn: 0.6598148\ttotal: 82.6ms\tremaining: 1.29s\n",
      "6:\tlearn: 0.5963627\ttotal: 93ms\tremaining: 1.24s\n",
      "7:\tlearn: 0.5424555\ttotal: 104ms\tremaining: 1.19s\n",
      "8:\tlearn: 0.4967719\ttotal: 114ms\tremaining: 1.15s\n",
      "9:\tlearn: 0.4555125\ttotal: 123ms\tremaining: 1.1s\n",
      "10:\tlearn: 0.4166816\ttotal: 132ms\tremaining: 1.07s\n",
      "11:\tlearn: 0.3836334\ttotal: 141ms\tremaining: 1.03s\n",
      "12:\tlearn: 0.3548917\ttotal: 150ms\tremaining: 1s\n",
      "13:\tlearn: 0.3301267\ttotal: 159ms\tremaining: 974ms\n",
      "14:\tlearn: 0.3067513\ttotal: 166ms\tremaining: 939ms\n",
      "15:\tlearn: 0.2862813\ttotal: 173ms\tremaining: 909ms\n",
      "16:\tlearn: 0.2682065\ttotal: 180ms\tremaining: 880ms\n",
      "17:\tlearn: 0.2506539\ttotal: 188ms\tremaining: 855ms\n",
      "18:\tlearn: 0.2352928\ttotal: 194ms\tremaining: 829ms\n",
      "19:\tlearn: 0.2213548\ttotal: 201ms\tremaining: 805ms\n",
      "20:\tlearn: 0.2089545\ttotal: 208ms\tremaining: 782ms\n",
      "21:\tlearn: 0.1974110\ttotal: 214ms\tremaining: 760ms\n",
      "22:\tlearn: 0.1864987\ttotal: 221ms\tremaining: 741ms\n",
      "23:\tlearn: 0.1767057\ttotal: 228ms\tremaining: 721ms\n",
      "24:\tlearn: 0.1675050\ttotal: 234ms\tremaining: 702ms\n",
      "25:\tlearn: 0.1603119\ttotal: 241ms\tremaining: 686ms\n",
      "26:\tlearn: 0.1532635\ttotal: 247ms\tremaining: 667ms\n",
      "27:\tlearn: 0.1458210\ttotal: 253ms\tremaining: 650ms\n",
      "28:\tlearn: 0.1394058\ttotal: 258ms\tremaining: 633ms\n",
      "29:\tlearn: 0.1333509\ttotal: 264ms\tremaining: 616ms\n",
      "30:\tlearn: 0.1278079\ttotal: 269ms\tremaining: 599ms\n",
      "31:\tlearn: 0.1223462\ttotal: 275ms\tremaining: 584ms\n",
      "32:\tlearn: 0.1174792\ttotal: 280ms\tremaining: 568ms\n",
      "33:\tlearn: 0.1140244\ttotal: 285ms\tremaining: 553ms\n",
      "34:\tlearn: 0.1090612\ttotal: 290ms\tremaining: 539ms\n",
      "35:\tlearn: 0.1055878\ttotal: 295ms\tremaining: 525ms\n",
      "36:\tlearn: 0.1029859\ttotal: 300ms\tremaining: 511ms\n",
      "37:\tlearn: 0.1010762\ttotal: 305ms\tremaining: 498ms\n",
      "38:\tlearn: 0.0976757\ttotal: 310ms\tremaining: 486ms\n",
      "39:\tlearn: 0.0940324\ttotal: 316ms\tremaining: 473ms\n",
      "40:\tlearn: 0.0918710\ttotal: 320ms\tremaining: 461ms\n",
      "41:\tlearn: 0.0898828\ttotal: 325ms\tremaining: 449ms\n",
      "42:\tlearn: 0.0870708\ttotal: 330ms\tremaining: 438ms\n",
      "43:\tlearn: 0.0838650\ttotal: 335ms\tremaining: 427ms\n",
      "44:\tlearn: 0.0814673\ttotal: 341ms\tremaining: 416ms\n",
      "45:\tlearn: 0.0796554\ttotal: 345ms\tremaining: 405ms\n",
      "46:\tlearn: 0.0785650\ttotal: 350ms\tremaining: 395ms\n",
      "47:\tlearn: 0.0775356\ttotal: 355ms\tremaining: 384ms\n",
      "48:\tlearn: 0.0756256\ttotal: 359ms\tremaining: 374ms\n",
      "49:\tlearn: 0.0747408\ttotal: 364ms\tremaining: 364ms\n",
      "50:\tlearn: 0.0724735\ttotal: 370ms\tremaining: 355ms\n",
      "51:\tlearn: 0.0713547\ttotal: 375ms\tremaining: 346ms\n",
      "52:\tlearn: 0.0697057\ttotal: 380ms\tremaining: 337ms\n",
      "53:\tlearn: 0.0679901\ttotal: 385ms\tremaining: 328ms\n",
      "54:\tlearn: 0.0670261\ttotal: 390ms\tremaining: 319ms\n",
      "55:\tlearn: 0.0660325\ttotal: 395ms\tremaining: 310ms\n",
      "56:\tlearn: 0.0653242\ttotal: 400ms\tremaining: 302ms\n",
      "57:\tlearn: 0.0638702\ttotal: 405ms\tremaining: 293ms\n",
      "58:\tlearn: 0.0627110\ttotal: 410ms\tremaining: 285ms\n",
      "59:\tlearn: 0.0619900\ttotal: 415ms\tremaining: 276ms\n",
      "60:\tlearn: 0.0616445\ttotal: 420ms\tremaining: 268ms\n",
      "61:\tlearn: 0.0607327\ttotal: 425ms\tremaining: 260ms\n",
      "62:\tlearn: 0.0602545\ttotal: 430ms\tremaining: 252ms\n",
      "63:\tlearn: 0.0589215\ttotal: 435ms\tremaining: 245ms\n",
      "64:\tlearn: 0.0576182\ttotal: 440ms\tremaining: 237ms\n",
      "65:\tlearn: 0.0567292\ttotal: 445ms\tremaining: 229ms\n",
      "66:\tlearn: 0.0556696\ttotal: 451ms\tremaining: 222ms\n",
      "67:\tlearn: 0.0547309\ttotal: 456ms\tremaining: 214ms\n",
      "68:\tlearn: 0.0537729\ttotal: 461ms\tremaining: 207ms\n",
      "69:\tlearn: 0.0534800\ttotal: 466ms\tremaining: 200ms\n",
      "70:\tlearn: 0.0522789\ttotal: 471ms\tremaining: 193ms\n",
      "71:\tlearn: 0.0513373\ttotal: 477ms\tremaining: 185ms\n",
      "72:\tlearn: 0.0509003\ttotal: 482ms\tremaining: 178ms\n",
      "73:\tlearn: 0.0505755\ttotal: 486ms\tremaining: 171ms\n",
      "74:\tlearn: 0.0499738\ttotal: 491ms\tremaining: 164ms\n",
      "75:\tlearn: 0.0493752\ttotal: 497ms\tremaining: 157ms\n",
      "76:\tlearn: 0.0480758\ttotal: 502ms\tremaining: 150ms\n",
      "77:\tlearn: 0.0473596\ttotal: 507ms\tremaining: 143ms\n",
      "78:\tlearn: 0.0466341\ttotal: 513ms\tremaining: 136ms\n",
      "79:\tlearn: 0.0459319\ttotal: 518ms\tremaining: 129ms\n",
      "80:\tlearn: 0.0457129\ttotal: 522ms\tremaining: 123ms\n",
      "81:\tlearn: 0.0449434\ttotal: 527ms\tremaining: 116ms\n",
      "82:\tlearn: 0.0447238\ttotal: 532ms\tremaining: 109ms\n",
      "83:\tlearn: 0.0440037\ttotal: 537ms\tremaining: 102ms\n",
      "84:\tlearn: 0.0432139\ttotal: 543ms\tremaining: 95.8ms\n",
      "85:\tlearn: 0.0425681\ttotal: 548ms\tremaining: 89.1ms\n",
      "86:\tlearn: 0.0423359\ttotal: 552ms\tremaining: 82.5ms\n",
      "87:\tlearn: 0.0419566\ttotal: 557ms\tremaining: 76ms\n",
      "88:\tlearn: 0.0411265\ttotal: 563ms\tremaining: 69.5ms\n",
      "89:\tlearn: 0.0406118\ttotal: 568ms\tremaining: 63.1ms\n",
      "90:\tlearn: 0.0403072\ttotal: 573ms\tremaining: 56.6ms\n",
      "91:\tlearn: 0.0398954\ttotal: 577ms\tremaining: 50.2ms\n",
      "92:\tlearn: 0.0392824\ttotal: 583ms\tremaining: 43.9ms\n",
      "93:\tlearn: 0.0388534\ttotal: 588ms\tremaining: 37.5ms\n",
      "94:\tlearn: 0.0382103\ttotal: 593ms\tremaining: 31.2ms\n",
      "95:\tlearn: 0.0378245\ttotal: 598ms\tremaining: 24.9ms\n",
      "96:\tlearn: 0.0375210\ttotal: 603ms\tremaining: 18.6ms\n",
      "97:\tlearn: 0.0372990\ttotal: 608ms\tremaining: 12.4ms\n",
      "98:\tlearn: 0.0370376\ttotal: 612ms\tremaining: 6.19ms\n",
      "99:\tlearn: 0.0365318\ttotal: 618ms\tremaining: 0us\n",
      "0:\tlearn: 1.3040147\ttotal: 12.8ms\tremaining: 1.26s\n",
      "1:\tlearn: 1.1063578\ttotal: 25.1ms\tremaining: 1.23s\n",
      "2:\tlearn: 0.9567498\ttotal: 37.6ms\tremaining: 1.22s\n",
      "3:\tlearn: 0.8399566\ttotal: 48.7ms\tremaining: 1.17s\n",
      "4:\tlearn: 0.7475213\ttotal: 60.1ms\tremaining: 1.14s\n",
      "5:\tlearn: 0.6678330\ttotal: 71ms\tremaining: 1.11s\n",
      "6:\tlearn: 0.6031643\ttotal: 81.7ms\tremaining: 1.08s\n",
      "7:\tlearn: 0.5468311\ttotal: 91.5ms\tremaining: 1.05s\n",
      "8:\tlearn: 0.5032124\ttotal: 100ms\tremaining: 1.01s\n",
      "9:\tlearn: 0.4624100\ttotal: 109ms\tremaining: 985ms\n",
      "10:\tlearn: 0.4225068\ttotal: 118ms\tremaining: 957ms\n",
      "11:\tlearn: 0.3885267\ttotal: 126ms\tremaining: 924ms\n",
      "12:\tlearn: 0.3597367\ttotal: 135ms\tremaining: 902ms\n",
      "13:\tlearn: 0.3353051\ttotal: 142ms\tremaining: 874ms\n",
      "14:\tlearn: 0.3116847\ttotal: 150ms\tremaining: 848ms\n",
      "15:\tlearn: 0.2890023\ttotal: 157ms\tremaining: 825ms\n",
      "16:\tlearn: 0.2698123\ttotal: 164ms\tremaining: 802ms\n",
      "17:\tlearn: 0.2527981\ttotal: 172ms\tremaining: 782ms\n",
      "18:\tlearn: 0.2375796\ttotal: 179ms\tremaining: 761ms\n",
      "19:\tlearn: 0.2226262\ttotal: 186ms\tremaining: 744ms\n",
      "20:\tlearn: 0.2102835\ttotal: 192ms\tremaining: 724ms\n",
      "21:\tlearn: 0.2001445\ttotal: 199ms\tremaining: 704ms\n",
      "22:\tlearn: 0.1894349\ttotal: 205ms\tremaining: 687ms\n",
      "23:\tlearn: 0.1801813\ttotal: 212ms\tremaining: 671ms\n",
      "24:\tlearn: 0.1711478\ttotal: 218ms\tremaining: 654ms\n",
      "25:\tlearn: 0.1626928\ttotal: 224ms\tremaining: 638ms\n",
      "26:\tlearn: 0.1560518\ttotal: 230ms\tremaining: 621ms\n",
      "27:\tlearn: 0.1488344\ttotal: 235ms\tremaining: 605ms\n",
      "28:\tlearn: 0.1422966\ttotal: 246ms\tremaining: 603ms\n",
      "29:\tlearn: 0.1357146\ttotal: 253ms\tremaining: 591ms\n",
      "30:\tlearn: 0.1310220\ttotal: 260ms\tremaining: 578ms\n",
      "31:\tlearn: 0.1252624\ttotal: 266ms\tremaining: 566ms\n",
      "32:\tlearn: 0.1208144\ttotal: 273ms\tremaining: 554ms\n",
      "33:\tlearn: 0.1163917\ttotal: 279ms\tremaining: 541ms\n",
      "34:\tlearn: 0.1131083\ttotal: 285ms\tremaining: 528ms\n",
      "35:\tlearn: 0.1100432\ttotal: 290ms\tremaining: 516ms\n",
      "36:\tlearn: 0.1069951\ttotal: 296ms\tremaining: 504ms\n",
      "37:\tlearn: 0.1040789\ttotal: 302ms\tremaining: 493ms\n",
      "38:\tlearn: 0.1022796\ttotal: 307ms\tremaining: 481ms\n",
      "39:\tlearn: 0.0994612\ttotal: 313ms\tremaining: 469ms\n",
      "40:\tlearn: 0.0966775\ttotal: 318ms\tremaining: 457ms\n",
      "41:\tlearn: 0.0943253\ttotal: 323ms\tremaining: 446ms\n",
      "42:\tlearn: 0.0913593\ttotal: 328ms\tremaining: 435ms\n",
      "43:\tlearn: 0.0894178\ttotal: 333ms\tremaining: 424ms\n",
      "44:\tlearn: 0.0870994\ttotal: 339ms\tremaining: 414ms\n",
      "45:\tlearn: 0.0852651\ttotal: 344ms\tremaining: 403ms\n",
      "46:\tlearn: 0.0840275\ttotal: 349ms\tremaining: 393ms\n",
      "47:\tlearn: 0.0822823\ttotal: 354ms\tremaining: 383ms\n",
      "48:\tlearn: 0.0804408\ttotal: 359ms\tremaining: 374ms\n",
      "49:\tlearn: 0.0787560\ttotal: 365ms\tremaining: 365ms\n",
      "50:\tlearn: 0.0766527\ttotal: 372ms\tremaining: 357ms\n",
      "51:\tlearn: 0.0746153\ttotal: 377ms\tremaining: 348ms\n",
      "52:\tlearn: 0.0726107\ttotal: 383ms\tremaining: 339ms\n",
      "53:\tlearn: 0.0718075\ttotal: 388ms\tremaining: 330ms\n",
      "54:\tlearn: 0.0702077\ttotal: 393ms\tremaining: 321ms\n",
      "55:\tlearn: 0.0685498\ttotal: 398ms\tremaining: 313ms\n",
      "56:\tlearn: 0.0673756\ttotal: 403ms\tremaining: 304ms\n",
      "57:\tlearn: 0.0666359\ttotal: 408ms\tremaining: 296ms\n",
      "58:\tlearn: 0.0657911\ttotal: 413ms\tremaining: 287ms\n",
      "59:\tlearn: 0.0647038\ttotal: 419ms\tremaining: 279ms\n",
      "60:\tlearn: 0.0640061\ttotal: 423ms\tremaining: 271ms\n",
      "61:\tlearn: 0.0632364\ttotal: 428ms\tremaining: 263ms\n",
      "62:\tlearn: 0.0625522\ttotal: 433ms\tremaining: 255ms\n",
      "63:\tlearn: 0.0619047\ttotal: 438ms\tremaining: 247ms\n",
      "64:\tlearn: 0.0607123\ttotal: 444ms\tremaining: 239ms\n",
      "65:\tlearn: 0.0598159\ttotal: 473ms\tremaining: 243ms\n",
      "66:\tlearn: 0.0587512\ttotal: 478ms\tremaining: 235ms\n",
      "67:\tlearn: 0.0583193\ttotal: 483ms\tremaining: 227ms\n",
      "68:\tlearn: 0.0570937\ttotal: 488ms\tremaining: 219ms\n",
      "69:\tlearn: 0.0563754\ttotal: 493ms\tremaining: 211ms\n",
      "70:\tlearn: 0.0552675\ttotal: 498ms\tremaining: 203ms\n",
      "71:\tlearn: 0.0546349\ttotal: 503ms\tremaining: 196ms\n",
      "72:\tlearn: 0.0536106\ttotal: 509ms\tremaining: 188ms\n",
      "73:\tlearn: 0.0531927\ttotal: 513ms\tremaining: 180ms\n",
      "74:\tlearn: 0.0529168\ttotal: 518ms\tremaining: 173ms\n",
      "75:\tlearn: 0.0524676\ttotal: 523ms\tremaining: 165ms\n",
      "76:\tlearn: 0.0515131\ttotal: 528ms\tremaining: 158ms\n",
      "77:\tlearn: 0.0510265\ttotal: 534ms\tremaining: 150ms\n",
      "78:\tlearn: 0.0502658\ttotal: 538ms\tremaining: 143ms\n",
      "79:\tlearn: 0.0495390\ttotal: 544ms\tremaining: 136ms\n",
      "80:\tlearn: 0.0485096\ttotal: 549ms\tremaining: 129ms\n",
      "81:\tlearn: 0.0480695\ttotal: 554ms\tremaining: 122ms\n",
      "82:\tlearn: 0.0478840\ttotal: 559ms\tremaining: 115ms\n",
      "83:\tlearn: 0.0467061\ttotal: 565ms\tremaining: 108ms\n",
      "84:\tlearn: 0.0456956\ttotal: 571ms\tremaining: 101ms\n",
      "85:\tlearn: 0.0450804\ttotal: 576ms\tremaining: 93.7ms\n",
      "86:\tlearn: 0.0447474\ttotal: 581ms\tremaining: 86.8ms\n",
      "87:\tlearn: 0.0445708\ttotal: 586ms\tremaining: 79.8ms\n",
      "88:\tlearn: 0.0443241\ttotal: 591ms\tremaining: 73ms\n",
      "89:\tlearn: 0.0438566\ttotal: 596ms\tremaining: 66.2ms\n",
      "90:\tlearn: 0.0434888\ttotal: 601ms\tremaining: 59.4ms\n",
      "91:\tlearn: 0.0430063\ttotal: 606ms\tremaining: 52.7ms\n",
      "92:\tlearn: 0.0423701\ttotal: 612ms\tremaining: 46ms\n",
      "93:\tlearn: 0.0417709\ttotal: 617ms\tremaining: 39.4ms\n",
      "94:\tlearn: 0.0412508\ttotal: 622ms\tremaining: 32.7ms\n",
      "95:\tlearn: 0.0409916\ttotal: 627ms\tremaining: 26.1ms\n",
      "96:\tlearn: 0.0403046\ttotal: 632ms\tremaining: 19.6ms\n",
      "97:\tlearn: 0.0399691\ttotal: 637ms\tremaining: 13ms\n",
      "98:\tlearn: 0.0398406\ttotal: 642ms\tremaining: 6.48ms\n",
      "99:\tlearn: 0.0392396\ttotal: 647ms\tremaining: 0us\n",
      "0:\tlearn: 1.3021294\ttotal: 14.1ms\tremaining: 1.4s\n",
      "1:\tlearn: 1.0989703\ttotal: 27.7ms\tremaining: 1.36s\n",
      "2:\tlearn: 0.9499511\ttotal: 39.4ms\tremaining: 1.27s\n",
      "3:\tlearn: 0.8343161\ttotal: 51ms\tremaining: 1.22s\n",
      "4:\tlearn: 0.7410438\ttotal: 62.6ms\tremaining: 1.19s\n",
      "5:\tlearn: 0.6616537\ttotal: 72.6ms\tremaining: 1.14s\n",
      "6:\tlearn: 0.5944870\ttotal: 83.6ms\tremaining: 1.11s\n",
      "7:\tlearn: 0.5391778\ttotal: 93.2ms\tremaining: 1.07s\n",
      "8:\tlearn: 0.4905936\ttotal: 103ms\tremaining: 1.04s\n",
      "9:\tlearn: 0.4501473\ttotal: 111ms\tremaining: 1s\n",
      "10:\tlearn: 0.4114710\ttotal: 121ms\tremaining: 978ms\n",
      "11:\tlearn: 0.3786739\ttotal: 130ms\tremaining: 957ms\n",
      "12:\tlearn: 0.3512977\ttotal: 140ms\tremaining: 934ms\n",
      "13:\tlearn: 0.3259923\ttotal: 148ms\tremaining: 909ms\n",
      "14:\tlearn: 0.3028499\ttotal: 156ms\tremaining: 887ms\n",
      "15:\tlearn: 0.2826329\ttotal: 165ms\tremaining: 864ms\n",
      "16:\tlearn: 0.2651792\ttotal: 172ms\tremaining: 840ms\n",
      "17:\tlearn: 0.2493778\ttotal: 180ms\tremaining: 818ms\n",
      "18:\tlearn: 0.2336147\ttotal: 187ms\tremaining: 796ms\n",
      "19:\tlearn: 0.2207244\ttotal: 194ms\tremaining: 776ms\n",
      "20:\tlearn: 0.2084196\ttotal: 201ms\tremaining: 756ms\n",
      "21:\tlearn: 0.1971517\ttotal: 207ms\tremaining: 735ms\n",
      "22:\tlearn: 0.1870344\ttotal: 214ms\tremaining: 718ms\n",
      "23:\tlearn: 0.1772148\ttotal: 221ms\tremaining: 701ms\n",
      "24:\tlearn: 0.1679566\ttotal: 228ms\tremaining: 683ms\n",
      "25:\tlearn: 0.1584913\ttotal: 235ms\tremaining: 668ms\n",
      "26:\tlearn: 0.1514793\ttotal: 241ms\tremaining: 652ms\n",
      "27:\tlearn: 0.1442573\ttotal: 248ms\tremaining: 637ms\n",
      "28:\tlearn: 0.1376305\ttotal: 254ms\tremaining: 622ms\n",
      "29:\tlearn: 0.1312487\ttotal: 260ms\tremaining: 606ms\n",
      "30:\tlearn: 0.1262321\ttotal: 266ms\tremaining: 592ms\n",
      "31:\tlearn: 0.1205739\ttotal: 272ms\tremaining: 578ms\n",
      "32:\tlearn: 0.1160609\ttotal: 277ms\tremaining: 563ms\n",
      "33:\tlearn: 0.1119900\ttotal: 284ms\tremaining: 551ms\n",
      "34:\tlearn: 0.1095278\ttotal: 289ms\tremaining: 536ms\n",
      "35:\tlearn: 0.1062497\ttotal: 294ms\tremaining: 522ms\n",
      "36:\tlearn: 0.1033665\ttotal: 299ms\tremaining: 509ms\n",
      "37:\tlearn: 0.1007192\ttotal: 304ms\tremaining: 496ms\n",
      "38:\tlearn: 0.0979269\ttotal: 309ms\tremaining: 483ms\n",
      "39:\tlearn: 0.0955302\ttotal: 314ms\tremaining: 471ms\n",
      "40:\tlearn: 0.0942383\ttotal: 319ms\tremaining: 459ms\n",
      "41:\tlearn: 0.0920612\ttotal: 324ms\tremaining: 447ms\n",
      "42:\tlearn: 0.0899213\ttotal: 329ms\tremaining: 436ms\n",
      "43:\tlearn: 0.0876854\ttotal: 334ms\tremaining: 425ms\n",
      "44:\tlearn: 0.0852226\ttotal: 339ms\tremaining: 414ms\n",
      "45:\tlearn: 0.0829647\ttotal: 344ms\tremaining: 404ms\n",
      "46:\tlearn: 0.0819250\ttotal: 349ms\tremaining: 393ms\n",
      "47:\tlearn: 0.0806548\ttotal: 354ms\tremaining: 383ms\n",
      "48:\tlearn: 0.0785207\ttotal: 359ms\tremaining: 373ms\n",
      "49:\tlearn: 0.0768555\ttotal: 364ms\tremaining: 364ms\n",
      "50:\tlearn: 0.0754073\ttotal: 369ms\tremaining: 354ms\n",
      "51:\tlearn: 0.0744765\ttotal: 374ms\tremaining: 345ms\n",
      "52:\tlearn: 0.0737297\ttotal: 379ms\tremaining: 336ms\n",
      "53:\tlearn: 0.0718575\ttotal: 384ms\tremaining: 327ms\n",
      "54:\tlearn: 0.0704888\ttotal: 389ms\tremaining: 318ms\n",
      "55:\tlearn: 0.0690091\ttotal: 394ms\tremaining: 310ms\n",
      "56:\tlearn: 0.0684817\ttotal: 399ms\tremaining: 301ms\n",
      "57:\tlearn: 0.0674302\ttotal: 404ms\tremaining: 292ms\n",
      "58:\tlearn: 0.0662724\ttotal: 408ms\tremaining: 284ms\n",
      "59:\tlearn: 0.0652245\ttotal: 413ms\tremaining: 276ms\n",
      "60:\tlearn: 0.0638514\ttotal: 418ms\tremaining: 268ms\n",
      "61:\tlearn: 0.0621106\ttotal: 424ms\tremaining: 260ms\n",
      "62:\tlearn: 0.0616576\ttotal: 428ms\tremaining: 252ms\n",
      "63:\tlearn: 0.0608813\ttotal: 433ms\tremaining: 244ms\n",
      "64:\tlearn: 0.0602367\ttotal: 438ms\tremaining: 236ms\n",
      "65:\tlearn: 0.0589950\ttotal: 443ms\tremaining: 228ms\n",
      "66:\tlearn: 0.0577193\ttotal: 448ms\tremaining: 221ms\n",
      "67:\tlearn: 0.0568031\ttotal: 453ms\tremaining: 213ms\n",
      "68:\tlearn: 0.0558504\ttotal: 458ms\tremaining: 206ms\n",
      "69:\tlearn: 0.0549291\ttotal: 463ms\tremaining: 199ms\n",
      "70:\tlearn: 0.0538658\ttotal: 468ms\tremaining: 191ms\n",
      "71:\tlearn: 0.0533918\ttotal: 473ms\tremaining: 184ms\n",
      "72:\tlearn: 0.0530388\ttotal: 478ms\tremaining: 177ms\n",
      "73:\tlearn: 0.0525301\ttotal: 483ms\tremaining: 170ms\n",
      "74:\tlearn: 0.0516770\ttotal: 488ms\tremaining: 163ms\n",
      "75:\tlearn: 0.0508962\ttotal: 493ms\tremaining: 156ms\n",
      "76:\tlearn: 0.0502363\ttotal: 498ms\tremaining: 149ms\n",
      "77:\tlearn: 0.0496007\ttotal: 503ms\tremaining: 142ms\n",
      "78:\tlearn: 0.0490597\ttotal: 508ms\tremaining: 135ms\n",
      "79:\tlearn: 0.0487284\ttotal: 512ms\tremaining: 128ms\n",
      "80:\tlearn: 0.0481161\ttotal: 517ms\tremaining: 121ms\n",
      "81:\tlearn: 0.0472934\ttotal: 523ms\tremaining: 115ms\n",
      "82:\tlearn: 0.0469915\ttotal: 528ms\tremaining: 108ms\n",
      "83:\tlearn: 0.0465567\ttotal: 533ms\tremaining: 101ms\n",
      "84:\tlearn: 0.0456023\ttotal: 538ms\tremaining: 94.9ms\n",
      "85:\tlearn: 0.0449318\ttotal: 543ms\tremaining: 88.4ms\n",
      "86:\tlearn: 0.0445668\ttotal: 548ms\tremaining: 81.8ms\n",
      "87:\tlearn: 0.0437629\ttotal: 553ms\tremaining: 75.4ms\n",
      "88:\tlearn: 0.0432693\ttotal: 558ms\tremaining: 69ms\n",
      "89:\tlearn: 0.0425050\ttotal: 563ms\tremaining: 62.6ms\n",
      "90:\tlearn: 0.0419788\ttotal: 568ms\tremaining: 56.2ms\n",
      "91:\tlearn: 0.0416887\ttotal: 573ms\tremaining: 49.8ms\n",
      "92:\tlearn: 0.0409992\ttotal: 579ms\tremaining: 43.5ms\n",
      "93:\tlearn: 0.0405528\ttotal: 584ms\tremaining: 37.2ms\n",
      "94:\tlearn: 0.0398671\ttotal: 589ms\tremaining: 31ms\n",
      "95:\tlearn: 0.0396174\ttotal: 593ms\tremaining: 24.7ms\n",
      "96:\tlearn: 0.0393303\ttotal: 598ms\tremaining: 18.5ms\n",
      "97:\tlearn: 0.0386967\ttotal: 603ms\tremaining: 12.3ms\n",
      "98:\tlearn: 0.0383507\ttotal: 608ms\tremaining: 6.15ms\n",
      "99:\tlearn: 0.0381905\ttotal: 613ms\tremaining: 0us\n",
      "0:\tlearn: 1.2977216\ttotal: 13.2ms\tremaining: 1.3s\n",
      "1:\tlearn: 1.0954808\ttotal: 25.6ms\tremaining: 1.25s\n",
      "2:\tlearn: 0.9480832\ttotal: 36.4ms\tremaining: 1.18s\n",
      "3:\tlearn: 0.8311972\ttotal: 46.1ms\tremaining: 1.11s\n",
      "4:\tlearn: 0.7409139\ttotal: 55.5ms\tremaining: 1.05s\n",
      "5:\tlearn: 0.6631443\ttotal: 64.4ms\tremaining: 1.01s\n",
      "6:\tlearn: 0.5960116\ttotal: 72.9ms\tremaining: 968ms\n",
      "7:\tlearn: 0.5399480\ttotal: 80.6ms\tremaining: 926ms\n",
      "8:\tlearn: 0.4951709\ttotal: 88ms\tremaining: 890ms\n",
      "9:\tlearn: 0.4544281\ttotal: 95.7ms\tremaining: 861ms\n",
      "10:\tlearn: 0.4162867\ttotal: 103ms\tremaining: 835ms\n",
      "11:\tlearn: 0.3835154\ttotal: 110ms\tremaining: 810ms\n",
      "12:\tlearn: 0.3565096\ttotal: 118ms\tremaining: 789ms\n",
      "13:\tlearn: 0.3323796\ttotal: 125ms\tremaining: 765ms\n",
      "14:\tlearn: 0.3088691\ttotal: 131ms\tremaining: 742ms\n",
      "15:\tlearn: 0.2882340\ttotal: 137ms\tremaining: 722ms\n",
      "16:\tlearn: 0.2697637\ttotal: 144ms\tremaining: 701ms\n",
      "17:\tlearn: 0.2523335\ttotal: 150ms\tremaining: 681ms\n",
      "18:\tlearn: 0.2367601\ttotal: 155ms\tremaining: 663ms\n",
      "19:\tlearn: 0.2213385\ttotal: 161ms\tremaining: 646ms\n",
      "20:\tlearn: 0.2092353\ttotal: 188ms\tremaining: 709ms\n",
      "21:\tlearn: 0.1977813\ttotal: 194ms\tremaining: 686ms\n",
      "22:\tlearn: 0.1873501\ttotal: 199ms\tremaining: 666ms\n",
      "23:\tlearn: 0.1775815\ttotal: 204ms\tremaining: 647ms\n",
      "24:\tlearn: 0.1685075\ttotal: 209ms\tremaining: 628ms\n",
      "25:\tlearn: 0.1603581\ttotal: 215ms\tremaining: 611ms\n",
      "26:\tlearn: 0.1528694\ttotal: 220ms\tremaining: 594ms\n",
      "27:\tlearn: 0.1463361\ttotal: 225ms\tremaining: 578ms\n",
      "28:\tlearn: 0.1393112\ttotal: 230ms\tremaining: 563ms\n",
      "29:\tlearn: 0.1332521\ttotal: 235ms\tremaining: 549ms\n",
      "30:\tlearn: 0.1281461\ttotal: 240ms\tremaining: 535ms\n",
      "31:\tlearn: 0.1229005\ttotal: 246ms\tremaining: 522ms\n",
      "32:\tlearn: 0.1183593\ttotal: 251ms\tremaining: 510ms\n",
      "33:\tlearn: 0.1137508\ttotal: 256ms\tremaining: 497ms\n",
      "34:\tlearn: 0.1096647\ttotal: 261ms\tremaining: 485ms\n",
      "35:\tlearn: 0.1068210\ttotal: 266ms\tremaining: 473ms\n",
      "36:\tlearn: 0.1030926\ttotal: 271ms\tremaining: 462ms\n",
      "37:\tlearn: 0.1002616\ttotal: 276ms\tremaining: 451ms\n",
      "38:\tlearn: 0.0984265\ttotal: 281ms\tremaining: 440ms\n",
      "39:\tlearn: 0.0951541\ttotal: 286ms\tremaining: 429ms\n",
      "40:\tlearn: 0.0928099\ttotal: 291ms\tremaining: 419ms\n",
      "41:\tlearn: 0.0907082\ttotal: 296ms\tremaining: 409ms\n",
      "42:\tlearn: 0.0878718\ttotal: 302ms\tremaining: 400ms\n",
      "43:\tlearn: 0.0852471\ttotal: 307ms\tremaining: 390ms\n",
      "44:\tlearn: 0.0822185\ttotal: 312ms\tremaining: 381ms\n",
      "45:\tlearn: 0.0802156\ttotal: 317ms\tremaining: 372ms\n",
      "46:\tlearn: 0.0792169\ttotal: 322ms\tremaining: 364ms\n",
      "47:\tlearn: 0.0782698\ttotal: 327ms\tremaining: 354ms\n",
      "48:\tlearn: 0.0767651\ttotal: 332ms\tremaining: 346ms\n",
      "49:\tlearn: 0.0746627\ttotal: 338ms\tremaining: 338ms\n",
      "50:\tlearn: 0.0733971\ttotal: 342ms\tremaining: 329ms\n",
      "51:\tlearn: 0.0724657\ttotal: 347ms\tremaining: 321ms\n",
      "52:\tlearn: 0.0715542\ttotal: 352ms\tremaining: 312ms\n",
      "53:\tlearn: 0.0704804\ttotal: 357ms\tremaining: 304ms\n",
      "54:\tlearn: 0.0690899\ttotal: 362ms\tremaining: 296ms\n",
      "55:\tlearn: 0.0679543\ttotal: 368ms\tremaining: 289ms\n",
      "56:\tlearn: 0.0672262\ttotal: 372ms\tremaining: 281ms\n",
      "57:\tlearn: 0.0659638\ttotal: 377ms\tremaining: 273ms\n",
      "58:\tlearn: 0.0650737\ttotal: 382ms\tremaining: 266ms\n",
      "59:\tlearn: 0.0635930\ttotal: 388ms\tremaining: 258ms\n",
      "60:\tlearn: 0.0630024\ttotal: 393ms\tremaining: 251ms\n",
      "61:\tlearn: 0.0618770\ttotal: 398ms\tremaining: 244ms\n",
      "62:\tlearn: 0.0613225\ttotal: 402ms\tremaining: 236ms\n",
      "63:\tlearn: 0.0596114\ttotal: 408ms\tremaining: 229ms\n",
      "64:\tlearn: 0.0582545\ttotal: 413ms\tremaining: 222ms\n",
      "65:\tlearn: 0.0574481\ttotal: 418ms\tremaining: 215ms\n",
      "66:\tlearn: 0.0565030\ttotal: 424ms\tremaining: 209ms\n",
      "67:\tlearn: 0.0558076\ttotal: 429ms\tremaining: 202ms\n",
      "68:\tlearn: 0.0548520\ttotal: 434ms\tremaining: 195ms\n",
      "69:\tlearn: 0.0540186\ttotal: 439ms\tremaining: 188ms\n",
      "70:\tlearn: 0.0533453\ttotal: 445ms\tremaining: 182ms\n",
      "71:\tlearn: 0.0530406\ttotal: 450ms\tremaining: 175ms\n",
      "72:\tlearn: 0.0524113\ttotal: 455ms\tremaining: 168ms\n",
      "73:\tlearn: 0.0520975\ttotal: 459ms\tremaining: 161ms\n",
      "74:\tlearn: 0.0516563\ttotal: 464ms\tremaining: 155ms\n",
      "75:\tlearn: 0.0509946\ttotal: 469ms\tremaining: 148ms\n",
      "76:\tlearn: 0.0501446\ttotal: 475ms\tremaining: 142ms\n",
      "77:\tlearn: 0.0499461\ttotal: 479ms\tremaining: 135ms\n",
      "78:\tlearn: 0.0494698\ttotal: 484ms\tremaining: 129ms\n",
      "79:\tlearn: 0.0486500\ttotal: 490ms\tremaining: 122ms\n",
      "80:\tlearn: 0.0480076\ttotal: 495ms\tremaining: 116ms\n",
      "81:\tlearn: 0.0472456\ttotal: 500ms\tremaining: 110ms\n",
      "82:\tlearn: 0.0464274\ttotal: 505ms\tremaining: 104ms\n",
      "83:\tlearn: 0.0457496\ttotal: 510ms\tremaining: 97.2ms\n",
      "84:\tlearn: 0.0449139\ttotal: 516ms\tremaining: 91ms\n",
      "85:\tlearn: 0.0442616\ttotal: 521ms\tremaining: 84.8ms\n",
      "86:\tlearn: 0.0440012\ttotal: 526ms\tremaining: 78.6ms\n",
      "87:\tlearn: 0.0437707\ttotal: 531ms\tremaining: 72.4ms\n",
      "88:\tlearn: 0.0432964\ttotal: 536ms\tremaining: 66.2ms\n",
      "89:\tlearn: 0.0429958\ttotal: 541ms\tremaining: 60.1ms\n",
      "90:\tlearn: 0.0426631\ttotal: 546ms\tremaining: 54ms\n",
      "91:\tlearn: 0.0422892\ttotal: 551ms\tremaining: 47.9ms\n",
      "92:\tlearn: 0.0419154\ttotal: 556ms\tremaining: 41.8ms\n",
      "93:\tlearn: 0.0413797\ttotal: 561ms\tremaining: 35.8ms\n",
      "94:\tlearn: 0.0407862\ttotal: 566ms\tremaining: 29.8ms\n",
      "95:\tlearn: 0.0405513\ttotal: 571ms\tremaining: 23.8ms\n",
      "96:\tlearn: 0.0404369\ttotal: 576ms\tremaining: 17.8ms\n",
      "97:\tlearn: 0.0399454\ttotal: 581ms\tremaining: 11.9ms\n",
      "98:\tlearn: 0.0396522\ttotal: 586ms\tremaining: 5.92ms\n",
      "99:\tlearn: 0.0393042\ttotal: 591ms\tremaining: 0us\n",
      "0:\tlearn: 1.3022881\ttotal: 11.9ms\tremaining: 1.18s\n",
      "1:\tlearn: 1.1037631\ttotal: 23.3ms\tremaining: 1.14s\n",
      "2:\tlearn: 0.9525026\ttotal: 34.7ms\tremaining: 1.12s\n",
      "3:\tlearn: 0.8365788\ttotal: 46.2ms\tremaining: 1.11s\n",
      "4:\tlearn: 0.7463767\ttotal: 57.7ms\tremaining: 1.1s\n",
      "5:\tlearn: 0.6680066\ttotal: 68.4ms\tremaining: 1.07s\n",
      "6:\tlearn: 0.6012963\ttotal: 80.1ms\tremaining: 1.06s\n",
      "7:\tlearn: 0.5448250\ttotal: 90.9ms\tremaining: 1.04s\n",
      "8:\tlearn: 0.4983802\ttotal: 101ms\tremaining: 1.02s\n",
      "9:\tlearn: 0.4560231\ttotal: 111ms\tremaining: 998ms\n",
      "10:\tlearn: 0.4167934\ttotal: 121ms\tremaining: 977ms\n",
      "11:\tlearn: 0.3846349\ttotal: 130ms\tremaining: 956ms\n",
      "12:\tlearn: 0.3560314\ttotal: 140ms\tremaining: 936ms\n",
      "13:\tlearn: 0.3318765\ttotal: 148ms\tremaining: 911ms\n",
      "14:\tlearn: 0.3085825\ttotal: 157ms\tremaining: 889ms\n",
      "15:\tlearn: 0.2874062\ttotal: 165ms\tremaining: 868ms\n",
      "16:\tlearn: 0.2695679\ttotal: 174ms\tremaining: 848ms\n",
      "17:\tlearn: 0.2519977\ttotal: 182ms\tremaining: 829ms\n",
      "18:\tlearn: 0.2363449\ttotal: 190ms\tremaining: 812ms\n",
      "19:\tlearn: 0.2212536\ttotal: 199ms\tremaining: 796ms\n",
      "20:\tlearn: 0.2094178\ttotal: 206ms\tremaining: 775ms\n",
      "21:\tlearn: 0.1985563\ttotal: 214ms\tremaining: 757ms\n",
      "22:\tlearn: 0.1879748\ttotal: 221ms\tremaining: 739ms\n",
      "23:\tlearn: 0.1788533\ttotal: 228ms\tremaining: 723ms\n",
      "24:\tlearn: 0.1691924\ttotal: 235ms\tremaining: 706ms\n",
      "25:\tlearn: 0.1602004\ttotal: 243ms\tremaining: 691ms\n",
      "26:\tlearn: 0.1536137\ttotal: 250ms\tremaining: 675ms\n",
      "27:\tlearn: 0.1464484\ttotal: 256ms\tremaining: 659ms\n",
      "28:\tlearn: 0.1411050\ttotal: 262ms\tremaining: 642ms\n",
      "29:\tlearn: 0.1358167\ttotal: 269ms\tremaining: 627ms\n",
      "30:\tlearn: 0.1310144\ttotal: 275ms\tremaining: 612ms\n",
      "31:\tlearn: 0.1257353\ttotal: 281ms\tremaining: 596ms\n",
      "32:\tlearn: 0.1210622\ttotal: 287ms\tremaining: 582ms\n",
      "33:\tlearn: 0.1169588\ttotal: 292ms\tremaining: 567ms\n",
      "34:\tlearn: 0.1125537\ttotal: 298ms\tremaining: 553ms\n",
      "35:\tlearn: 0.1078208\ttotal: 303ms\tremaining: 539ms\n",
      "36:\tlearn: 0.1046355\ttotal: 309ms\tremaining: 526ms\n",
      "37:\tlearn: 0.1016959\ttotal: 314ms\tremaining: 513ms\n",
      "38:\tlearn: 0.0994952\ttotal: 319ms\tremaining: 499ms\n",
      "39:\tlearn: 0.0965907\ttotal: 324ms\tremaining: 486ms\n",
      "40:\tlearn: 0.0946331\ttotal: 329ms\tremaining: 473ms\n",
      "41:\tlearn: 0.0927725\ttotal: 334ms\tremaining: 461ms\n",
      "42:\tlearn: 0.0896764\ttotal: 339ms\tremaining: 450ms\n",
      "43:\tlearn: 0.0885115\ttotal: 344ms\tremaining: 438ms\n",
      "44:\tlearn: 0.0863151\ttotal: 349ms\tremaining: 427ms\n",
      "45:\tlearn: 0.0839854\ttotal: 354ms\tremaining: 416ms\n",
      "46:\tlearn: 0.0828972\ttotal: 359ms\tremaining: 405ms\n",
      "47:\tlearn: 0.0811491\ttotal: 365ms\tremaining: 395ms\n",
      "48:\tlearn: 0.0799258\ttotal: 369ms\tremaining: 385ms\n",
      "49:\tlearn: 0.0781469\ttotal: 374ms\tremaining: 374ms\n",
      "50:\tlearn: 0.0754029\ttotal: 380ms\tremaining: 365ms\n",
      "51:\tlearn: 0.0741297\ttotal: 385ms\tremaining: 355ms\n",
      "52:\tlearn: 0.0723037\ttotal: 390ms\tremaining: 346ms\n",
      "53:\tlearn: 0.0713316\ttotal: 395ms\tremaining: 336ms\n",
      "54:\tlearn: 0.0702454\ttotal: 400ms\tremaining: 327ms\n",
      "55:\tlearn: 0.0691637\ttotal: 405ms\tremaining: 318ms\n",
      "56:\tlearn: 0.0672980\ttotal: 410ms\tremaining: 309ms\n",
      "57:\tlearn: 0.0658517\ttotal: 415ms\tremaining: 300ms\n",
      "58:\tlearn: 0.0645834\ttotal: 420ms\tremaining: 292ms\n",
      "59:\tlearn: 0.0633971\ttotal: 425ms\tremaining: 283ms\n",
      "60:\tlearn: 0.0624754\ttotal: 430ms\tremaining: 275ms\n",
      "61:\tlearn: 0.0617237\ttotal: 435ms\tremaining: 267ms\n",
      "62:\tlearn: 0.0603858\ttotal: 440ms\tremaining: 259ms\n",
      "63:\tlearn: 0.0597908\ttotal: 445ms\tremaining: 250ms\n",
      "64:\tlearn: 0.0587300\ttotal: 450ms\tremaining: 243ms\n",
      "65:\tlearn: 0.0579539\ttotal: 455ms\tremaining: 235ms\n",
      "66:\tlearn: 0.0569257\ttotal: 461ms\tremaining: 227ms\n",
      "67:\tlearn: 0.0565292\ttotal: 466ms\tremaining: 219ms\n",
      "68:\tlearn: 0.0554207\ttotal: 472ms\tremaining: 212ms\n",
      "69:\tlearn: 0.0545610\ttotal: 477ms\tremaining: 204ms\n",
      "70:\tlearn: 0.0539728\ttotal: 482ms\tremaining: 197ms\n",
      "71:\tlearn: 0.0530707\ttotal: 487ms\tremaining: 189ms\n",
      "72:\tlearn: 0.0521407\ttotal: 492ms\tremaining: 182ms\n",
      "73:\tlearn: 0.0518011\ttotal: 497ms\tremaining: 175ms\n",
      "74:\tlearn: 0.0515314\ttotal: 502ms\tremaining: 167ms\n",
      "75:\tlearn: 0.0509552\ttotal: 507ms\tremaining: 160ms\n",
      "76:\tlearn: 0.0497815\ttotal: 512ms\tremaining: 153ms\n",
      "77:\tlearn: 0.0489502\ttotal: 518ms\tremaining: 146ms\n",
      "78:\tlearn: 0.0482675\ttotal: 523ms\tremaining: 139ms\n",
      "79:\tlearn: 0.0477852\ttotal: 528ms\tremaining: 132ms\n",
      "80:\tlearn: 0.0471716\ttotal: 533ms\tremaining: 125ms\n",
      "81:\tlearn: 0.0463130\ttotal: 538ms\tremaining: 118ms\n",
      "82:\tlearn: 0.0456324\ttotal: 543ms\tremaining: 111ms\n",
      "83:\tlearn: 0.0450237\ttotal: 548ms\tremaining: 104ms\n",
      "84:\tlearn: 0.0442145\ttotal: 554ms\tremaining: 97.7ms\n",
      "85:\tlearn: 0.0439816\ttotal: 559ms\tremaining: 90.9ms\n",
      "86:\tlearn: 0.0434860\ttotal: 564ms\tremaining: 84.2ms\n",
      "87:\tlearn: 0.0432080\ttotal: 568ms\tremaining: 77.5ms\n",
      "88:\tlearn: 0.0422800\ttotal: 574ms\tremaining: 70.9ms\n",
      "89:\tlearn: 0.0417928\ttotal: 578ms\tremaining: 64.3ms\n",
      "90:\tlearn: 0.0414957\ttotal: 583ms\tremaining: 57.7ms\n",
      "91:\tlearn: 0.0412194\ttotal: 588ms\tremaining: 51.1ms\n",
      "92:\tlearn: 0.0407391\ttotal: 593ms\tremaining: 44.6ms\n",
      "93:\tlearn: 0.0405069\ttotal: 598ms\tremaining: 38.2ms\n",
      "94:\tlearn: 0.0400585\ttotal: 603ms\tremaining: 31.7ms\n",
      "95:\tlearn: 0.0397598\ttotal: 608ms\tremaining: 25.3ms\n",
      "96:\tlearn: 0.0394048\ttotal: 613ms\tremaining: 19ms\n",
      "97:\tlearn: 0.0389889\ttotal: 619ms\tremaining: 12.6ms\n",
      "98:\tlearn: 0.0388655\ttotal: 623ms\tremaining: 6.3ms\n",
      "99:\tlearn: 0.0383679\ttotal: 628ms\tremaining: 0us\n",
      "0:\tlearn: 1.3000014\ttotal: 13.9ms\tremaining: 1.37s\n",
      "1:\tlearn: 1.1002530\ttotal: 26.4ms\tremaining: 1.29s\n",
      "2:\tlearn: 0.9507943\ttotal: 38.2ms\tremaining: 1.23s\n",
      "3:\tlearn: 0.8343254\ttotal: 48.3ms\tremaining: 1.16s\n",
      "4:\tlearn: 0.7370960\ttotal: 58.9ms\tremaining: 1.12s\n",
      "5:\tlearn: 0.6584907\ttotal: 68.6ms\tremaining: 1.07s\n",
      "6:\tlearn: 0.5946739\ttotal: 79.5ms\tremaining: 1.06s\n",
      "7:\tlearn: 0.5406053\ttotal: 89.8ms\tremaining: 1.03s\n",
      "8:\tlearn: 0.4950860\ttotal: 99ms\tremaining: 1s\n",
      "9:\tlearn: 0.4511442\ttotal: 109ms\tremaining: 980ms\n",
      "10:\tlearn: 0.4125085\ttotal: 119ms\tremaining: 961ms\n",
      "11:\tlearn: 0.3800727\ttotal: 128ms\tremaining: 941ms\n",
      "12:\tlearn: 0.3543161\ttotal: 137ms\tremaining: 918ms\n",
      "13:\tlearn: 0.3297678\ttotal: 146ms\tremaining: 899ms\n",
      "14:\tlearn: 0.3072711\ttotal: 155ms\tremaining: 881ms\n",
      "15:\tlearn: 0.2868181\ttotal: 163ms\tremaining: 858ms\n",
      "16:\tlearn: 0.2693600\ttotal: 172ms\tremaining: 837ms\n",
      "17:\tlearn: 0.2518251\ttotal: 180ms\tremaining: 820ms\n",
      "18:\tlearn: 0.2365555\ttotal: 188ms\tremaining: 802ms\n",
      "19:\tlearn: 0.2219178\ttotal: 195ms\tremaining: 781ms\n",
      "20:\tlearn: 0.2096779\ttotal: 202ms\tremaining: 759ms\n",
      "21:\tlearn: 0.1988681\ttotal: 208ms\tremaining: 737ms\n",
      "22:\tlearn: 0.1878227\ttotal: 214ms\tremaining: 716ms\n",
      "23:\tlearn: 0.1785166\ttotal: 219ms\tremaining: 694ms\n",
      "24:\tlearn: 0.1686813\ttotal: 225ms\tremaining: 674ms\n",
      "25:\tlearn: 0.1604266\ttotal: 230ms\tremaining: 654ms\n",
      "26:\tlearn: 0.1522827\ttotal: 235ms\tremaining: 635ms\n",
      "27:\tlearn: 0.1451249\ttotal: 240ms\tremaining: 617ms\n",
      "28:\tlearn: 0.1385479\ttotal: 246ms\tremaining: 601ms\n",
      "29:\tlearn: 0.1332015\ttotal: 251ms\tremaining: 585ms\n",
      "30:\tlearn: 0.1277101\ttotal: 256ms\tremaining: 569ms\n",
      "31:\tlearn: 0.1221042\ttotal: 261ms\tremaining: 555ms\n",
      "32:\tlearn: 0.1178426\ttotal: 266ms\tremaining: 541ms\n",
      "33:\tlearn: 0.1142066\ttotal: 271ms\tremaining: 526ms\n",
      "34:\tlearn: 0.1100530\ttotal: 277ms\tremaining: 514ms\n",
      "35:\tlearn: 0.1064824\ttotal: 282ms\tremaining: 501ms\n",
      "36:\tlearn: 0.1032355\ttotal: 287ms\tremaining: 489ms\n",
      "37:\tlearn: 0.1006910\ttotal: 292ms\tremaining: 476ms\n",
      "38:\tlearn: 0.0977044\ttotal: 297ms\tremaining: 465ms\n",
      "39:\tlearn: 0.0945489\ttotal: 303ms\tremaining: 454ms\n",
      "40:\tlearn: 0.0919553\ttotal: 308ms\tremaining: 443ms\n",
      "41:\tlearn: 0.0900428\ttotal: 313ms\tremaining: 432ms\n",
      "42:\tlearn: 0.0879659\ttotal: 318ms\tremaining: 422ms\n",
      "43:\tlearn: 0.0850962\ttotal: 324ms\tremaining: 412ms\n",
      "44:\tlearn: 0.0829339\ttotal: 328ms\tremaining: 401ms\n",
      "45:\tlearn: 0.0813890\ttotal: 333ms\tremaining: 391ms\n",
      "46:\tlearn: 0.0794551\ttotal: 339ms\tremaining: 382ms\n",
      "47:\tlearn: 0.0779972\ttotal: 344ms\tremaining: 372ms\n",
      "48:\tlearn: 0.0761866\ttotal: 348ms\tremaining: 363ms\n",
      "49:\tlearn: 0.0749013\ttotal: 353ms\tremaining: 353ms\n",
      "50:\tlearn: 0.0731159\ttotal: 359ms\tremaining: 345ms\n",
      "51:\tlearn: 0.0715305\ttotal: 364ms\tremaining: 336ms\n",
      "52:\tlearn: 0.0700290\ttotal: 369ms\tremaining: 327ms\n",
      "53:\tlearn: 0.0684094\ttotal: 374ms\tremaining: 319ms\n",
      "54:\tlearn: 0.0677788\ttotal: 379ms\tremaining: 310ms\n",
      "55:\tlearn: 0.0661276\ttotal: 384ms\tremaining: 302ms\n",
      "56:\tlearn: 0.0650834\ttotal: 389ms\tremaining: 294ms\n",
      "57:\tlearn: 0.0641690\ttotal: 394ms\tremaining: 285ms\n",
      "58:\tlearn: 0.0636450\ttotal: 399ms\tremaining: 277ms\n",
      "59:\tlearn: 0.0630361\ttotal: 403ms\tremaining: 269ms\n",
      "60:\tlearn: 0.0622924\ttotal: 409ms\tremaining: 261ms\n",
      "61:\tlearn: 0.0613617\ttotal: 414ms\tremaining: 254ms\n",
      "62:\tlearn: 0.0596469\ttotal: 419ms\tremaining: 246ms\n",
      "63:\tlearn: 0.0583361\ttotal: 424ms\tremaining: 239ms\n",
      "64:\tlearn: 0.0577773\ttotal: 429ms\tremaining: 231ms\n",
      "65:\tlearn: 0.0568724\ttotal: 434ms\tremaining: 224ms\n",
      "66:\tlearn: 0.0558772\ttotal: 440ms\tremaining: 217ms\n",
      "67:\tlearn: 0.0549415\ttotal: 445ms\tremaining: 209ms\n",
      "68:\tlearn: 0.0537288\ttotal: 451ms\tremaining: 202ms\n",
      "69:\tlearn: 0.0528260\ttotal: 456ms\tremaining: 195ms\n",
      "70:\tlearn: 0.0520310\ttotal: 461ms\tremaining: 188ms\n",
      "71:\tlearn: 0.0515132\ttotal: 466ms\tremaining: 181ms\n",
      "72:\tlearn: 0.0508891\ttotal: 471ms\tremaining: 174ms\n",
      "73:\tlearn: 0.0505546\ttotal: 476ms\tremaining: 167ms\n",
      "74:\tlearn: 0.0499466\ttotal: 481ms\tremaining: 160ms\n",
      "75:\tlearn: 0.0493061\ttotal: 486ms\tremaining: 153ms\n",
      "76:\tlearn: 0.0487806\ttotal: 491ms\tremaining: 147ms\n",
      "77:\tlearn: 0.0480827\ttotal: 496ms\tremaining: 140ms\n",
      "78:\tlearn: 0.0475887\ttotal: 501ms\tremaining: 133ms\n",
      "79:\tlearn: 0.0473397\ttotal: 506ms\tremaining: 127ms\n",
      "80:\tlearn: 0.0467376\ttotal: 511ms\tremaining: 120ms\n",
      "81:\tlearn: 0.0465306\ttotal: 516ms\tremaining: 113ms\n",
      "82:\tlearn: 0.0461693\ttotal: 521ms\tremaining: 107ms\n",
      "83:\tlearn: 0.0454227\ttotal: 526ms\tremaining: 100ms\n",
      "84:\tlearn: 0.0446009\ttotal: 531ms\tremaining: 93.7ms\n",
      "85:\tlearn: 0.0442262\ttotal: 536ms\tremaining: 87.3ms\n",
      "86:\tlearn: 0.0439536\ttotal: 541ms\tremaining: 80.8ms\n",
      "87:\tlearn: 0.0435572\ttotal: 546ms\tremaining: 74.5ms\n",
      "88:\tlearn: 0.0433667\ttotal: 551ms\tremaining: 68.1ms\n",
      "89:\tlearn: 0.0428680\ttotal: 556ms\tremaining: 61.8ms\n",
      "90:\tlearn: 0.0426198\ttotal: 562ms\tremaining: 55.5ms\n",
      "91:\tlearn: 0.0423903\ttotal: 566ms\tremaining: 49.3ms\n",
      "92:\tlearn: 0.0418732\ttotal: 572ms\tremaining: 43ms\n",
      "93:\tlearn: 0.0415323\ttotal: 577ms\tremaining: 36.8ms\n",
      "94:\tlearn: 0.0410936\ttotal: 582ms\tremaining: 30.6ms\n",
      "95:\tlearn: 0.0406394\ttotal: 587ms\tremaining: 24.5ms\n",
      "96:\tlearn: 0.0402960\ttotal: 592ms\tremaining: 18.3ms\n",
      "97:\tlearn: 0.0400511\ttotal: 597ms\tremaining: 12.2ms\n",
      "98:\tlearn: 0.0394584\ttotal: 603ms\tremaining: 6.09ms\n",
      "99:\tlearn: 0.0389144\ttotal: 608ms\tremaining: 0us\n",
      "0:\tlearn: 1.2983908\ttotal: 13.9ms\tremaining: 1.38s\n",
      "1:\tlearn: 1.1004775\ttotal: 26.5ms\tremaining: 1.3s\n",
      "2:\tlearn: 0.9530042\ttotal: 39.5ms\tremaining: 1.28s\n",
      "3:\tlearn: 0.8369179\ttotal: 50.1ms\tremaining: 1.2s\n",
      "4:\tlearn: 0.7447126\ttotal: 60.6ms\tremaining: 1.15s\n",
      "5:\tlearn: 0.6663901\ttotal: 71.2ms\tremaining: 1.12s\n",
      "6:\tlearn: 0.6024168\ttotal: 81.2ms\tremaining: 1.08s\n",
      "7:\tlearn: 0.5474738\ttotal: 90.3ms\tremaining: 1.04s\n",
      "8:\tlearn: 0.5034879\ttotal: 98.9ms\tremaining: 1000ms\n",
      "9:\tlearn: 0.4609726\ttotal: 108ms\tremaining: 976ms\n",
      "10:\tlearn: 0.4217793\ttotal: 117ms\tremaining: 949ms\n",
      "11:\tlearn: 0.3879790\ttotal: 125ms\tremaining: 920ms\n",
      "12:\tlearn: 0.3590455\ttotal: 133ms\tremaining: 892ms\n",
      "13:\tlearn: 0.3337166\ttotal: 141ms\tremaining: 865ms\n",
      "14:\tlearn: 0.3094861\ttotal: 148ms\tremaining: 839ms\n",
      "15:\tlearn: 0.2877630\ttotal: 155ms\tremaining: 815ms\n",
      "16:\tlearn: 0.2694124\ttotal: 162ms\tremaining: 789ms\n",
      "17:\tlearn: 0.2522861\ttotal: 169ms\tremaining: 768ms\n",
      "18:\tlearn: 0.2365852\ttotal: 175ms\tremaining: 746ms\n",
      "19:\tlearn: 0.2231343\ttotal: 181ms\tremaining: 724ms\n",
      "20:\tlearn: 0.2112154\ttotal: 187ms\tremaining: 704ms\n",
      "21:\tlearn: 0.2002040\ttotal: 193ms\tremaining: 684ms\n",
      "22:\tlearn: 0.1892879\ttotal: 198ms\tremaining: 664ms\n",
      "23:\tlearn: 0.1792403\ttotal: 204ms\tremaining: 647ms\n",
      "24:\tlearn: 0.1710293\ttotal: 210ms\tremaining: 629ms\n",
      "25:\tlearn: 0.1619723\ttotal: 215ms\tremaining: 613ms\n",
      "26:\tlearn: 0.1552618\ttotal: 221ms\tremaining: 596ms\n",
      "27:\tlearn: 0.1480064\ttotal: 226ms\tremaining: 582ms\n",
      "28:\tlearn: 0.1414100\ttotal: 231ms\tremaining: 567ms\n",
      "29:\tlearn: 0.1354706\ttotal: 237ms\tremaining: 552ms\n",
      "30:\tlearn: 0.1302511\ttotal: 242ms\tremaining: 539ms\n",
      "31:\tlearn: 0.1250010\ttotal: 247ms\tremaining: 526ms\n",
      "32:\tlearn: 0.1205861\ttotal: 253ms\tremaining: 513ms\n",
      "33:\tlearn: 0.1169173\ttotal: 258ms\tremaining: 500ms\n",
      "34:\tlearn: 0.1132807\ttotal: 263ms\tremaining: 488ms\n",
      "35:\tlearn: 0.1089734\ttotal: 268ms\tremaining: 476ms\n",
      "36:\tlearn: 0.1053511\ttotal: 273ms\tremaining: 465ms\n",
      "37:\tlearn: 0.1030614\ttotal: 278ms\tremaining: 454ms\n",
      "38:\tlearn: 0.0999393\ttotal: 283ms\tremaining: 443ms\n",
      "39:\tlearn: 0.0965938\ttotal: 289ms\tremaining: 433ms\n",
      "40:\tlearn: 0.0942206\ttotal: 294ms\tremaining: 422ms\n",
      "41:\tlearn: 0.0922459\ttotal: 299ms\tremaining: 412ms\n",
      "42:\tlearn: 0.0899088\ttotal: 304ms\tremaining: 403ms\n",
      "43:\tlearn: 0.0880054\ttotal: 309ms\tremaining: 393ms\n",
      "44:\tlearn: 0.0860686\ttotal: 314ms\tremaining: 384ms\n",
      "45:\tlearn: 0.0840607\ttotal: 321ms\tremaining: 377ms\n",
      "46:\tlearn: 0.0810980\ttotal: 327ms\tremaining: 368ms\n",
      "47:\tlearn: 0.0799069\ttotal: 332ms\tremaining: 359ms\n",
      "48:\tlearn: 0.0780828\ttotal: 337ms\tremaining: 350ms\n",
      "49:\tlearn: 0.0767938\ttotal: 342ms\tremaining: 342ms\n",
      "50:\tlearn: 0.0750061\ttotal: 347ms\tremaining: 334ms\n",
      "51:\tlearn: 0.0733780\ttotal: 352ms\tremaining: 325ms\n",
      "52:\tlearn: 0.0725627\ttotal: 357ms\tremaining: 317ms\n",
      "53:\tlearn: 0.0708646\ttotal: 362ms\tremaining: 309ms\n",
      "54:\tlearn: 0.0692932\ttotal: 367ms\tremaining: 301ms\n",
      "55:\tlearn: 0.0682995\ttotal: 372ms\tremaining: 293ms\n",
      "56:\tlearn: 0.0672457\ttotal: 377ms\tremaining: 285ms\n",
      "57:\tlearn: 0.0663705\ttotal: 382ms\tremaining: 277ms\n",
      "58:\tlearn: 0.0653557\ttotal: 388ms\tremaining: 269ms\n",
      "59:\tlearn: 0.0643175\ttotal: 393ms\tremaining: 262ms\n",
      "60:\tlearn: 0.0636964\ttotal: 397ms\tremaining: 254ms\n",
      "61:\tlearn: 0.0622691\ttotal: 403ms\tremaining: 247ms\n",
      "62:\tlearn: 0.0618778\ttotal: 407ms\tremaining: 239ms\n",
      "63:\tlearn: 0.0602343\ttotal: 413ms\tremaining: 232ms\n",
      "64:\tlearn: 0.0590178\ttotal: 418ms\tremaining: 225ms\n",
      "65:\tlearn: 0.0580887\ttotal: 423ms\tremaining: 218ms\n",
      "66:\tlearn: 0.0570186\ttotal: 428ms\tremaining: 211ms\n",
      "67:\tlearn: 0.0563125\ttotal: 433ms\tremaining: 204ms\n",
      "68:\tlearn: 0.0558330\ttotal: 438ms\tremaining: 197ms\n",
      "69:\tlearn: 0.0552368\ttotal: 443ms\tremaining: 190ms\n",
      "70:\tlearn: 0.0543404\ttotal: 448ms\tremaining: 183ms\n",
      "71:\tlearn: 0.0537178\ttotal: 453ms\tremaining: 176ms\n",
      "72:\tlearn: 0.0528102\ttotal: 458ms\tremaining: 169ms\n",
      "73:\tlearn: 0.0525662\ttotal: 463ms\tremaining: 163ms\n",
      "74:\tlearn: 0.0523548\ttotal: 468ms\tremaining: 156ms\n",
      "75:\tlearn: 0.0514451\ttotal: 473ms\tremaining: 149ms\n",
      "76:\tlearn: 0.0510548\ttotal: 478ms\tremaining: 143ms\n",
      "77:\tlearn: 0.0508563\ttotal: 482ms\tremaining: 136ms\n",
      "78:\tlearn: 0.0504352\ttotal: 487ms\tremaining: 130ms\n",
      "79:\tlearn: 0.0496253\ttotal: 493ms\tremaining: 123ms\n",
      "80:\tlearn: 0.0488785\ttotal: 498ms\tremaining: 117ms\n",
      "81:\tlearn: 0.0479517\ttotal: 503ms\tremaining: 110ms\n",
      "82:\tlearn: 0.0475789\ttotal: 508ms\tremaining: 104ms\n",
      "83:\tlearn: 0.0471829\ttotal: 513ms\tremaining: 97.6ms\n",
      "84:\tlearn: 0.0463503\ttotal: 519ms\tremaining: 91.5ms\n",
      "85:\tlearn: 0.0457777\ttotal: 524ms\tremaining: 85.3ms\n",
      "86:\tlearn: 0.0455710\ttotal: 529ms\tremaining: 79ms\n",
      "87:\tlearn: 0.0452793\ttotal: 533ms\tremaining: 72.7ms\n",
      "88:\tlearn: 0.0449044\ttotal: 538ms\tremaining: 66.5ms\n",
      "89:\tlearn: 0.0442869\ttotal: 544ms\tremaining: 60.4ms\n",
      "90:\tlearn: 0.0437384\ttotal: 549ms\tremaining: 54.3ms\n",
      "91:\tlearn: 0.0434459\ttotal: 554ms\tremaining: 48.1ms\n",
      "92:\tlearn: 0.0427900\ttotal: 559ms\tremaining: 42.1ms\n",
      "93:\tlearn: 0.0420099\ttotal: 565ms\tremaining: 36ms\n",
      "94:\tlearn: 0.0417197\ttotal: 570ms\tremaining: 30ms\n",
      "95:\tlearn: 0.0414802\ttotal: 574ms\tremaining: 23.9ms\n",
      "96:\tlearn: 0.0409599\ttotal: 580ms\tremaining: 17.9ms\n",
      "97:\tlearn: 0.0404939\ttotal: 585ms\tremaining: 11.9ms\n",
      "98:\tlearn: 0.0401792\ttotal: 590ms\tremaining: 5.96ms\n",
      "99:\tlearn: 0.0396522\ttotal: 595ms\tremaining: 0us\n",
      "0:\tlearn: 1.3035407\ttotal: 16.8ms\tremaining: 1.67s\n",
      "1:\tlearn: 1.1056365\ttotal: 28.2ms\tremaining: 1.38s\n",
      "2:\tlearn: 0.9540143\ttotal: 39.2ms\tremaining: 1.27s\n",
      "3:\tlearn: 0.8379652\ttotal: 49.9ms\tremaining: 1.2s\n",
      "4:\tlearn: 0.7404972\ttotal: 60.4ms\tremaining: 1.15s\n",
      "5:\tlearn: 0.6621897\ttotal: 71.8ms\tremaining: 1.12s\n",
      "6:\tlearn: 0.5967794\ttotal: 82ms\tremaining: 1.09s\n",
      "7:\tlearn: 0.5395245\ttotal: 92.3ms\tremaining: 1.06s\n",
      "8:\tlearn: 0.4960816\ttotal: 101ms\tremaining: 1.02s\n",
      "9:\tlearn: 0.4547781\ttotal: 110ms\tremaining: 992ms\n",
      "10:\tlearn: 0.4156293\ttotal: 120ms\tremaining: 969ms\n",
      "11:\tlearn: 0.3822635\ttotal: 128ms\tremaining: 937ms\n",
      "12:\tlearn: 0.3553785\ttotal: 136ms\tremaining: 913ms\n",
      "13:\tlearn: 0.3308624\ttotal: 145ms\tremaining: 893ms\n",
      "14:\tlearn: 0.3078681\ttotal: 154ms\tremaining: 871ms\n",
      "15:\tlearn: 0.2855385\ttotal: 162ms\tremaining: 849ms\n",
      "16:\tlearn: 0.2662229\ttotal: 169ms\tremaining: 826ms\n",
      "17:\tlearn: 0.2487692\ttotal: 177ms\tremaining: 805ms\n",
      "18:\tlearn: 0.2331273\ttotal: 184ms\tremaining: 785ms\n",
      "19:\tlearn: 0.2198022\ttotal: 191ms\tremaining: 765ms\n",
      "20:\tlearn: 0.2072893\ttotal: 198ms\tremaining: 744ms\n",
      "21:\tlearn: 0.1966966\ttotal: 204ms\tremaining: 724ms\n",
      "22:\tlearn: 0.1858341\ttotal: 211ms\tremaining: 706ms\n",
      "23:\tlearn: 0.1768360\ttotal: 218ms\tremaining: 689ms\n",
      "24:\tlearn: 0.1671692\ttotal: 224ms\tremaining: 672ms\n",
      "25:\tlearn: 0.1587483\ttotal: 231ms\tremaining: 656ms\n",
      "26:\tlearn: 0.1522188\ttotal: 237ms\tremaining: 640ms\n",
      "27:\tlearn: 0.1455288\ttotal: 243ms\tremaining: 625ms\n",
      "28:\tlearn: 0.1387990\ttotal: 249ms\tremaining: 608ms\n",
      "29:\tlearn: 0.1330607\ttotal: 254ms\tremaining: 593ms\n",
      "30:\tlearn: 0.1279924\ttotal: 261ms\tremaining: 580ms\n",
      "31:\tlearn: 0.1233014\ttotal: 266ms\tremaining: 565ms\n",
      "32:\tlearn: 0.1191959\ttotal: 271ms\tremaining: 551ms\n",
      "33:\tlearn: 0.1146779\ttotal: 277ms\tremaining: 538ms\n",
      "34:\tlearn: 0.1101941\ttotal: 282ms\tremaining: 524ms\n",
      "35:\tlearn: 0.1060510\ttotal: 287ms\tremaining: 511ms\n",
      "36:\tlearn: 0.1025327\ttotal: 293ms\tremaining: 498ms\n",
      "37:\tlearn: 0.1002423\ttotal: 298ms\tremaining: 486ms\n",
      "38:\tlearn: 0.0968066\ttotal: 303ms\tremaining: 474ms\n",
      "39:\tlearn: 0.0943039\ttotal: 308ms\tremaining: 462ms\n",
      "40:\tlearn: 0.0927668\ttotal: 313ms\tremaining: 451ms\n",
      "41:\tlearn: 0.0901523\ttotal: 318ms\tremaining: 440ms\n",
      "42:\tlearn: 0.0879134\ttotal: 323ms\tremaining: 428ms\n",
      "43:\tlearn: 0.0861131\ttotal: 328ms\tremaining: 418ms\n",
      "44:\tlearn: 0.0834930\ttotal: 333ms\tremaining: 407ms\n",
      "45:\tlearn: 0.0810976\ttotal: 338ms\tremaining: 397ms\n",
      "46:\tlearn: 0.0786828\ttotal: 344ms\tremaining: 388ms\n",
      "47:\tlearn: 0.0769286\ttotal: 349ms\tremaining: 378ms\n",
      "48:\tlearn: 0.0751506\ttotal: 354ms\tremaining: 368ms\n",
      "49:\tlearn: 0.0742499\ttotal: 359ms\tremaining: 359ms\n",
      "50:\tlearn: 0.0721131\ttotal: 365ms\tremaining: 350ms\n",
      "51:\tlearn: 0.0710457\ttotal: 369ms\tremaining: 341ms\n",
      "52:\tlearn: 0.0698233\ttotal: 374ms\tremaining: 332ms\n",
      "53:\tlearn: 0.0687077\ttotal: 379ms\tremaining: 323ms\n",
      "54:\tlearn: 0.0681473\ttotal: 384ms\tremaining: 314ms\n",
      "55:\tlearn: 0.0668307\ttotal: 389ms\tremaining: 306ms\n",
      "56:\tlearn: 0.0656062\ttotal: 394ms\tremaining: 297ms\n",
      "57:\tlearn: 0.0650888\ttotal: 399ms\tremaining: 289ms\n",
      "58:\tlearn: 0.0638640\ttotal: 404ms\tremaining: 281ms\n",
      "59:\tlearn: 0.0619660\ttotal: 409ms\tremaining: 273ms\n",
      "60:\tlearn: 0.0615182\ttotal: 414ms\tremaining: 265ms\n",
      "61:\tlearn: 0.0602862\ttotal: 419ms\tremaining: 257ms\n",
      "62:\tlearn: 0.0596098\ttotal: 424ms\tremaining: 249ms\n",
      "63:\tlearn: 0.0590786\ttotal: 429ms\tremaining: 241ms\n",
      "64:\tlearn: 0.0581224\ttotal: 434ms\tremaining: 234ms\n",
      "65:\tlearn: 0.0571573\ttotal: 439ms\tremaining: 226ms\n",
      "66:\tlearn: 0.0557292\ttotal: 444ms\tremaining: 219ms\n",
      "67:\tlearn: 0.0550247\ttotal: 449ms\tremaining: 212ms\n",
      "68:\tlearn: 0.0541349\ttotal: 455ms\tremaining: 204ms\n",
      "69:\tlearn: 0.0533188\ttotal: 460ms\tremaining: 197ms\n",
      "70:\tlearn: 0.0529854\ttotal: 464ms\tremaining: 190ms\n",
      "71:\tlearn: 0.0525022\ttotal: 469ms\tremaining: 183ms\n",
      "72:\tlearn: 0.0520467\ttotal: 474ms\tremaining: 175ms\n",
      "73:\tlearn: 0.0516421\ttotal: 479ms\tremaining: 168ms\n",
      "74:\tlearn: 0.0508738\ttotal: 484ms\tremaining: 161ms\n",
      "75:\tlearn: 0.0505382\ttotal: 489ms\tremaining: 154ms\n",
      "76:\tlearn: 0.0495895\ttotal: 494ms\tremaining: 148ms\n",
      "77:\tlearn: 0.0485120\ttotal: 500ms\tremaining: 141ms\n",
      "78:\tlearn: 0.0478764\ttotal: 505ms\tremaining: 134ms\n",
      "79:\tlearn: 0.0471464\ttotal: 510ms\tremaining: 128ms\n",
      "80:\tlearn: 0.0462783\ttotal: 515ms\tremaining: 121ms\n",
      "81:\tlearn: 0.0456758\ttotal: 521ms\tremaining: 114ms\n",
      "82:\tlearn: 0.0454191\ttotal: 525ms\tremaining: 108ms\n",
      "83:\tlearn: 0.0446539\ttotal: 531ms\tremaining: 101ms\n",
      "84:\tlearn: 0.0437605\ttotal: 536ms\tremaining: 94.6ms\n",
      "85:\tlearn: 0.0432198\ttotal: 541ms\tremaining: 88.1ms\n",
      "86:\tlearn: 0.0427796\ttotal: 546ms\tremaining: 81.6ms\n",
      "87:\tlearn: 0.0422599\ttotal: 551ms\tremaining: 75.2ms\n",
      "88:\tlearn: 0.0419824\ttotal: 556ms\tremaining: 68.7ms\n",
      "89:\tlearn: 0.0416288\ttotal: 561ms\tremaining: 62.3ms\n",
      "90:\tlearn: 0.0413712\ttotal: 566ms\tremaining: 56ms\n",
      "91:\tlearn: 0.0409877\ttotal: 571ms\tremaining: 49.6ms\n",
      "92:\tlearn: 0.0403436\ttotal: 576ms\tremaining: 43.4ms\n",
      "93:\tlearn: 0.0399344\ttotal: 581ms\tremaining: 37.1ms\n",
      "94:\tlearn: 0.0393953\ttotal: 587ms\tremaining: 30.9ms\n",
      "95:\tlearn: 0.0392568\ttotal: 592ms\tremaining: 24.7ms\n",
      "96:\tlearn: 0.0388676\ttotal: 597ms\tremaining: 18.4ms\n",
      "97:\tlearn: 0.0383442\ttotal: 602ms\tremaining: 12.3ms\n",
      "98:\tlearn: 0.0379855\ttotal: 607ms\tremaining: 6.13ms\n",
      "99:\tlearn: 0.0373811\ttotal: 613ms\tremaining: 0us\n",
      "0:\tlearn: 1.3013281\ttotal: 12.5ms\tremaining: 1.23s\n",
      "1:\tlearn: 1.1020214\ttotal: 27.8ms\tremaining: 1.36s\n",
      "2:\tlearn: 0.9530037\ttotal: 40.4ms\tremaining: 1.3s\n",
      "3:\tlearn: 0.8369038\ttotal: 52.4ms\tremaining: 1.26s\n",
      "4:\tlearn: 0.7400899\ttotal: 63.6ms\tremaining: 1.21s\n",
      "5:\tlearn: 0.6616488\ttotal: 75ms\tremaining: 1.17s\n",
      "6:\tlearn: 0.5957324\ttotal: 85.2ms\tremaining: 1.13s\n",
      "7:\tlearn: 0.5412342\ttotal: 96.1ms\tremaining: 1.1s\n",
      "8:\tlearn: 0.4931821\ttotal: 106ms\tremaining: 1.07s\n",
      "9:\tlearn: 0.4532324\ttotal: 115ms\tremaining: 1.04s\n",
      "10:\tlearn: 0.4152293\ttotal: 126ms\tremaining: 1.01s\n",
      "11:\tlearn: 0.3828405\ttotal: 135ms\tremaining: 989ms\n",
      "12:\tlearn: 0.3564095\ttotal: 145ms\tremaining: 970ms\n",
      "13:\tlearn: 0.3321373\ttotal: 154ms\tremaining: 949ms\n",
      "14:\tlearn: 0.3089436\ttotal: 163ms\tremaining: 921ms\n",
      "15:\tlearn: 0.2876267\ttotal: 171ms\tremaining: 897ms\n",
      "16:\tlearn: 0.2698041\ttotal: 179ms\tremaining: 876ms\n",
      "17:\tlearn: 0.2535339\ttotal: 187ms\tremaining: 854ms\n",
      "18:\tlearn: 0.2377113\ttotal: 196ms\tremaining: 835ms\n",
      "19:\tlearn: 0.2233187\ttotal: 203ms\tremaining: 813ms\n",
      "20:\tlearn: 0.2105809\ttotal: 211ms\tremaining: 793ms\n",
      "21:\tlearn: 0.1998762\ttotal: 219ms\tremaining: 775ms\n",
      "22:\tlearn: 0.1891563\ttotal: 226ms\tremaining: 756ms\n",
      "23:\tlearn: 0.1801174\ttotal: 233ms\tremaining: 738ms\n",
      "24:\tlearn: 0.1705146\ttotal: 240ms\tremaining: 720ms\n",
      "25:\tlearn: 0.1628456\ttotal: 247ms\tremaining: 704ms\n",
      "26:\tlearn: 0.1551059\ttotal: 254ms\tremaining: 686ms\n",
      "27:\tlearn: 0.1490898\ttotal: 260ms\tremaining: 668ms\n",
      "28:\tlearn: 0.1423199\ttotal: 266ms\tremaining: 651ms\n",
      "29:\tlearn: 0.1360998\ttotal: 272ms\tremaining: 634ms\n",
      "30:\tlearn: 0.1320748\ttotal: 278ms\tremaining: 618ms\n",
      "31:\tlearn: 0.1269848\ttotal: 283ms\tremaining: 602ms\n",
      "32:\tlearn: 0.1222518\ttotal: 289ms\tremaining: 586ms\n",
      "33:\tlearn: 0.1189371\ttotal: 294ms\tremaining: 571ms\n",
      "34:\tlearn: 0.1151489\ttotal: 299ms\tremaining: 556ms\n",
      "35:\tlearn: 0.1113987\ttotal: 304ms\tremaining: 541ms\n",
      "36:\tlearn: 0.1087176\ttotal: 310ms\tremaining: 527ms\n",
      "37:\tlearn: 0.1057177\ttotal: 315ms\tremaining: 514ms\n",
      "38:\tlearn: 0.1022327\ttotal: 320ms\tremaining: 501ms\n",
      "39:\tlearn: 0.0989065\ttotal: 325ms\tremaining: 488ms\n",
      "40:\tlearn: 0.0970214\ttotal: 330ms\tremaining: 475ms\n",
      "41:\tlearn: 0.0950968\ttotal: 335ms\tremaining: 463ms\n",
      "42:\tlearn: 0.0929241\ttotal: 340ms\tremaining: 451ms\n",
      "43:\tlearn: 0.0901809\ttotal: 346ms\tremaining: 440ms\n",
      "44:\tlearn: 0.0873958\ttotal: 351ms\tremaining: 429ms\n",
      "45:\tlearn: 0.0856936\ttotal: 356ms\tremaining: 418ms\n",
      "46:\tlearn: 0.0842616\ttotal: 361ms\tremaining: 407ms\n",
      "47:\tlearn: 0.0834316\ttotal: 366ms\tremaining: 397ms\n",
      "48:\tlearn: 0.0813445\ttotal: 371ms\tremaining: 386ms\n",
      "49:\tlearn: 0.0794166\ttotal: 376ms\tremaining: 376ms\n",
      "50:\tlearn: 0.0779321\ttotal: 381ms\tremaining: 366ms\n",
      "51:\tlearn: 0.0770619\ttotal: 386ms\tremaining: 356ms\n",
      "52:\tlearn: 0.0760621\ttotal: 391ms\tremaining: 347ms\n",
      "53:\tlearn: 0.0743994\ttotal: 396ms\tremaining: 338ms\n",
      "54:\tlearn: 0.0735801\ttotal: 401ms\tremaining: 328ms\n",
      "55:\tlearn: 0.0721559\ttotal: 407ms\tremaining: 320ms\n",
      "56:\tlearn: 0.0704931\ttotal: 412ms\tremaining: 311ms\n",
      "57:\tlearn: 0.0697269\ttotal: 416ms\tremaining: 301ms\n",
      "58:\tlearn: 0.0684049\ttotal: 421ms\tremaining: 293ms\n",
      "59:\tlearn: 0.0675116\ttotal: 426ms\tremaining: 284ms\n",
      "60:\tlearn: 0.0657122\ttotal: 431ms\tremaining: 276ms\n",
      "61:\tlearn: 0.0644648\ttotal: 436ms\tremaining: 267ms\n",
      "62:\tlearn: 0.0630051\ttotal: 442ms\tremaining: 259ms\n",
      "63:\tlearn: 0.0613355\ttotal: 447ms\tremaining: 252ms\n",
      "64:\tlearn: 0.0608689\ttotal: 452ms\tremaining: 244ms\n",
      "65:\tlearn: 0.0601112\ttotal: 457ms\tremaining: 236ms\n",
      "66:\tlearn: 0.0589244\ttotal: 463ms\tremaining: 228ms\n",
      "67:\tlearn: 0.0577362\ttotal: 469ms\tremaining: 220ms\n",
      "68:\tlearn: 0.0567772\ttotal: 474ms\tremaining: 213ms\n",
      "69:\tlearn: 0.0562584\ttotal: 479ms\tremaining: 205ms\n",
      "70:\tlearn: 0.0553120\ttotal: 484ms\tremaining: 198ms\n",
      "71:\tlearn: 0.0542440\ttotal: 490ms\tremaining: 190ms\n",
      "72:\tlearn: 0.0537328\ttotal: 495ms\tremaining: 183ms\n",
      "73:\tlearn: 0.0532673\ttotal: 500ms\tremaining: 176ms\n",
      "74:\tlearn: 0.0530113\ttotal: 505ms\tremaining: 168ms\n",
      "75:\tlearn: 0.0521820\ttotal: 510ms\tremaining: 161ms\n",
      "76:\tlearn: 0.0511508\ttotal: 515ms\tremaining: 154ms\n",
      "77:\tlearn: 0.0506426\ttotal: 520ms\tremaining: 147ms\n",
      "78:\tlearn: 0.0497969\ttotal: 525ms\tremaining: 140ms\n",
      "79:\tlearn: 0.0493162\ttotal: 530ms\tremaining: 132ms\n",
      "80:\tlearn: 0.0486540\ttotal: 535ms\tremaining: 125ms\n",
      "81:\tlearn: 0.0481200\ttotal: 539ms\tremaining: 118ms\n",
      "82:\tlearn: 0.0478617\ttotal: 544ms\tremaining: 112ms\n",
      "83:\tlearn: 0.0472459\ttotal: 550ms\tremaining: 105ms\n",
      "84:\tlearn: 0.0464226\ttotal: 555ms\tremaining: 98ms\n",
      "85:\tlearn: 0.0458612\ttotal: 560ms\tremaining: 91.2ms\n",
      "86:\tlearn: 0.0456585\ttotal: 565ms\tremaining: 84.4ms\n",
      "87:\tlearn: 0.0451863\ttotal: 570ms\tremaining: 77.7ms\n",
      "88:\tlearn: 0.0446940\ttotal: 575ms\tremaining: 71ms\n",
      "89:\tlearn: 0.0439534\ttotal: 580ms\tremaining: 64.5ms\n",
      "90:\tlearn: 0.0435984\ttotal: 585ms\tremaining: 57.8ms\n",
      "91:\tlearn: 0.0432018\ttotal: 590ms\tremaining: 51.3ms\n",
      "92:\tlearn: 0.0426006\ttotal: 595ms\tremaining: 44.8ms\n",
      "93:\tlearn: 0.0423488\ttotal: 600ms\tremaining: 38.3ms\n",
      "94:\tlearn: 0.0416246\ttotal: 606ms\tremaining: 31.9ms\n",
      "95:\tlearn: 0.0413992\ttotal: 611ms\tremaining: 25.4ms\n",
      "96:\tlearn: 0.0408579\ttotal: 616ms\tremaining: 19ms\n",
      "97:\tlearn: 0.0403457\ttotal: 621ms\tremaining: 12.7ms\n",
      "98:\tlearn: 0.0400208\ttotal: 626ms\tremaining: 6.32ms\n",
      "99:\tlearn: 0.0398460\ttotal: 631ms\tremaining: 0us\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6923.0    29.0    12.0    1.0  0.0\n",
      "1.0    62.0  4739.0    11.0    2.0  0.0\n",
      "2.0     2.0     4.0  1244.0   13.0  0.0\n",
      "3.0     0.0     0.0     5.0  312.0  0.0\n",
      "4.0     0.0     0.0     2.0    3.0  3.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9890775791127403\n",
      "Precision total:  0.9805950197626492\n",
      "Recall total:  0.8645147743711867\n",
      "F1 total:  0.8940478901142047\n",
      "BACC total:  0.8645147743711867\n",
      "MCC total:  0.981483105573481\n"
     ]
    }
   ],
   "source": [
    "import catboost\n",
    "start = time.time()\n",
    "\n",
    "bag_cat = catboost.CatBoostClassifier(iterations=100, depth=6, learning_rate=0.1, loss_function='MultiClass', custom_metric='Accuracy')\n",
    "\n",
    "base_classifier = bag_cat\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train_01, y_train_01)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test_01)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_cat'\n",
    "\n",
    "pred_label = y_pred\n",
    "\n",
    "\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Baggin LGBM\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 107,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6934.0    22.0     7.0    2.0  0.0\n",
      "1.0    27.0  4776.0    11.0    0.0  0.0\n",
      "2.0     2.0    13.0  1245.0    3.0  0.0\n",
      "3.0     0.0     0.0     8.0  309.0  0.0\n",
      "4.0     0.0     0.0     3.0    0.0  5.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9926685120071819\n",
      "Precision total:  0.9899747231039229\n",
      "Recall total:  0.9146334312742699\n",
      "F1 total:  0.9436420896019891\n",
      "BACC total:  0.9146334312742699\n",
      "MCC total:  0.9875616525165409\n"
     ]
    }
   ],
   "source": [
    "start = time.time()\n",
    "\n",
    "from lightgbm import LGBMClassifier\n",
    "lgbm = LGBMClassifier()\n",
    "\n",
    "\n",
    "base_classifier = lgbm\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train_01, y_train_01)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test_01)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_lgbm'\n",
    "\n",
    "pred_label = y_pred\n",
    "\n",
    "\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Bagging XGB"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# import xgboost as xgb\n",
    "\n",
    "# # Create a DMatrix for XGBoost\n",
    "# dtrain = xgb.DMatrix(X_train_01, label=y_train_01)\n",
    "# dtest = xgb.DMatrix(X_test_01, label=y_test_01)\n",
    "\n",
    "# # Set XGBoost parameters\n",
    "# params = {\n",
    "#     'objective': 'multi:softmax',  # for multi-class classification\n",
    "#     'num_class': 5,  # specify the number of classes\n",
    "#     'max_depth': 3,\n",
    "#     'learning_rate': 0.1,\n",
    "#     'eval_metric': 'mlogloss'  # metric for multi-class classification\n",
    "# }\n",
    "\n",
    "# # Train the XGBoost model\n",
    "# num_round = 100\n",
    "# xgb_01 = xgb.train(params, dtrain, num_round)\n",
    "\n",
    "# base_classifier = xgb\n",
    "\n",
    "# # Define the BaggingClassifier\n",
    "# bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# # Train the BaggingClassifier\n",
    "# bagging_classifier.fit(X_train_01, y_train_01)\n",
    "\n",
    "# # Make predictions on the test set\n",
    "# y_pred = bagging_classifier.predict(X_test_01)\n",
    "\n",
    "# with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "# name = 'bag_xgb'\n",
    "\n",
    "# pred_label = y_pred\n",
    "\n",
    "\n",
    "# metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "# Acc = metrics[0]\n",
    "# Precision = metrics[1]\n",
    "# Recall = metrics[2]\n",
    "# F1 = metrics[3]\n",
    "# BACC = metrics[4]\n",
    "# MCC = metrics[5]    \n",
    "\n",
    "\n",
    "# globals()[f\"{name}_acc_01\"] = Acc\n",
    "# globals()[f\"{name}_pre_01\"] = Precision\n",
    "# globals()[f\"{name}_rec_01\"] = Recall\n",
    "# globals()[f\"{name}_f1_01\"] = F1\n",
    "# globals()[f\"{name}_bacc_01\"] = BACC\n",
    "# globals()[f\"{name}_mcc_01\"] = MCC\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Bagging RF"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 109,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6881.0    52.0    25.0    7.0  0.0\n",
      "1.0   204.0  4527.0    44.0   39.0  0.0\n",
      "2.0     3.0     0.0  1228.0   32.0  0.0\n",
      "3.0     0.0     0.0    14.0  303.0  0.0\n",
      "4.0     0.0     0.0     3.0    5.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9679808483578963\n",
      "Precision total:  0.7357929204725122\n",
      "Recall total:  0.7712892163717731\n",
      "F1 total:  0.7516536128505626\n",
      "BACC total:  0.7712892163717731\n",
      "MCC total:  0.9461130850698776\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n"
     ]
    }
   ],
   "source": [
    "start = time.time()\n",
    "\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "rf = RandomForestClassifier(max_depth = 5,  n_estimators = 10, min_samples_split = 2, n_jobs = -1)\n",
    "\n",
    "base_classifier = rf\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train_01, y_train_01)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test_01)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_rf'\n",
    "\n",
    "pred_label = y_pred\n",
    "\n",
    "\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Bagging with many models"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### do bootstrapping "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 1. Multiple subsets are created from the original dataset, selecting observations with replacement.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 110,
   "metadata": {},
   "outputs": [],
   "source": [
    "start = time.time()\n",
    "\n",
    "num_bootstraps = 10  # Adjust the number of bootstraps as needed\n",
    "\n",
    "original_data_df = X_train_01.assign(label = y_train_01)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 111,
   "metadata": {},
   "outputs": [],
   "source": [
    "boot_df = []\n",
    "for i in range(0,num_bootstraps): \n",
    "    boot_df.append(original_data_df.sample(frac = 1, replace=True).reset_index(drop=True))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 112,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>dnn</th>\n",
       "      <th>rf</th>\n",
       "      <th>lgbm</th>\n",
       "      <th>ada</th>\n",
       "      <th>knn</th>\n",
       "      <th>mlp</th>\n",
       "      <th>svm</th>\n",
       "      <th>cat</th>\n",
       "      <th>xgb</th>\n",
       "      <th>lr</th>\n",
       "      <th>dt</th>\n",
       "      <th>label</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.980953</td>\n",
       "      <td>0.994399</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.328195</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.635956e-03</td>\n",
       "      <td>0.997407</td>\n",
       "      <td>0.998120</td>\n",
       "      <td>0.999988</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.983764</td>\n",
       "      <td>0.951195</td>\n",
       "      <td>0.999914</td>\n",
       "      <td>0.247061</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.999999</td>\n",
       "      <td>1.121566e-01</td>\n",
       "      <td>0.991271</td>\n",
       "      <td>0.977176</td>\n",
       "      <td>0.983263</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.611451</td>\n",
       "      <td>0.670715</td>\n",
       "      <td>0.999846</td>\n",
       "      <td>0.244830</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.999907</td>\n",
       "      <td>1.707593e-02</td>\n",
       "      <td>0.973211</td>\n",
       "      <td>0.988113</td>\n",
       "      <td>0.654757</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0.979681</td>\n",
       "      <td>0.994399</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.328195</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>4.896998e-01</td>\n",
       "      <td>0.997405</td>\n",
       "      <td>0.998120</td>\n",
       "      <td>0.999978</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.865988</td>\n",
       "      <td>0.834482</td>\n",
       "      <td>0.999315</td>\n",
       "      <td>0.298362</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.999824</td>\n",
       "      <td>9.180450e-01</td>\n",
       "      <td>0.970987</td>\n",
       "      <td>0.972668</td>\n",
       "      <td>0.741880</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>31184</th>\n",
       "      <td>0.080429</td>\n",
       "      <td>0.070939</td>\n",
       "      <td>0.994828</td>\n",
       "      <td>0.285726</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.971680</td>\n",
       "      <td>5.006934e-07</td>\n",
       "      <td>0.780542</td>\n",
       "      <td>0.947960</td>\n",
       "      <td>0.250015</td>\n",
       "      <td>1.0</td>\n",
       "      <td>3.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>31185</th>\n",
       "      <td>0.232416</td>\n",
       "      <td>0.441556</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.277787</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.978840</td>\n",
       "      <td>9.030094e-01</td>\n",
       "      <td>0.904251</td>\n",
       "      <td>0.715115</td>\n",
       "      <td>0.446083</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>31186</th>\n",
       "      <td>0.619723</td>\n",
       "      <td>0.865051</td>\n",
       "      <td>0.999960</td>\n",
       "      <td>0.244830</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>4.192237e-04</td>\n",
       "      <td>0.977959</td>\n",
       "      <td>0.994783</td>\n",
       "      <td>0.999915</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>31187</th>\n",
       "      <td>0.996560</td>\n",
       "      <td>0.994399</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.307118</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.671131e-02</td>\n",
       "      <td>0.997257</td>\n",
       "      <td>0.998120</td>\n",
       "      <td>0.999481</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>31188</th>\n",
       "      <td>0.999034</td>\n",
       "      <td>0.683130</td>\n",
       "      <td>0.999718</td>\n",
       "      <td>0.238713</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>9.772383e-01</td>\n",
       "      <td>0.966629</td>\n",
       "      <td>0.919217</td>\n",
       "      <td>0.991843</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>31189 rows × 12 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "            dnn        rf      lgbm       ada  knn       mlp           svm  \\\n",
       "0      0.980953  0.994399  1.000000  0.328195  1.0  1.000000  1.635956e-03   \n",
       "1      0.983764  0.951195  0.999914  0.247061  1.0  0.999999  1.121566e-01   \n",
       "2      0.611451  0.670715  0.999846  0.244830  1.0  0.999907  1.707593e-02   \n",
       "3      0.979681  0.994399  1.000000  0.328195  1.0  1.000000  4.896998e-01   \n",
       "4      0.865988  0.834482  0.999315  0.298362  1.0  0.999824  9.180450e-01   \n",
       "...         ...       ...       ...       ...  ...       ...           ...   \n",
       "31184  0.080429  0.070939  0.994828  0.285726  1.0  0.971680  5.006934e-07   \n",
       "31185  0.232416  0.441556  1.000000  0.277787  1.0  0.978840  9.030094e-01   \n",
       "31186  0.619723  0.865051  0.999960  0.244830  1.0  1.000000  4.192237e-04   \n",
       "31187  0.996560  0.994399  1.000000  0.307118  1.0  1.000000  1.671131e-02   \n",
       "31188  0.999034  0.683130  0.999718  0.238713  1.0  1.000000  9.772383e-01   \n",
       "\n",
       "            cat       xgb        lr   dt  label  \n",
       "0      0.997407  0.998120  0.999988  1.0    1.0  \n",
       "1      0.991271  0.977176  0.983263  1.0    0.0  \n",
       "2      0.973211  0.988113  0.654757  1.0    1.0  \n",
       "3      0.997405  0.998120  0.999978  1.0    1.0  \n",
       "4      0.970987  0.972668  0.741880  1.0    0.0  \n",
       "...         ...       ...       ...  ...    ...  \n",
       "31184  0.780542  0.947960  0.250015  1.0    3.0  \n",
       "31185  0.904251  0.715115  0.446083  1.0    0.0  \n",
       "31186  0.977959  0.994783  0.999915  1.0    1.0  \n",
       "31187  0.997257  0.998120  0.999481  1.0    1.0  \n",
       "31188  0.966629  0.919217  0.991843  1.0    0.0  \n",
       "\n",
       "[31189 rows x 12 columns]"
      ]
     },
     "execution_count": 112,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "boot_df[5]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 2.A base model (weak model) is created on each of these subsets."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 113,
   "metadata": {},
   "outputs": [],
   "source": [
    "bag_comb_pred = []\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 114,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SVM\n",
    "from sklearn.linear_model import SGDClassifier\n",
    "clf = SGDClassifier(\n",
    "    loss='hinge',           # hinge loss for linear SVM\n",
    "    penalty='l2',           # L2 regularization to prevent overfitting\n",
    "    alpha=1e-4,             # Learning rate (small value for fine-grained updates)\n",
    "    max_iter=1000,          # Number of passes over the training data\n",
    "    random_state=42,        # Seed for reproducible results\n",
    "    learning_rate='optimal' # Automatically adjusts the learning rate based on the training data\n",
    ")\n",
    "y_train_boot = boot_df[0].pop('label')\n",
    "X_train_boot = boot_df[0]\n",
    "clf.fit(X_train_boot, y_train_boot)\n",
    "preds_svm_01 = clf.predict(X_test_01)\n",
    "bag_comb_pred.append(preds_svm_01)\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 115,
   "metadata": {},
   "outputs": [],
   "source": [
    "#ADA\n",
    "from sklearn.ensemble import AdaBoostClassifier\n",
    "abc = AdaBoostClassifier(n_estimators=50, learning_rate=1.0)\n",
    "ada = abc.fit(X_train_01, y_train_01)\n",
    "y_train_boot = boot_df[1].pop('label')\n",
    "X_train_boot = boot_df[1]\n",
    "preds_ada_01 = ada.predict(X_test_01)\n",
    "bag_comb_pred.append(preds_ada_01)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 116,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0:\tlearn: 1.2843984\ttest: 1.2864629\tbest: 1.2864629 (0)\ttotal: 13.6ms\tremaining: 1.34s\n",
      "10:\tlearn: 0.4082859\ttest: 0.4143082\tbest: 0.4143082 (10)\ttotal: 102ms\tremaining: 826ms\n",
      "20:\tlearn: 0.2066348\ttest: 0.2133401\tbest: 0.2133401 (20)\ttotal: 156ms\tremaining: 586ms\n",
      "30:\tlearn: 0.1260214\ttest: 0.1333037\tbest: 0.1333037 (30)\ttotal: 208ms\tremaining: 464ms\n",
      "40:\tlearn: 0.0915229\ttest: 0.0995841\tbest: 0.0995841 (40)\ttotal: 259ms\tremaining: 373ms\n",
      "50:\tlearn: 0.0742493\ttest: 0.0833399\tbest: 0.0833399 (50)\ttotal: 309ms\tremaining: 296ms\n",
      "60:\tlearn: 0.0618138\ttest: 0.0713403\tbest: 0.0713403 (60)\ttotal: 361ms\tremaining: 231ms\n",
      "70:\tlearn: 0.0521997\ttest: 0.0624708\tbest: 0.0624708 (70)\ttotal: 412ms\tremaining: 168ms\n",
      "80:\tlearn: 0.0453291\ttest: 0.0555215\tbest: 0.0555215 (80)\ttotal: 463ms\tremaining: 109ms\n",
      "90:\tlearn: 0.0413197\ttest: 0.0520110\tbest: 0.0520110 (90)\ttotal: 512ms\tremaining: 50.7ms\n",
      "99:\tlearn: 0.0376181\ttest: 0.0484522\tbest: 0.0484522 (99)\ttotal: 557ms\tremaining: 0us\n",
      "\n",
      "bestTest = 0.04845220473\n",
      "bestIteration = 99\n",
      "\n"
     ]
    }
   ],
   "source": [
    "#Catboost\n",
    "import catboost\n",
    "cat_01 = catboost.CatBoostClassifier(iterations=100, depth=6, learning_rate=0.1, loss_function='MultiClass', custom_metric='Accuracy')\n",
    "y_train_boot = boot_df[2].pop('label')\n",
    "X_train_boot = boot_df[2]\n",
    "cat_01.fit(X_train_boot, y_train_boot, eval_set=(X_test_01, y_test_01), verbose=10)\n",
    "preds_cat = cat_01.predict(X_test_01)\n",
    "preds_cat = np.squeeze(preds_cat)\n",
    "pred_label = preds_cat\n",
    "bag_comb_pred.append(preds_cat)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n"
     ]
    }
   ],
   "source": [
    "#MLP\n",
    "from sklearn.neural_network import MLPClassifier\n",
    "mlp = MLPClassifier(hidden_layer_sizes=(100,), max_iter=200, random_state=1)\n",
    "y_train_boot = boot_df[3].pop('label')\n",
    "X_train_boot = boot_df[3]\n",
    "if 1 == 1 and 0 == 0:\n",
    "    MLP = mlp.fit(X_train_boot, y_train_boot)\n",
    "    y_pred = MLP.predict_proba(X_test_01)\n",
    "    preds_mlp_01 = np.argmax(y_pred,axis = 1)\n",
    "\n",
    "bag_comb_pred.append(preds_mlp_01)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining LGBM Model\n",
      "---------------------------------------------------------------------------------\n"
     ]
    }
   ],
   "source": [
    "#LGBM\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining LGBM Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "#LGBM\n",
    "from lightgbm import LGBMClassifier\n",
    "lgbm = LGBMClassifier()\n",
    "y_train_boot = boot_df[4].pop('label')\n",
    "X_train_boot = boot_df[4]\n",
    "\n",
    "if 1 == 1 and 0 == 0:\n",
    "    lgbm.fit(X_train_boot, y_train_boot)\n",
    "    preds_lgbm_01 = lgbm.predict(X_test_01)\n",
    "    bag_comb_pred.append(preds_lgbm_01)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "metadata": {},
   "outputs": [],
   "source": [
    "#KNN\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "knn_clf_01=KNeighborsClassifier(n_neighbors = 5)\n",
    "y_train_boot = boot_df[5].pop('label')\n",
    "X_train_boot = boot_df[5]\n",
    "\n",
    "if 1 == 1 and 0 == 0:\n",
    "    knn_clf_01.fit(X_train_boot,y_train_boot)\n",
    "if use_model_knn == 1:\n",
    "    preds_knn =knn_clf_01.predict(X_test_01)\n",
    "    bag_comb_pred.append(preds_knn)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 120,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Random Forest\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "rf = RandomForestClassifier(max_depth = 5,  n_estimators = 10, min_samples_split = 2, n_jobs = -1)\n",
    "y_train_boot = boot_df[6].pop('label')\n",
    "X_train_boot = boot_df[6]\n",
    "\n",
    "if True == True:\n",
    "    model_rf_01 = rf.fit(X_train_boot,y_train_boot)\n",
    "    preds_rf_01 = model_rf_01.predict(X_test_01)\n",
    "    bag_comb_pred.append(preds_rf_01)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 121,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/100\n",
      "195/195 [==============================] - 1s 3ms/step - loss: 1.3790 - accuracy: 0.3384 - val_loss: 1.0201 - val_accuracy: 0.5175\n",
      "Epoch 2/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 1.0376 - accuracy: 0.5079 - val_loss: 0.9691 - val_accuracy: 0.5180\n",
      "Epoch 3/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.9380 - accuracy: 0.5073 - val_loss: 0.8339 - val_accuracy: 0.5224\n",
      "Epoch 4/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.8314 - accuracy: 0.5488 - val_loss: 0.7608 - val_accuracy: 0.6031\n",
      "Epoch 5/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7650 - accuracy: 0.6201 - val_loss: 0.7101 - val_accuracy: 0.6616\n",
      "Epoch 6/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7286 - accuracy: 0.6530 - val_loss: 0.6864 - val_accuracy: 0.7057\n",
      "Epoch 7/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7101 - accuracy: 0.6679 - val_loss: 0.6714 - val_accuracy: 0.7063\n",
      "Epoch 8/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6986 - accuracy: 0.6831 - val_loss: 0.6596 - val_accuracy: 0.7326\n",
      "Epoch 9/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6836 - accuracy: 0.6976 - val_loss: 0.6465 - val_accuracy: 0.7191\n",
      "Epoch 10/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6724 - accuracy: 0.7137 - val_loss: 0.6323 - val_accuracy: 0.7475\n",
      "Epoch 11/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6530 - accuracy: 0.7265 - val_loss: 0.6077 - val_accuracy: 0.7567\n",
      "Epoch 12/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6334 - accuracy: 0.7436 - val_loss: 0.5758 - val_accuracy: 0.7797\n",
      "Epoch 13/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.5938 - accuracy: 0.7591 - val_loss: 0.5197 - val_accuracy: 0.8148\n",
      "Epoch 14/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.5564 - accuracy: 0.7902 - val_loss: 0.4800 - val_accuracy: 0.8602\n",
      "Epoch 15/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.5378 - accuracy: 0.8124 - val_loss: 0.4738 - val_accuracy: 0.8310\n",
      "Epoch 16/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.5248 - accuracy: 0.8243 - val_loss: 0.4880 - val_accuracy: 0.8160\n",
      "Epoch 17/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.5155 - accuracy: 0.8259 - val_loss: 0.4465 - val_accuracy: 0.8790\n",
      "Epoch 18/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.5084 - accuracy: 0.8338 - val_loss: 0.4431 - val_accuracy: 0.8786\n",
      "Epoch 19/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.5057 - accuracy: 0.8343 - val_loss: 0.4386 - val_accuracy: 0.8769\n",
      "Epoch 20/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.5016 - accuracy: 0.8370 - val_loss: 0.4421 - val_accuracy: 0.8811\n",
      "Epoch 21/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.5022 - accuracy: 0.8362 - val_loss: 0.4353 - val_accuracy: 0.8746\n",
      "Epoch 22/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.4953 - accuracy: 0.8374 - val_loss: 0.4323 - val_accuracy: 0.8774\n",
      "Epoch 23/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.4936 - accuracy: 0.8397 - val_loss: 0.4314 - val_accuracy: 0.8753\n",
      "Epoch 24/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.4921 - accuracy: 0.8379 - val_loss: 0.4291 - val_accuracy: 0.8734\n",
      "Epoch 25/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.4892 - accuracy: 0.8381 - val_loss: 0.4368 - val_accuracy: 0.8671\n",
      "Epoch 26/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.4870 - accuracy: 0.8418 - val_loss: 0.4242 - val_accuracy: 0.8743\n",
      "Epoch 27/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.4790 - accuracy: 0.8417 - val_loss: 0.4309 - val_accuracy: 0.8613\n",
      "Epoch 28/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.4745 - accuracy: 0.8439 - val_loss: 0.4169 - val_accuracy: 0.8762\n",
      "Epoch 29/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.4740 - accuracy: 0.8405 - val_loss: 0.4144 - val_accuracy: 0.8785\n",
      "Epoch 30/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.4790 - accuracy: 0.8383 - val_loss: 0.4155 - val_accuracy: 0.8705\n"
     ]
    }
   ],
   "source": [
    "#DNN\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Dense\n",
    "#Model Parameters\n",
    "y_train_boot = boot_df[7].pop('label')\n",
    "X_train_boot = boot_df[7]\n",
    "\n",
    "\n",
    "dropout_rate = 0.02\n",
    "nodes = 3\n",
    "out_layer = 5\n",
    "optimizer='adam'\n",
    "loss='sparse_categorical_crossentropy'\n",
    "epochs=100\n",
    "batch_size=128\n",
    "num_columns = X_train_boot.shape[1]\n",
    "dnn_01 = tf.keras.Sequential()\n",
    "# Input layer\n",
    "dnn_01.add(tf.keras.Input(shape=(num_columns,)))\n",
    "# Dense layers with dropout\n",
    "dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "# Output layer\n",
    "# dnn_01.add(tf.keras.layers.Dense(out_layer))\n",
    "dnn_01.add(tf.keras.layers.Dense(out_layer, activation='softmax'))\n",
    "# dnn.add(tf.keras.layers.Dense(out_layer, activation='softmax'))\n",
    "dnn_01.compile(optimizer=optimizer, loss=loss,metrics=['accuracy'])\n",
    "from keras.callbacks import EarlyStopping\n",
    "# Define EarlyStopping callback\n",
    "early_stopping = EarlyStopping(monitor='val_accuracy', patience=10, restore_best_weights=True)\n",
    "dnn_01.fit(X_train_boot, y_train_boot, epochs=epochs, batch_size=batch_size,validation_split=0.2, callbacks=[early_stopping])\n",
    "pred_dnn = dnn_01.predict(X_test_01)\n",
    "preds_dnn_01 = np.argmax(pred_dnn,axis = 1)\n",
    "bag_comb_pred.append(preds_dnn_01)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 122,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n"
     ]
    }
   ],
   "source": [
    "#LogReg\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "logreg_01 = LogisticRegression()\n",
    "y_train_boot = boot_df[8].pop('label')\n",
    "X_train_boot = boot_df[8]\n",
    "\n",
    "logreg_01.fit(X_train_boot,y_train_boot)\n",
    "preds_logreg =logreg_01.predict(X_test_01)\n",
    "bag_comb_pred.append(preds_logreg)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 123,
   "metadata": {},
   "outputs": [],
   "source": [
    "import xgboost as xgb\n",
    "y_train_boot = boot_df[9].pop('label')\n",
    "X_train_boot = boot_df[9]\n",
    "\n",
    "# Create a DMatrix for XGBoost\n",
    "dtrain = xgb.DMatrix(X_train_boot, label=y_train_boot)\n",
    "dtest = xgb.DMatrix(X_test_01, label=y_test_01)\n",
    "# Set XGBoost parameters\n",
    "params = {\n",
    "    'objective': 'multi:softmax',  # for multi-class classification\n",
    "    'num_class': 5,  # specify the number of classes\n",
    "    'max_depth': 3,\n",
    "    'learning_rate': 0.1,\n",
    "    'eval_metric': 'mlogloss'  # metric for multi-class classification\n",
    "}\n",
    "# Train the XGBoost model\n",
    "num_round = 100\n",
    "xgb_01 = xgb.train(params, dtrain, num_round)\n",
    "preds_xgb_01 = xgb_01.predict(dtest)\n",
    "bag_comb_pred.append(preds_xgb_01)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3. The models run in parallel and are independent of each other."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 124,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "       model_0  model_1  model_2  model_3  model_4  model_5  model_6  model_7  \\\n",
      "0          0.0      0.0      0.0        0      0.0      0.0      0.0        0   \n",
      "1          0.0      0.0      0.0        0      0.0      0.0      0.0        0   \n",
      "2          1.0      0.0      1.0        1      1.0      1.0      1.0        1   \n",
      "3          1.0      0.0      1.0        1      1.0      1.0      1.0        1   \n",
      "4          0.0      0.0      0.0        0      0.0      0.0      0.0        0   \n",
      "...        ...      ...      ...      ...      ...      ...      ...      ...   \n",
      "13362      0.0      0.0      0.0        0      0.0      0.0      0.0        0   \n",
      "13363      1.0      0.0      1.0        1      1.0      1.0      1.0        1   \n",
      "13364      0.0      0.0      0.0        0      0.0      0.0      0.0        0   \n",
      "13365      2.0      0.0      2.0        2      2.0      2.0      2.0        2   \n",
      "13366      0.0      0.0      1.0        0      1.0      1.0      1.0        0   \n",
      "\n",
      "       model_8  model_9  \n",
      "0          0.0      0.0  \n",
      "1          0.0      0.0  \n",
      "2          1.0      1.0  \n",
      "3          1.0      1.0  \n",
      "4          0.0      1.0  \n",
      "...        ...      ...  \n",
      "13362      0.0      0.0  \n",
      "13363      1.0      1.0  \n",
      "13364      0.0      0.0  \n",
      "13365      2.0      2.0  \n",
      "13366      0.0      1.0  \n",
      "\n",
      "[13367 rows x 10 columns]\n"
     ]
    }
   ],
   "source": [
    "bag_vot_df = pd.DataFrame()\n",
    "for i in range(0,len(bag_comb_pred)):\n",
    "    bag_vot_df[f'model_{i}'] =  bag_comb_pred[i]\n",
    "print(bag_vot_df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 125,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "       model_0  model_1  model_2  model_3  model_4  model_5  model_6  model_7  \\\n",
      "0          0.0      0.0      0.0        0      0.0      0.0      0.0        0   \n",
      "1          0.0      0.0      0.0        0      0.0      0.0      0.0        0   \n",
      "2          1.0      0.0      1.0        1      1.0      1.0      1.0        1   \n",
      "3          1.0      0.0      1.0        1      1.0      1.0      1.0        1   \n",
      "4          0.0      0.0      0.0        0      0.0      0.0      0.0        0   \n",
      "...        ...      ...      ...      ...      ...      ...      ...      ...   \n",
      "13362      0.0      0.0      0.0        0      0.0      0.0      0.0        0   \n",
      "13363      1.0      0.0      1.0        1      1.0      1.0      1.0        1   \n",
      "13364      0.0      0.0      0.0        0      0.0      0.0      0.0        0   \n",
      "13365      2.0      0.0      2.0        2      2.0      2.0      2.0        2   \n",
      "13366      0.0      0.0      1.0        0      1.0      1.0      1.0        0   \n",
      "\n",
      "       model_8  model_9  ensemble  \n",
      "0          0.0      0.0         0  \n",
      "1          0.0      0.0         0  \n",
      "2          1.0      1.0         1  \n",
      "3          1.0      1.0         1  \n",
      "4          0.0      1.0         0  \n",
      "...        ...      ...       ...  \n",
      "13362      0.0      0.0         0  \n",
      "13363      1.0      1.0         1  \n",
      "13364      0.0      0.0         0  \n",
      "13365      2.0      2.0         2  \n",
      "13366      0.0      1.0         0  \n",
      "\n",
      "[13367 rows x 11 columns]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0        0\n",
       "1        0\n",
       "2        1\n",
       "3        1\n",
       "4        0\n",
       "        ..\n",
       "13362    0\n",
       "13363    1\n",
       "13364    0\n",
       "13365    2\n",
       "13366    0\n",
       "Name: ensemble, Length: 13367, dtype: int64"
      ]
     },
     "execution_count": 125,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Voting start\n",
    "from scipy.stats import mode\n",
    "# bag_comb_pred_df = pd.DataFrame(bag_comb_pred)\n",
    "# Extract predictions columns\n",
    "\n",
    "# predictions = df[['dnn', 'rf', 'lgbm', 'ada', 'knn', 'mlp', 'svm','cat','xgb']]\n",
    "    # selected_columns = df.loc[:, ~df.columns.isin(['rf'])]\n",
    "predictions = bag_vot_df \n",
    "\n",
    "# predictions = bag_comb_pred_df.loc[:, ~df.columns.isin(['label'])] #df[column_features]\n",
    "\n",
    "# Use the mode function along axis 1 to get the most common prediction for each row\n",
    "ensemble_predictions, _ = mode(predictions.values, axis=1)\n",
    "\n",
    "# Add the ensemble predictions to the DataFrame\n",
    "bag_vot_df['ensemble'] = ensemble_predictions.astype(int)\n",
    "\n",
    "# Display the DataFrame with ensemble predictions\n",
    "print(bag_vot_df)\n",
    "\n",
    "pred_label = bag_vot_df ['ensemble'].values\n",
    "bag_vot_df.pop('ensemble')\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 126,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0       1       2      3    4\n",
      "0  6939.0    12.0    12.0    2.0  0.0\n",
      "1   387.0  4396.0    29.0    2.0  0.0\n",
      "2     7.0     2.0  1248.0    6.0  0.0\n",
      "3     0.0     1.0    13.0  303.0  0.0\n",
      "4     0.0     0.0     2.0    4.0  2.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9641654821575522\n",
      "Precision total:  0.971152174489298\n",
      "Recall total:  0.8206792896362973\n",
      "F1 total:  0.8503729618850817\n",
      "BACC total:  0.8206792896362973\n",
      "MCC total:  0.9401749304934824\n"
     ]
    }
   ],
   "source": [
    "name='bag_comb'\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### DNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 127,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining DNN Model\n",
      "---------------------------------------------------------------------------------\n",
      "Model: \"sequential_3\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "dense_18 (Dense)             (None, 3)                 36        \n",
      "_________________________________________________________________\n",
      "dropout_15 (Dropout)         (None, 3)                 0         \n",
      "_________________________________________________________________\n",
      "dense_19 (Dense)             (None, 3)                 12        \n",
      "_________________________________________________________________\n",
      "dropout_16 (Dropout)         (None, 3)                 0         \n",
      "_________________________________________________________________\n",
      "dense_20 (Dense)             (None, 3)                 12        \n",
      "_________________________________________________________________\n",
      "dropout_17 (Dropout)         (None, 3)                 0         \n",
      "_________________________________________________________________\n",
      "dense_21 (Dense)             (None, 3)                 12        \n",
      "_________________________________________________________________\n",
      "dropout_18 (Dropout)         (None, 3)                 0         \n",
      "_________________________________________________________________\n",
      "dense_22 (Dense)             (None, 3)                 12        \n",
      "_________________________________________________________________\n",
      "dropout_19 (Dropout)         (None, 3)                 0         \n",
      "_________________________________________________________________\n",
      "dense_23 (Dense)             (None, 5)                 20        \n",
      "=================================================================\n",
      "Total params: 104\n",
      "Trainable params: 104\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining DNN Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "start_dnn = time.time()\n",
    "\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Dense\n",
    "\n",
    "#Model Parameters\n",
    "dropout_rate = 0.2\n",
    "nodes = 3\n",
    "out_layer = 5\n",
    "optimizer='adam'\n",
    "loss='sparse_categorical_crossentropy'\n",
    "epochs=100\n",
    "batch_size=128\n",
    "\n",
    "\n",
    "num_columns = X_train_01.shape[1]\n",
    "\n",
    "dnn_01 = tf.keras.Sequential()\n",
    "\n",
    "# Input layer\n",
    "dnn_01.add(tf.keras.Input(shape=(num_columns,)))\n",
    "\n",
    "# # Dense layers with dropout\n",
    "# dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "# dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "# dnn_01.add(tf.keras.layers.Dense(2*nodes))\n",
    "# dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "# dnn_01.add(tf.keras.layers.Dense(3*nodes))\n",
    "# dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "# dnn_01.add(tf.keras.layers.Dense(2*nodes))\n",
    "# dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "# dnn.add(tf.keras.layers.Dense(nodes))\n",
    "# dnn.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "\n",
    "\n",
    "# Dense layers with dropout\n",
    "dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "dnn_01.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_01.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "# Output layer\n",
    "# dnn_01.add(tf.keras.layers.Dense(out_layer))\n",
    "\n",
    "dnn_01.add(tf.keras.layers.Dense(out_layer, activation='softmax'))\n",
    "# dnn.add(tf.keras.layers.Dense(out_layer, activation='softmax'))\n",
    "\n",
    "\n",
    "dnn_01.compile(optimizer=optimizer, loss=loss,metrics=['accuracy'])\n",
    "\n",
    "dnn_01.summary()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 128,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Training DNN\n",
      "---------------------------------------------------------------------------------\n",
      "Epoch 1/100\n",
      "195/195 [==============================] - 1s 3ms/step - loss: 1.4005 - accuracy: 0.4074 - val_loss: 1.1403 - val_accuracy: 0.5111\n",
      "Epoch 2/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 1.1624 - accuracy: 0.4837 - val_loss: 1.0410 - val_accuracy: 0.5111\n",
      "Epoch 3/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 1.0406 - accuracy: 0.4933 - val_loss: 0.8857 - val_accuracy: 0.5258\n",
      "Epoch 4/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.9607 - accuracy: 0.5062 - val_loss: 0.8247 - val_accuracy: 0.5415\n",
      "Epoch 5/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.9023 - accuracy: 0.5316 - val_loss: 0.7701 - val_accuracy: 0.5875\n",
      "Epoch 6/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.8536 - accuracy: 0.5570 - val_loss: 0.7430 - val_accuracy: 0.5949\n",
      "Epoch 7/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.8287 - accuracy: 0.5745 - val_loss: 0.7357 - val_accuracy: 0.5973\n",
      "Epoch 8/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.8118 - accuracy: 0.5804 - val_loss: 0.7204 - val_accuracy: 0.5997\n",
      "Epoch 9/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7954 - accuracy: 0.5848 - val_loss: 0.7141 - val_accuracy: 0.5981\n",
      "Epoch 10/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7914 - accuracy: 0.5879 - val_loss: 0.7109 - val_accuracy: 0.6039\n",
      "Epoch 11/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7843 - accuracy: 0.5882 - val_loss: 0.7062 - val_accuracy: 0.6034\n",
      "Epoch 12/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7796 - accuracy: 0.5908 - val_loss: 0.7007 - val_accuracy: 0.6060\n",
      "Epoch 13/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7742 - accuracy: 0.5959 - val_loss: 0.6977 - val_accuracy: 0.6106\n",
      "Epoch 14/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7713 - accuracy: 0.5997 - val_loss: 0.6995 - val_accuracy: 0.6130\n",
      "Epoch 15/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7664 - accuracy: 0.6033 - val_loss: 0.6944 - val_accuracy: 0.6124\n",
      "Epoch 16/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7610 - accuracy: 0.6034 - val_loss: 0.6916 - val_accuracy: 0.6137\n",
      "Epoch 17/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7597 - accuracy: 0.6082 - val_loss: 0.6856 - val_accuracy: 0.6113\n",
      "Epoch 18/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7589 - accuracy: 0.6065 - val_loss: 0.6846 - val_accuracy: 0.6122\n",
      "Epoch 19/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7544 - accuracy: 0.6102 - val_loss: 0.6834 - val_accuracy: 0.6162\n",
      "Epoch 20/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7580 - accuracy: 0.6098 - val_loss: 0.6794 - val_accuracy: 0.6121\n",
      "Epoch 21/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7522 - accuracy: 0.6196 - val_loss: 0.6750 - val_accuracy: 0.6140\n",
      "Epoch 22/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7552 - accuracy: 0.6202 - val_loss: 0.6747 - val_accuracy: 0.6135\n",
      "Epoch 23/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7481 - accuracy: 0.6298 - val_loss: 0.6655 - val_accuracy: 0.6145\n",
      "Epoch 24/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7476 - accuracy: 0.6280 - val_loss: 0.6753 - val_accuracy: 0.7698\n",
      "Epoch 25/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7430 - accuracy: 0.6356 - val_loss: 0.6606 - val_accuracy: 0.6874\n",
      "Epoch 26/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7400 - accuracy: 0.6408 - val_loss: 0.6560 - val_accuracy: 0.7023\n",
      "Epoch 27/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7376 - accuracy: 0.6508 - val_loss: 0.6474 - val_accuracy: 0.6980\n",
      "Epoch 28/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7344 - accuracy: 0.6599 - val_loss: 0.6424 - val_accuracy: 0.7515\n",
      "Epoch 29/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7342 - accuracy: 0.6671 - val_loss: 0.6343 - val_accuracy: 0.7478\n",
      "Epoch 30/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7281 - accuracy: 0.6797 - val_loss: 0.6245 - val_accuracy: 0.7464\n",
      "Epoch 31/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7148 - accuracy: 0.6883 - val_loss: 0.6178 - val_accuracy: 0.7674\n",
      "Epoch 32/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7181 - accuracy: 0.6992 - val_loss: 0.6041 - val_accuracy: 0.7592\n",
      "Epoch 33/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7076 - accuracy: 0.7063 - val_loss: 0.5929 - val_accuracy: 0.7570\n",
      "Epoch 34/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7080 - accuracy: 0.7123 - val_loss: 0.5952 - val_accuracy: 0.8214\n",
      "Epoch 35/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.7034 - accuracy: 0.7243 - val_loss: 0.5709 - val_accuracy: 0.8009\n",
      "Epoch 36/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6926 - accuracy: 0.7368 - val_loss: 0.5707 - val_accuracy: 0.8445\n",
      "Epoch 37/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6864 - accuracy: 0.7465 - val_loss: 0.5499 - val_accuracy: 0.8107\n",
      "Epoch 38/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6769 - accuracy: 0.7528 - val_loss: 0.5422 - val_accuracy: 0.8492\n",
      "Epoch 39/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6785 - accuracy: 0.7613 - val_loss: 0.5447 - val_accuracy: 0.8057\n",
      "Epoch 40/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6647 - accuracy: 0.7686 - val_loss: 0.5352 - val_accuracy: 0.8140\n",
      "Epoch 41/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6752 - accuracy: 0.7674 - val_loss: 0.5301 - val_accuracy: 0.8673\n",
      "Epoch 42/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6631 - accuracy: 0.7691 - val_loss: 0.5301 - val_accuracy: 0.8679\n",
      "Epoch 43/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6460 - accuracy: 0.7805 - val_loss: 0.5174 - val_accuracy: 0.8625\n",
      "Epoch 44/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6563 - accuracy: 0.7787 - val_loss: 0.5202 - val_accuracy: 0.8544\n",
      "Epoch 45/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6505 - accuracy: 0.7810 - val_loss: 0.5174 - val_accuracy: 0.8516\n",
      "Epoch 46/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6558 - accuracy: 0.7831 - val_loss: 0.5152 - val_accuracy: 0.8599\n",
      "Epoch 47/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6561 - accuracy: 0.7831 - val_loss: 0.5200 - val_accuracy: 0.8684\n",
      "Epoch 48/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6428 - accuracy: 0.7896 - val_loss: 0.5149 - val_accuracy: 0.8649\n",
      "Epoch 49/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6515 - accuracy: 0.7891 - val_loss: 0.5142 - val_accuracy: 0.8653\n",
      "Epoch 50/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6414 - accuracy: 0.7891 - val_loss: 0.5144 - val_accuracy: 0.8621\n",
      "Epoch 51/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6605 - accuracy: 0.7885 - val_loss: 0.5181 - val_accuracy: 0.8568\n",
      "Epoch 52/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6513 - accuracy: 0.7908 - val_loss: 0.5150 - val_accuracy: 0.8602\n",
      "Epoch 53/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6445 - accuracy: 0.7907 - val_loss: 0.5139 - val_accuracy: 0.8453\n",
      "Epoch 54/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6395 - accuracy: 0.7983 - val_loss: 0.5089 - val_accuracy: 0.8649\n",
      "Epoch 55/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6343 - accuracy: 0.8000 - val_loss: 0.5102 - val_accuracy: 0.8599\n",
      "Epoch 56/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6387 - accuracy: 0.7985 - val_loss: 0.5196 - val_accuracy: 0.8312\n",
      "Epoch 57/100\n",
      "195/195 [==============================] - 0s 2ms/step - loss: 0.6431 - accuracy: 0.7969 - val_loss: 0.5065 - val_accuracy: 0.8631\n"
     ]
    }
   ],
   "source": [
    "#DNN\n",
    "try:\n",
    "    from keras.callbacks import EarlyStopping\n",
    "\n",
    "    # Define EarlyStopping callback\n",
    "    early_stopping = EarlyStopping(monitor='val_accuracy', patience=10, restore_best_weights=True)\n",
    "\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('Training DNN')\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('Training DNN', file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    # Convert Y_test back to its original format\n",
    "    # y_test = np.argmax(Y_test, axis=1)\n",
    "\n",
    "    # Start the timer\n",
    "    start = time.time()\n",
    "    # dnn_01.fit(X_train_01, y_train_01, epochs=epochs, batch_size=batch_size)\n",
    "    dnn_01.fit(X_train_01, y_train_01, epochs=epochs, batch_size=batch_size,validation_split=0.2, callbacks=[early_stopping])\n",
    "\n",
    "    # model.fit(x_train, Y_train, epochs=100, batch_size=128, validation_split=0.2, callbacks=[early_stopping])\n",
    "\n",
    "    # End the timer\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed training time ', time_taken, file = f)\n",
    "    # joblib.dump(dnn_01, 'dnn_level_01.joblib')\n",
    "    # dnn_01.save(\"dnn_level_01.h5\")\n",
    "\n",
    "    # Calculate the time taken and print it out\n",
    "    # print(f'Time taken for training: {time_taken} seconds')\n",
    "except: \n",
    "    None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 129,
   "metadata": {},
   "outputs": [],
   "source": [
    "# dnn_01 = load_model(\"dnn_level_01.h5\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 130,
   "metadata": {},
   "outputs": [],
   "source": [
    "#DNN\n",
    "try:\n",
    "    start = time.time()\n",
    "    pred_dnn = dnn_01.predict(X_test_01)\n",
    "    preds_dnn_01 = np.argmax(pred_dnn,axis = 1)\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file = f)\n",
    "except:\n",
    "        with open(output_file_name, \"a\") as f: print('error', file = f)\n",
    "        preds_dnn_01 = 0\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 131,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0  3.0  4.0\n",
      "0.0  6491.0   430.0    44.0  0.0  0.0\n",
      "1.0   634.0  4045.0   135.0  0.0  0.0\n",
      "2.0    10.0   146.0  1107.0  0.0  0.0\n",
      "3.0     6.0   113.0   198.0  0.0  0.0\n",
      "4.0     0.0     4.0     4.0  0.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.8710256602079749\n",
      "Precision total:  0.5013327400465016\n",
      "Recall total:  0.5297375168231198\n",
      "F1 total:  0.5144117798211291\n",
      "BACC total:  0.5297375168231198\n",
      "MCC total:  0.7797001551578328\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n"
     ]
    }
   ],
   "source": [
    "try:\n",
    "    name = 'dnn'\n",
    "    pred_label = preds_dnn_01\n",
    "        \n",
    "    metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "\n",
    "    globals()[f\"{name}_acc_01\"] = Acc\n",
    "    globals()[f\"{name}_pre_01\"] = Precision\n",
    "    globals()[f\"{name}_rec_01\"] = Recall\n",
    "    globals()[f\"{name}_f1_01\"] = F1\n",
    "    globals()[f\"{name}_bacc_01\"] = BACC\n",
    "    globals()[f\"{name}_mcc_01\"] = MCC\n",
    "    end = time.time()\n",
    "    time_taken = end - start_dnn\n",
    "    globals()[f\"{name}_time_01\"] = time_taken\n",
    "\n",
    "except: None    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### SVM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 132,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining SVM Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n"
     ]
    }
   ],
   "source": [
    "#SVM\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining SVM Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "start_svm = time.time()\n",
    "\n",
    "from sklearn.linear_model import SGDClassifier\n",
    "\n",
    "# Instantiate the SGDClassifier with additional hyperparameters\n",
    "clf = SGDClassifier(\n",
    "    loss='hinge',           # hinge loss for linear SVM\n",
    "    penalty='l2',           # L2 regularization to prevent overfitting\n",
    "    alpha=1e-4,             # Learning rate (small value for fine-grained updates)\n",
    "    max_iter=1000,          # Number of passes over the training data\n",
    "    random_state=42,        # Seed for reproducible results\n",
    "    learning_rate='optimal' # Automatically adjusts the learning rate based on the training data\n",
    ")\n",
    "\n",
    "#SVM\n",
    "start = time.time()\n",
    "clf.fit(X_train_01, y_train_01)\n",
    "end = time.time()\n",
    "clf.score(X_train_01, y_train_01)\n",
    "time_taken = end - start\n",
    "with open(output_file_name, \"a\") as f: print('Elapsed training time ', time_taken, file = f)\n",
    "joblib.dump(clf, 'svm_level_01.joblib')\n",
    "\n",
    "\n",
    "clf = loaded_model = joblib.load('svm_level_01.joblib')\n",
    "\n",
    "\n",
    "#SVM\n",
    "start = time.time()\n",
    "preds_svm_01 = clf.predict(X_test_01)\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file = f)\n",
    "print('---------------------------------------------------------------------------------')\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 133,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6651.0   195.0   118.0    1.0  0.0\n",
      "1.0   985.0  3481.0   289.0   56.0  3.0\n",
      "2.0    38.0     4.0  1170.0   49.0  2.0\n",
      "3.0    11.0     1.0   138.0  167.0  0.0\n",
      "4.0     1.0     0.0     0.0    7.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.8580085284656243\n",
      "Precision total:  0.6179301663310224\n",
      "Recall total:  0.626239282788392\n",
      "F1 total:  0.6145383046652212\n",
      "BACC total:  0.626239282788392\n",
      "MCC total:  0.7661257831060257\n"
     ]
    }
   ],
   "source": [
    "\n",
    "pred_label = preds_svm_01\n",
    "name = 'svm'\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "end = time.time()\n",
    "time_taken = end - start_svm\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Random Forest"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 134,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining RF Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training RF\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Prediction RF\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6838.0   105.0    20.0    2.0  0.0\n",
      "1.0   215.0  4515.0    33.0   51.0  0.0\n",
      "2.0    17.0     2.0  1195.0   49.0  0.0\n",
      "3.0    10.0     0.0    14.0  293.0  0.0\n",
      "4.0     0.0     0.0     4.0    4.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9606493603650782\n",
      "Precision total:  0.7241845497379493\n",
      "Recall total:  0.7580211238380259\n",
      "F1 total:  0.7388347834607831\n",
      "BACC total:  0.7580211238380259\n",
      "MCC total:  0.9335368217945694\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n"
     ]
    }
   ],
   "source": [
    "\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining RF Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "start_rf = time.time()\n",
    "\n",
    "#Random Forest\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.multioutput import MultiOutputClassifier\n",
    "rf = RandomForestClassifier(max_depth = 5,  n_estimators = 10, min_samples_split = 2, n_jobs = -1)\n",
    "#------------------------------------------------------------------------------\n",
    "\n",
    "if True == True:\n",
    "\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('Training RF')\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('Training RF', file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    #RF\n",
    "    start = time.time()\n",
    "    model_rf_01 = rf.fit(X_train_01,y_train_01)\n",
    "    end = time.time()\n",
    "\n",
    "    # # Create the StratifiedKFold object\n",
    "    # stratified_kfold = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)\n",
    "    # # Perform cross-validation\n",
    "    # cv_scores = cross_val_score(model_rf_01, X_train_01, y_train, cv=stratified_kfold, scoring='accuracy')\n",
    "    # # Print the cross-validation scores\n",
    "    # print(\"Cross-validation scores:\", cv_scores)\n",
    "    # print(\"Mean accuracy:\", cv_scores.mean())\n",
    "    # with open(output_file_name, \"a\") as f: print('mean accuracy', cv_scores.mean() , file = f)\n",
    "\n",
    "\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed training time ', time_taken, file = f)\n",
    "    joblib.dump(model_rf_01, 'rf_base_model_01.joblib')\n",
    "\n",
    "if 1 == 1:\n",
    "    model_rf_01  = joblib.load('rf_base_model_01.joblib')\n",
    "\n",
    "if 1 == 1:\n",
    "\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('Prediction RF')\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('Prediction RF', file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    #RF\n",
    "    start = time.time()\n",
    "    preds_rf_01 = model_rf_01.predict(X_test_01)\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('-------------------------------------------------------', file = f)\n",
    "pred_label = preds_rf_01\n",
    "name='rf'\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "end = time.time()\n",
    "time_taken = end - start_rf\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### LGBM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 135,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining LGBM Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training LGBM\n",
      "---------------------------------------------------------------------------------\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Prediction LGBM\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6851.0    72.0    14.0   25.0  3.0\n",
      "1.0    84.0  4705.0    11.0    9.0  5.0\n",
      "2.0    37.0    87.0  1126.0    8.0  5.0\n",
      "3.0     2.0     4.0     9.0  300.0  2.0\n",
      "4.0     0.0     2.0     2.0    1.0  3.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9714221590484028\n",
      "Precision total:  0.7917606654736168\n",
      "Recall total:  0.8347781004141283\n",
      "F1 total:  0.804644637180375\n",
      "BACC total:  0.8347781004141283\n",
      "MCC total:  0.9514739319904306\n"
     ]
    }
   ],
   "source": [
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining LGBM Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "#LGBM\n",
    "from lightgbm import LGBMClassifier\n",
    "lgbm = LGBMClassifier()\n",
    "\n",
    "start_lgbm = time.time()\n",
    "\n",
    "\n",
    "if 1 == 1 and 0 == 0:\n",
    "\n",
    "\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('Training LGBM')\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('Training LGBM', file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    start = time.time()\n",
    "    lgbm.fit(X_train_01, y_train_01)\n",
    "    end = time.time()\n",
    "\n",
    "    # # Create the StratifiedKFold object\n",
    "    # stratified_kfold = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)\n",
    "    # # Perform cross-validation\n",
    "    # cv_scores = cross_val_score(lgbm, X_train, y_train, cv=stratified_kfold, scoring='accuracy')\n",
    "    # # Print the cross-validation scores\n",
    "    # print(\"Cross-validation scores:\", cv_scores)\n",
    "    # print(\"Mean accuracy:\", cv_scores.mean())\n",
    "    # with open(output_file_name, \"a\") as f: print('mean accuracy', cv_scores.mean() , file = f)\n",
    "\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed training time ', time_taken, file = f)\n",
    "    joblib.dump(lgbm, 'lgbm_01.joblib')\n",
    "\n",
    "if 1 == 1:\n",
    "    lgbm = joblib.load('lgbm_01.joblib')\n",
    "\n",
    "\n",
    "if 1 == 1:\n",
    "\n",
    "    print('Prediction LGBM')\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('Prediction LGBM', file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    #LGBM\n",
    "    start = time.time()\n",
    "    preds_lgbm_01 = lgbm.predict(X_test_01)\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "\n",
    "    pred_label = preds_lgbm_01\n",
    "    name='lgbm'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "\n",
    "    globals()[f\"{name}_acc_01\"] = Acc\n",
    "    globals()[f\"{name}_pre_01\"] = Precision\n",
    "    globals()[f\"{name}_rec_01\"] = Recall\n",
    "    globals()[f\"{name}_f1_01\"] = F1\n",
    "    globals()[f\"{name}_bacc_01\"] = BACC\n",
    "    globals()[f\"{name}_mcc_01\"] = MCC\n",
    "    end = time.time()\n",
    "    time_taken = end - start_lgbm\n",
    "    globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### MLP"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 136,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining MLP Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training MLP\n",
      "---------------------------------------------------------------------------------\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0       1       2      3    4\n",
      "0  6668.0   263.0    28.0    6.0  0.0\n",
      "1   246.0  4542.0    17.0    9.0  0.0\n",
      "2    15.0    13.0  1192.0   42.0  1.0\n",
      "3     0.0    10.0    12.0  294.0  1.0\n",
      "4     0.0     0.0     0.0    2.0  6.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9502506171915912\n",
      "Precision total:  0.8880638255696018\n",
      "Recall total:  0.904417156964388\n",
      "F1 total:  0.8957246989574692\n",
      "BACC total:  0.904417156964388\n",
      "MCC total:  0.9157457902950608\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n"
     ]
    }
   ],
   "source": [
    "\n",
    "#MLP\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining MLP Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "start_mlp = time.time()\n",
    "\n",
    "\n",
    "from sklearn.neural_network import MLPClassifier\n",
    "import time\n",
    "\n",
    "# create MLPClassifier instance\n",
    "mlp = MLPClassifier(hidden_layer_sizes=(100,), max_iter=200, random_state=1)\n",
    "\n",
    "if 1 == 1 and 0 == 0:\n",
    "\n",
    "\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('Training MLP')\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('Training MLP', file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "\n",
    "    start = time.time()\n",
    "    MLP = mlp.fit(X_train_01, y_train_01)\n",
    "    end = time.time()\n",
    "\n",
    "    # # Create the StratifiedKFold object\n",
    "    # stratified_kfold = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)\n",
    "    # # Perform cross-validation\n",
    "    # cv_scores = cross_val_score(MLP, X_train, y_train, cv=stratified_kfold, scoring='accuracy')\n",
    "    # # Print the cross-validation scores\n",
    "    # print(\"Cross-validation scores:\", cv_scores)\n",
    "    # print(\"Mean accuracy:\", cv_scores.mean())\n",
    "    # with open(output_file_name, \"a\") as f: print('mean accuracy', cv_scores.mean() , file = f)\n",
    "\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed training time ', time_taken, file = f)\n",
    "    joblib.dump(MLP, 'mlp_01.joblib')\n",
    "\n",
    "if 1 == 1:\n",
    "    MLP = joblib.load('mlp_01.joblib')\n",
    "\n",
    "\n",
    "if 1 == 1:\n",
    "\n",
    "    #MLP\n",
    "    start = time.time()\n",
    "    y_pred = MLP.predict_proba(X_test_01)\n",
    "    preds_mlp_01 = np.argmax(y_pred,axis = 1)\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "#MLP\n",
    "if 1 == 1:\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('MLP 01 model', file = f)\n",
    "    pred_label = preds_mlp_01\n",
    "    name='mlp'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "\n",
    "    globals()[f\"{name}_acc_01\"] = Acc\n",
    "    globals()[f\"{name}_pre_01\"] = Precision\n",
    "    globals()[f\"{name}_rec_01\"] = Recall\n",
    "    globals()[f\"{name}_f1_01\"] = F1\n",
    "    globals()[f\"{name}_bacc_01\"] = BACC\n",
    "    globals()[f\"{name}_mcc_01\"] = MCC\n",
    "    end = time.time()\n",
    "    time_taken = end - start_mlp\n",
    "    globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### ADA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 137,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining ADA Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training ADA\n",
      "---------------------------------------------------------------------------------\n",
      "Prediction ADA\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0   1.0   2.0    3.0  4.0\n",
      "0.0  6957.0   4.0   1.0    3.0  0.0\n",
      "1.0  4717.0  52.0  31.0   13.0  1.0\n",
      "2.0  1138.0  30.0  29.0   65.0  1.0\n",
      "3.0   115.0  48.0  27.0  127.0  0.0\n",
      "4.0     0.0   0.0   4.0    0.0  4.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.5363207900052368\n",
      "Precision total:  0.5037393186849342\n",
      "Recall total:  0.38664906923367076\n",
      "F1 total:  0.3637076586815818\n",
      "BACC total:  0.38664906923367076\n",
      "MCC total:  0.14266059206665777\n"
     ]
    }
   ],
   "source": [
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining ADA Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "#ADA\n",
    "# from sklearn.multioutput import MultiOutputClassifier\n",
    "start_ada = time.time()\n",
    "\n",
    "\n",
    "from sklearn.ensemble import AdaBoostClassifier\n",
    "import time\n",
    "abc = AdaBoostClassifier(n_estimators=50, learning_rate=1.0)\n",
    "\n",
    "if 1 == 1 and 0 == 0:\n",
    "\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('Training ADA')\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('Training ADA', file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    #ADA\n",
    "\n",
    "\n",
    "    start = time.time()\n",
    "    ada = abc.fit(X_train_01, y_train_01)\n",
    "    end = time.time()\n",
    "\n",
    "    # # Create the StratifiedKFold object\n",
    "    # stratified_kfold = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)\n",
    "    # # Perform cross-validation\n",
    "    # cv_scores = cross_val_score(ada, X_train, y_train, cv=stratified_kfold, scoring='accuracy')\n",
    "    # # Print the cross-validation scores\n",
    "    # print(\"Cross-validation scores:\", cv_scores)\n",
    "    # print(\"Mean accuracy:\", cv_scores.mean())\n",
    "    # with open(output_file_name, \"a\") as f: print('mean accuracy', cv_scores.mean() , file = f)\n",
    "\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed training time ', time_taken, file = f)\n",
    "\n",
    "    # Assuming 'model' is your trained model\n",
    "    joblib.dump(ada, 'ada_01.joblib')\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "if 1 == 1:\n",
    "    ada = joblib.load('ada_01.joblib')\n",
    "\n",
    "\n",
    "if 1 == 1:\n",
    "\n",
    "    print('Prediction ADA')\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('Prediction ADA', file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    #ADA\n",
    "    start = time.time()\n",
    "    preds_ada_01 = ada.predict(X_test_01)\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "\n",
    "if 1 == 1:\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('ADA 01 model', file = f)\n",
    "\n",
    "\n",
    "    pred_label = preds_ada_01\n",
    "    name='ada'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "\n",
    "    globals()[f\"{name}_acc_01\"] = Acc\n",
    "    globals()[f\"{name}_pre_01\"] = Precision\n",
    "    globals()[f\"{name}_rec_01\"] = Recall\n",
    "    globals()[f\"{name}_f1_01\"] = F1\n",
    "    globals()[f\"{name}_bacc_01\"] = BACC\n",
    "    globals()[f\"{name}_mcc_01\"] = MCC\n",
    "    end = time.time()\n",
    "    time_taken = end - start_ada\n",
    "    globals()[f\"{name}_time_01\"] = time_taken\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### KNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 138,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining KNN Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training KNN\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6797.0   135.0    25.0    8.0  0.0\n",
      "1.0   130.0  4662.0    14.0    8.0  0.0\n",
      "2.0    10.0    17.0  1210.0   26.0  0.0\n",
      "3.0     2.0     5.0    17.0  293.0  0.0\n",
      "4.0     0.0     0.0     2.0    6.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9697015037031496\n",
      "Precision total:  0.7520905589222886\n",
      "Recall total:  0.7653262929731459\n",
      "F1 total:  0.75846966950664\n",
      "BACC total:  0.7653262929731459\n",
      "MCC total:  0.9486629111010035\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n"
     ]
    }
   ],
   "source": [
    "#KNN\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining KNN Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "start_knn = time.time()\n",
    "\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "knn_clf_01=KNeighborsClassifier(n_neighbors = 5)\n",
    "\n",
    "if 1 == 1 and 0 == 0:\n",
    "\n",
    "    #KNN\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('Training KNN')\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('Training KNN', file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    start = time.time()\n",
    "    knn_clf_01.fit(X_train_01,y_train_01)\n",
    "    end = time.time()\n",
    "\n",
    "\n",
    "    # # Create the StratifiedKFold object\n",
    "    # stratified_kfold = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)\n",
    "    # # Perform cross-validation\n",
    "    # cv_scores = cross_val_score(knn_clf, X_train, y_train, cv=stratified_kfold, scoring='accuracy')\n",
    "    # # Print the cross-validation scores\n",
    "    # print(\"Cross-validation scores:\", cv_scores)\n",
    "    # print(\"Mean accuracy:\", cv_scores.mean())\n",
    "    # with open(output_file_name, \"a\") as f: print('mean accuracy', cv_scores.mean() , file = f)\n",
    "\n",
    "\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed training time ', time_taken, file = f)\n",
    "    joblib.dump(knn_clf_01, 'knn_01.joblib')\n",
    "\n",
    "\n",
    "if load_model_knn == 1:\n",
    "    knn_clf_01 = joblib.load('knn_01.joblib')\n",
    "\n",
    "if use_model_knn == 1:\n",
    "\n",
    "    #KNN\n",
    "    start = time.time()\n",
    "    preds_knn =knn_clf_01.predict(X_test_01)\n",
    "    preds_knn\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "if 1 == 1:\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('KNN 01 model', file = f)\n",
    "\n",
    "    pred_label = preds_knn\n",
    "    name='knn'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "\n",
    "    globals()[f\"{name}_acc_01\"] = Acc\n",
    "    globals()[f\"{name}_pre_01\"] = Precision\n",
    "    globals()[f\"{name}_rec_01\"] = Recall\n",
    "    globals()[f\"{name}_f1_01\"] = F1\n",
    "    globals()[f\"{name}_bacc_01\"] = BACC\n",
    "    globals()[f\"{name}_mcc_01\"] = MCC\n",
    "\n",
    "    end = time.time()\n",
    "    time_taken = end - start_knn\n",
    "    globals()[f\"{name}_time_01\"] = time_taken\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Log Regression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 139,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining Logistic Regression Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training LR \n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6600.0   298.0    64.0    3.0  0.0\n",
      "1.0   671.0  3970.0   111.0   62.0  0.0\n",
      "2.0    16.0    82.0  1109.0   55.0  1.0\n",
      "3.0     2.0     2.0    87.0  226.0  0.0\n",
      "4.0     1.0     0.0     0.0    7.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.8906261689234682\n",
      "Precision total:  0.653339860398001\n",
      "Recall total:  0.6726549973343975\n",
      "F1 total:  0.6617855727586975\n",
      "BACC total:  0.6726549973343975\n",
      "MCC total:  0.8153640228952933\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n"
     ]
    }
   ],
   "source": [
    "from sklearn.linear_model import LogisticRegression\n",
    "\n",
    "#Logistic Regression\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining Logistic Regression Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "logreg_01 = LogisticRegression()\n",
    "start_lr = time.time()\n",
    "\n",
    "if 1 == 1 and 0 == 0:\n",
    "\n",
    "    #KNN\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('Training LR ')\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('Training LR', file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    start = time.time()\n",
    "    logreg_01.fit(X_train_01,y_train_01)\n",
    "    end = time.time()\n",
    "\n",
    "\n",
    "    # # Create the StratifiedKFold object\n",
    "    # stratified_kfold = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)\n",
    "    # # Perform cross-validation\n",
    "    # cv_scores = cross_val_score(knn_clf, X_train, y_train, cv=stratified_kfold, scoring='accuracy')\n",
    "    # # Print the cross-validation scores\n",
    "    # print(\"Cross-validation scores:\", cv_scores)\n",
    "    # print(\"Mean accuracy:\", cv_scores.mean())\n",
    "    # with open(output_file_name, \"a\") as f: print('mean accuracy', cv_scores.mean() , file = f)\n",
    "\n",
    "\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed training time ', time_taken, file = f)\n",
    "    joblib.dump(logreg_01, 'logreg_01.joblib')\n",
    "\n",
    "\n",
    "if 1 == 1:\n",
    "    logreg_01 = joblib.load('logreg_01.joblib')\n",
    "\n",
    "if 1 == 1:\n",
    "\n",
    "    #lR\n",
    "    start = time.time()\n",
    "    preds_logreg =logreg_01.predict(X_test_01)\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "#LR\n",
    "if 1 == 1:\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('LR 01 model', file = f)\n",
    "\n",
    "    pred_label = preds_logreg\n",
    "    # pred_label = label[ypred]\n",
    "    name='lr'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "\n",
    "    globals()[f\"{name}_acc_01\"] = Acc\n",
    "    globals()[f\"{name}_pre_01\"] = Precision\n",
    "    globals()[f\"{name}_rec_01\"] = Recall\n",
    "    globals()[f\"{name}_f1_01\"] = F1\n",
    "    globals()[f\"{name}_bacc_01\"] = BACC\n",
    "    globals()[f\"{name}_mcc_01\"] = MCC\n",
    "    end = time.time()\n",
    "    time_taken = end - start_lr\n",
    "    globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Catboost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 140,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0:\tlearn: 1.2864998\ttest: 1.2880007\tbest: 1.2880007 (0)\ttotal: 10.1ms\tremaining: 996ms\n",
      "10:\tlearn: 0.4143431\ttest: 0.4179287\tbest: 0.4179287 (10)\ttotal: 83.5ms\tremaining: 676ms\n",
      "20:\tlearn: 0.2074951\ttest: 0.2110461\tbest: 0.2110461 (20)\ttotal: 136ms\tremaining: 513ms\n",
      "30:\tlearn: 0.1281895\ttest: 0.1320760\tbest: 0.1320760 (30)\ttotal: 192ms\tremaining: 427ms\n",
      "40:\tlearn: 0.0929379\ttest: 0.0970862\tbest: 0.0970862 (40)\ttotal: 242ms\tremaining: 349ms\n",
      "50:\tlearn: 0.0760629\ttest: 0.0808997\tbest: 0.0808997 (50)\ttotal: 292ms\tremaining: 280ms\n",
      "60:\tlearn: 0.0651202\ttest: 0.0701830\tbest: 0.0701830 (60)\ttotal: 341ms\tremaining: 218ms\n",
      "70:\tlearn: 0.0565540\ttest: 0.0619242\tbest: 0.0619242 (70)\ttotal: 391ms\tremaining: 160ms\n",
      "80:\tlearn: 0.0498913\ttest: 0.0552424\tbest: 0.0552424 (80)\ttotal: 442ms\tremaining: 104ms\n",
      "90:\tlearn: 0.0451576\ttest: 0.0508352\tbest: 0.0508352 (90)\ttotal: 491ms\tremaining: 48.6ms\n",
      "99:\tlearn: 0.0413990\ttest: 0.0471631\tbest: 0.0471631 (99)\ttotal: 535ms\tremaining: 0us\n",
      "\n",
      "bestTest = 0.04716311536\n",
      "bestIteration = 99\n",
      "\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6922.0    30.0    12.0    1.0  0.0\n",
      "1.0    61.0  4740.0    11.0    2.0  0.0\n",
      "2.0     0.0     4.0  1248.0   11.0  0.0\n",
      "3.0     0.0     0.0     6.0  311.0  0.0\n",
      "4.0     0.0     0.0     2.0    2.0  4.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9893768235206104\n",
      "Precision total:  0.9821950479701111\n",
      "Recall total:  0.8895301025433101\n",
      "F1 total:  0.919137768468096\n",
      "BACC total:  0.8895301025433101\n",
      "MCC total:  0.9819916806065055\n"
     ]
    }
   ],
   "source": [
    "import catboost\n",
    "start = time.time()\n",
    "\n",
    "cat_01 = catboost.CatBoostClassifier(iterations=100, depth=6, learning_rate=0.1, loss_function='MultiClass', custom_metric='Accuracy')\n",
    "\n",
    "# Fit the model\n",
    "cat_01.fit(X_train_01, y_train_01, eval_set=(X_test_01, y_test_01), verbose=10)\n",
    "\n",
    "# Make predictions on the test set\n",
    "preds_cat = cat_01.predict(X_test_01)\n",
    "preds_cat = np.squeeze(preds_cat)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('catboost', file = f)\n",
    "\n",
    "\n",
    "pred_label = preds_cat\n",
    "name='cat'\n",
    "metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "globals()[f\"{name}_acc_01\"] = Acc\n",
    "globals()[f\"{name}_pre_01\"] = Precision\n",
    "globals()[f\"{name}_rec_01\"] = Recall\n",
    "globals()[f\"{name}_f1_01\"] = F1\n",
    "globals()[f\"{name}_bacc_01\"] = BACC\n",
    "globals()[f\"{name}_mcc_01\"] = MCC\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### XGB"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 141,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "        0.0     1.0     2.0    3.0  4.0\n",
      "0.0  6920.0    32.0    10.0    3.0  0.0\n",
      "1.0    58.0  4734.0    16.0    6.0  0.0\n",
      "2.0     1.0     6.0  1251.0    5.0  0.0\n",
      "3.0     0.0     1.0     6.0  310.0  0.0\n",
      "4.0     0.0     0.0     0.0    0.0  8.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9892272013166754\n",
      "Precision total:  0.9830447370622423\n",
      "Recall total:  0.9890675441381713\n",
      "F1 total:  0.986017126276327\n",
      "BACC total:  0.9890675441381713\n",
      "MCC total:  0.9817431178007439\n"
     ]
    }
   ],
   "source": [
    "\n",
    "import xgboost as xgb\n",
    "start = time.time()\n",
    "\n",
    "# Create a DMatrix for XGBoost\n",
    "dtrain = xgb.DMatrix(X_train_01, label=y_train_01)\n",
    "dtest = xgb.DMatrix(X_test_01, label=y_test_01)\n",
    "\n",
    "# Set XGBoost parameters\n",
    "params = {\n",
    "    'objective': 'multi:softmax',  # for multi-class classification\n",
    "    'num_class': 5,  # specify the number of classes\n",
    "    'max_depth': 3,\n",
    "    'learning_rate': 0.1,\n",
    "    'eval_metric': 'mlogloss'  # metric for multi-class classification\n",
    "}\n",
    "\n",
    "# Train the XGBoost model\n",
    "num_round = 100\n",
    "xgb_01 = xgb.train(params, dtrain, num_round)\n",
    "\n",
    "# Make predictions on the test set\n",
    "preds_xgb_01 = xgb_01.predict(dtest)\n",
    "\n",
    "\n",
    "if 1 == 1:\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('xgboost base model', file = f)\n",
    "\n",
    "    pred_label = preds_xgb_01\n",
    "    name='xgb'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test_01)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "\n",
    "    globals()[f\"{name}_acc_01\"] = Acc\n",
    "    globals()[f\"{name}_pre_01\"] = Precision\n",
    "    globals()[f\"{name}_rec_01\"] = Recall\n",
    "    globals()[f\"{name}_f1_01\"] = F1\n",
    "    globals()[f\"{name}_bacc_01\"] = BACC\n",
    "    globals()[f\"{name}_mcc_01\"] = MCC\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    globals()[f\"{name}_time_01\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    " ### Generating Summary Metric Table"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 142,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-------------+----------+----------+----------+----------+\n",
      "| Models      |   ACC-01 |   PRE-01 |   REC-01 |    F1-01 |\n",
      "+=============+==========+==========+==========+==========+\n",
      "| XGB         | 0.989227 | 0.983045 | 0.989068 | 0.986017 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| Bag_DT      | 0.990424 | 0.959331 | 0.938019 | 0.94796  |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| Bag_lgbm    | 0.992669 | 0.989975 | 0.914633 | 0.943642 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| CAT         | 0.989377 | 0.982195 | 0.88953  | 0.919138 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| MLP         | 0.950251 | 0.888064 | 0.904417 | 0.895725 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| Bag_cat     | 0.989078 | 0.980595 | 0.864515 | 0.894048 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| Bag_mlp     | 0.949503 | 0.894255 | 0.847848 | 0.863505 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| Bag_comb    | 0.964165 | 0.971152 | 0.820679 | 0.850373 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| LGBM        | 0.971422 | 0.791761 | 0.834778 | 0.804645 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| Bag_knn     | 0.969851 | 0.74979  | 0.7689   | 0.758852 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| KNN         | 0.969702 | 0.752091 | 0.765326 | 0.75847  |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| Bag_rf      | 0.967981 | 0.735793 | 0.771289 | 0.751654 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| RF          | 0.960649 | 0.724185 | 0.758021 | 0.738835 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| LR          | 0.890626 | 0.65334  | 0.672655 | 0.661786 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| Bag_LR      | 0.890402 | 0.65375  | 0.672334 | 0.66151  |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| Bag_svm     | 0.858457 | 0.616948 | 0.636466 | 0.620538 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| SVM         | 0.858009 | 0.61793  | 0.626239 | 0.614538 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| DNN         | 0.871026 | 0.501333 | 0.529738 | 0.514412 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| ADA         | 0.536321 | 0.503739 | 0.386649 | 0.363708 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| Bag_ada     | 0.368669 | 0.492972 | 0.336767 | 0.298932 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| avg         | 0.362385 | 0.141417 | 0.20072  | 0.108682 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| weighed_avg | 0.361188 | 0.138939 | 0.200222 | 0.107684 |\n",
      "+-------------+----------+----------+----------+----------+\n",
      "| VOTING      | 0        | 0        | 0        | 0        |\n",
      "+-------------+----------+----------+----------+----------+\n"
     ]
    }
   ],
   "source": [
    "from tabulate import tabulate\n",
    "\n",
    "# Assuming data is a 110x4 list, where each row is a sublist\n",
    "# data =  [[\"Row {} Col {}\".format(i + 1, j + 1) for j in range(4)] for i in range(110)]\n",
    "names_models = ['ADA',\n",
    "                'SVM',\n",
    "                'DNN',\n",
    "                'MLP',\n",
    "                'KNN',\n",
    "                'CAT',\n",
    "                'XGB',\n",
    "                'LGBM',\n",
    "                'RF',\n",
    "                'LR',\n",
    "                'VOTING',\n",
    "                'Bag_svm',\n",
    "                'Bag_knn',\n",
    "                'Bag_DT',\n",
    "                'Bag_LR',\n",
    "                'Bag_mlp',\n",
    "\n",
    "                'Bag_rf',\n",
    "                'Bag_ada',\n",
    "                'Bag_lgbm',\n",
    "                # 'Bag_xgb',\n",
    "                'Bag_cat',\n",
    "                'Bag_comb',\n",
    "                'avg',\n",
    "                'weighed_avg'\n",
    "                ]\n",
    "\n",
    "data = [[\"\" for _ in range(5)] for _ in range(len(names_models))]\n",
    "\n",
    "level_01_acc = [\n",
    "                ada_acc_01,\n",
    "                svm_acc_01,\n",
    "                dnn_acc_01,\n",
    "                mlp_acc_01,\n",
    "                knn_acc_01,\n",
    "                cat_acc_01,\n",
    "                xgb_acc_01,\n",
    "                lgbm_acc_01,\n",
    "                rf_acc_01,\n",
    "                lr_acc_01,\n",
    "                voting_acc_01,\n",
    "                bag_svm_acc_01,\n",
    "                bag_knn_acc_01,\n",
    "                bag_dt_acc_01,\n",
    "                bag_lr_acc_01,\n",
    "                bag_mlp_acc_01,\n",
    "\n",
    "                bag_rf_acc_01,\n",
    "                bag_ada_acc_01,\n",
    "                bag_lgbm_acc_01,\n",
    "                # bag_xgb_acc_01,\n",
    "                bag_cat_acc_01,\n",
    "                bag_comb_acc_01,\n",
    "\n",
    "                avg_acc_01,\n",
    "                weighed_avg_acc_01\n",
    "                ]  \n",
    "\n",
    "\n",
    "level_01_pre = [\n",
    "                ada_pre_01,\n",
    "                svm_pre_01,\n",
    "                dnn_pre_01,\n",
    "                mlp_pre_01,\n",
    "                knn_pre_01,\n",
    "                cat_pre_01,\n",
    "                xgb_pre_01,\n",
    "                lgbm_pre_01,\n",
    "                rf_pre_01,\n",
    "                lr_pre_01,\n",
    "                voting_pre_01,\n",
    "                bag_svm_pre_01,\n",
    "                bag_knn_pre_01,\n",
    "                bag_dt_pre_01,\n",
    "                bag_lr_pre_01,\n",
    "                bag_mlp_pre_01,\n",
    "\n",
    "                bag_rf_pre_01,\n",
    "                bag_ada_pre_01,\n",
    "                bag_lgbm_pre_01,\n",
    "                # bag_xgb_pre_01,\n",
    "                bag_cat_pre_01,\n",
    "                bag_comb_pre_01,\n",
    "\n",
    "                avg_pre_01,\n",
    "                weighed_avg_pre_01\n",
    "                ]  \n",
    "\n",
    "level_01_rec = [\n",
    "                ada_rec_01,\n",
    "                svm_rec_01,\n",
    "                dnn_rec_01,\n",
    "                mlp_rec_01,\n",
    "                knn_rec_01,\n",
    "                cat_rec_01,\n",
    "                xgb_rec_01,\n",
    "                lgbm_rec_01,\n",
    "                rf_rec_01,\n",
    "                lr_rec_01,\n",
    "                voting_rec_01,\n",
    "                bag_svm_rec_01,\n",
    "                bag_knn_rec_01,\n",
    "                bag_dt_rec_01,\n",
    "                bag_lr_rec_01,\n",
    "                bag_mlp_rec_01,\n",
    "\n",
    "                bag_rf_rec_01,\n",
    "                bag_ada_rec_01,\n",
    "                bag_lgbm_rec_01,\n",
    "                # bag_xgb_rec_01,\n",
    "                bag_cat_rec_01,\n",
    "                bag_comb_rec_01,\n",
    "\n",
    "                avg_rec_01,\n",
    "                weighed_avg_rec_01\n",
    "                ]  \n",
    "\n",
    "level_01_f1 = [\n",
    "                ada_f1_01,\n",
    "                svm_f1_01,\n",
    "                dnn_f1_01,\n",
    "                mlp_f1_01,\n",
    "                knn_f1_01,\n",
    "                cat_f1_01,\n",
    "                xgb_f1_01,\n",
    "                lgbm_f1_01,\n",
    "                rf_f1_01,\n",
    "                lr_f1_01,\n",
    "                voting_f1_01,\n",
    "                bag_svm_f1_01,\n",
    "                bag_knn_f1_01,\n",
    "                bag_dt_f1_01,\n",
    "                bag_lr_f1_01,\n",
    "                bag_mlp_f1_01,\n",
    "\n",
    "                bag_rf_f1_01,\n",
    "                bag_ada_f1_01,\n",
    "                bag_lgbm_f1_01,\n",
    "                # bag_xgb_f1_01,\n",
    "                bag_cat_f1_01,\n",
    "                bag_comb_f1_01,\n",
    "\n",
    "                avg_f1_01,\n",
    "                weighed_avg_f1_01\n",
    "                ]  \n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# Combine data into a list of tuples for sorting\n",
    "model_data = list(zip(names_models, level_01_acc, level_01_pre, level_01_rec, level_01_f1))\n",
    "\n",
    "# Sort by F1-01 score in descending order\n",
    "model_data_sorted = sorted(model_data, key=lambda x: x[4], reverse=True)\n",
    "\n",
    "# Separate the sorted data back into individual lists\n",
    "sorted_names_models, sorted_level_01_acc, sorted_level_01_pre, sorted_level_01_rec, sorted_level_01_f1 = zip(*model_data_sorted)\n",
    "\n",
    "# Assign the sorted data to the table\n",
    "for i in range(len(sorted_names_models)):\n",
    "    data[i][0] = sorted_names_models[i]\n",
    "    data[i][1] = sorted_level_01_acc[i]\n",
    "    data[i][2] = sorted_level_01_pre[i] \n",
    "    data[i][3] = sorted_level_01_rec[i] \n",
    "    data[i][4] = sorted_level_01_f1[i]\n",
    "\n",
    "# Define column headers\n",
    "headers = [\"Models\", \"ACC-01\", \"PRE-01\", \"REC-01\", \"F1-01\"]\n",
    "\n",
    "# Print the table\n",
    "table = tabulate(data, headers=headers, tablefmt=\"grid\")\n",
    "with open(output_file_name, \"a\") as f: print('Summary table', file = f)\n",
    "if pick_prob == 1: \n",
    "    with open(output_file_name, \"a\") as f: print('Level 01 - Probabilities', file = f)\n",
    "else:\n",
    "    with open(output_file_name, \"a\") as f: print('Level 01 - CLASSES', file = f)\n",
    "if feature_selection_bit == 1: \n",
    "    with open(output_file_name, \"a\") as f: print('Feature Selection was applied', file = f)\n",
    "else:\n",
    "    with open(output_file_name, \"a\") as f: print('All features were used', file = f)\n",
    "\n",
    "\n",
    "    \n",
    "print(table)\n",
    "with open(output_file_name, \"a\") as f: print(table, file = f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 143,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-------------+----------------+\n",
      "| Models      |   time-01(sec) |\n",
      "+=============+================+\n",
      "| weighed_avg |      0.0699325 |\n",
      "+-------------+----------------+\n",
      "| avg         |      0.086875  |\n",
      "+-------------+----------------+\n",
      "| SVM         |      0.276331  |\n",
      "+-------------+----------------+\n",
      "| RF          |      0.298863  |\n",
      "+-------------+----------------+\n",
      "| CAT         |      0.692509  |\n",
      "+-------------+----------------+\n",
      "| LR          |      1.08519   |\n",
      "+-------------+----------------+\n",
      "| Bag_DT      |      1.29227   |\n",
      "+-------------+----------------+\n",
      "| ADA         |      1.35954   |\n",
      "+-------------+----------------+\n",
      "| KNN         |      1.85234   |\n",
      "+-------------+----------------+\n",
      "| Bag_svm     |      2.35323   |\n",
      "+-------------+----------------+\n",
      "| Bag_rf      |      4.06792   |\n",
      "+-------------+----------------+\n",
      "| Bag_cat     |      7.26663   |\n",
      "+-------------+----------------+\n",
      "| Bag_ada     |      9.51046   |\n",
      "+-------------+----------------+\n",
      "| Bag_LR      |     12.7032    |\n",
      "+-------------+----------------+\n",
      "| Bag_knn     |     20.1781    |\n",
      "+-------------+----------------+\n",
      "| DNN         |     24.8225    |\n",
      "+-------------+----------------+\n",
      "| MLP         |     31.7943    |\n",
      "+-------------+----------------+\n",
      "| XGB         |     57.8082    |\n",
      "+-------------+----------------+\n",
      "| Bag_mlp     |    309.19      |\n",
      "+-------------+----------------+\n",
      "| LGBM        |    540.586     |\n",
      "+-------------+----------------+\n",
      "| Bag_comb    |    620.17      |\n",
      "+-------------+----------------+\n",
      "| Bag_lgbm    |   5100.11      |\n",
      "+-------------+----------------+\n",
      "| VOTING      |   9999         |\n",
      "+-------------+----------------+\n"
     ]
    }
   ],
   "source": [
    "# implement time table\n",
    "from tabulate import tabulate\n",
    "\n",
    "names_models = ['ADA',\n",
    "                'SVM',\n",
    "                'DNN',\n",
    "                'MLP',\n",
    "                'KNN',\n",
    "                'CAT',\n",
    "                'XGB',\n",
    "                'LGBM',\n",
    "                'RF',\n",
    "                'LR',\n",
    "                'VOTING',\n",
    "                'Bag_svm',\n",
    "                'Bag_knn',\n",
    "                'Bag_DT',\n",
    "                'Bag_LR',\n",
    "                'Bag_mlp',\n",
    "\n",
    "                'Bag_rf',\n",
    "                'Bag_ada',\n",
    "                'Bag_lgbm',\n",
    "                # 'Bag_xgb',\n",
    "                'Bag_cat',\n",
    "                'Bag_comb',\n",
    "                'avg',\n",
    "                'weighed_avg'\n",
    "                ]\n",
    "\n",
    "data = [[\"\" for _ in range(2)] for _ in range(len(names_models))]\n",
    "\n",
    "level_01_time = [\n",
    "                ada_time_01,\n",
    "                svm_time_01,\n",
    "                dnn_time_01,\n",
    "                mlp_time_01,\n",
    "                knn_time_01,\n",
    "                cat_time_01,\n",
    "                xgb_time_01,\n",
    "                lgbm_time_01,\n",
    "                rf_time_01,\n",
    "                lr_time_01,\n",
    "                voting_time_01,\n",
    "                bag_svm_time_01,\n",
    "                bag_knn_time_01,\n",
    "                bag_dt_time_01,\n",
    "                bag_lr_time_01,\n",
    "                bag_mlp_time_01,\n",
    "\n",
    "                bag_rf_time_01,\n",
    "                bag_ada_time_01,\n",
    "                bag_lgbm_time_01,\n",
    "                # bag_xgb_time_01,\n",
    "                bag_cat_time_01,\n",
    "                bag_comb_time_01,\n",
    "\n",
    "                avg_time_01,\n",
    "                weighed_avg_time_01\n",
    "                ]  \n",
    "\n",
    "\n",
    "# Combine data into a list of tuples for sorting\n",
    "model_data = list(zip(names_models, level_01_time))\n",
    "\n",
    "# Sort by F1-01 score in descending order\n",
    "model_data_sorted = sorted(model_data, key=lambda x: x[1], reverse=False)\n",
    "\n",
    "# Separate the sorted data back into individual lists\n",
    "sorted_names_models, sorted_level_01_time = zip(*model_data_sorted)\n",
    "\n",
    "# Assign the sorted data to the table\n",
    "for i in range(len(sorted_names_models)):\n",
    "    data[i][0] = sorted_names_models[i]\n",
    "    data[i][1] = sorted_level_01_time[i]\n",
    "\n",
    "# Define column headers\n",
    "headers = [\"Models\", \"time-01(sec)\"]\n",
    "\n",
    "\n",
    "# Print the table\n",
    "table = tabulate(data, headers=headers, tablefmt=\"grid\")\n",
    "with open(output_file_name, \"a\") as f: print('Time is counted is seconds', file = f)\n",
    "print(table)\n",
    "with open(output_file_name, \"a\") as f: print(table, file = f)\n",
    "end_program = time.time()\n",
    "time_program = end_program - start_program\n",
    "with open(output_file_name, \"a\") as f: print('Running time of entire program is:', time_program ,' seconds',file = f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# ------------------------------------------------------------------"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Feature Selection"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 144,
   "metadata": {},
   "outputs": [],
   "source": [
    "if generate_feature_importance == 1:\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('Generating SHAP explanation')\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('')\n",
    "  with open(output_file_name, \"a\") as f:print('ADA FEATURE IMPORTANCE',file = f)\n",
    "\n",
    "      #START TIMER MODEL\n",
    "  start = time.time()\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('Generating explainer')\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('')\n",
    "  test = X_test_01\n",
    "  train = X_train_01\n",
    "  # ## Summary Bar Plot Global\n",
    "  start_index = 0\n",
    "  end_index = 250\n",
    "  # test.pop('Label')\n",
    "  # test.pop('is_train')\n",
    "  # print(label2)\n",
    "\n",
    "\n",
    "  # models = [ada,dnn_01,clf,knn_clf_01,cat_01,xgb_01, rf, lgbm, mlp,logreg_01]\n",
    "  explainer = shap.KernelExplainer(ada.predict_proba, test[start_index:end_index])\n",
    "\n",
    "  shap_values = explainer.shap_values(test[start_index:end_index])\n",
    "\n",
    "  shap.summary_plot(shap_values = shap_values,\n",
    "                    features = test[start_index:end_index],\n",
    "                    # class_names=[column_features[:-1]],\n",
    "                    show=False)\n",
    "\n",
    "  # if feature_selection_bit == 1 # On\n",
    "  # pick_prob = 0 # set equal one to choose the dataset with probabilities, set to 0 to choose one with the classes.\n",
    "  if pick_prob == 1:\n",
    "    plt.savefig('ADA_SHAP_NSL_prob_01.png')\n",
    "  elif pick_prob == 0:\n",
    "    plt.savefig('ADA_SHAP_NSL_class_01.png')\n",
    "        \n",
    "  else: None\n",
    "  plt.clf()\n",
    "\n",
    "\n",
    "  vals= np.abs(shap_values).mean(1)\n",
    "  feature_importance = pd.DataFrame(list(zip(train.columns, sum(vals))), columns=['col_name','feature_importance_vals'])\n",
    "  feature_importance.sort_values(by=['feature_importance_vals'], ascending=False,inplace=True)\n",
    "  feature_importance.head()\n",
    "  print(feature_importance.to_string())\n",
    "\n",
    "  with open(output_file_name, \"a\") as f:print('Feature Importance: ',feature_importance.to_string(),file = f)\n",
    "\n",
    "\n",
    "\n",
    "  end = time.time()\n",
    "  with open(output_file_name, \"a\") as f:print('ELAPSE TIME LIME GLOBAL: ',(end - start)/60, 'min',file = f)\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  # feature_importance_vals = 'feature_importance_vals'  # Replace with the name of the column you want to extract\n",
    "  feature_val = feature_importance['feature_importance_vals'].tolist()\n",
    "\n",
    "  # col_name = 'col_name'  # Replace with the name of the column you want to extract\n",
    "  feature_name = feature_importance['col_name'].tolist()\n",
    "\n",
    "\n",
    "  # for item1, item2 in zip(feature_name, feature_val):\n",
    "  #     print(item1, item2)\n",
    "\n",
    "\n",
    "  # Use zip to combine the two lists, sort based on list1, and then unzip them\n",
    "  zipped_lists = list(zip(feature_name, feature_val))\n",
    "  zipped_lists.sort(key=lambda x: x[1],reverse=True)\n",
    "\n",
    "  # Convert the sorted result back into separate lists\n",
    "  sorted_list1, sorted_list2 = [list(x) for x in zip(*zipped_lists)]\n",
    "\n",
    "  # for k in sorted_list1:\n",
    "  #   with open(output_file_name, \"a\") as f: print(\"df.pop('\",k,\"')\", sep='', file = f)\n",
    "\n",
    "  # with open(output_file_name, \"a\") as f:print(\"Trial_ =[\", file = f)\n",
    "  # for k in sorted_list1:\n",
    "  #   with open(output_file_name, \"a\") as f:print(\"'\",k,\"',\", sep='', file = f)\n",
    "  # with open(output_file_name, \"a\") as f:print(\"]\", file = f)\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 145,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# explainer = shap.TreeExplainer(model)\n",
    "# start_index = 0\n",
    "# end_index = samples\n",
    "# shap_values = explainer.shap_values(test[start_index:end_index])\n",
    "# shap_obj = explainer(test[start_index:end_index])\n",
    "# shap.summary_plot(shap_values = shap_values,\n",
    "#                   features = test[start_index:end_index],\n",
    "#                 show=False)\n",
    "# plt.savefig('Light_SHAP_CIC_Summary.png')\n",
    "# plt.clf()\n",
    "\n",
    "\n",
    "# vals= np.abs(shap_values).mean(1)\n",
    "# feature_importance = pd.DataFrame(list(zip(train.columns, sum(vals))), columns=['col_name','feature_importance_vals'])\n",
    "# feature_importance.sort_values(by=['feature_importance_vals'], ascending=False,inplace=True)\n",
    "# feature_importance.head()\n",
    "# print(feature_importance.to_string())\n",
    "# print('---------------------------------------------------------------------------------')\n",
    "# # feature_importance_vals = 'feature_importance_vals'  # Replace with the name of the column you want to extract\n",
    "# feature_val = feature_importance['feature_importance_vals'].tolist()\n",
    "\n",
    "# # col_name = 'col_name'  # Replace with the name of the column you want to extract\n",
    "# feature_name = feature_importance['col_name'].tolist()\n",
    "\n",
    "\n",
    "# # for item1, item2 in zip(feature_name, feature_val):\n",
    "# #     print(item1, item2)\n",
    "\n",
    "\n",
    "# # Use zip to combine the two lists, sort based on list1, and then unzip them\n",
    "# zipped_lists = list(zip(feature_name, feature_val))\n",
    "# zipped_lists.sort(key=lambda x: x[1],reverse=True)\n",
    "\n",
    "# # Convert the sorted result back into separate lists\n",
    "# sorted_list1, sorted_list2 = [list(x) for x in zip(*zipped_lists)]\n",
    "\n",
    "# for k in sorted_list1:\n",
    "#   with open(output_file_name, \"a\") as f:print(\"df.pop('\",k,\"')\", sep='', file = f)\n",
    "\n",
    "# # with open(output_file_name, \"a\") as f:print(\"Trial_ =[\", file = f)\n",
    "# for k in sorted_list1:\n",
    "#   with open(output_file_name, \"a\") as f:print(\"'\",k,\"',\", sep='', file = f)\n",
    "# with open(output_file_name, \"a\") as f:print(\"]\", file = f)\n",
    "# print('---------------------------------------------------------------------------------')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 146,
   "metadata": {},
   "outputs": [],
   "source": [
    "if generate_feature_importance == 1:\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('Generating SHAP explanation')\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('')\n",
    "\n",
    "  with open(output_file_name, \"a\") as f:print('XGB FEATURE IMPORTANCE',file = f)\n",
    "\n",
    "      #START TIMER MODEL\n",
    "  start = time.time()\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('Generating explainer')\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('')\n",
    "  test = X_test_01\n",
    "  train = X_train_01\n",
    "  # ## Summary Bar Plot Global\n",
    "  start_index = 0\n",
    "  end_index = 250\n",
    "  # test.pop('Label')\n",
    "  # test.pop('is_train')\n",
    "  # print(label2)\n",
    "\n",
    "\n",
    "  explainer = shap.TreeExplainer(xgb_01)\n",
    "\n",
    "  shap_values = explainer.shap_values(test[start_index:end_index])\n",
    "  shap_obj = explainer(test[start_index:end_index])\n",
    "  shap.summary_plot(shap_values = shap_values,\n",
    "                    features = test[start_index:end_index],\n",
    "                  show=False)\n",
    "  # plt.clf()\n",
    "\n",
    "  # if feature_selection_bit == 1 # On\n",
    "  # pick_prob = 0 # set equal one to choose the dataset with probabilities, set to 0 to choose one with the classes.\n",
    "  if pick_prob == 1:\n",
    "    plt.savefig('XGB_SHAP_NSL_prob_01.png')\n",
    "  elif pick_prob == 0:\n",
    "    plt.savefig('XGB_SHAP_NSL_class_01.png')\n",
    "\n",
    "  else: None\n",
    "  plt.clf()\n",
    "\n",
    "\n",
    "  vals= np.abs(shap_values).mean(1)\n",
    "  feature_importance = pd.DataFrame(list(zip(train.columns, sum(vals))), columns=['col_name','feature_importance_vals'])\n",
    "  feature_importance.sort_values(by=['feature_importance_vals'], ascending=False,inplace=True)\n",
    "  feature_importance.head()\n",
    "  print(feature_importance.to_string())\n",
    "  with open(output_file_name, \"a\") as f:print('Feature Importance: ',feature_importance.to_string(),file = f)\n",
    "\n",
    "\n",
    "\n",
    "  end = time.time()\n",
    "  with open(output_file_name, \"a\") as f:print('ELAPSE TIME LIME GLOBAL: ',(end - start)/60, 'min',file = f)\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  # feature_importance_vals = 'feature_importance_vals'  # Replace with the name of the column you want to extract\n",
    "  feature_val = feature_importance['feature_importance_vals'].tolist()\n",
    "\n",
    "  # col_name = 'col_name'  # Replace with the name of the column you want to extract\n",
    "  feature_name = feature_importance['col_name'].tolist()\n",
    "\n",
    "\n",
    "  # for item1, item2 in zip(feature_name, feature_val):\n",
    "  #     print(item1, item2)\n",
    "\n",
    "\n",
    "  # Use zip to combine the two lists, sort based on list1, and then unzip them\n",
    "  zipped_lists = list(zip(feature_name, feature_val))\n",
    "  zipped_lists.sort(key=lambda x: x[1],reverse=True)\n",
    "\n",
    "  # Convert the sorted result back into separate lists\n",
    "  sorted_list1, sorted_list2 = [list(x) for x in zip(*zipped_lists)]\n",
    "\n",
    "  # for k in sorted_list1:\n",
    "  #   with open(output_file_name, \"a\") as f: print(\"df.pop('\",k,\"')\", sep='', file = f)\n",
    "\n",
    "  # with open(output_file_name, \"a\") as f:print(\"Trial_ =[\", file = f)\n",
    "  # for k in sorted_list1:\n",
    "  #   with open(output_file_name, \"a\") as f:print(\"'\",k,\"',\", sep='', file = f)\n",
    "  # with open(output_file_name, \"a\") as f:print(\"]\", file = f)\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 147,
   "metadata": {},
   "outputs": [],
   "source": [
    "if generate_feature_importance == 1:\n",
    "\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('Generating SHAP explanation')\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('')\n",
    "\n",
    "  with open(output_file_name, \"a\") as f:print('LGBM FEATURE IMPORTANCE',file = f)\n",
    "\n",
    "      #START TIMER MODEL\n",
    "  start = time.time()\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('Generating explainer')\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('')\n",
    "  test = X_test_01\n",
    "  train = X_train_01\n",
    "  # ## Summary Bar Plot Global\n",
    "  start_index = 0\n",
    "  end_index = 250\n",
    "  # test.pop('Label')\n",
    "  # test.pop('is_train')\n",
    "  # print(label2)\n",
    "\n",
    "\n",
    "  explainer = shap.TreeExplainer(lgbm)\n",
    "\n",
    "  shap_values = explainer.shap_values(test[start_index:end_index])\n",
    "  shap_obj = explainer(test[start_index:end_index])\n",
    "  shap.summary_plot(shap_values = shap_values,\n",
    "                    features = test[start_index:end_index],\n",
    "                  show=False)\n",
    "  # plt.clf()\n",
    "\n",
    "  # if feature_selection_bit == 1 # On\n",
    "  # pick_prob = 0 # set equal one to choose the dataset with probabilities, set to 0 to choose one with the classes.\n",
    "  if pick_prob == 1:\n",
    "    plt.savefig('LGBM_SHAP_NSL_prob_01.png')\n",
    "  elif pick_prob == 0:\n",
    "    plt.savefig('LGBM_SHAP_NSL_class_01.png')\n",
    "\n",
    "  else: None\n",
    "  plt.clf()\n",
    "\n",
    "\n",
    "  vals= np.abs(shap_values).mean(1)\n",
    "  feature_importance = pd.DataFrame(list(zip(train.columns, sum(vals))), columns=['col_name','feature_importance_vals'])\n",
    "  feature_importance.sort_values(by=['feature_importance_vals'], ascending=False,inplace=True)\n",
    "  feature_importance.head()\n",
    "  print(feature_importance.to_string())\n",
    "  with open(output_file_name, \"a\") as f:print('Feature Importance: ',feature_importance.to_string(),file = f)\n",
    "\n",
    "\n",
    "\n",
    "  end = time.time()\n",
    "  with open(output_file_name, \"a\") as f:print('ELAPSE TIME LIME GLOBAL: ',(end - start)/60, 'min',file = f)\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  # feature_importance_vals = 'feature_importance_vals'  # Replace with the name of the column you want to extract\n",
    "  feature_val = feature_importance['feature_importance_vals'].tolist()\n",
    "\n",
    "  # col_name = 'col_name'  # Replace with the name of the column you want to extract\n",
    "  feature_name = feature_importance['col_name'].tolist()\n",
    "\n",
    "\n",
    "  # for item1, item2 in zip(feature_name, feature_val):\n",
    "  #     print(item1, item2)\n",
    "\n",
    "\n",
    "  # Use zip to combine the two lists, sort based on list1, and then unzip them\n",
    "  zipped_lists = list(zip(feature_name, feature_val))\n",
    "  zipped_lists.sort(key=lambda x: x[1],reverse=True)\n",
    "\n",
    "  # Convert the sorted result back into separate lists\n",
    "  sorted_list1, sorted_list2 = [list(x) for x in zip(*zipped_lists)]\n",
    "\n",
    "  # for k in sorted_list1:\n",
    "  #   with open(output_file_name, \"a\") as f: print(\"df.pop('\",k,\"')\", sep='', file = f)\n",
    "\n",
    "  # with open(output_file_name, \"a\") as f:print(\"Trial_ =[\", file = f)\n",
    "  # for k in sorted_list1:\n",
    "  #   with open(output_file_name, \"a\") as f:print(\"'\",k,\"',\", sep='', file = f)\n",
    "  # with open(output_file_name, \"a\") as f:print(\"]\", file = f)\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 148,
   "metadata": {},
   "outputs": [],
   "source": [
    "if generate_feature_importance == 1:\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('Generating SHAP explanation')\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('')\n",
    "\n",
    "  with open(output_file_name, \"a\") as f:print('RF FEATURE IMPORTANCE',file = f)\n",
    "\n",
    "      #START TIMER MODEL\n",
    "  start = time.time()\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('Generating explainer')\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  print('')\n",
    "  test = X_test_01\n",
    "  train = X_train_01\n",
    "  # ## Summary Bar Plot Global\n",
    "  start_index = 0\n",
    "  end_index = 250\n",
    "  # test.pop('Label')\n",
    "  # test.pop('is_train')\n",
    "  # print(label2)\n",
    "\n",
    "\n",
    "  explainer = shap.TreeExplainer(rf)\n",
    "\n",
    "  shap_values = explainer.shap_values(test[start_index:end_index])\n",
    "  shap_obj = explainer(test[start_index:end_index])\n",
    "  shap.summary_plot(shap_values = shap_values,\n",
    "                    features = test[start_index:end_index],\n",
    "                  show=False)\n",
    "  # plt.clf()\n",
    "\n",
    "  # if feature_selection_bit == 1 # On\n",
    "  # pick_prob = 0 # set equal one to choose the dataset with probabilities, set to 0 to choose one with the classes.\n",
    "  if pick_prob == 1:\n",
    "    plt.savefig('RF_SHAP_NSL_prob_01.png')\n",
    "  elif pick_prob == 0:\n",
    "    plt.savefig('RF_SHAP_NSL_class_01.png')\n",
    "\n",
    "  else: None\n",
    "  plt.clf()\n",
    "\n",
    "\n",
    "  vals= np.abs(shap_values).mean(1)\n",
    "  feature_importance = pd.DataFrame(list(zip(train.columns, sum(vals))), columns=['col_name','feature_importance_vals'])\n",
    "  feature_importance.sort_values(by=['feature_importance_vals'], ascending=False,inplace=True)\n",
    "  feature_importance.head()\n",
    "  print(feature_importance.to_string())\n",
    "  with open(output_file_name, \"a\") as f:print('Feature Importance: ',feature_importance.to_string(),file = f)\n",
    "\n",
    "\n",
    "\n",
    "  end = time.time()\n",
    "  with open(output_file_name, \"a\") as f:print('ELAPSE TIME LIME GLOBAL: ',(end - start)/60, 'min',file = f)\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "  # feature_importance_vals = 'feature_importance_vals'  # Replace with the name of the column you want to extract\n",
    "  feature_val = feature_importance['feature_importance_vals'].tolist()\n",
    "\n",
    "  # col_name = 'col_name'  # Replace with the name of the column you want to extract\n",
    "  feature_name = feature_importance['col_name'].tolist()\n",
    "\n",
    "\n",
    "  # for item1, item2 in zip(feature_name, feature_val):\n",
    "  #     print(item1, item2)\n",
    "\n",
    "\n",
    "  # Use zip to combine the two lists, sort based on list1, and then unzip them\n",
    "  zipped_lists = list(zip(feature_name, feature_val))\n",
    "  zipped_lists.sort(key=lambda x: x[1],reverse=True)\n",
    "\n",
    "  # Convert the sorted result back into separate lists\n",
    "  sorted_list1, sorted_list2 = [list(x) for x in zip(*zipped_lists)]\n",
    "\n",
    "  # for k in sorted_list1:\n",
    "  #   with open(output_file_name, \"a\") as f: print(\"df.pop('\",k,\"')\", sep='', file = f)\n",
    "\n",
    "  # with open(output_file_name, \"a\") as f:print(\"Trial_ =[\", file = f)\n",
    "  # for k in sorted_list1:\n",
    "  #   with open(output_file_name, \"a\") as f:print(\"'\",k,\"',\", sep='', file = f)\n",
    "  # with open(output_file_name, \"a\") as f:print(\"]\", file = f)\n",
    "\n",
    "  print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
