{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [],
   "source": [
    "# First ensemble with NSL-KDD\n",
    "# Parameters\n",
    "\n",
    "#----------------------------------------------\n",
    "# 0 for not using it as base learner\n",
    "# 1 for using it as base learner\n",
    "\n",
    "use_model_ada = 1 \n",
    "use_model_dnn = 1 \n",
    "use_model_mlp = 1 \n",
    "use_model_lgbm = 1 \n",
    "use_model_rf = 1 \n",
    "use_model_svm = 1\n",
    "use_model_knn = 1 \n",
    "#----------------------------------------------\n",
    "# 0 for training the model\n",
    "# 1 for using the saved version of the model\n",
    "\n",
    "load_model_ada = 0 \n",
    "load_model_dnn = 0 \n",
    "load_model_mlp = 0 \n",
    "load_model_lgbm = 0 \n",
    "load_model_rf = 0 \n",
    "load_model_svm = 0\n",
    "load_model_knn = 0 \n",
    "#----------------------------------------------\n",
    "\n",
    "# load_model_ada = 1\n",
    "# load_model_dnn = 1 \n",
    "# load_model_mlp = 1 \n",
    "# load_model_lgbm = 1 \n",
    "# load_model_rf = 1 \n",
    "# load_model_svm = 1\n",
    "# load_model_knn = 1 \n",
    "#----------------------------------------------\n",
    "feature_selection_bit = 0\n",
    "# feature_selection_bit = 1\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Specify the name of the output text file\n",
    "if feature_selection_bit == 0:\n",
    "    output_file_name = \"ensemble_base_models_all_features.txt\"\n",
    "    with open(output_file_name, \"w\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('---- ensemble_base_models_all_features', file = f)\n",
    "\n",
    "elif feature_selection_bit == 1:\n",
    "    output_file_name = \"ensemble_base_models_feature_selection.txt\"\n",
    "    with open(output_file_name, \"w\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('----ensemble_base_models_feature_selection--', file = f)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [],
   "source": [
    "#!/usr/bin/env python       \n",
    "# coding: utf-8\n",
    "\n",
    "# In[1]:\n",
    "# importing required libraries\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import pickle # saving and loading trained model\n",
    "from os import path\n",
    "\n",
    "\n",
    "# importing required libraries for normalizing data\n",
    "from sklearn import preprocessing\n",
    "from sklearn.preprocessing import (StandardScaler, OrdinalEncoder,LabelEncoder, MinMaxScaler, OneHotEncoder)\n",
    "from sklearn.preprocessing import Normalizer, MaxAbsScaler , RobustScaler, PowerTransformer\n",
    "\n",
    "# importing library for plotting\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "\n",
    "from sklearn import metrics\n",
    "from sklearn.metrics import accuracy_score # for calculating accuracy of model\n",
    "from sklearn.model_selection import train_test_split # for splitting the dataset for training and testing\n",
    "from sklearn.metrics import classification_report # for generating a classification report of model\n",
    "\n",
    "from sklearn.metrics import precision_score\n",
    "from sklearn.metrics import recall_score\n",
    "from sklearn.metrics import f1_score\n",
    "\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from sklearn.metrics import roc_curve, auc\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow.keras.utils import to_categorical\n",
    "\n",
    "from keras.layers import Dense # importing dense layer\n",
    "\n",
    "from keras.layers import Input\n",
    "from keras.models import Model\n",
    "# representation of model layers\n",
    "#from keras.utils import plot_model\n",
    "from sklearn.metrics import confusion_matrix\n",
    "import shap\n",
    "\n",
    "from sklearn.metrics import balanced_accuracy_score\n",
    "from sklearn.metrics import matthews_corrcoef\n",
    "from sklearn.metrics import roc_auc_score\n",
    "import time\n",
    "start_program = time.time()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "def confusion_metrics (name_model,predictions,true_labels,time_taken):\n",
    "\n",
    "    name = name_model\n",
    "    pred_label = predictions\n",
    "    y_test_01 = true_labels \n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print(name, file = f)\n",
    "\n",
    "\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('CONFUSION MATRIX')\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n",
    "    # pred_label = label[ypred]\n",
    "\n",
    "    confusion_matrix = pd.crosstab(y_test_01, pred_label,rownames=['Actual ALERT'],colnames = ['Predicted ALERT'], dropna=False).sort_index(axis=0).sort_index(axis=1)\n",
    "    all_unique_values = sorted(set(pred_label) | set(y_test_01))\n",
    "    z = np.zeros((len(all_unique_values), len(all_unique_values)))\n",
    "    rows, cols = confusion_matrix.shape\n",
    "    z[:rows, :cols] = confusion_matrix\n",
    "    confusion_matrix  = pd.DataFrame(z, columns=all_unique_values, index=all_unique_values)\n",
    "    # confusion_matrix.to_csv('Ensemble_conf_matrix.csv')\n",
    "    # with open(output_file_name, \"a\") as f:print(confusion_matrix,file=f)\n",
    "    print(confusion_matrix)\n",
    "    with open(output_file_name, \"a\") as f: print('Confusion Matrix', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print(confusion_matrix, file = f)\n",
    "\n",
    "\n",
    "    FP = confusion_matrix.sum(axis=0) - np.diag(confusion_matrix)\n",
    "    FN = confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)\n",
    "    TP = np.diag(confusion_matrix)\n",
    "    TN = confusion_matrix.values.sum() - (FP + FN + TP)\n",
    "    TP_total = sum(TP)\n",
    "    TN_total = sum(TN)\n",
    "    FP_total = sum(FP)\n",
    "    FN_total = sum(FN)\n",
    "\n",
    "    TP_total = np.array(TP_total,dtype=np.float64)\n",
    "    TN_total = np.array(TN_total,dtype=np.float64)\n",
    "    FP_total = np.array(FP_total,dtype=np.float64)\n",
    "    FN_total = np.array(FN_total,dtype=np.float64)\n",
    "    FPR = FP / (FP + TN)\n",
    "    FPR = 100*(sum(FPR)/len(FPR))\n",
    "\n",
    "    #----------------------------------------------------------------#----------------------------------------------------------------\n",
    "\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('METRICS')\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "\n",
    "\n",
    "    Acc = accuracy_score(y_test_01, pred_label)\n",
    "    Precision = precision_score(y_test_01, pred_label, average='macro')\n",
    "    Recall = recall_score(y_test_01, pred_label, average='macro')\n",
    "    F1 =  f1_score(y_test_01, pred_label, average='macro')\n",
    "    BACC = balanced_accuracy_score(y_test_01, pred_label)\n",
    "    MCC = matthews_corrcoef(y_test_01, pred_label)\n",
    "\n",
    "\n",
    "    # voting_acc_01 = Acc\n",
    "    # voting_pre_01 = Precision\n",
    "    # weighed_avg_rec_01 = Recall\n",
    "    # weighed_avg_f1_01 = F1\n",
    "    # weighed_avg_bacc_01 = BACC\n",
    "    # weighed_avg_mcc_01 = MCC\n",
    "    # with open(output_file_name, \"a\") as f:print('Accuracy total: ', Acc,file=f)\n",
    "    print('Accuracy total: ', Acc)\n",
    "    print('Precision total: ', Precision )\n",
    "    print('Recall total: ', Recall )\n",
    "    print('F1 total: ', F1 )\n",
    "    print('BACC total: ', BACC)\n",
    "    print('MCC total: ', MCC)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('Accuracy total: ', Acc, file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('Precision total: ', Precision, file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('Recall total: ', Recall , file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('F1 total: ', F1, file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('BACC total: ', BACC , file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('MCC total: ', MCC, file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('Time Taken: ', time_taken, file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('FPR: ', FPR, '%' ,file = f)\n",
    "\n",
    "    return Acc, Precision, Recall, F1, BACC, MCC, FPR\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "\n",
    "# In[3]:\n",
    "\n",
    "\n",
    "# attach the column names to the dataset\n",
    "feature=[\"duration\",\"protocol_type\",\"service\",\"flag\",\"src_bytes\",\"dst_bytes\",\"land\",\"wrong_fragment\",\"urgent\",\"hot\",\n",
    "          \"num_failed_logins\",\"logged_in\",\"num_compromised\",\"root_shell\",\"su_attempted\",\"num_root\",\"num_file_creations\",\"num_shells\",\n",
    "          \"num_access_files\",\"num_outbound_cmds\",\"is_host_login\",\"is_guest_login\",\"count\",\"srv_count\",\"serror_rate\",\"srv_serror_rate\",\n",
    "          \"rerror_rate\",\"srv_rerror_rate\",\"same_srv_rate\",\"diff_srv_rate\",\"srv_diff_host_rate\",\"dst_host_count\",\"dst_host_srv_count\", \n",
    "          \"dst_host_same_srv_rate\",\"dst_host_diff_srv_rate\",\"dst_host_same_src_port_rate\",\"dst_host_srv_diff_host_rate\",\"dst_host_serror_rate\",\n",
    "          \"dst_host_srv_serror_rate\",\"dst_host_rerror_rate\",\"dst_host_srv_rerror_rate\",\"label\",\"difficulty\"]\n",
    "# KDDTrain+_2.csv & KDDTest+_2.csv are the datafiles without the last column about the difficulty score\n",
    "# these have already been removed.\n",
    "\n",
    "train='KDDTrain+.txt'\n",
    "test='KDDTest+.txt'\n",
    "\n",
    "df=pd.read_csv(train,names=feature)\n",
    "df_test=pd.read_csv(test,names=feature)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Dimensions of the Training set: (125973, 43)\n",
      "Dimensions of the Test set: (22544, 43)\n",
      "Label distribution Training set:\n",
      "normal             67343\n",
      "neptune            41214\n",
      "satan               3633\n",
      "ipsweep             3599\n",
      "portsweep           2931\n",
      "smurf               2646\n",
      "nmap                1493\n",
      "back                 956\n",
      "teardrop             892\n",
      "warezclient          890\n",
      "pod                  201\n",
      "guess_passwd          53\n",
      "buffer_overflow       30\n",
      "warezmaster           20\n",
      "land                  18\n",
      "imap                  11\n",
      "rootkit               10\n",
      "loadmodule             9\n",
      "ftp_write              8\n",
      "multihop               7\n",
      "phf                    4\n",
      "perl                   3\n",
      "spy                    2\n",
      "Name: label, dtype: int64\n",
      "\n",
      "Label distribution Test set:\n",
      "normal             9711\n",
      "neptune            4657\n",
      "guess_passwd       1231\n",
      "mscan               996\n",
      "warezmaster         944\n",
      "apache2             737\n",
      "satan               735\n",
      "processtable        685\n",
      "smurf               665\n",
      "back                359\n",
      "snmpguess           331\n",
      "saint               319\n",
      "mailbomb            293\n",
      "snmpgetattack       178\n",
      "portsweep           157\n",
      "ipsweep             141\n",
      "httptunnel          133\n",
      "nmap                 73\n",
      "pod                  41\n",
      "buffer_overflow      20\n",
      "multihop             18\n",
      "named                17\n",
      "ps                   15\n",
      "sendmail             14\n",
      "xterm                13\n",
      "rootkit              13\n",
      "teardrop             12\n",
      "xlock                 9\n",
      "land                  7\n",
      "xsnoop                4\n",
      "ftp_write             3\n",
      "perl                  2\n",
      "udpstorm              2\n",
      "sqlattack             2\n",
      "phf                   2\n",
      "loadmodule            2\n",
      "worm                  2\n",
      "imap                  1\n",
      "Name: label, dtype: int64\n",
      "Training set:\n",
      "Feature 'protocol_type' has 3 categories\n",
      "Feature 'service' has 70 categories\n",
      "Feature 'flag' has 11 categories\n",
      "Feature 'label' has 23 categories\n",
      "\n",
      "Distribution of categories in service:\n",
      "http        40338\n",
      "private     21853\n",
      "domain_u     9043\n",
      "smtp         7313\n",
      "ftp_data     6860\n",
      "Name: service, dtype: int64\n",
      "Test set:\n",
      "Feature 'protocol_type' has 3 categories\n",
      "Feature 'service' has 64 categories\n",
      "Feature 'flag' has 11 categories\n",
      "Feature 'label' has 38 categories\n",
      "['Protocol_type_icmp', 'Protocol_type_tcp', 'Protocol_type_udp', 'service_IRC', 'service_X11', 'service_Z39_50', 'service_aol', 'service_auth', 'service_bgp', 'service_courier', 'service_csnet_ns', 'service_ctf', 'service_daytime', 'service_discard', 'service_domain', 'service_domain_u', 'service_echo', 'service_eco_i', 'service_ecr_i', 'service_efs', 'service_exec', 'service_finger', 'service_ftp', 'service_ftp_data', 'service_gopher', 'service_harvest', 'service_hostnames', 'service_http', 'service_http_2784', 'service_http_443', 'service_http_8001', 'service_imap4', 'service_iso_tsap', 'service_klogin', 'service_kshell', 'service_ldap', 'service_link', 'service_login', 'service_mtp', 'service_name', 'service_netbios_dgm', 'service_netbios_ns', 'service_netbios_ssn', 'service_netstat', 'service_nnsp', 'service_nntp', 'service_ntp_u', 'service_other', 'service_pm_dump', 'service_pop_2', 'service_pop_3', 'service_printer', 'service_private', 'service_red_i', 'service_remote_job', 'service_rje', 'service_shell', 'service_smtp', 'service_sql_net', 'service_ssh', 'service_sunrpc', 'service_supdup', 'service_systat', 'service_telnet', 'service_tftp_u', 'service_tim_i', 'service_time', 'service_urh_i', 'service_urp_i', 'service_uucp', 'service_uucp_path', 'service_vmnet', 'service_whois', 'flag_OTH', 'flag_REJ', 'flag_RSTO', 'flag_RSTOS0', 'flag_RSTR', 'flag_S0', 'flag_S1', 'flag_S2', 'flag_S3', 'flag_SF', 'flag_SH']\n",
      "   protocol_type  service  flag\n",
      "0              1       20     9\n",
      "1              2       44     9\n",
      "2              1       49     5\n",
      "3              1       24     9\n",
      "4              1       24     9\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(125973, 123)\n",
      "(22544, 123)\n",
      "0    0\n",
      "1    0\n",
      "2    1\n",
      "3    0\n",
      "4    0\n",
      "Name: label, dtype: int64\n",
      "X_train has shape: (125973, 122) \n",
      "y_train has shape: (125973, 1)\n",
      "X_test has shape: (22544, 122) \n",
      "y_test has shape: (22544, 1)\n",
      "Counter({0: 67343, 1: 45927, 2: 11656, 3: 995, 4: 52})\n"
     ]
    }
   ],
   "source": [
    "\n",
    "\n",
    "# shape, this gives the dimensions of the dataset\n",
    "print('Dimensions of the Training set:',df.shape)\n",
    "print('Dimensions of the Test set:',df_test.shape)\n",
    "\n",
    "\n",
    "df.drop(['difficulty'],axis=1,inplace=True)\n",
    "df_test.drop(['difficulty'],axis=1,inplace=True)\n",
    "\n",
    "\n",
    "\n",
    "print('Label distribution Training set:')\n",
    "print(df['label'].value_counts())\n",
    "print()\n",
    "print('Label distribution Test set:')\n",
    "print(df_test['label'].value_counts())\n",
    "\n",
    "\n",
    "\n",
    "# colums that are categorical and not binary yet: protocol_type (column 2), service (column 3), flag (column 4).\n",
    "# explore categorical features\n",
    "print('Training set:')\n",
    "for col_name in df.columns:\n",
    "    if df[col_name].dtypes == 'object' :\n",
    "        unique_cat = len(df[col_name].unique())\n",
    "        print(\"Feature '{col_name}' has {unique_cat} categories\".format(col_name=col_name, unique_cat=unique_cat))\n",
    "\n",
    "#see how distributed the feature service is, it is evenly distributed and therefore we need to make dummies for all.\n",
    "print()\n",
    "print('Distribution of categories in service:')\n",
    "print(df['service'].value_counts().sort_values(ascending=False).head())\n",
    "\n",
    "\n",
    "\n",
    "# Test set\n",
    "print('Test set:')\n",
    "for col_name in df_test.columns:\n",
    "    if df_test[col_name].dtypes == 'object' :\n",
    "        unique_cat = len(df_test[col_name].unique())\n",
    "        print(\"Feature '{col_name}' has {unique_cat} categories\".format(col_name=col_name, unique_cat=unique_cat))\n",
    "\n",
    "\n",
    "from sklearn.preprocessing import LabelEncoder,OneHotEncoder\n",
    "categorical_columns=['protocol_type', 'service', 'flag']\n",
    "# insert code to get a list of categorical columns into a variable, categorical_columns\n",
    "categorical_columns=['protocol_type', 'service', 'flag'] \n",
    " # Get the categorical values into a 2D numpy array\n",
    "df_categorical_values = df[categorical_columns]\n",
    "testdf_categorical_values = df_test[categorical_columns]\n",
    "df_categorical_values.head()\n",
    "\n",
    "\n",
    "# protocol type\n",
    "unique_protocol=sorted(df.protocol_type.unique())\n",
    "string1 = 'Protocol_type_'\n",
    "unique_protocol2=[string1 + x for x in unique_protocol]\n",
    "# service\n",
    "unique_service=sorted(df.service.unique())\n",
    "string2 = 'service_'\n",
    "unique_service2=[string2 + x for x in unique_service]\n",
    "# flag\n",
    "unique_flag=sorted(df.flag.unique())\n",
    "string3 = 'flag_'\n",
    "unique_flag2=[string3 + x for x in unique_flag]\n",
    "# put together\n",
    "dumcols=unique_protocol2 + unique_service2 + unique_flag2\n",
    "print(dumcols)\n",
    "\n",
    "#do same for test set\n",
    "unique_service_test=sorted(df_test.service.unique())\n",
    "unique_service2_test=[string2 + x for x in unique_service_test]\n",
    "testdumcols=unique_protocol2 + unique_service2_test + unique_flag2\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "df_categorical_values_enc=df_categorical_values.apply(LabelEncoder().fit_transform)\n",
    "print(df_categorical_values_enc.head())\n",
    "# test set\n",
    "testdf_categorical_values_enc=testdf_categorical_values.apply(LabelEncoder().fit_transform)\n",
    "\n",
    "\n",
    "\n",
    "enc = OneHotEncoder()\n",
    "df_categorical_values_encenc = enc.fit_transform(df_categorical_values_enc)\n",
    "df_cat_data = pd.DataFrame(df_categorical_values_encenc.toarray(),columns=dumcols)\n",
    "# test set\n",
    "testdf_categorical_values_encenc = enc.fit_transform(testdf_categorical_values_enc)\n",
    "testdf_cat_data = pd.DataFrame(testdf_categorical_values_encenc.toarray(),columns=testdumcols)\n",
    "\n",
    "df_cat_data.head()\n",
    "\n",
    "\n",
    "trainservice=df['service'].tolist()\n",
    "testservice= df_test['service'].tolist()\n",
    "difference=list(set(trainservice) - set(testservice))\n",
    "string = 'service_'\n",
    "difference=[string + x for x in difference]\n",
    "difference\n",
    "\n",
    "for col in difference:\n",
    "    testdf_cat_data[col] = 0\n",
    "\n",
    "testdf_cat_data.shape\n",
    "\n",
    "newdf=df.join(df_cat_data)\n",
    "newdf.drop('flag', axis=1, inplace=True)\n",
    "newdf.drop('protocol_type', axis=1, inplace=True)\n",
    "newdf.drop('service', axis=1, inplace=True)\n",
    "# test data\n",
    "newdf_test=df_test.join(testdf_cat_data)\n",
    "newdf_test.drop('flag', axis=1, inplace=True)\n",
    "newdf_test.drop('protocol_type', axis=1, inplace=True)\n",
    "newdf_test.drop('service', axis=1, inplace=True)\n",
    "print(newdf.shape)\n",
    "print(newdf_test.shape)\n",
    "\n",
    "\n",
    "# take label column\n",
    "labeldf=newdf['label']\n",
    "labeldf_test=newdf_test['label']\n",
    "# change the label column\n",
    "newlabeldf=labeldf.replace({ 'normal' : 0, 'neptune' : 1 ,'back': 1, 'land': 1, 'pod': 1, 'smurf': 1, 'teardrop': 1,'mailbomb': 1, 'apache2': 1, 'processtable': 1, 'udpstorm': 1, 'worm': 1,\n",
    "                           'ipsweep' : 2,'nmap' : 2,'portsweep' : 2,'satan' : 2,'mscan' : 2,'saint' : 2\n",
    "                           ,'ftp_write': 3,'guess_passwd': 3,'imap': 3,'multihop': 3,'phf': 3,'spy': 3,'warezclient': 3,'warezmaster': 3,'sendmail': 3,'named': 3,'snmpgetattack': 3,'snmpguess': 3,'xlock': 3,'xsnoop': 3,'httptunnel': 3,\n",
    "                           'buffer_overflow': 4,'loadmodule': 4,'perl': 4,'rootkit': 4,'ps': 4,'sqlattack': 4,'xterm': 4})\n",
    "newlabeldf_test=labeldf_test.replace({ 'normal' : 0, 'neptune' : 1 ,'back': 1, 'land': 1, 'pod': 1, 'smurf': 1, 'teardrop': 1,'mailbomb': 1, 'apache2': 1, 'processtable': 1, 'udpstorm': 1, 'worm': 1,\n",
    "                           'ipsweep' : 2,'nmap' : 2,'portsweep' : 2,'satan' : 2,'mscan' : 2,'saint' : 2\n",
    "                           ,'ftp_write': 3,'guess_passwd': 3,'imap': 3,'multihop': 3,'phf': 3,'spy': 3,'warezclient': 3,'warezmaster': 3,'sendmail': 3,'named': 3,'snmpgetattack': 3,'snmpguess': 3,'xlock': 3,'xsnoop': 3,'httptunnel': 3,\n",
    "                           'buffer_overflow': 4,'loadmodule': 4,'perl': 4,'rootkit': 4,'ps': 4,'sqlattack': 4,'xterm': 4})\n",
    "# put the new label column back\n",
    "newdf['label'] = newlabeldf\n",
    "newdf_test['label'] = newlabeldf_test\n",
    "print(newdf['label'].head())\n",
    "\n",
    "\n",
    "# Specify your selected features. Note that you'll need to modify this list according to your final processed dataframe\n",
    "#Uncomment the below lines to use these top 20 features from shap analysis\n",
    "#selected_features = [\"root_shell\",\"service_telnet\",\"num_shells\",\"service_uucp\",\"dst_host_same_src_port_rate\"\n",
    "#                     ,\"dst_host_rerror_rate\",\"dst_host_srv_serror_rate\",\"dst_host_srv_count\",\"service_private\",\"logged_in\",\n",
    "#                    \"dst_host_serror_rate\",\"serror_rate\",\"srv_serror_rate\",\"flag_S0\",\"diff_srv_rate\",\"dst_host_srv_diff_host_rate\",\"num_file_creations\",\"flag_RSTR\"#,\"dst_host_same_srv_rate\",\"service_Idap\",\"label\"]\n",
    "                     \n",
    "\n",
    "# Select those features from your dataframe\n",
    "#newdf = newdf[selected_features]\n",
    "#newdf_test = newdf_test[selected_features]\n",
    "\n",
    "# Now your dataframe only contains your selected features.\n",
    "\n",
    "# creating a dataframe with multi-class labels (Dos,Probe,R2L,U2R,normal)\n",
    "multi_data = newdf.copy()\n",
    "multi_label = pd.DataFrame(multi_data.label)\n",
    "\n",
    "multi_data_test=newdf_test.copy()\n",
    "multi_label_test = pd.DataFrame(multi_data_test.label)\n",
    "\n",
    "\n",
    "# using standard scaler for normalizing\n",
    "std_scaler = StandardScaler()\n",
    "def standardization(df,col):\n",
    "    for i in col:\n",
    "        arr = df[i]\n",
    "        arr = np.array(arr)\n",
    "        df[i] = std_scaler.fit_transform(arr.reshape(len(arr),1))\n",
    "    return df\n",
    "\n",
    "numeric_col = multi_data.select_dtypes(include='number').columns\n",
    "data = standardization(multi_data,numeric_col)\n",
    "numeric_col_test = multi_data_test.select_dtypes(include='number').columns\n",
    "data_test = standardization(multi_data_test,numeric_col_test)\n",
    "\n",
    "# label encoding (0,1,2,3,4) multi-class labels (Dos,normal,Probe,R2L,U2R)\n",
    "le2 = preprocessing.LabelEncoder()\n",
    "le2_test = preprocessing.LabelEncoder()\n",
    "enc_label = multi_label.apply(le2.fit_transform)\n",
    "enc_label_test = multi_label_test.apply(le2_test.fit_transform)\n",
    "multi_data = multi_data.copy()\n",
    "multi_data_test = multi_data_test.copy()\n",
    "\n",
    "multi_data['intrusion'] = enc_label\n",
    "multi_data_test['intrusion'] = enc_label_test\n",
    "\n",
    "#y_mul = multi_data['intrusion']\n",
    "multi_data\n",
    "multi_data_test\n",
    "\n",
    "\n",
    "\n",
    "multi_data.drop(labels= [ 'label'], axis=1, inplace=True)\n",
    "multi_data\n",
    "multi_data_test.drop(labels= [ 'label'], axis=1, inplace=True)\n",
    "multi_data_test\n",
    "\n",
    "\n",
    "y_train_multi= multi_data[['intrusion']]\n",
    "X_train_multi= multi_data.drop(labels=['intrusion'], axis=1)\n",
    "\n",
    "print('X_train has shape:',X_train_multi.shape,'\\ny_train has shape:',y_train_multi.shape)\n",
    "\n",
    "y_test_multi= multi_data_test[['intrusion']]\n",
    "X_test_multi= multi_data_test.drop(labels=['intrusion'], axis=1)\n",
    "\n",
    "print('X_test has shape:',X_test_multi.shape,'\\ny_test has shape:',y_test_multi.shape)\n",
    "\n",
    "\n",
    "from collections import Counter\n",
    "\n",
    "label_counts = Counter(y_train_multi['intrusion'])\n",
    "print(label_counts)\n",
    "\n",
    "\n",
    "from sklearn.preprocessing import LabelBinarizer\n",
    "\n",
    "y_train_multi = LabelBinarizer().fit_transform(y_train_multi)\n",
    "\n",
    "y_test_multi = LabelBinarizer().fit_transform(y_test_multi)\n",
    "\n",
    "\n",
    "Y_train=y_train_multi.copy()\n",
    "X_train=X_train_multi.copy()\n",
    "\n",
    "Y_test=y_test_multi.copy()\n",
    "X_test=X_test_multi.copy()\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[0 1 0 0 0]\n",
      " [0 1 0 0 0]\n",
      " [1 0 0 0 0]\n",
      " ...\n",
      " [0 1 0 0 0]\n",
      " [1 0 0 0 0]\n",
      " [0 0 1 0 0]]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "array([[-1.10249223e-01, -7.67859947e-03, -4.91864438e-03, ...,\n",
       "        -1.97262160e-02,  8.25150071e-01, -4.64315895e-02],\n",
       "       [-1.10249223e-01, -7.73736981e-03, -4.91864438e-03, ...,\n",
       "        -1.97262160e-02,  8.25150071e-01, -4.64315895e-02],\n",
       "       [-1.10249223e-01, -7.76224074e-03, -4.91864438e-03, ...,\n",
       "        -1.97262160e-02, -1.21190076e+00, -4.64315895e-02],\n",
       "       ...,\n",
       "       [-9.29714678e-02, -7.36430591e-03, -3.87394518e-03, ...,\n",
       "        -1.97262160e-02,  8.25150071e-01, -4.64315895e-02],\n",
       "       [-8.68282658e-02, -7.36430591e-03, -3.87568593e-03, ...,\n",
       "        -1.97262160e-02,  8.25150071e-01, -4.64315895e-02],\n",
       "       [ 1.61587463e-01, -7.46804833e-03,  1.06953862e-03, ...,\n",
       "        -1.97262160e-02,  8.25150071e-01, -4.64315895e-02]])"
      ]
     },
     "execution_count": 74,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from imblearn.over_sampling import RandomOverSampler\n",
    "from sklearn.datasets import make_classification\n",
    "\n",
    "# Assuming you have features X and labels Y\n",
    "# X, Y = make_classification()\n",
    "\n",
    "ros = RandomOverSampler(sampling_strategy='minority', random_state=100)\n",
    "\n",
    "X_train, Y_train = ros.fit_resample(X_train, Y_train)\n",
    "\n",
    "\n",
    "# In[33]:\n",
    "\n",
    "\n",
    "print(Y_test)\n",
    "\n",
    "\n",
    "# In[34]:\n",
    "\n",
    "\n",
    "X_train.values\n",
    "\n",
    "\n",
    "# In[35]:\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [],
   "source": [
    "single_class_train = np.argmax(y_train_multi, axis=1)\n",
    "single_class_test = np.argmax(y_test_multi, axis=1)\n",
    "\n",
    "\n",
    "df1 = X_train_multi.assign(Label = single_class_train)\n",
    "df2 =  X_test_multi.assign(Label = single_class_test)\n",
    "\n",
    "frames = [df1,  df2]\n",
    "\n",
    "df = pd.concat(frames,ignore_index=True)\n",
    "df_fs = df\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [],
   "source": [
    "y = df.pop('Label')\n",
    "X = df\n",
    "df = X.assign(Label = y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [],
   "source": [
    "# y = df_fs.pop('Label')\n",
    "# X = df_fs\n",
    "# df_fs = X.assign(Label = y)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Feature Selection Methods"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from sklearn.tree import DecisionTreeClassifier\n",
    "# from sklearn.feature_selection import mutual_info_classif\n",
    "\n",
    "# # Split the dataset into training and testing sets\n",
    "# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
    "\n",
    "# # Train a decision tree classifier\n",
    "# clf = DecisionTreeClassifier(random_state=42)\n",
    "# clf.fit(X_train, y_train)\n",
    "\n",
    "# # Compute information gain using mutual information\n",
    "# info_gain = mutual_info_classif(X_train, y_train)\n",
    "\n",
    "# # Display information gain for each feature\n",
    "# for feature, gain in zip(X_train.columns, info_gain):\n",
    "#     print(f'Information Gain for {feature}: {gain}')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [],
   "source": [
    "if feature_selection_bit == 2:\n",
    "\n",
    "    from sklearn.feature_selection import mutual_info_classif\n",
    "    %matplotlib inline\n",
    "\n",
    "    # Compute information gain using mutual information\n",
    "    importances = mutual_info_classif(X, y)\n",
    "\n",
    "    feat_importances = pd.Series(importances, df.columns[0:len(df.columns)-1])\n",
    "    # feat_importances.plot(kind='barh', color = 'teal')\n",
    "        \n",
    "    feat_importances_sorted = feat_importances.sort_values( ascending=False)\n",
    "\n",
    "    # Print or use the sorted DataFrame\n",
    "    print(feat_importances_sorted)\n",
    "    # feat_importances_sorted.plot(kind='barh', color = 'teal')\n",
    "    # feat_importances_sorted\n",
    "    top_features = feat_importances_sorted.nlargest(10)\n",
    "    top_feature_names = top_features.index.tolist()\n",
    "\n",
    "    print(\"Top 10 feature names:\")\n",
    "    print(top_feature_names)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [],
   "source": [
    "# feat_importances"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# feat_importances_sorted = feat_importances.sort_values( ascending=False)\n",
    "\n",
    "# # Print or use the sorted DataFrame\n",
    "# print(feat_importances_sorted)\n",
    "# # feat_importances_sorted.plot(kind='barh', color = 'teal')\n",
    "# # feat_importances_sorted\n",
    "# top_features = feat_importances_sorted.nlargest(10)\n",
    "# top_feature_names = top_features.index.tolist()\n",
    "\n",
    "# print(\"Top 10 feature names:\")\n",
    "# print(top_feature_names)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {},
   "outputs": [],
   "source": [
    "# X"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from skfeature.function.similarity_based import fisher_score\n",
    "# import matplotlib.pyplot as plt\n",
    "# %matplotlib inline \n",
    "\n",
    "# ranks = fisher_score.fisher_score(X,y)\n",
    "\n",
    "# feat_importances = pd.Series(ranks, dataframe.columns[0:len(dataframe.columns)-1])\n",
    "# feat_importances.plot(kind = 'barh',color = 'teal')\n",
    "# plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "metadata": {},
   "outputs": [],
   "source": [
    "# stop"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Optional manual feature selection: keep only the eight features chosen\n",
    "# by the XAI analysis of previous work (plus the Label column).\n",
    "if feature_selection_bit == 1:\n",
    "    # USE XAI from last work\n",
    "    feature_selection = [\n",
    "                        'dst_host_same_srv_rate',\n",
    "                        'dst_host_srv_count',\n",
    "                        'dst_host_same_src_port_rate',\n",
    "                        'logged_in',\n",
    "                        'dst_host_serror_rate',\n",
    "                        'count',\n",
    "                        'srv_count',\n",
    "                        'dst_host_rerror_rate',\n",
    "                        'Label'\n",
    "                        ]\n",
    "    # Use information gain\n",
    "    # feature_selection = top_feature_names\n",
    "    \n",
    "\n",
    "    # NOTE(review): df_og is an alias of df, not a copy; it keeps the full\n",
    "    # frame only because the next line rebinds df instead of mutating it.\n",
    "    df_og = df\n",
    "    df = df[feature_selection]\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# y = df.pop('Label')\n",
    "# X = df\n",
    "\n",
    "# Factorize the label column: y1 holds the integer codes 0..4, y2 the\n",
    "# corresponding original label values.\n",
    "y1, y2 = pd.factorize(y)\n",
    "\n",
    "# One-vs-rest label vectors: y_k is 0 where the sample belongs to class k\n",
    "# and 1 for every other class.\n",
    "#\n",
    "# BUG FIX: the original replace-chain for y_1 first mapped class 1 to the\n",
    "# sentinel 999, mapped classes 0, 2, 3, 4 to 1, but then replaced 999 with\n",
    "# 1 instead of 0 -- so y_1 came out all-ones with no positive class at all.\n",
    "# Building every vector directly as (code != k) fixes y_1 and reproduces\n",
    "# y_0, y_2, y_3 and y_4 exactly as the original replace-chains produced them.\n",
    "y_0 = pd.DataFrame((y1 != 0).astype(int))\n",
    "y_1 = pd.DataFrame((y1 != 1).astype(int))\n",
    "y_2 = pd.DataFrame((y1 != 2).astype(int))\n",
    "y_3 = pd.DataFrame((y1 != 3).astype(int))\n",
    "y_4 = pd.DataFrame((y1 != 4).astype(int))\n",
    "\n",
    "# Re-attach the Label column (df may have been reduced by feature selection).\n",
    "df = df.assign(Label = y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Divide the dataset between level 00 and level 01\n",
    "import sklearn\n",
    "from sklearn.model_selection import train_test_split\n",
    "split = 0.7 # 0.5\n",
    "\n",
    "# X_00,X_01, y_00, y_01 = sklearn.model_selection.train_test_split(X, y, train_size=split)\n",
    "\n",
    "# random_state pins the (previously unseeded) split so every re-run trains\n",
    "# and evaluates on the same partition; stratify=y preserves the heavily\n",
    "# imbalanced class proportions (class 4 has only 119 samples) in both halves.\n",
    "X_train, X_test, y_train, y_test = train_test_split(\n",
    "    X, y, train_size=split, random_state=42, stratify=y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Counter({0: 77054, 1: 53387, 2: 14077, 3: 3880, 4: 119})\n"
     ]
    }
   ],
   "source": [
    "from collections import Counter\n",
    "\n",
    "# Class distribution of the full (unsplit) label vector -- sanity check\n",
    "# for imbalance before training the base learners.\n",
    "label_counts2 = Counter(y)\n",
    "print(label_counts2)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Base learner Split\n",
    "# split = 0.7\n",
    "\n",
    "# X_train,X_test, y_train, y_test = sklearn.model_selection.train_test_split(X_00, y_00, train_size=split)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>duration</th>\n",
       "      <th>src_bytes</th>\n",
       "      <th>dst_bytes</th>\n",
       "      <th>land</th>\n",
       "      <th>wrong_fragment</th>\n",
       "      <th>urgent</th>\n",
       "      <th>hot</th>\n",
       "      <th>num_failed_logins</th>\n",
       "      <th>logged_in</th>\n",
       "      <th>num_compromised</th>\n",
       "      <th>...</th>\n",
       "      <th>flag_REJ</th>\n",
       "      <th>flag_RSTO</th>\n",
       "      <th>flag_RSTOS0</th>\n",
       "      <th>flag_RSTR</th>\n",
       "      <th>flag_S0</th>\n",
       "      <th>flag_S1</th>\n",
       "      <th>flag_S2</th>\n",
       "      <th>flag_S3</th>\n",
       "      <th>flag_SF</th>\n",
       "      <th>flag_SH</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>140535</th>\n",
       "      <td>-0.155534</td>\n",
       "      <td>0.093373</td>\n",
       "      <td>0.294926</td>\n",
       "      <td>-0.017624</td>\n",
       "      <td>-0.059104</td>\n",
       "      <td>-0.019459</td>\n",
       "      <td>2.040705</td>\n",
       "      <td>-0.143999</td>\n",
       "      <td>1.123125</td>\n",
       "      <td>0.121069</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.453815</td>\n",
       "      <td>-0.18843</td>\n",
       "      <td>-0.009419</td>\n",
       "      <td>-0.174880</td>\n",
       "      <td>-0.313124</td>\n",
       "      <td>-0.030535</td>\n",
       "      <td>-0.025803</td>\n",
       "      <td>-0.105681</td>\n",
       "      <td>0.718027</td>\n",
       "      <td>-0.056997</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>70073</th>\n",
       "      <td>-0.110249</td>\n",
       "      <td>-0.007762</td>\n",
       "      <td>-0.004919</td>\n",
       "      <td>-0.014089</td>\n",
       "      <td>-0.089486</td>\n",
       "      <td>-0.007736</td>\n",
       "      <td>-0.095076</td>\n",
       "      <td>-0.027023</td>\n",
       "      <td>-0.809262</td>\n",
       "      <td>-0.011664</td>\n",
       "      <td>...</td>\n",
       "      <td>3.196020</td>\n",
       "      <td>-0.11205</td>\n",
       "      <td>-0.028606</td>\n",
       "      <td>-0.139982</td>\n",
       "      <td>-0.618438</td>\n",
       "      <td>-0.053906</td>\n",
       "      <td>-0.031767</td>\n",
       "      <td>-0.019726</td>\n",
       "      <td>-1.211901</td>\n",
       "      <td>-0.046432</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>144523</th>\n",
       "      <td>-0.152691</td>\n",
       "      <td>-0.021929</td>\n",
       "      <td>-0.092513</td>\n",
       "      <td>-0.017624</td>\n",
       "      <td>-0.059104</td>\n",
       "      <td>-0.019459</td>\n",
       "      <td>-0.113521</td>\n",
       "      <td>-0.143999</td>\n",
       "      <td>1.123125</td>\n",
       "      <td>-0.016494</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.453815</td>\n",
       "      <td>-0.18843</td>\n",
       "      <td>-0.009419</td>\n",
       "      <td>-0.174880</td>\n",
       "      <td>-0.313124</td>\n",
       "      <td>-0.030535</td>\n",
       "      <td>-0.025803</td>\n",
       "      <td>-0.105681</td>\n",
       "      <td>0.718027</td>\n",
       "      <td>-0.056997</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>98462</th>\n",
       "      <td>-0.110249</td>\n",
       "      <td>-0.007721</td>\n",
       "      <td>-0.004795</td>\n",
       "      <td>-0.014089</td>\n",
       "      <td>-0.089486</td>\n",
       "      <td>-0.007736</td>\n",
       "      <td>-0.095076</td>\n",
       "      <td>-0.027023</td>\n",
       "      <td>1.235694</td>\n",
       "      <td>-0.011664</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.312889</td>\n",
       "      <td>-0.11205</td>\n",
       "      <td>-0.028606</td>\n",
       "      <td>-0.139982</td>\n",
       "      <td>-0.618438</td>\n",
       "      <td>-0.053906</td>\n",
       "      <td>-0.031767</td>\n",
       "      <td>-0.019726</td>\n",
       "      <td>0.825150</td>\n",
       "      <td>-0.046432</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>133858</th>\n",
       "      <td>5.087705</td>\n",
       "      <td>-0.021988</td>\n",
       "      <td>-0.094822</td>\n",
       "      <td>-0.017624</td>\n",
       "      <td>-0.059104</td>\n",
       "      <td>-0.019459</td>\n",
       "      <td>-0.113521</td>\n",
       "      <td>-0.143999</td>\n",
       "      <td>-0.890373</td>\n",
       "      <td>-0.016494</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.453815</td>\n",
       "      <td>-0.18843</td>\n",
       "      <td>-0.009419</td>\n",
       "      <td>-0.174880</td>\n",
       "      <td>-0.313124</td>\n",
       "      <td>-0.030535</td>\n",
       "      <td>-0.025803</td>\n",
       "      <td>-0.105681</td>\n",
       "      <td>0.718027</td>\n",
       "      <td>-0.056997</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3677</th>\n",
       "      <td>-0.110249</td>\n",
       "      <td>-0.007755</td>\n",
       "      <td>-0.004900</td>\n",
       "      <td>-0.014089</td>\n",
       "      <td>-0.089486</td>\n",
       "      <td>-0.007736</td>\n",
       "      <td>-0.095076</td>\n",
       "      <td>-0.027023</td>\n",
       "      <td>-0.809262</td>\n",
       "      <td>-0.011664</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.312889</td>\n",
       "      <td>-0.11205</td>\n",
       "      <td>-0.028606</td>\n",
       "      <td>-0.139982</td>\n",
       "      <td>-0.618438</td>\n",
       "      <td>-0.053906</td>\n",
       "      <td>-0.031767</td>\n",
       "      <td>-0.019726</td>\n",
       "      <td>0.825150</td>\n",
       "      <td>-0.046432</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>141635</th>\n",
       "      <td>-0.155534</td>\n",
       "      <td>-0.021988</td>\n",
       "      <td>-0.096896</td>\n",
       "      <td>-0.017624</td>\n",
       "      <td>-0.059104</td>\n",
       "      <td>-0.019459</td>\n",
       "      <td>-0.113521</td>\n",
       "      <td>-0.143999</td>\n",
       "      <td>-0.890373</td>\n",
       "      <td>-0.016494</td>\n",
       "      <td>...</td>\n",
       "      <td>2.203539</td>\n",
       "      <td>-0.18843</td>\n",
       "      <td>-0.009419</td>\n",
       "      <td>-0.174880</td>\n",
       "      <td>-0.313124</td>\n",
       "      <td>-0.030535</td>\n",
       "      <td>-0.025803</td>\n",
       "      <td>-0.105681</td>\n",
       "      <td>-1.392705</td>\n",
       "      <td>-0.056997</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>112047</th>\n",
       "      <td>-0.110249</td>\n",
       "      <td>-0.007723</td>\n",
       "      <td>-0.004815</td>\n",
       "      <td>-0.014089</td>\n",
       "      <td>-0.089486</td>\n",
       "      <td>-0.007736</td>\n",
       "      <td>-0.095076</td>\n",
       "      <td>-0.027023</td>\n",
       "      <td>1.235694</td>\n",
       "      <td>-0.011664</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.312889</td>\n",
       "      <td>-0.11205</td>\n",
       "      <td>-0.028606</td>\n",
       "      <td>-0.139982</td>\n",
       "      <td>-0.618438</td>\n",
       "      <td>-0.053906</td>\n",
       "      <td>-0.031767</td>\n",
       "      <td>-0.019726</td>\n",
       "      <td>0.825150</td>\n",
       "      <td>-0.046432</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>134808</th>\n",
       "      <td>-0.155534</td>\n",
       "      <td>-0.021988</td>\n",
       "      <td>-0.096896</td>\n",
       "      <td>-0.017624</td>\n",
       "      <td>-0.059104</td>\n",
       "      <td>-0.019459</td>\n",
       "      <td>-0.113521</td>\n",
       "      <td>-0.143999</td>\n",
       "      <td>-0.890373</td>\n",
       "      <td>-0.016494</td>\n",
       "      <td>...</td>\n",
       "      <td>2.203539</td>\n",
       "      <td>-0.18843</td>\n",
       "      <td>-0.009419</td>\n",
       "      <td>-0.174880</td>\n",
       "      <td>-0.313124</td>\n",
       "      <td>-0.030535</td>\n",
       "      <td>-0.025803</td>\n",
       "      <td>-0.105681</td>\n",
       "      <td>-1.392705</td>\n",
       "      <td>-0.056997</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>67024</th>\n",
       "      <td>-0.110249</td>\n",
       "      <td>-0.007762</td>\n",
       "      <td>-0.004919</td>\n",
       "      <td>-0.014089</td>\n",
       "      <td>-0.089486</td>\n",
       "      <td>-0.007736</td>\n",
       "      <td>-0.095076</td>\n",
       "      <td>-0.027023</td>\n",
       "      <td>-0.809262</td>\n",
       "      <td>-0.011664</td>\n",
       "      <td>...</td>\n",
       "      <td>3.196020</td>\n",
       "      <td>-0.11205</td>\n",
       "      <td>-0.028606</td>\n",
       "      <td>-0.139982</td>\n",
       "      <td>-0.618438</td>\n",
       "      <td>-0.053906</td>\n",
       "      <td>-0.031767</td>\n",
       "      <td>-0.019726</td>\n",
       "      <td>-1.211901</td>\n",
       "      <td>-0.046432</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>103961 rows × 122 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "        duration  src_bytes  dst_bytes      land  wrong_fragment    urgent  \\\n",
       "140535 -0.155534   0.093373   0.294926 -0.017624       -0.059104 -0.019459   \n",
       "70073  -0.110249  -0.007762  -0.004919 -0.014089       -0.089486 -0.007736   \n",
       "144523 -0.152691  -0.021929  -0.092513 -0.017624       -0.059104 -0.019459   \n",
       "98462  -0.110249  -0.007721  -0.004795 -0.014089       -0.089486 -0.007736   \n",
       "133858  5.087705  -0.021988  -0.094822 -0.017624       -0.059104 -0.019459   \n",
       "...          ...        ...        ...       ...             ...       ...   \n",
       "3677   -0.110249  -0.007755  -0.004900 -0.014089       -0.089486 -0.007736   \n",
       "141635 -0.155534  -0.021988  -0.096896 -0.017624       -0.059104 -0.019459   \n",
       "112047 -0.110249  -0.007723  -0.004815 -0.014089       -0.089486 -0.007736   \n",
       "134808 -0.155534  -0.021988  -0.096896 -0.017624       -0.059104 -0.019459   \n",
       "67024  -0.110249  -0.007762  -0.004919 -0.014089       -0.089486 -0.007736   \n",
       "\n",
       "             hot  num_failed_logins  logged_in  num_compromised  ...  \\\n",
       "140535  2.040705          -0.143999   1.123125         0.121069  ...   \n",
       "70073  -0.095076          -0.027023  -0.809262        -0.011664  ...   \n",
       "144523 -0.113521          -0.143999   1.123125        -0.016494  ...   \n",
       "98462  -0.095076          -0.027023   1.235694        -0.011664  ...   \n",
       "133858 -0.113521          -0.143999  -0.890373        -0.016494  ...   \n",
       "...          ...                ...        ...              ...  ...   \n",
       "3677   -0.095076          -0.027023  -0.809262        -0.011664  ...   \n",
       "141635 -0.113521          -0.143999  -0.890373        -0.016494  ...   \n",
       "112047 -0.095076          -0.027023   1.235694        -0.011664  ...   \n",
       "134808 -0.113521          -0.143999  -0.890373        -0.016494  ...   \n",
       "67024  -0.095076          -0.027023  -0.809262        -0.011664  ...   \n",
       "\n",
       "        flag_REJ  flag_RSTO  flag_RSTOS0  flag_RSTR   flag_S0   flag_S1  \\\n",
       "140535 -0.453815   -0.18843    -0.009419  -0.174880 -0.313124 -0.030535   \n",
       "70073   3.196020   -0.11205    -0.028606  -0.139982 -0.618438 -0.053906   \n",
       "144523 -0.453815   -0.18843    -0.009419  -0.174880 -0.313124 -0.030535   \n",
       "98462  -0.312889   -0.11205    -0.028606  -0.139982 -0.618438 -0.053906   \n",
       "133858 -0.453815   -0.18843    -0.009419  -0.174880 -0.313124 -0.030535   \n",
       "...          ...        ...          ...        ...       ...       ...   \n",
       "3677   -0.312889   -0.11205    -0.028606  -0.139982 -0.618438 -0.053906   \n",
       "141635  2.203539   -0.18843    -0.009419  -0.174880 -0.313124 -0.030535   \n",
       "112047 -0.312889   -0.11205    -0.028606  -0.139982 -0.618438 -0.053906   \n",
       "134808  2.203539   -0.18843    -0.009419  -0.174880 -0.313124 -0.030535   \n",
       "67024   3.196020   -0.11205    -0.028606  -0.139982 -0.618438 -0.053906   \n",
       "\n",
       "         flag_S2   flag_S3   flag_SF   flag_SH  \n",
       "140535 -0.025803 -0.105681  0.718027 -0.056997  \n",
       "70073  -0.031767 -0.019726 -1.211901 -0.046432  \n",
       "144523 -0.025803 -0.105681  0.718027 -0.056997  \n",
       "98462  -0.031767 -0.019726  0.825150 -0.046432  \n",
       "133858 -0.025803 -0.105681  0.718027 -0.056997  \n",
       "...          ...       ...       ...       ...  \n",
       "3677   -0.031767 -0.019726  0.825150 -0.046432  \n",
       "141635 -0.025803 -0.105681 -1.392705 -0.056997  \n",
       "112047 -0.031767 -0.019726  0.825150 -0.046432  \n",
       "134808 -0.025803 -0.105681 -1.392705 -0.056997  \n",
       "67024  -0.031767 -0.019726 -1.211901 -0.046432  \n",
       "\n",
       "[103961 rows x 122 columns]"
      ]
     },
     "execution_count": 90,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "140535    1\n",
       "70073     2\n",
       "144523    3\n",
       "98462     0\n",
       "133858    1\n",
       "         ..\n",
       "3677      0\n",
       "141635    1\n",
       "112047    0\n",
       "134808    1\n",
       "67024     2\n",
       "Name: Label, Length: 103961, dtype: int64"
      ]
     },
     "execution_count": 91,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y_train"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## LEVEL 0 - Weak models - Base Learner"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from sklearn.tree import DecisionTreeClassifier\n",
    "# start = time.time()\n",
    "\n",
    "# # Create a Decision Tree Classifier\n",
    "# dt_classifier = DecisionTreeClassifier(random_state=42)\n",
    "# # Train the classifier on the training data\n",
    "# dt_classifier.fit(X_train, y_train)\n",
    "# # Make predictions on the test data\n",
    "# preds_dt = dt_classifier.predict(X_test)\n",
    "# # Evaluate the accuracy of the model\n",
    "# preds_dt_prob = dt_classifier.predict_proba(X_test)\n",
    "# end = time.time()\n",
    "\n",
    "# time_taken = end - start\n",
    "# pred_label = preds_dt\n",
    "# name = 'dt'\n",
    "# metrics = confusion_metrics(name, pred_label, y_test,time_taken)\n",
    "\n",
    "# Acc = metrics[0]\n",
    "# Precision = metrics[1]\n",
    "# Recall = metrics[2]\n",
    "# F1 = metrics[3]\n",
    "# BACC = metrics[4]\n",
    "# MCC = metrics[5]    \n",
    "\n",
    "# globals()[f\"{name}_acc_00\"] = Acc\n",
    "# globals()[f\"{name}_pre_00\"] = Precision\n",
    "# globals()[f\"{name}_rec_00\"] = Recall\n",
    "# globals()[f\"{name}_f1_00\"] = F1\n",
    "# globals()[f\"{name}_bacc_00\"] = BACC\n",
    "# globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "# globals()[f\"{name}_time_00\"] = time_taken"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 93,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining RF Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Defining ADA Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Defining LGBM Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Defining KNN Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Defining SVM Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Defining MLP Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Defining DNN Model\n",
      "---------------------------------------------------------------------------------\n",
      "Model: \"sequential_1\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "dense_6 (Dense)              (None, 3)                 369       \n",
      "_________________________________________________________________\n",
      "dropout_5 (Dropout)          (None, 3)                 0         \n",
      "_________________________________________________________________\n",
      "dense_7 (Dense)              (None, 3)                 12        \n",
      "_________________________________________________________________\n",
      "dropout_6 (Dropout)          (None, 3)                 0         \n",
      "_________________________________________________________________\n",
      "dense_8 (Dense)              (None, 3)                 12        \n",
      "_________________________________________________________________\n",
      "dropout_7 (Dropout)          (None, 3)                 0         \n",
      "_________________________________________________________________\n",
      "dense_9 (Dense)              (None, 3)                 12        \n",
      "_________________________________________________________________\n",
      "dropout_8 (Dropout)          (None, 3)                 0         \n",
      "_________________________________________________________________\n",
      "dense_10 (Dense)             (None, 3)                 12        \n",
      "_________________________________________________________________\n",
      "dropout_9 (Dropout)          (None, 3)                 0         \n",
      "_________________________________________________________________\n",
      "dense_11 (Dense)             (None, 5)                 20        \n",
      "=================================================================\n",
      "Total params: 437\n",
      "Trainable params: 437\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "with open(output_file_name, \"a\") as f: print('------------START of WEAK LEARNERS (BASE MODELS) - STACK 00 -----------------', file = f)\n",
    "\n",
    "# Define the level-0 (weak) base models.  All imports are gathered here\n",
    "# once: the original cell imported MultiOutputClassifier three times and\n",
    "# `time` twice.  NOTE: the name `abc` shadows the stdlib `abc` module.\n",
    "from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\n",
    "from sklearn.multioutput import MultiOutputClassifier\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.linear_model import SGDClassifier\n",
    "from sklearn.neural_network import MLPClassifier\n",
    "from lightgbm import LGBMClassifier\n",
    "from tensorflow.keras.models import Sequential  # kept: referenced by later (commented) cells\n",
    "from tensorflow.keras.layers import Dense\n",
    "import time\n",
    "\n",
    "\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining RF Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "# Random Forest: shallow trees and few estimators keep this learner weak.\n",
    "rf = RandomForestClassifier(max_depth = 5,  n_estimators = 10, min_samples_split = 2, n_jobs = -1)\n",
    "\n",
    "\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining ADA Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "abc = AdaBoostClassifier(n_estimators=50, learning_rate=1.0)\n",
    "\n",
    "\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining LGBM Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "lgbm = LGBMClassifier()\n",
    "\n",
    "\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining KNN Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "knn_clf=KNeighborsClassifier(n_neighbors = 5)\n",
    "\n",
    "\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining SVM Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "# Linear SVM trained via stochastic gradient descent (hinge loss).\n",
    "clf = SGDClassifier(\n",
    "    loss='hinge',           # hinge loss for linear SVM\n",
    "    penalty='l2',           # L2 regularization to prevent overfitting\n",
    "    alpha=1e-4,             # regularization strength (NOT the learning rate)\n",
    "    max_iter=1000,          # number of passes over the training data\n",
    "    random_state=42,        # seed for reproducible results\n",
    "    learning_rate='optimal' # schedule derived from alpha\n",
    ")\n",
    "\n",
    "\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining MLP Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "mlp = MLPClassifier(hidden_layer_sizes=(100,), max_iter=200, random_state=1)\n",
    "\n",
    "\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining DNN Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "# Keras DNN: five identical Dense+Dropout stages feeding a 5-way softmax\n",
    "# (one output unit per attack class).\n",
    "dropout_rate = 0.2\n",
    "nodes = 3\n",
    "out_layer = 5\n",
    "optimizer='adam'\n",
    "loss='sparse_categorical_crossentropy'\n",
    "epochs=100\n",
    "batch_size=128\n",
    "\n",
    "num_columns = X_train.shape[1]\n",
    "\n",
    "dnn = tf.keras.Sequential()\n",
    "\n",
    "# Input layer\n",
    "dnn.add(tf.keras.Input(shape=(num_columns,)))\n",
    "\n",
    "# The original cell repeated this Dense+Dropout pair five times verbatim;\n",
    "# the loop builds the identical layer stack.\n",
    "for _ in range(5):\n",
    "    dnn.add(tf.keras.layers.Dense(nodes))\n",
    "    dnn.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "# Output layer\n",
    "dnn.add(tf.keras.layers.Dense(out_layer, activation='softmax'))\n",
    "\n",
    "dnn.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n",
    "\n",
    "dnn.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "metadata": {},
   "outputs": [],
   "source": [
    "# #SVM\n",
    "# # Wrap SGDClassifier with MultiOutputClassifier\n",
    "# multi_target_clf = MultiOutputClassifier(clf)\n",
    "\n",
    "# # Fit the model on the training data\n",
    "# multi_target_clf.fit(X_train, y_train)\n",
    "\n",
    "# Make predictions on the test data\n",
    "# y_pred = clf.predict(X_test)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Training Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training ADA\n",
      "---------------------------------------------------------------------------------\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Training RF\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training SVM\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training KNN\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training LGBM\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training MLP\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training DNN\n",
      "---------------------------------------------------------------------------------\n",
      "Epoch 1/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.9853 - accuracy: 0.6781 - val_loss: 0.4096 - val_accuracy: 0.8522\n",
      "Epoch 2/100\n",
      "650/650 [==============================] - 2s 2ms/step - loss: 0.5899 - accuracy: 0.8116 - val_loss: 0.3732 - val_accuracy: 0.8590\n",
      "Epoch 3/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.5190 - accuracy: 0.8260 - val_loss: 0.3600 - val_accuracy: 0.8593\n",
      "Epoch 4/100\n",
      "650/650 [==============================] - 2s 2ms/step - loss: 0.5018 - accuracy: 0.8289 - val_loss: 0.3560 - val_accuracy: 0.8580\n",
      "Epoch 5/100\n",
      "650/650 [==============================] - 2s 2ms/step - loss: 0.4867 - accuracy: 0.8307 - val_loss: 0.3518 - val_accuracy: 0.8588\n",
      "Epoch 6/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4754 - accuracy: 0.8305 - val_loss: 0.3504 - val_accuracy: 0.8581\n",
      "Epoch 7/100\n",
      "650/650 [==============================] - 2s 2ms/step - loss: 0.4667 - accuracy: 0.8351 - val_loss: 0.3443 - val_accuracy: 0.8605\n",
      "Epoch 8/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4556 - accuracy: 0.8402 - val_loss: 0.3454 - val_accuracy: 0.8600\n",
      "Epoch 9/100\n",
      "650/650 [==============================] - 2s 2ms/step - loss: 0.4523 - accuracy: 0.8409 - val_loss: 0.3407 - val_accuracy: 0.8584\n",
      "Epoch 10/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4478 - accuracy: 0.8410 - val_loss: 0.3405 - val_accuracy: 0.8599\n",
      "Epoch 11/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4478 - accuracy: 0.8415 - val_loss: 0.3414 - val_accuracy: 0.8589\n",
      "Epoch 12/100\n",
      "650/650 [==============================] - 2s 2ms/step - loss: 0.4453 - accuracy: 0.8428 - val_loss: 0.3413 - val_accuracy: 0.8596\n",
      "Epoch 13/100\n",
      "650/650 [==============================] - 2s 2ms/step - loss: 0.4440 - accuracy: 0.8429 - val_loss: 0.3381 - val_accuracy: 0.8594\n",
      "Epoch 14/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4447 - accuracy: 0.8421 - val_loss: 0.3392 - val_accuracy: 0.8600\n",
      "Epoch 15/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4410 - accuracy: 0.8433 - val_loss: 0.3408 - val_accuracy: 0.8588\n",
      "Epoch 16/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4458 - accuracy: 0.8415 - val_loss: 0.3401 - val_accuracy: 0.8586\n",
      "Epoch 17/100\n",
      "650/650 [==============================] - 2s 2ms/step - loss: 0.4434 - accuracy: 0.8428 - val_loss: 0.3392 - val_accuracy: 0.8589\n"
     ]
    }
   ],
   "source": [
    "#Training Basemodels\n",
    "# Trains every base learner whose use_model_* flag is set (and whose\n",
    "# load_model_* flag is not), timing each fit and persisting the artifact.\n",
    "import joblib\n",
    "from sklearn.model_selection import StratifiedKFold, cross_val_score\n",
    "\n",
    "n_splits = 5  # folds for the (currently disabled) cross-validation check\n",
    "\n",
    "SEPARATOR = '---------------------------------------------------------------------------------'\n",
    "\n",
    "def log_banner(message):\n",
    "    # Echo a section banner to stdout and append it to the report file.\n",
    "    print(SEPARATOR)\n",
    "    print(message)\n",
    "    with open(output_file_name, \"a\") as f:\n",
    "        print(SEPARATOR, file=f)\n",
    "        print(message, file=f)\n",
    "    print(SEPARATOR)\n",
    "\n",
    "def log_elapsed(seconds):\n",
    "    # Record the elapsed wall-clock training time in the report file.\n",
    "    with open(output_file_name, \"a\") as f:\n",
    "        print('Elapsed training time ', seconds, file=f)\n",
    "\n",
    "print(SEPARATOR)\n",
    "print('Training Model')\n",
    "with open(output_file_name, \"a\") as f: print('Training weak models - level 0', file=f)\n",
    "print(SEPARATOR)\n",
    "\n",
    "# NOTE(review): a StratifiedKFold/cross_val_score block used to be duplicated\n",
    "# (commented out) after every fit; re-enable per model with\n",
    "# cross_val_score(model, X_train, y_train,\n",
    "#                 cv=StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42),\n",
    "#                 scoring='accuracy') if needed.\n",
    "\n",
    "if use_model_ada == 1 and load_model_ada == 0:\n",
    "    log_banner('Training ADA')\n",
    "    start = time.time()\n",
    "    ada = abc.fit(X_train, y_train)  # abc: AdaBoost estimator defined earlier\n",
    "    end = time.time()\n",
    "    ada_tr_time_taken = time_taken = end - start\n",
    "    log_elapsed(time_taken)\n",
    "    joblib.dump(ada, 'ada_base_model.joblib')\n",
    "\n",
    "if use_model_rf == 1 and load_model_rf == 0:\n",
    "    log_banner('Training RF')\n",
    "    start = time.time()\n",
    "    model_rf = rf.fit(X_train, y_train)\n",
    "    end = time.time()\n",
    "    rf_tr_time_taken = time_taken = end - start\n",
    "    log_elapsed(time_taken)\n",
    "    joblib.dump(model_rf, 'rf_base_model.joblib')\n",
    "\n",
    "if use_model_svm == 1 and load_model_svm == 0:\n",
    "    log_banner('Training SVM')\n",
    "    start = time.time()\n",
    "    clf.fit(X_train, y_train)\n",
    "    end = time.time()\n",
    "    svm_tr_time_taken = time_taken = end - start\n",
    "    log_elapsed(time_taken)\n",
    "    joblib.dump(clf, 'svm_base_model.joblib')\n",
    "\n",
    "if use_model_knn == 1 and load_model_knn == 0:\n",
    "    log_banner('Training KNN')\n",
    "    start = time.time()\n",
    "    knn_clf.fit(X_train, y_train)\n",
    "    end = time.time()\n",
    "    knn_tr_time_taken = time_taken = end - start\n",
    "    log_elapsed(time_taken)\n",
    "    joblib.dump(knn_clf, 'knn_base_model.joblib')\n",
    "\n",
    "if use_model_lgbm == 1 and load_model_lgbm == 0:\n",
    "    log_banner('Training LGBM')\n",
    "    start = time.time()\n",
    "    lgbm.fit(X_train, y_train)\n",
    "    end = time.time()\n",
    "    lgbm_tr_time_taken = time_taken = end - start\n",
    "    log_elapsed(time_taken)\n",
    "    joblib.dump(lgbm, 'lgbm_base_model.joblib')\n",
    "\n",
    "if use_model_mlp == 1 and load_model_mlp == 0:\n",
    "    log_banner('Training MLP')\n",
    "    start = time.time()\n",
    "    MLP = mlp.fit(X_train, y_train)\n",
    "    end = time.time()\n",
    "    mlp_tr_time_taken = time_taken = end - start\n",
    "    log_elapsed(time_taken)\n",
    "    joblib.dump(MLP, 'mlp_base_model.joblib')\n",
    "\n",
    "if use_model_dnn == 1 and load_model_dnn == 0:\n",
    "    from keras.callbacks import EarlyStopping\n",
    "\n",
    "    # Stop once validation accuracy plateaus and keep the best weights.\n",
    "    early_stopping = EarlyStopping(monitor='val_accuracy', patience=10, restore_best_weights=True)\n",
    "    log_banner('Training DNN')\n",
    "    start = time.time()\n",
    "    dnn.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2, callbacks=[early_stopping])\n",
    "    end = time.time()\n",
    "    dnn_tr_time_taken = time_taken = end - start\n",
    "    log_elapsed(time_taken)\n",
    "    # Keras models are saved in HDF5 format, not via joblib.\n",
    "    dnn.save(\"DNN_base_model.h5\")\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print(SEPARATOR, file=f)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from keras.models import Sequential\n",
    "# from keras.layers import Dense\n",
    "# from keras.wrappers.scikit_learn import KerasClassifier\n",
    "# from sklearn.model_selection import GridSearchCV\n",
    "# from sklearn.model_selection import StratifiedKFold\n",
    "\n",
    "# # Define your Keras model as a function\n",
    "# def create_model(optimizer='adam', hidden_layer_size=16):\n",
    "#     # model = Sequential()\n",
    "#     # model.add(Dense(hidden_layer_size, input_dim=input_size, activation='relu'))\n",
    "#     # model.add(Dense(1, activation='sigmoid'))\n",
    "#     # model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])\n",
    "\n",
    "        \n",
    "#     dnn = tf.keras.Sequential()\n",
    "\n",
    "#     # Input layer\n",
    "#     dnn.add(tf.keras.Input(shape=(num_columns,)))\n",
    "\n",
    "#     # Dense layers with dropout\n",
    "#     dnn.add(tf.keras.layers.Dense(nodes))\n",
    "#     dnn.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "#     dnn.add(tf.keras.layers.Dense(nodes))\n",
    "#     dnn.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "#     dnn.add(tf.keras.layers.Dense(nodes))\n",
    "#     dnn.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "#     dnn.add(tf.keras.layers.Dense(nodes))\n",
    "#     dnn.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "#     dnn.add(tf.keras.layers.Dense(nodes))\n",
    "#     dnn.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "\n",
    "#     # Output layer\n",
    "#     dnn.add(tf.keras.layers.Dense(out_layer))\n",
    "\n",
    "\n",
    "\n",
    "#     dnn.compile(optimizer=optimizer, loss=loss)\n",
    "\n",
    "#     dnn.summary()\n",
    "#     return dnn\n",
    "\n",
    "# # Create a KerasClassifier\n",
    "# dnn = KerasClassifier(build_fn=create_model, epochs=10, batch_size=32, verbose=0)\n",
    "\n",
    "# # Define the parameter grid for GridSearchCV\n",
    "# param_grid = {\n",
    "#     'optimizer': ['adam', 'sgd'],\n",
    "#     'hidden_layer_size': [8, 16, 32]\n",
    "# }\n",
    "\n",
    "# # Create the StratifiedKFold\n",
    "# cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\n",
    "\n",
    "# # Create GridSearchCV\n",
    "# grid = GridSearchCV(estimator=dnn, param_grid=param_grid, cv=cv, scoring='accuracy')\n",
    "# grid_result = grid.fit(X_train, y_train)\n",
    "\n",
    "# # Print the best parameters and best accuracy\n",
    "# print(\"Best Parameters: \", grid_result.best_params_)\n",
    "# print(\"Best Accuracy: \", grid_result.best_score_)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "metadata": {},
   "outputs": [],
   "source": [
    "# stratified_kfold"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Restore any base learners persisted by an earlier run.\n",
    "# Each load_model_* flag (0/1) selects whether that artifact is read back.\n",
    "from tensorflow.keras.models import load_model\n",
    "\n",
    "if load_model_ada:\n",
    "    ada = joblib.load('ada_base_model.joblib')\n",
    "\n",
    "if load_model_rf:\n",
    "    rf = joblib.load('rf_base_model.joblib')\n",
    "\n",
    "if load_model_svm:\n",
    "    clf = joblib.load('svm_base_model.joblib')\n",
    "\n",
    "if load_model_knn:\n",
    "    knn_clf = joblib.load('knn_base_model.joblib')\n",
    "\n",
    "if load_model_mlp:\n",
    "    MLP = joblib.load('mlp_base_model.joblib')\n",
    "\n",
    "if load_model_lgbm:\n",
    "    lgbm = joblib.load('lgbm_base_model.joblib')\n",
    "\n",
    "# The Keras model was saved in HDF5 format rather than via joblib.\n",
    "if load_model_dnn:\n",
    "    dnn = load_model(\"DNN_base_model.h5\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Make predictions on the test data\n",
    "# preds_svm = clf.predict(X_test)\n",
    "\n",
    "\n",
    "\n",
    "# y_scores = y_pred\n",
    "# y_true = y_test\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Base learners predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 100,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Prediction RF\n",
      "---------------------------------------------------------------------------------\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Prediction SVM\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Prediction LGBM\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Prediction DNN\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Prediction ADA\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Prediction MLP\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Prediction KNN\n",
      "---------------------------------------------------------------------------------\n"
     ]
    }
   ],
   "source": [
    "# Base-learner predictions on the held-out test set.\n",
    "from sklearn.calibration import CalibratedClassifierCV\n",
    "\n",
    "SEPARATOR = '---------------------------------------------------------------------------------'\n",
    "\n",
    "def log_banner(message):\n",
    "    # Echo a section banner to stdout and append it to the report file.\n",
    "    print(SEPARATOR)\n",
    "    print(message)\n",
    "    with open(output_file_name, \"a\") as f:\n",
    "        print(SEPARATOR, file=f)\n",
    "        print(message, file=f)\n",
    "    print(SEPARATOR)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('Generating Predictions', file=f)\n",
    "\n",
    "if use_model_rf == 1:\n",
    "    log_banner('Prediction RF')\n",
    "    start = time.time()\n",
    "    preds_rf = rf.predict(X_test)\n",
    "    preds_rf_prob = rf.predict_proba(X_test)\n",
    "    end = time.time()\n",
    "    rf_pr_time_taken = time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file=f)\n",
    "\n",
    "if use_model_svm == 1:\n",
    "    log_banner('Prediction SVM')\n",
    "    start = time.time()\n",
    "    preds_svm = clf.predict(X_test)\n",
    "\n",
    "    # The linear SVM has no native predict_proba, so wrap it in a\n",
    "    # calibration meta-learner:\n",
    "    # https://stackoverflow.com/questions/55250963/how-to-get-probabilities-for-sgdclassifier-linearsvm\n",
    "    # BUGFIX: calibrate on the training split and score the test split.\n",
    "    # Previously this fit on (X, y) and called predict_proba(X), which\n",
    "    # leaked test data and produced probabilities misaligned with preds_svm.\n",
    "    model = CalibratedClassifierCV(clf)\n",
    "    model.fit(X_train, y_train)\n",
    "    preds_svm_prob = model.predict_proba(X_test)\n",
    "\n",
    "    end = time.time()\n",
    "    svm_pr_time_taken = time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file=f)\n",
    "\n",
    "if use_model_lgbm == 1:\n",
    "    log_banner('Prediction LGBM')\n",
    "    start = time.time()\n",
    "    preds_lgbm = lgbm.predict(X_test)\n",
    "    preds_lgbm_prob = lgbm.predict_proba(X_test)\n",
    "    end = time.time()\n",
    "    lgbm_pr_time_taken = time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file=f)\n",
    "\n",
    "if use_model_dnn == 1:\n",
    "    log_banner('Prediction DNN')\n",
    "    start = time.time()\n",
    "    pred_dnn = dnn.predict(X_test)  # softmax probabilities per class\n",
    "    preds_dnn_prob = pred_dnn\n",
    "    preds_dnn = np.argmax(pred_dnn, axis=1)  # class labels from probabilities\n",
    "    end = time.time()\n",
    "    dnn_pr_time_taken = time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file=f)\n",
    "\n",
    "if use_model_ada == 1:\n",
    "    log_banner('Prediction ADA')\n",
    "    start = time.time()\n",
    "    preds_ada = ada.predict(X_test)\n",
    "    preds_ada_prob = ada.predict_proba(X_test)\n",
    "    end = time.time()\n",
    "    ada_pr_time_taken = time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file=f)\n",
    "\n",
    "# BUGFIX: the MLP and KNN banners were previously printed inside the ADA\n",
    "# and MLP branches respectively, so they were gated by the wrong\n",
    "# use_model_* flag.  Each banner now lives in its own branch.\n",
    "if use_model_mlp == 1:\n",
    "    log_banner('Prediction MLP')\n",
    "    start = time.time()\n",
    "    y_pred = MLP.predict_proba(X_test)\n",
    "    preds_mlp_prob = y_pred\n",
    "    preds_mlp = np.argmax(y_pred, axis=1)\n",
    "    end = time.time()\n",
    "    mlp_pr_time_taken = time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file=f)\n",
    "\n",
    "if use_model_knn == 1:\n",
    "    log_banner('Prediction KNN')\n",
    "    start = time.time()\n",
    "    preds_knn = knn_clf.predict(X_test)\n",
    "    preds_knn_prob = knn_clf.predict_proba(X_test)\n",
    "    end = time.time()\n",
    "    knn_pr_time_taken = time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file=f)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print(SEPARATOR, file=f)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from sklearn.calibration import CalibratedClassifierCV\n",
    "# model = CalibratedClassifierCV(clf)\n",
    "\n",
    "# model.fit(X, y)\n",
    "# preds_svm_prob = model.predict_proba(X)\n",
    "\n",
    "# print(preds_ada_prob)\n",
    "# print(preds_knn_prob)\n",
    "# print(preds_dnn_prob)\n",
    "# print(preds_mlp_prob)\n",
    "# print(preds_rf_prob)\n",
    "# print(preds_svm_prob)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[8.72093733e-01 7.45689618e-03 2.85238807e-02 9.04090624e-02\n",
      "  1.51642739e-03]\n",
      " [8.58613008e-01 2.04807623e-04 1.09593308e-01 3.09428029e-02\n",
      "  6.46073102e-04]\n",
      " [1.25468259e-01 8.27396976e-01 3.85425919e-02 8.41942724e-03\n",
      "  1.72745838e-04]\n",
      " ...\n",
      " [3.93095065e-01 6.06350746e-01 2.88756006e-04 8.55018688e-07\n",
      "  2.64577935e-04]\n",
      " [9.47981386e-01 2.33621240e-03 9.44433097e-03 3.99189637e-02\n",
      "  3.19107346e-04]\n",
      " [3.21028204e-01 3.57404714e-02 6.43035240e-01 5.19631068e-08\n",
      "  1.96033090e-04]]\n",
      "[0 0 1 ... 1 0 2]\n",
      "[0 1 0 ... 1 0 0]\n"
     ]
    }
   ],
   "source": [
    "print(preds_svm_prob)\n",
    "preds_3 = np.argmax(preds_svm_prob,axis = 1)\n",
    "print(preds_3)\n",
    "\n",
    "print(preds_svm)\n",
    "# print(y_train)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### METRICS - Base Learners"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import accuracy_score\n",
    "from sklearn.metrics import precision_score\n",
    "from sklearn.metrics import recall_score\n",
    "from sklearn.metrics import f1_score\n",
    "from sklearn.metrics import balanced_accuracy_score\n",
    "from sklearn.metrics import matthews_corrcoef\n",
    "from sklearn.metrics import roc_auc_score\n",
    "\n",
    "\n",
    "\n",
    "# >>> \n",
    "# >>> roc_auc_score(y, clf.predict_proba(X)[:, 1])\n",
    "# 0.99...\n",
    "# >>> roc_auc_score(y, clf.decision_function(X))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### RF"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 104,
   "metadata": {},
   "outputs": [],
   "source": [
    "# y_test\n",
    "# pred_label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2      3    4\n",
      "0  22972.0     47.0    53.0    1.0  0.0\n",
      "1   1239.0  14796.0     7.0    0.0  0.0\n",
      "2    258.0     72.0  3915.0    0.0  0.0\n",
      "3    913.0     10.0    43.0  197.0  0.0\n",
      "4     30.0      0.0     0.0    3.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.939940748720711\n",
      "Precision total:  0.7699608025678447\n",
      "Recall total:  0.6019204939666276\n",
      "F1 total:  0.6279287926180791\n",
      "BACC total:  0.6019204939666276\n",
      "MCC total:  0.8991350647044627\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n"
     ]
    }
   ],
   "source": [
    "#RF\n",
    "if use_model_rf == 1:\n",
    "    # start = time.time()\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('RF base model', file = f)\n",
    "\n",
    "    pred_label = preds_rf\n",
    "    name = 'rf'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test,rf_pr_time_taken + rf_tr_time_taken)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "    globals()[f\"{name}_acc_00\"] = Acc\n",
    "    globals()[f\"{name}_pre_00\"] = Precision\n",
    "    globals()[f\"{name}_rec_00\"] = Recall\n",
    "    globals()[f\"{name}_f1_00\"] = F1\n",
    "    globals()[f\"{name}_bacc_00\"] = BACC\n",
    "    globals()[f\"{name}_mcc_00\"] = MCC\n",
    "    globals()[f\"{name}_time_00\"] = rf_pr_time_taken + rf_tr_time_taken\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1    2    3    4\n",
      "0  22780.0    293.0  0.0  0.0  0.0\n",
      "1    503.0  15539.0  0.0  0.0  0.0\n",
      "2   1025.0   3220.0  0.0  0.0  0.0\n",
      "3   1137.0     26.0  0.0  0.0  0.0\n",
      "4     24.0      9.0  0.0  0.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy total:  0.8600188526797737\n",
      "Precision total:  0.34170699738191557\n",
      "Recall total:  0.391189196382726\n",
      "F1 total:  0.3646501667229524\n",
      "BACC total:  0.391189196382726\n",
      "MCC total:  0.7607402604897849\n"
     ]
    }
   ],
   "source": [
    "#DNN\n",
    "if use_model_dnn == 1:\n",
    "    start = time.time()\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('DNN base model', file = f)\n",
    "\n",
    "\n",
    "    pred_label = preds_dnn\n",
    "    name = 'dnn'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test, dnn_pr_time_taken + dnn_tr_time_taken)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "    globals()[f\"{name}_acc_00\"] = Acc\n",
    "    globals()[f\"{name}_pre_00\"] = Precision\n",
    "    globals()[f\"{name}_rec_00\"] = Recall\n",
    "    globals()[f\"{name}_f1_00\"] = F1\n",
    "    globals()[f\"{name}_bacc_00\"] = BACC\n",
    "    globals()[f\"{name}_mcc_00\"] = MCC\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    globals()[f\"{name}_time_00\"] = dnn_pr_time_taken + dnn_tr_time_taken"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 107,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2      3     4\n",
      "0  22556.0     98.0   238.0  151.0  30.0\n",
      "1   2173.0  11145.0  2638.0   86.0   0.0\n",
      "2   1396.0    449.0  2396.0    4.0   0.0\n",
      "3    370.0     37.0     1.0  693.0  62.0\n",
      "4     11.0      0.0     0.0    4.0  18.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.826106472753389\n",
      "Precision total:  0.6316037290315586\n",
      "Recall total:  0.675617539225826\n",
      "F1 total:  0.6254876886479785\n",
      "BACC total:  0.675617539225826\n",
      "MCC total:  0.7128168705957457\n"
     ]
    }
   ],
   "source": [
    "#ADA\n",
    "if use_model_ada == 1:\n",
    "    start = time.time()\n",
    "    \n",
    "    pred_label = preds_ada\n",
    "    name = 'ada'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test, ada_pr_time_taken + ada_tr_time_taken)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "    globals()[f\"{name}_acc_00\"] = Acc\n",
    "    globals()[f\"{name}_pre_00\"] = Precision\n",
    "    globals()[f\"{name}_rec_00\"] = Recall\n",
    "    globals()[f\"{name}_f1_00\"] = F1\n",
    "    globals()[f\"{name}_bacc_00\"] = BACC\n",
    "    globals()[f\"{name}_mcc_00\"] = MCC\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    globals()[f\"{name}_time_00\"] = ada_pr_time_taken + ada_tr_time_taken"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2      3    4\n",
      "0  22648.0    122.0   172.0  123.0  8.0\n",
      "1    284.0  15745.0    12.0    1.0  0.0\n",
      "2    165.0     70.0  4002.0    8.0  0.0\n",
      "3    343.0      2.0     7.0  811.0  0.0\n",
      "4     19.0      1.0     0.0    5.0  8.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9698805996947661\n",
      "Precision total:  0.8526257585342927\n",
      "Recall total:  0.7691162413842563\n",
      "F1 total:  0.8003013666868733\n",
      "BACC total:  0.7691162413842563\n",
      "MCC total:  0.9489669190170773\n"
     ]
    }
   ],
   "source": [
    "#SVM\n",
    "if use_model_svm == 1:\n",
    "    start = time.time()\n",
    "\n",
    "    pred_label = preds_svm\n",
    "    name = 'svm'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test,svm_pr_time_taken + svm_tr_time_taken)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "    globals()[f\"{name}_acc_00\"] = Acc\n",
    "    globals()[f\"{name}_pre_00\"] = Precision\n",
    "    globals()[f\"{name}_rec_00\"] = Recall\n",
    "    globals()[f\"{name}_f1_00\"] = F1\n",
    "    globals()[f\"{name}_bacc_00\"] = BACC\n",
    "    globals()[f\"{name}_mcc_00\"] = MCC\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    globals()[f\"{name}_time_00\"] = svm_pr_time_taken + svm_tr_time_taken"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 109,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2       3     4\n",
      "0  22900.0     29.0    49.0    89.0   6.0\n",
      "1     18.0  16022.0     2.0     0.0   0.0\n",
      "2     52.0     74.0  4119.0     0.0   0.0\n",
      "3     77.0      7.0     3.0  1075.0   1.0\n",
      "4      8.0      0.0     1.0     7.0  17.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.990506329113924\n",
      "Precision total:  0.9199267025919337\n",
      "Recall total:  0.8802116975287835\n",
      "F1 total:  0.8970012881521608\n",
      "BACC total:  0.8802116975287835\n",
      "MCC total:  0.9839714411734178\n"
     ]
    }
   ],
   "source": [
    "#KNN\n",
    "if use_model_knn == 1:\n",
    "    start = time.time()\n",
    "    pred_label = preds_knn\n",
    "    name = 'knn'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test,knn_pr_time_taken + knn_tr_time_taken)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "    globals()[f\"{name}_acc_00\"] = Acc\n",
    "    globals()[f\"{name}_pre_00\"] = Precision\n",
    "    globals()[f\"{name}_rec_00\"] = Recall\n",
    "    globals()[f\"{name}_f1_00\"] = F1\n",
    "    globals()[f\"{name}_bacc_00\"] = BACC\n",
    "    globals()[f\"{name}_mcc_00\"] = MCC\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    globals()[f\"{name}_time_00\"] = knn_pr_time_taken + knn_tr_time_taken"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 110,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2       3     4\n",
      "0  22925.0     19.0    43.0    84.0   2.0\n",
      "1     23.0  16009.0     9.0     1.0   0.0\n",
      "2     27.0      7.0  4209.0     2.0   0.0\n",
      "3     58.0      0.0     6.0  1094.0   5.0\n",
      "4     10.0      0.0     0.0     7.0  16.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9931995690816051\n",
      "Precision total:  0.9192385185293623\n",
      "Recall total:  0.8817134149715216\n",
      "F1 total:  0.8966891504020407\n",
      "BACC total:  0.8817134149715216\n",
      "MCC total:  0.9885292442510568\n"
     ]
    }
   ],
   "source": [
    "#MLP\n",
    "if use_model_mlp == 1:\n",
    "    start = time.time()\n",
    "    pred_label = preds_mlp\n",
    "    name = 'mlp'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test,mlp_pr_time_taken + mlp_tr_time_taken)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "    globals()[f\"{name}_acc_00\"] = Acc\n",
    "    globals()[f\"{name}_pre_00\"] = Precision\n",
    "    globals()[f\"{name}_rec_00\"] = Recall\n",
    "    globals()[f\"{name}_f1_00\"] = F1\n",
    "    globals()[f\"{name}_bacc_00\"] = BACC\n",
    "    globals()[f\"{name}_mcc_00\"] = MCC\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    globals()[f\"{name}_time_00\"] = mlp_pr_time_taken + mlp_tr_time_taken"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 111,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2       3     4\n",
      "0  22810.0     66.0    50.0   109.0  38.0\n",
      "1     75.0  15822.0   128.0    16.0   1.0\n",
      "2     62.0     68.0  4093.0    21.0   1.0\n",
      "3     37.0      4.0     1.0  1095.0  26.0\n",
      "4      9.0      0.0     0.0     7.0  17.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy total:  0.9838630038603107\n",
      "Precision total:  0.8047433627261628\n",
      "Recall total:  0.8791525205826197\n",
      "F1 total:  0.8283416453283202\n",
      "BACC total:  0.8791525205826197\n",
      "MCC total:  0.972843076315642\n"
     ]
    }
   ],
   "source": [
    "#lgbm\n",
    "start_lgbm = time.time()\n",
    "if use_model_lgbm == 1:\n",
    "\n",
    "    pred_label = preds_lgbm\n",
    "    name = 'lgbm'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test,lgbm_pr_time_taken + lgbm_tr_time_taken)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "    globals()[f\"{name}_acc_00\"] = Acc\n",
    "    globals()[f\"{name}_pre_00\"] = Precision\n",
    "    globals()[f\"{name}_rec_00\"] = Recall\n",
    "    globals()[f\"{name}_f1_00\"] = F1\n",
    "    globals()[f\"{name}_bacc_00\"] = BACC\n",
    "    globals()[f\"{name}_mcc_00\"] = MCC\n",
    "    end = time.time()\n",
    "    time_taken = end - start_lgbm\n",
    "    globals()[f\"{name}_time_00\"] = lgbm_pr_time_taken + lgbm_tr_time_taken"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Decision Tree"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 112,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2       3     4\n",
      "0  22985.0      8.0    20.0    59.0   1.0\n",
      "1      9.0  16020.0    12.0     1.0   0.0\n",
      "2     23.0      4.0  4214.0     2.0   2.0\n",
      "3     43.0      3.0     1.0  1113.0   3.0\n",
      "4      6.0      0.0     0.0     9.0  18.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9953766047221474\n",
      "Precision total:  0.9355632954358221\n",
      "Recall total:  0.897994838641415\n",
      "F1 total:  0.9135341892534491\n",
      "BACC total:  0.897994838641415\n",
      "MCC total:  0.9921980306933288\n"
     ]
    }
   ],
   "source": [
    "from sklearn.tree import DecisionTreeClassifier\n",
    "start = time.time()\n",
    "\n",
    "# Create a Decision Tree Classifier\n",
    "dt_classifier = DecisionTreeClassifier(random_state=42)\n",
    "# Train the classifier on the training data\n",
    "dt_classifier.fit(X_train, y_train)\n",
    "# Make predictions on the test data\n",
    "preds_dt = dt_classifier.predict(X_test)\n",
    "# Evaluate the accuracy of the model\n",
    "preds_dt_prob = dt_classifier.predict_proba(X_test)\n",
    "\n",
    "\n",
    "pred_label = preds_dt\n",
    "name = 'dt'\n",
    "\n",
    "\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "\n",
    "metrics = confusion_metrics(name, pred_label, y_test,time_taken)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "globals()[f\"{name}_acc_00\"] = Acc\n",
    "globals()[f\"{name}_pre_00\"] = Precision\n",
    "globals()[f\"{name}_rec_00\"] = Recall\n",
    "globals()[f\"{name}_f1_00\"] = F1\n",
    "globals()[f\"{name}_bacc_00\"] = BACC\n",
    "globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "globals()[f\"{name}_time_00\"] = time_taken"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### CATBOOST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 113,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0:\tlearn: 1.2844602\ttest: 1.2839571\tbest: 1.2839571 (0)\ttotal: 97.6ms\tremaining: 9.66s\n",
      "10:\tlearn: 0.4140287\ttest: 0.4150422\tbest: 0.4150422 (10)\ttotal: 396ms\tremaining: 3.2s\n",
      "20:\tlearn: 0.2053473\ttest: 0.2066253\tbest: 0.2066253 (20)\ttotal: 693ms\tremaining: 2.61s\n",
      "30:\tlearn: 0.1286296\ttest: 0.1296181\tbest: 0.1296181 (30)\ttotal: 964ms\tremaining: 2.15s\n",
      "40:\tlearn: 0.0925805\ttest: 0.0939857\tbest: 0.0939857 (40)\ttotal: 1.23s\tremaining: 1.77s\n",
      "50:\tlearn: 0.0716650\ttest: 0.0732082\tbest: 0.0732082 (50)\ttotal: 1.48s\tremaining: 1.43s\n",
      "60:\tlearn: 0.0587687\ttest: 0.0604376\tbest: 0.0604376 (60)\ttotal: 1.74s\tremaining: 1.11s\n",
      "70:\tlearn: 0.0501085\ttest: 0.0518844\tbest: 0.0518844 (70)\ttotal: 2s\tremaining: 816ms\n",
      "80:\tlearn: 0.0449737\ttest: 0.0470817\tbest: 0.0470817 (80)\ttotal: 2.23s\tremaining: 523ms\n",
      "90:\tlearn: 0.0408682\ttest: 0.0431420\tbest: 0.0431420 (90)\ttotal: 2.45s\tremaining: 243ms\n",
      "99:\tlearn: 0.0379858\ttest: 0.0403441\tbest: 0.0403441 (99)\ttotal: 2.66s\tremaining: 0us\n",
      "\n",
      "bestTest = 0.040344105\n",
      "bestIteration = 99\n",
      "\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2       3     4\n",
      "0  22940.0     27.0    40.0    61.0   5.0\n",
      "1     25.0  16009.0     8.0     0.0   0.0\n",
      "2     51.0     24.0  4167.0     3.0   0.0\n",
      "3     89.0      5.0     5.0  1063.0   1.0\n",
      "4     14.0      0.0     0.0     7.0  12.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9918080617649699\n",
      "Precision total:  0.9160537614970851\n",
      "Recall total:  0.8502911738335568\n",
      "F1 total:  0.8742282844578477\n",
      "BACC total:  0.8502911738335568\n",
      "MCC total:  0.9861600185463817\n"
     ]
    }
   ],
   "source": [
    "import catboost\n",
    "start = time.time()\n",
    "cat_00 = catboost.CatBoostClassifier(iterations=100, depth=6, learning_rate=0.1, loss_function='MultiClass', custom_metric='Accuracy')\n",
    "\n",
    "# Fit the model\n",
    "cat_00.fit(X_train, y_train, eval_set=(X_test, y_test), verbose=10)\n",
    "\n",
    "# Make predictions on the test set\n",
    "preds_cat = cat_00.predict(X_test)\n",
    "preds_cat_prob = cat_00.predict_proba(X_test)\n",
    "preds_cat = np.squeeze(preds_cat)\n",
    "\n",
    "\n",
    "if 1 == 1:\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('Catboost base model', file = f)\n",
    "\n",
    "\n",
    "    \n",
    "\n",
    "    pred_label = preds_cat\n",
    "    \n",
    "    \n",
    "\n",
    "    # pred_label = y_pred\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    name = 'cat'\n",
    "    metrics = confusion_metrics(name, pred_label, y_test, time_taken)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "    globals()[f\"{name}_acc_00\"] = Acc\n",
    "    globals()[f\"{name}_pre_00\"] = Precision\n",
    "    globals()[f\"{name}_rec_00\"] = Recall\n",
    "    globals()[f\"{name}_f1_00\"] = F1\n",
    "    globals()[f\"{name}_bacc_00\"] = BACC\n",
    "    globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "    globals()[f\"{name}_time_00\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 114,
   "metadata": {},
   "outputs": [],
   "source": [
    "import xgboost as xgb\n",
    "start = time.time()\n",
    "# from sklearn.model_selection import train_test_split\n",
    "# from sklearn.metrics import accuracy_score\n",
    "\n",
    "# Assuming you have your features and labels as X and y\n",
    "# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
    "\n",
    "# Create a DMatrix for XGBoost\n",
    "dtrain = xgb.DMatrix(X_train, label=y_train)\n",
    "dtest = xgb.DMatrix(X_test, label=y_test)\n",
    "\n",
    "# Set XGBoost parameters\n",
    "params = {\n",
    "    'objective': 'multi:softmax',  # for multi-class classification\n",
    "    'num_class': 5,  # specify the number of classes\n",
    "    'max_depth': 3,\n",
    "    'learning_rate': 0.1,\n",
    "    'eval_metric': 'mlogloss'  # metric for multi-class classification\n",
    "}\n",
    "\n",
    "# Train the XGBoost model\n",
    "num_round = 100\n",
    "xgb_00 = xgb.train(params, dtrain, num_round)\n",
    "\n",
    "# Make predictions on the test set\n",
    "preds_xgb = xgb_00.predict(dtest)\n",
    "# preds_xgb_prob = xgb_00.predict_proba(dtest)\n",
    "\n",
    "\n",
    "# Get class probabilities\n",
    "# Assuming binary classification, get the probability for the positive class (class 1)\n",
    "preds_xgb_margin = xgb_00.predict(dtest, output_margin=True)\n",
    "preds_xgb_prob = 1 / (1 + np.exp(-preds_xgb_margin))\n",
    "\n",
    "# Print or use positive_class_probabilities as needed\n",
    "# print(positive_class_probabilities)\n",
    "\n",
    "\n",
    "# Convert predicted probabilities to class labels (if necessary)\n",
    "# y_pred_labels = [round(value) for value in y_pred]\n",
    "\n",
    "# Evaluate the accuracy\n",
    "# accuracy = accuracy_score(y_test, y_pred)\n",
    "# print(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 115,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0.0      1.0     2.0     3.0   4.0\n",
      "0.0  22911.0     18.0    63.0    75.0   6.0\n",
      "1.0     19.0  16016.0     6.0     1.0   0.0\n",
      "2.0     46.0     10.0  4184.0     5.0   0.0\n",
      "3.0     84.0      1.0     2.0  1073.0   3.0\n",
      "4.0     12.0      0.0     0.0     6.0  15.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9919876110961486\n",
      "Precision total:  0.9049056353101271\n",
      "Recall total:  0.8708295196027205\n",
      "F1 total:  0.8851756073947277\n",
      "BACC total:  0.8708295196027205\n",
      "MCC total:  0.9864751992211209\n"
     ]
    }
   ],
   "source": [
    "\n",
    "if 1 == 1:\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('xgboost base model', file = f)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    pred_label = preds_xgb\n",
    "    # pred_label = label[ypred]\n",
    "    name = 'xgb'\n",
    "    # metrics = confusion_metrics(name, pred_label, y_test,time_taken)\n",
    "\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    metrics = confusion_metrics(name, pred_label, y_test,time_taken)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]    \n",
    "\n",
    "    globals()[f\"{name}_acc_00\"] = Acc\n",
    "    globals()[f\"{name}_pre_00\"] = Precision\n",
    "    globals()[f\"{name}_rec_00\"] = Recall\n",
    "    globals()[f\"{name}_f1_00\"] = F1\n",
    "    globals()[f\"{name}_bacc_00\"] = BACC\n",
    "    globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "    globals()[f\"{name}_time_00\"] = time_taken"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### LR"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 116,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining Logistic Regression Model\n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "Training LR \n",
      "---------------------------------------------------------------------------------\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2      3     4\n",
      "0  22657.0    130.0   149.0  132.0   5.0\n",
      "1    110.0  15912.0    14.0    6.0   0.0\n",
      "2    147.0     66.0  4026.0    6.0   0.0\n",
      "3    183.0     12.0    13.0  953.0   2.0\n",
      "4      9.0      2.0     0.0    6.0  16.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9777358829338361\n",
      "Precision total:  0.8970633497584595\n",
      "Recall total:  0.8453114843110445\n",
      "F1 total:  0.8672987737685313\n",
      "BACC total:  0.8453114843110445\n",
      "MCC total:  0.9623694023954854\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n"
     ]
    }
   ],
   "source": [
    "from sklearn.linear_model import LogisticRegression\n",
    "\n",
    "# Logistic Regression base learner.\n",
    "# Flags follow the use_model_* / load_model_* convention of the parameter cell:\n",
    "# use_model_lr = 1 evaluates the model, load_model_lr = 0 trains it from scratch.\n",
    "use_model_lr = 1\n",
    "load_model_lr = 0\n",
    "\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining Logistic Regression Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "logreg_00 = LogisticRegression()\n",
    "\n",
    "if use_model_lr == 1 and load_model_lr == 0:\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    print('Training LR ')\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "    with open(output_file_name, \"a\") as f: print('Training LR', file = f)\n",
    "    print('---------------------------------------------------------------------------------')\n",
    "    # start_lr marks the start of the whole train+predict pipeline;\n",
    "    # `start` is reused below to time the prediction step on its own.\n",
    "    start_lr = start = time.time()\n",
    "    logreg_00.fit(X_train, y_train)\n",
    "    end = time.time()\n",
    "\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed training time ', time_taken, file = f)\n",
    "    # joblib.dump(logreg_00, 'logreg_00.joblib')  # uncomment to persist the fitted model\n",
    "\n",
    "if use_model_lr == 1:\n",
    "\n",
    "    # Time the prediction step separately from training.\n",
    "    start = time.time()\n",
    "    preds_lr = preds_logreg = logreg_00.predict(X_test)\n",
    "    preds_lr_prob = logreg_00.predict_proba(X_test)\n",
    "    end = time.time()\n",
    "    time_taken = end - start\n",
    "    with open(output_file_name, \"a\") as f: print('Elapsed prediction time ', time_taken, file = f)\n",
    "    with open(output_file_name, \"a\") as f: print('---------------------------------------------------------------------------------', file = f)\n",
    "\n",
    "# LR metrics, reported over the full train+predict wall time (since start_lr).\n",
    "if use_model_lr == 1:\n",
    "    pred_label = preds_logreg\n",
    "    name = 'lr'\n",
    "\n",
    "    end = time.time()\n",
    "    time_taken = end - start_lr\n",
    "    metrics = confusion_metrics(name, pred_label, y_test, time_taken)\n",
    "\n",
    "    Acc = metrics[0]\n",
    "    Precision = metrics[1]\n",
    "    Recall = metrics[2]\n",
    "    F1 = metrics[3]\n",
    "    BACC = metrics[4]\n",
    "    MCC = metrics[5]\n",
    "\n",
    "    # Expose per-model metrics under lr_*_00 names for the summary cells.\n",
    "    globals()[f\"{name}_acc_00\"] = Acc\n",
    "    globals()[f\"{name}_pre_00\"] = Precision\n",
    "    globals()[f\"{name}_rec_00\"] = Recall\n",
    "    globals()[f\"{name}_f1_00\"] = F1\n",
    "    globals()[f\"{name}_bacc_00\"] = BACC\n",
    "    globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "    globals()[f\"{name}_time_00\"] = time_taken"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Bagging DT  \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2       3     4\n",
      "0  23018.0      6.0    13.0    36.0   0.0\n",
      "1      6.0  16032.0     4.0     0.0   0.0\n",
      "2     22.0      1.0  4222.0     0.0   0.0\n",
      "3     43.0      3.0     0.0  1112.0   5.0\n",
      "4      9.0      0.0     0.0     7.0  17.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9965212317084119\n",
      "Precision total:  0.9454801177374579\n",
      "Recall total:  0.8925748334608816\n",
      "F1 total:  0.9138735167739975\n",
      "BACC total:  0.8925748334608816\n",
      "MCC total:  0.9941259265135727\n"
     ]
    }
   ],
   "source": [
    "# Bagging ensemble with a decision tree as the base learner.\n",
    "from sklearn.ensemble import BaggingClassifier\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "\n",
    "start = time.time()\n",
    "\n",
    "# Base classifier: a single decision tree (seeded for reproducibility)\n",
    "base_classifier = DecisionTreeClassifier(random_state=42)\n",
    "\n",
    "# Define the BaggingClassifier (10 bootstrap-trained trees)\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train, y_train)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "\n",
    "pred_label = y_pred\n",
    "name = 'bag_dt'\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "metrics = confusion_metrics(name, pred_label, y_test, time_taken)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]\n",
    "\n",
    "# Expose metrics as bag_dt_*_00 globals for the summary cells.\n",
    "globals()[f\"{name}_acc_00\"] = Acc\n",
    "globals()[f\"{name}_pre_00\"] = Precision\n",
    "globals()[f\"{name}_rec_00\"] = Recall\n",
    "globals()[f\"{name}_f1_00\"] = F1\n",
    "globals()[f\"{name}_bacc_00\"] = BACC\n",
    "globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "globals()[f\"{name}_time_00\"] = time_taken"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Bagging SVM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2      3    4\n",
      "0  22677.0    116.0   192.0   81.0  7.0\n",
      "1    311.0  15711.0    20.0    0.0  0.0\n",
      "2    164.0    107.0  3969.0    5.0  0.0\n",
      "3    438.0      4.0     8.0  713.0  0.0\n",
      "4     16.0      1.0     0.0    7.0  9.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9668507047311249\n",
      "Precision total:  0.8681875892625659\n",
      "Recall total:  0.7565965994322215\n",
      "F1 total:  0.7973796740298094\n",
      "BACC total:  0.7565965994322215\n",
      "MCC total:  0.94377605606385\n"
     ]
    }
   ],
   "source": [
    "## bagging  with SVM\n",
    "# Bagging ensemble whose base learner is a linear SVM trained with SGD.\n",
    "from sklearn.ensemble import BaggingClassifier\n",
    "# from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.metrics import accuracy_score\n",
    "\n",
    "start = time.time()\n",
    "\n",
    "from sklearn.linear_model import SGDClassifier\n",
    "\n",
    "# Instantiate the SGDClassifier with additional hyperparameters\n",
    "svm_01 = SGDClassifier(\n",
    "    loss='hinge',           # hinge loss for linear SVM\n",
    "    penalty='l2',           # L2 regularization to prevent overfitting\n",
    "    alpha=1e-4,             # regularization strength (also feeds the 'optimal' learning-rate schedule)\n",
    "    max_iter=1000,          # Number of passes over the training data\n",
    "    random_state=42,        # Seed for reproducible results\n",
    "    learning_rate='optimal' # Automatically adjusts the learning rate based on the training data\n",
    ")\n",
    "\n",
    "# Base classifier for the bagging ensemble: the SGD-trained linear SVM above\n",
    "base_classifier = svm_01\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train, y_train)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test)\n",
    "\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_svm'\n",
    "pred_label = y_pred\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "metrics = confusion_metrics(name, pred_label, y_test,time_taken)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "# Expose metrics as bag_svm_*_00 globals for the summary cells.\n",
    "globals()[f\"{name}_acc_00\"] = Acc\n",
    "globals()[f\"{name}_pre_00\"] = Precision\n",
    "globals()[f\"{name}_rec_00\"] = Recall\n",
    "globals()[f\"{name}_f1_00\"] = F1\n",
    "globals()[f\"{name}_bacc_00\"] = BACC\n",
    "globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "\n",
    "globals()[f\"{name}_time_00\"] = time_taken"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Bagging MLP"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2       3     4\n",
      "0  22923.0     22.0    46.0    81.0   1.0\n",
      "1     19.0  16020.0     3.0     0.0   0.0\n",
      "2     29.0      8.0  4207.0     1.0   0.0\n",
      "3     53.0      0.0     3.0  1105.0   2.0\n",
      "4     10.0      0.0     0.0     7.0  16.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9936035550767573\n",
      "Precision total:  0.949733667737642\n",
      "Recall total:  0.8836306496956681\n",
      "F1 total:  0.9070305714127486\n",
      "BACC total:  0.8836306496956681\n",
      "MCC total:  0.9892110107888011\n"
     ]
    }
   ],
   "source": [
    "# Bagging ensemble with an MLP (one hidden layer of 100 units) as base learner.\n",
    "from sklearn.neural_network import MLPClassifier\n",
    "import time\n",
    "\n",
    "start = time.time()\n",
    "# create MLPClassifier instance (seeded for reproducibility)\n",
    "mlp_00 = MLPClassifier(hidden_layer_sizes=(100,), max_iter=200, random_state=1)\n",
    "\n",
    "base_classifier = mlp_00\n",
    "\n",
    "# Define the BaggingClassifier (10 bootstrap-trained MLPs)\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train, y_train)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_mlp'\n",
    "pred_label = y_pred\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "metrics = confusion_metrics(name, pred_label, y_test, time_taken)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]\n",
    "\n",
    "# Expose metrics as bag_mlp_*_00 globals for the summary cells.\n",
    "globals()[f\"{name}_acc_00\"] = Acc\n",
    "globals()[f\"{name}_pre_00\"] = Precision\n",
    "globals()[f\"{name}_rec_00\"] = Recall\n",
    "globals()[f\"{name}_f1_00\"] = F1\n",
    "globals()[f\"{name}_bacc_00\"] = BACC\n",
    "globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "globals()[f\"{name}_time_00\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Bagging KNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 120,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Bagging ensemble built on a 5-nearest-neighbours base learner.\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "\n",
    "knn_00 = KNeighborsClassifier(n_neighbors=5)\n",
    "start = time.time()\n",
    "\n",
    "# Ten bootstrap samples, one KNN model fitted per sample.\n",
    "base_classifier = knn_00\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Fit on the training split, then score the held-out test split.\n",
    "bagging_classifier.fit(X_train, y_train)\n",
    "y_pred = bagging_classifier.predict(X_test)\n",
    "\n",
    "with open(output_file_name, \"a\") as f:\n",
    "    print('--------------------------------------------------------------------------', file=f)\n",
    "\n",
    "name = 'bag_knn'\n",
    "pred_label = y_pred\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "\n",
    "metrics = confusion_metrics(name, pred_label, y_test, time_taken)\n",
    "Acc, Precision, Recall, F1, BACC, MCC = metrics[:6]\n",
    "\n",
    "# Publish bag_knn_*_00 metric globals for the comparison cells.\n",
    "for suffix, value in zip(('acc', 'pre', 'rec', 'f1', 'bacc', 'mcc'),\n",
    "                         (Acc, Precision, Recall, F1, BACC, MCC)):\n",
    "    globals()[f\"{name}_{suffix}_00\"] = value\n",
    "\n",
    "globals()[f\"{name}_time_00\"] = time_taken"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "### Bagging Logistic Regression\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "Defining baggin Logistic Regression Model\n",
      "---------------------------------------------------------------------------------\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2      3     4\n",
      "0  22664.0    138.0   159.0  142.0   7.0\n",
      "1    105.0  15897.0    20.0   10.0   0.0\n",
      "2    153.0     38.0  4024.0   11.0   0.0\n",
      "3    169.0      4.0    10.0  970.0   2.0\n",
      "4      6.0      3.0     0.0    4.0  20.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9779827632642069\n",
      "Precision total:  0.8935577318874394\n",
      "Recall total:  0.8740736890020081\n",
      "F1 total:  0.8832650386482424\n",
      "BACC total:  0.8740736890020081\n",
      "MCC total:  0.962787586086353\n"
     ]
    }
   ],
   "source": [
    "# Bagging ensemble with logistic regression as base learner.\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "start = time.time()\n",
    "\n",
    "print('---------------------------------------------------------------------------------')\n",
    "print('Defining bagging Logistic Regression Model')\n",
    "print('---------------------------------------------------------------------------------')\n",
    "# NOTE(review): the recorded run emitted lbfgs convergence warnings; consider\n",
    "# LogisticRegression(max_iter=...) or feature scaling if exact scores matter.\n",
    "logreg_00 = LogisticRegression()\n",
    "\n",
    "base_classifier = logreg_00\n",
    "\n",
    "# Define the BaggingClassifier (10 bootstrap-trained LR models)\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train, y_train)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_lr'\n",
    "\n",
    "pred_label = y_pred\n",
    "\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "metrics = confusion_metrics(name, pred_label, y_test, time_taken)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]\n",
    "\n",
    "# Expose metrics as bag_lr_*_00 globals for the summary cells.\n",
    "globals()[f\"{name}_acc_00\"] = Acc\n",
    "globals()[f\"{name}_pre_00\"] = Precision\n",
    "globals()[f\"{name}_rec_00\"] = Recall\n",
    "globals()[f\"{name}_f1_00\"] = F1\n",
    "globals()[f\"{name}_bacc_00\"] = BACC\n",
    "globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "globals()[f\"{name}_time_00\"] = time_taken"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Bagging RF"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2    3    4\n",
      "0  23052.0      7.0    51.0  0.0  0.0\n",
      "1    748.0  15282.0     2.0  0.0  0.0\n",
      "2    289.0     53.0  3884.0  0.0  0.0\n",
      "3   1134.0     15.0     6.0  0.0  0.0\n",
      "4     33.0      0.0     0.0  0.0  0.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9475267079630129\n",
      "Precision total:  0.5785773231049737\n",
      "Recall total:  0.5739562471453103\n",
      "F1 total:  0.5755721001018624\n",
      "BACC total:  0.5739562471453103\n",
      "MCC total:  0.911659240977031\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n"
     ]
    }
   ],
   "source": [
    "# Bagging ensemble with a shallow random forest as base learner.\n",
    "start = time.time()\n",
    "\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "# NOTE(review): a random forest is itself a bagged ensemble of trees, so bagging\n",
    "# it again mainly adds training cost -- confirm this double-ensembling is intended.\n",
    "rf = RandomForestClassifier(max_depth = 5,  n_estimators = 10, min_samples_split = 2, n_jobs = -1)\n",
    "\n",
    "base_classifier = rf\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train, y_train)\n",
    "\n",
    "# Make predictions on the test set\n",
    "# (in the recorded run the shallow trees never predicted classes 3 and 4,\n",
    "# which triggered the ill-defined-precision warning below)\n",
    "y_pred = bagging_classifier.predict(X_test)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_rf'\n",
    "\n",
    "pred_label = y_pred\n",
    "\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "metrics = confusion_metrics(name, pred_label, y_test, time_taken)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "# Expose metrics as bag_rf_*_00 globals for the summary cells.\n",
    "globals()[f\"{name}_acc_00\"] = Acc\n",
    "globals()[f\"{name}_pre_00\"] = Precision\n",
    "globals()[f\"{name}_rec_00\"] = Recall\n",
    "globals()[f\"{name}_f1_00\"] = F1\n",
    "globals()[f\"{name}_bacc_00\"] = BACC\n",
    "globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "globals()[f\"{name}_time_00\"] = time_taken\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Bagging ADA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2      3     4\n",
      "0  22662.0     27.0   272.0  137.0  12.0\n",
      "1   2132.0  11861.0  2039.0    0.0   0.0\n",
      "2   1523.0    138.0  2565.0    0.0   0.0\n",
      "3    419.0      1.0    21.0  712.0   2.0\n",
      "4      7.0      0.0     0.0   11.0  15.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.8487072448155131\n",
      "Precision total:  0.7405070945703415\n",
      "Recall total:  0.679679978243031\n",
      "F1 total:  0.7014942990114014\n",
      "BACC total:  0.679679978243031\n",
      "MCC total:  0.7495108318957361\n"
     ]
    }
   ],
   "source": [
    "# Bagging ensemble whose base learner is an AdaBoost classifier.\n",
    "start = time.time()\n",
    "\n",
    "from sklearn.ensemble import AdaBoostClassifier\n",
    "import time\n",
    "\n",
    "ada = AdaBoostClassifier(n_estimators=50, learning_rate=1.0)\n",
    "base_classifier = ada\n",
    "\n",
    "# Ten bootstrap replicas, each fitting its own AdaBoost model.\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Fit on the training split, then score the held-out test split.\n",
    "bagging_classifier.fit(X_train, y_train)\n",
    "y_pred = bagging_classifier.predict(X_test)\n",
    "\n",
    "with open(output_file_name, \"a\") as f:\n",
    "    print('--------------------------------------------------------------------------', file=f)\n",
    "\n",
    "name = 'bag_ada'\n",
    "pred_label = y_pred\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "\n",
    "metrics = confusion_metrics(name, pred_label, y_test, time_taken)\n",
    "Acc, Precision, Recall, F1, BACC, MCC = metrics[:6]\n",
    "\n",
    "# Publish bag_ada_*_00 metric globals for the comparison cells.\n",
    "for suffix, value in zip(('acc', 'pre', 'rec', 'f1', 'bacc', 'mcc'),\n",
    "                         (Acc, Precision, Recall, F1, BACC, MCC)):\n",
    "    globals()[f\"{name}_{suffix}_00\"] = value\n",
    "\n",
    "globals()[f\"{name}_time_00\"] = time_taken\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Bagging LGBM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2       3     4\n",
      "0  23030.0     15.0    25.0    36.0   4.0\n",
      "1     14.0  16013.0     5.0     0.0   0.0\n",
      "2     29.0      1.0  4196.0     0.0   0.0\n",
      "3     55.0      0.0     1.0  1097.0   2.0\n",
      "4      9.0      2.0     0.0     7.0  15.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9953990483885448\n",
      "Precision total:  0.9326970315620029\n",
      "Recall total:  0.8785166516397181\n",
      "F1 total:  0.8998265968885905\n",
      "BACC total:  0.8785166516397181\n",
      "MCC total:  0.992222698517305\n"
     ]
    }
   ],
   "source": [
    "# Bagging ensemble with a default-parameter LightGBM classifier as base learner.\n",
    "start = time.time()\n",
    "\n",
    "from lightgbm import LGBMClassifier\n",
    "lgbm = LGBMClassifier()\n",
    "\n",
    "\n",
    "base_classifier = lgbm\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train, y_train)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_lgbm'\n",
    "\n",
    "pred_label = y_pred\n",
    "\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "metrics = confusion_metrics(name, pred_label, y_test, time_taken)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "# Expose metrics as bag_lgbm_*_00 globals for the summary cells.\n",
    "globals()[f\"{name}_acc_00\"] = Acc\n",
    "globals()[f\"{name}_pre_00\"] = Precision\n",
    "globals()[f\"{name}_rec_00\"] = Recall\n",
    "globals()[f\"{name}_f1_00\"] = F1\n",
    "globals()[f\"{name}_bacc_00\"] = BACC\n",
    "globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "globals()[f\"{name}_time_00\"] = time_taken\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Bagging Catboost "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0:\tlearn: 1.2910621\ttotal: 48ms\tremaining: 4.75s\n",
      "1:\tlearn: 1.0914490\ttotal: 77.1ms\tremaining: 3.78s\n",
      "2:\tlearn: 0.9424614\ttotal: 108ms\tremaining: 3.5s\n",
      "3:\tlearn: 0.8313953\ttotal: 139ms\tremaining: 3.35s\n",
      "4:\tlearn: 0.7376175\ttotal: 173ms\tremaining: 3.29s\n",
      "5:\tlearn: 0.6633804\ttotal: 204ms\tremaining: 3.2s\n",
      "6:\tlearn: 0.6001672\ttotal: 234ms\tremaining: 3.12s\n",
      "7:\tlearn: 0.5438825\ttotal: 264ms\tremaining: 3.04s\n",
      "8:\tlearn: 0.4958505\ttotal: 292ms\tremaining: 2.95s\n",
      "9:\tlearn: 0.4540392\ttotal: 320ms\tremaining: 2.88s\n",
      "10:\tlearn: 0.4179708\ttotal: 352ms\tremaining: 2.85s\n",
      "11:\tlearn: 0.3880769\ttotal: 380ms\tremaining: 2.79s\n",
      "12:\tlearn: 0.3609518\ttotal: 409ms\tremaining: 2.73s\n",
      "13:\tlearn: 0.3357262\ttotal: 436ms\tremaining: 2.68s\n",
      "14:\tlearn: 0.3106128\ttotal: 468ms\tremaining: 2.65s\n",
      "15:\tlearn: 0.2896498\ttotal: 494ms\tremaining: 2.59s\n",
      "16:\tlearn: 0.2689241\ttotal: 526ms\tremaining: 2.57s\n",
      "17:\tlearn: 0.2516952\ttotal: 559ms\tremaining: 2.55s\n",
      "18:\tlearn: 0.2363459\ttotal: 591ms\tremaining: 2.52s\n",
      "19:\tlearn: 0.2223497\ttotal: 621ms\tremaining: 2.48s\n",
      "20:\tlearn: 0.2107660\ttotal: 649ms\tremaining: 2.44s\n",
      "21:\tlearn: 0.1983566\ttotal: 677ms\tremaining: 2.4s\n",
      "22:\tlearn: 0.1894295\ttotal: 704ms\tremaining: 2.35s\n",
      "23:\tlearn: 0.1794569\ttotal: 734ms\tremaining: 2.32s\n",
      "24:\tlearn: 0.1705824\ttotal: 760ms\tremaining: 2.28s\n",
      "25:\tlearn: 0.1624696\ttotal: 790ms\tremaining: 2.25s\n",
      "26:\tlearn: 0.1547573\ttotal: 819ms\tremaining: 2.21s\n",
      "27:\tlearn: 0.1469403\ttotal: 850ms\tremaining: 2.19s\n",
      "28:\tlearn: 0.1406994\ttotal: 876ms\tremaining: 2.15s\n",
      "29:\tlearn: 0.1359071\ttotal: 900ms\tremaining: 2.1s\n",
      "30:\tlearn: 0.1299411\ttotal: 930ms\tremaining: 2.07s\n",
      "31:\tlearn: 0.1250031\ttotal: 959ms\tremaining: 2.04s\n",
      "32:\tlearn: 0.1201775\ttotal: 989ms\tremaining: 2.01s\n",
      "33:\tlearn: 0.1155534\ttotal: 1.02s\tremaining: 1.97s\n",
      "34:\tlearn: 0.1107359\ttotal: 1.04s\tremaining: 1.94s\n",
      "35:\tlearn: 0.1062939\ttotal: 1.07s\tremaining: 1.91s\n",
      "36:\tlearn: 0.1034459\ttotal: 1.1s\tremaining: 1.87s\n",
      "37:\tlearn: 0.1005326\ttotal: 1.12s\tremaining: 1.83s\n",
      "38:\tlearn: 0.0987267\ttotal: 1.14s\tremaining: 1.79s\n",
      "39:\tlearn: 0.0945298\ttotal: 1.18s\tremaining: 1.76s\n",
      "40:\tlearn: 0.0916746\ttotal: 1.2s\tremaining: 1.73s\n",
      "41:\tlearn: 0.0893585\ttotal: 1.23s\tremaining: 1.7s\n",
      "42:\tlearn: 0.0877129\ttotal: 1.25s\tremaining: 1.66s\n",
      "43:\tlearn: 0.0855390\ttotal: 1.27s\tremaining: 1.62s\n",
      "44:\tlearn: 0.0835901\ttotal: 1.3s\tremaining: 1.58s\n",
      "45:\tlearn: 0.0816653\ttotal: 1.32s\tremaining: 1.55s\n",
      "46:\tlearn: 0.0796479\ttotal: 1.35s\tremaining: 1.52s\n",
      "47:\tlearn: 0.0776355\ttotal: 1.37s\tremaining: 1.49s\n",
      "48:\tlearn: 0.0756035\ttotal: 1.4s\tremaining: 1.46s\n",
      "49:\tlearn: 0.0739460\ttotal: 1.43s\tremaining: 1.43s\n",
      "50:\tlearn: 0.0729140\ttotal: 1.45s\tremaining: 1.39s\n",
      "51:\tlearn: 0.0710458\ttotal: 1.48s\tremaining: 1.36s\n",
      "52:\tlearn: 0.0695102\ttotal: 1.5s\tremaining: 1.33s\n",
      "53:\tlearn: 0.0673494\ttotal: 1.53s\tremaining: 1.31s\n",
      "54:\tlearn: 0.0658732\ttotal: 1.56s\tremaining: 1.27s\n",
      "55:\tlearn: 0.0638422\ttotal: 1.58s\tremaining: 1.25s\n",
      "56:\tlearn: 0.0629387\ttotal: 1.61s\tremaining: 1.21s\n",
      "57:\tlearn: 0.0616252\ttotal: 1.63s\tremaining: 1.18s\n",
      "58:\tlearn: 0.0602166\ttotal: 1.66s\tremaining: 1.15s\n",
      "59:\tlearn: 0.0591710\ttotal: 1.68s\tremaining: 1.12s\n",
      "60:\tlearn: 0.0583543\ttotal: 1.71s\tremaining: 1.09s\n",
      "61:\tlearn: 0.0574850\ttotal: 1.73s\tremaining: 1.06s\n",
      "62:\tlearn: 0.0561964\ttotal: 1.76s\tremaining: 1.03s\n",
      "63:\tlearn: 0.0555307\ttotal: 1.78s\tremaining: 1s\n",
      "64:\tlearn: 0.0548516\ttotal: 1.81s\tremaining: 973ms\n",
      "65:\tlearn: 0.0538765\ttotal: 1.83s\tremaining: 945ms\n",
      "66:\tlearn: 0.0528971\ttotal: 1.86s\tremaining: 917ms\n",
      "67:\tlearn: 0.0522194\ttotal: 1.89s\tremaining: 887ms\n",
      "68:\tlearn: 0.0510764\ttotal: 1.91s\tremaining: 860ms\n",
      "69:\tlearn: 0.0505294\ttotal: 1.94s\tremaining: 830ms\n",
      "70:\tlearn: 0.0498666\ttotal: 1.96s\tremaining: 800ms\n",
      "71:\tlearn: 0.0494285\ttotal: 1.98s\tremaining: 771ms\n",
      "72:\tlearn: 0.0489916\ttotal: 2s\tremaining: 741ms\n",
      "73:\tlearn: 0.0482078\ttotal: 2.03s\tremaining: 713ms\n",
      "74:\tlearn: 0.0473924\ttotal: 2.05s\tremaining: 684ms\n",
      "75:\tlearn: 0.0471813\ttotal: 2.07s\tremaining: 654ms\n",
      "76:\tlearn: 0.0463145\ttotal: 2.1s\tremaining: 626ms\n",
      "77:\tlearn: 0.0457369\ttotal: 2.12s\tremaining: 598ms\n",
      "78:\tlearn: 0.0451586\ttotal: 2.15s\tremaining: 571ms\n",
      "79:\tlearn: 0.0444418\ttotal: 2.17s\tremaining: 544ms\n",
      "80:\tlearn: 0.0439419\ttotal: 2.2s\tremaining: 515ms\n",
      "81:\tlearn: 0.0436144\ttotal: 2.22s\tremaining: 488ms\n",
      "82:\tlearn: 0.0431405\ttotal: 2.25s\tremaining: 461ms\n",
      "83:\tlearn: 0.0426076\ttotal: 2.28s\tremaining: 434ms\n",
      "84:\tlearn: 0.0422364\ttotal: 2.3s\tremaining: 406ms\n",
      "85:\tlearn: 0.0420576\ttotal: 2.32s\tremaining: 378ms\n",
      "86:\tlearn: 0.0415100\ttotal: 2.35s\tremaining: 351ms\n",
      "87:\tlearn: 0.0410449\ttotal: 2.38s\tremaining: 324ms\n",
      "88:\tlearn: 0.0408046\ttotal: 2.4s\tremaining: 296ms\n",
      "89:\tlearn: 0.0401563\ttotal: 2.42s\tremaining: 269ms\n",
      "90:\tlearn: 0.0399644\ttotal: 2.45s\tremaining: 242ms\n",
      "91:\tlearn: 0.0398175\ttotal: 2.47s\tremaining: 215ms\n",
      "92:\tlearn: 0.0394962\ttotal: 2.49s\tremaining: 188ms\n",
      "93:\tlearn: 0.0391292\ttotal: 2.52s\tremaining: 161ms\n",
      "94:\tlearn: 0.0387272\ttotal: 2.54s\tremaining: 134ms\n",
      "95:\tlearn: 0.0383196\ttotal: 2.56s\tremaining: 107ms\n",
      "96:\tlearn: 0.0378380\ttotal: 2.59s\tremaining: 80.1ms\n",
      "97:\tlearn: 0.0374970\ttotal: 2.61s\tremaining: 53.3ms\n",
      "98:\tlearn: 0.0373226\ttotal: 2.64s\tremaining: 26.6ms\n",
      "99:\tlearn: 0.0371260\ttotal: 2.66s\tremaining: 0us\n",
      "0:\tlearn: 1.2913131\ttotal: 42ms\tremaining: 4.16s\n",
      "1:\tlearn: 1.0913021\ttotal: 70.8ms\tremaining: 3.47s\n",
      "2:\tlearn: 0.9422210\ttotal: 102ms\tremaining: 3.29s\n",
      "3:\tlearn: 0.8303946\ttotal: 132ms\tremaining: 3.18s\n",
      "4:\tlearn: 0.7361766\ttotal: 164ms\tremaining: 3.12s\n",
      "5:\tlearn: 0.6629186\ttotal: 195ms\tremaining: 3.05s\n",
      "6:\tlearn: 0.5997391\ttotal: 226ms\tremaining: 3.01s\n",
      "7:\tlearn: 0.5451269\ttotal: 257ms\tremaining: 2.95s\n",
      "8:\tlearn: 0.4962009\ttotal: 286ms\tremaining: 2.89s\n",
      "9:\tlearn: 0.4536221\ttotal: 314ms\tremaining: 2.83s\n",
      "10:\tlearn: 0.4167443\ttotal: 347ms\tremaining: 2.81s\n",
      "11:\tlearn: 0.3859099\ttotal: 375ms\tremaining: 2.75s\n",
      "12:\tlearn: 0.3566919\ttotal: 407ms\tremaining: 2.72s\n",
      "13:\tlearn: 0.3324361\ttotal: 434ms\tremaining: 2.67s\n",
      "14:\tlearn: 0.3072865\ttotal: 467ms\tremaining: 2.65s\n",
      "15:\tlearn: 0.2873194\ttotal: 493ms\tremaining: 2.58s\n",
      "16:\tlearn: 0.2661405\ttotal: 527ms\tremaining: 2.57s\n",
      "17:\tlearn: 0.2476664\ttotal: 560ms\tremaining: 2.55s\n",
      "18:\tlearn: 0.2321346\ttotal: 593ms\tremaining: 2.53s\n",
      "19:\tlearn: 0.2179868\ttotal: 623ms\tremaining: 2.49s\n",
      "20:\tlearn: 0.2073890\ttotal: 648ms\tremaining: 2.44s\n",
      "21:\tlearn: 0.1960805\ttotal: 673ms\tremaining: 2.39s\n",
      "22:\tlearn: 0.1868787\ttotal: 701ms\tremaining: 2.35s\n",
      "23:\tlearn: 0.1776012\ttotal: 729ms\tremaining: 2.31s\n",
      "24:\tlearn: 0.1680401\ttotal: 760ms\tremaining: 2.28s\n",
      "25:\tlearn: 0.1609000\ttotal: 788ms\tremaining: 2.24s\n",
      "26:\tlearn: 0.1535555\ttotal: 815ms\tremaining: 2.2s\n",
      "27:\tlearn: 0.1474456\ttotal: 841ms\tremaining: 2.16s\n",
      "28:\tlearn: 0.1408519\ttotal: 869ms\tremaining: 2.13s\n",
      "29:\tlearn: 0.1346627\ttotal: 899ms\tremaining: 2.1s\n",
      "30:\tlearn: 0.1295088\ttotal: 929ms\tremaining: 2.07s\n",
      "31:\tlearn: 0.1242934\ttotal: 962ms\tremaining: 2.04s\n",
      "32:\tlearn: 0.1197789\ttotal: 990ms\tremaining: 2.01s\n",
      "33:\tlearn: 0.1157875\ttotal: 1.02s\tremaining: 1.98s\n",
      "34:\tlearn: 0.1113185\ttotal: 1.04s\tremaining: 1.94s\n",
      "35:\tlearn: 0.1071002\ttotal: 1.07s\tremaining: 1.91s\n",
      "36:\tlearn: 0.1036172\ttotal: 1.1s\tremaining: 1.88s\n",
      "37:\tlearn: 0.1000370\ttotal: 1.13s\tremaining: 1.84s\n",
      "38:\tlearn: 0.0964521\ttotal: 1.16s\tremaining: 1.81s\n",
      "39:\tlearn: 0.0933249\ttotal: 1.19s\tremaining: 1.78s\n",
      "40:\tlearn: 0.0902635\ttotal: 1.21s\tremaining: 1.74s\n",
      "41:\tlearn: 0.0883000\ttotal: 1.24s\tremaining: 1.71s\n",
      "42:\tlearn: 0.0866110\ttotal: 1.26s\tremaining: 1.67s\n",
      "43:\tlearn: 0.0844817\ttotal: 1.28s\tremaining: 1.63s\n",
      "44:\tlearn: 0.0823647\ttotal: 1.31s\tremaining: 1.6s\n",
      "45:\tlearn: 0.0808007\ttotal: 1.33s\tremaining: 1.57s\n",
      "46:\tlearn: 0.0784491\ttotal: 1.36s\tremaining: 1.53s\n",
      "47:\tlearn: 0.0766747\ttotal: 1.39s\tremaining: 1.5s\n",
      "48:\tlearn: 0.0746014\ttotal: 1.42s\tremaining: 1.47s\n",
      "49:\tlearn: 0.0735296\ttotal: 1.44s\tremaining: 1.44s\n",
      "50:\tlearn: 0.0724492\ttotal: 1.46s\tremaining: 1.4s\n",
      "51:\tlearn: 0.0704728\ttotal: 1.48s\tremaining: 1.37s\n",
      "52:\tlearn: 0.0690981\ttotal: 1.51s\tremaining: 1.34s\n",
      "53:\tlearn: 0.0672163\ttotal: 1.54s\tremaining: 1.31s\n",
      "54:\tlearn: 0.0656833\ttotal: 1.56s\tremaining: 1.27s\n",
      "55:\tlearn: 0.0643123\ttotal: 1.58s\tremaining: 1.24s\n",
      "56:\tlearn: 0.0631088\ttotal: 1.61s\tremaining: 1.21s\n",
      "57:\tlearn: 0.0613698\ttotal: 1.63s\tremaining: 1.18s\n",
      "58:\tlearn: 0.0600849\ttotal: 1.66s\tremaining: 1.15s\n",
      "59:\tlearn: 0.0592840\ttotal: 1.68s\tremaining: 1.12s\n",
      "60:\tlearn: 0.0586778\ttotal: 1.71s\tremaining: 1.09s\n",
      "61:\tlearn: 0.0581062\ttotal: 1.73s\tremaining: 1.06s\n",
      "62:\tlearn: 0.0573689\ttotal: 1.75s\tremaining: 1.03s\n",
      "63:\tlearn: 0.0561524\ttotal: 1.77s\tremaining: 999ms\n",
      "64:\tlearn: 0.0556050\ttotal: 1.8s\tremaining: 969ms\n",
      "65:\tlearn: 0.0543380\ttotal: 1.83s\tremaining: 942ms\n",
      "66:\tlearn: 0.0533777\ttotal: 1.85s\tremaining: 913ms\n",
      "67:\tlearn: 0.0528800\ttotal: 1.88s\tremaining: 884ms\n",
      "68:\tlearn: 0.0520057\ttotal: 1.91s\tremaining: 857ms\n",
      "69:\tlearn: 0.0510815\ttotal: 1.93s\tremaining: 829ms\n",
      "70:\tlearn: 0.0505261\ttotal: 1.95s\tremaining: 798ms\n",
      "71:\tlearn: 0.0501809\ttotal: 1.98s\tremaining: 769ms\n",
      "72:\tlearn: 0.0497559\ttotal: 2s\tremaining: 739ms\n",
      "73:\tlearn: 0.0488891\ttotal: 2.02s\tremaining: 711ms\n",
      "74:\tlearn: 0.0481952\ttotal: 2.05s\tremaining: 683ms\n",
      "75:\tlearn: 0.0479372\ttotal: 2.07s\tremaining: 654ms\n",
      "76:\tlearn: 0.0472110\ttotal: 2.09s\tremaining: 625ms\n",
      "77:\tlearn: 0.0466922\ttotal: 2.12s\tremaining: 597ms\n",
      "78:\tlearn: 0.0459690\ttotal: 2.14s\tremaining: 570ms\n",
      "79:\tlearn: 0.0453082\ttotal: 2.17s\tremaining: 543ms\n",
      "80:\tlearn: 0.0450745\ttotal: 2.19s\tremaining: 514ms\n",
      "81:\tlearn: 0.0446708\ttotal: 2.21s\tremaining: 486ms\n",
      "82:\tlearn: 0.0444552\ttotal: 2.23s\tremaining: 458ms\n",
      "83:\tlearn: 0.0441374\ttotal: 2.26s\tremaining: 430ms\n",
      "84:\tlearn: 0.0435489\ttotal: 2.28s\tremaining: 403ms\n",
      "85:\tlearn: 0.0433113\ttotal: 2.3s\tremaining: 375ms\n",
      "86:\tlearn: 0.0431839\ttotal: 2.33s\tremaining: 348ms\n",
      "87:\tlearn: 0.0427114\ttotal: 2.35s\tremaining: 321ms\n",
      "88:\tlearn: 0.0420951\ttotal: 2.38s\tremaining: 294ms\n",
      "89:\tlearn: 0.0418511\ttotal: 2.41s\tremaining: 268ms\n",
      "90:\tlearn: 0.0415564\ttotal: 2.43s\tremaining: 240ms\n",
      "91:\tlearn: 0.0411395\ttotal: 2.46s\tremaining: 214ms\n",
      "92:\tlearn: 0.0407046\ttotal: 2.48s\tremaining: 187ms\n",
      "93:\tlearn: 0.0403784\ttotal: 2.51s\tremaining: 160ms\n",
      "94:\tlearn: 0.0399907\ttotal: 2.53s\tremaining: 133ms\n",
      "95:\tlearn: 0.0395236\ttotal: 2.55s\tremaining: 106ms\n",
      "96:\tlearn: 0.0391171\ttotal: 2.58s\tremaining: 79.8ms\n",
      "97:\tlearn: 0.0387452\ttotal: 2.6s\tremaining: 53.1ms\n",
      "98:\tlearn: 0.0384306\ttotal: 2.63s\tremaining: 26.5ms\n",
      "99:\tlearn: 0.0382064\ttotal: 2.65s\tremaining: 0us\n",
      "0:\tlearn: 1.2941731\ttotal: 43.9ms\tremaining: 4.35s\n",
      "1:\tlearn: 1.0935470\ttotal: 72.9ms\tremaining: 3.57s\n",
      "2:\tlearn: 0.9440178\ttotal: 104ms\tremaining: 3.36s\n",
      "3:\tlearn: 0.8301561\ttotal: 135ms\tremaining: 3.24s\n",
      "4:\tlearn: 0.7385533\ttotal: 168ms\tremaining: 3.19s\n",
      "5:\tlearn: 0.6643900\ttotal: 199ms\tremaining: 3.12s\n",
      "6:\tlearn: 0.6005298\ttotal: 232ms\tremaining: 3.09s\n",
      "7:\tlearn: 0.5453494\ttotal: 264ms\tremaining: 3.04s\n",
      "8:\tlearn: 0.4987305\ttotal: 298ms\tremaining: 3.01s\n",
      "9:\tlearn: 0.4560777\ttotal: 327ms\tremaining: 2.94s\n",
      "10:\tlearn: 0.4196552\ttotal: 358ms\tremaining: 2.9s\n",
      "11:\tlearn: 0.3895951\ttotal: 388ms\tremaining: 2.84s\n",
      "12:\tlearn: 0.3600084\ttotal: 419ms\tremaining: 2.81s\n",
      "13:\tlearn: 0.3352793\ttotal: 446ms\tremaining: 2.74s\n",
      "14:\tlearn: 0.3108940\ttotal: 477ms\tremaining: 2.7s\n",
      "15:\tlearn: 0.2889932\ttotal: 505ms\tremaining: 2.65s\n",
      "16:\tlearn: 0.2713508\ttotal: 532ms\tremaining: 2.6s\n",
      "17:\tlearn: 0.2531026\ttotal: 568ms\tremaining: 2.59s\n",
      "18:\tlearn: 0.2374987\ttotal: 599ms\tremaining: 2.56s\n",
      "19:\tlearn: 0.2234081\ttotal: 631ms\tremaining: 2.52s\n",
      "20:\tlearn: 0.2103715\ttotal: 660ms\tremaining: 2.48s\n",
      "21:\tlearn: 0.1988439\ttotal: 690ms\tremaining: 2.44s\n",
      "22:\tlearn: 0.1897864\ttotal: 718ms\tremaining: 2.4s\n",
      "23:\tlearn: 0.1817641\ttotal: 742ms\tremaining: 2.35s\n",
      "24:\tlearn: 0.1727417\ttotal: 773ms\tremaining: 2.32s\n",
      "25:\tlearn: 0.1635712\ttotal: 803ms\tremaining: 2.28s\n",
      "26:\tlearn: 0.1556580\ttotal: 832ms\tremaining: 2.25s\n",
      "27:\tlearn: 0.1476098\ttotal: 865ms\tremaining: 2.22s\n",
      "28:\tlearn: 0.1413124\ttotal: 895ms\tremaining: 2.19s\n",
      "29:\tlearn: 0.1356934\ttotal: 923ms\tremaining: 2.15s\n",
      "30:\tlearn: 0.1305799\ttotal: 948ms\tremaining: 2.11s\n",
      "31:\tlearn: 0.1261232\ttotal: 973ms\tremaining: 2.07s\n",
      "32:\tlearn: 0.1211236\ttotal: 1s\tremaining: 2.04s\n",
      "33:\tlearn: 0.1170164\ttotal: 1.03s\tremaining: 2s\n",
      "34:\tlearn: 0.1124001\ttotal: 1.05s\tremaining: 1.96s\n",
      "35:\tlearn: 0.1081508\ttotal: 1.08s\tremaining: 1.93s\n",
      "36:\tlearn: 0.1050354\ttotal: 1.11s\tremaining: 1.89s\n",
      "37:\tlearn: 0.1012548\ttotal: 1.14s\tremaining: 1.86s\n",
      "38:\tlearn: 0.0981581\ttotal: 1.16s\tremaining: 1.82s\n",
      "39:\tlearn: 0.0957843\ttotal: 1.19s\tremaining: 1.78s\n",
      "40:\tlearn: 0.0923328\ttotal: 1.22s\tremaining: 1.75s\n",
      "41:\tlearn: 0.0901973\ttotal: 1.24s\tremaining: 1.72s\n",
      "42:\tlearn: 0.0882114\ttotal: 1.27s\tremaining: 1.68s\n",
      "43:\tlearn: 0.0861222\ttotal: 1.29s\tremaining: 1.65s\n",
      "44:\tlearn: 0.0837824\ttotal: 1.32s\tremaining: 1.62s\n",
      "45:\tlearn: 0.0818305\ttotal: 1.35s\tremaining: 1.59s\n",
      "46:\tlearn: 0.0803162\ttotal: 1.38s\tremaining: 1.55s\n",
      "47:\tlearn: 0.0782771\ttotal: 1.41s\tremaining: 1.52s\n",
      "48:\tlearn: 0.0767239\ttotal: 1.43s\tremaining: 1.49s\n",
      "49:\tlearn: 0.0747866\ttotal: 1.46s\tremaining: 1.46s\n",
      "50:\tlearn: 0.0736596\ttotal: 1.49s\tremaining: 1.43s\n",
      "51:\tlearn: 0.0717496\ttotal: 1.51s\tremaining: 1.4s\n",
      "52:\tlearn: 0.0702731\ttotal: 1.54s\tremaining: 1.36s\n",
      "53:\tlearn: 0.0685475\ttotal: 1.57s\tremaining: 1.34s\n",
      "54:\tlearn: 0.0675001\ttotal: 1.59s\tremaining: 1.3s\n",
      "55:\tlearn: 0.0661839\ttotal: 1.62s\tremaining: 1.27s\n",
      "56:\tlearn: 0.0652228\ttotal: 1.65s\tremaining: 1.24s\n",
      "57:\tlearn: 0.0644265\ttotal: 1.67s\tremaining: 1.21s\n",
      "58:\tlearn: 0.0631343\ttotal: 1.7s\tremaining: 1.18s\n",
      "59:\tlearn: 0.0622927\ttotal: 1.72s\tremaining: 1.15s\n",
      "60:\tlearn: 0.0616140\ttotal: 1.74s\tremaining: 1.11s\n",
      "61:\tlearn: 0.0606793\ttotal: 1.77s\tremaining: 1.08s\n",
      "62:\tlearn: 0.0597220\ttotal: 1.79s\tremaining: 1.05s\n",
      "63:\tlearn: 0.0586449\ttotal: 1.82s\tremaining: 1.02s\n",
      "64:\tlearn: 0.0579178\ttotal: 1.85s\tremaining: 994ms\n",
      "65:\tlearn: 0.0564404\ttotal: 1.88s\tremaining: 967ms\n",
      "66:\tlearn: 0.0553302\ttotal: 1.9s\tremaining: 936ms\n",
      "67:\tlearn: 0.0545773\ttotal: 1.92s\tremaining: 905ms\n",
      "68:\tlearn: 0.0539909\ttotal: 1.95s\tremaining: 876ms\n",
      "69:\tlearn: 0.0530263\ttotal: 1.97s\tremaining: 846ms\n",
      "70:\tlearn: 0.0525262\ttotal: 2s\tremaining: 815ms\n",
      "71:\tlearn: 0.0516412\ttotal: 2.02s\tremaining: 787ms\n",
      "72:\tlearn: 0.0508981\ttotal: 2.05s\tremaining: 757ms\n",
      "73:\tlearn: 0.0502232\ttotal: 2.07s\tremaining: 728ms\n",
      "74:\tlearn: 0.0497766\ttotal: 2.1s\tremaining: 699ms\n",
      "75:\tlearn: 0.0491486\ttotal: 2.12s\tremaining: 670ms\n",
      "76:\tlearn: 0.0484107\ttotal: 2.15s\tremaining: 642ms\n",
      "77:\tlearn: 0.0477051\ttotal: 2.17s\tremaining: 614ms\n",
      "78:\tlearn: 0.0468382\ttotal: 2.21s\tremaining: 587ms\n",
      "79:\tlearn: 0.0464184\ttotal: 2.23s\tremaining: 557ms\n",
      "80:\tlearn: 0.0460181\ttotal: 2.25s\tremaining: 528ms\n",
      "81:\tlearn: 0.0455572\ttotal: 2.27s\tremaining: 499ms\n",
      "82:\tlearn: 0.0450050\ttotal: 2.3s\tremaining: 471ms\n",
      "83:\tlearn: 0.0447341\ttotal: 2.32s\tremaining: 442ms\n",
      "84:\tlearn: 0.0439604\ttotal: 2.35s\tremaining: 414ms\n",
      "85:\tlearn: 0.0436332\ttotal: 2.37s\tremaining: 386ms\n",
      "86:\tlearn: 0.0432036\ttotal: 2.39s\tremaining: 358ms\n",
      "87:\tlearn: 0.0430906\ttotal: 2.41s\tremaining: 329ms\n",
      "88:\tlearn: 0.0428218\ttotal: 2.43s\tremaining: 301ms\n",
      "89:\tlearn: 0.0422920\ttotal: 2.46s\tremaining: 273ms\n",
      "90:\tlearn: 0.0419470\ttotal: 2.48s\tremaining: 245ms\n",
      "91:\tlearn: 0.0415355\ttotal: 2.5s\tremaining: 218ms\n",
      "92:\tlearn: 0.0412168\ttotal: 2.53s\tremaining: 190ms\n",
      "93:\tlearn: 0.0408237\ttotal: 2.55s\tremaining: 163ms\n",
      "94:\tlearn: 0.0402952\ttotal: 2.58s\tremaining: 136ms\n",
      "95:\tlearn: 0.0396479\ttotal: 2.61s\tremaining: 109ms\n",
      "96:\tlearn: 0.0392975\ttotal: 2.63s\tremaining: 81.3ms\n",
      "97:\tlearn: 0.0391192\ttotal: 2.65s\tremaining: 54.1ms\n",
      "98:\tlearn: 0.0388448\ttotal: 2.67s\tremaining: 27ms\n",
      "99:\tlearn: 0.0387527\ttotal: 2.69s\tremaining: 0us\n",
      "0:\tlearn: 1.2938743\ttotal: 43.1ms\tremaining: 4.26s\n",
      "1:\tlearn: 1.0947285\ttotal: 76.1ms\tremaining: 3.73s\n",
      "2:\tlearn: 0.9471874\ttotal: 106ms\tremaining: 3.44s\n",
      "3:\tlearn: 0.8356665\ttotal: 137ms\tremaining: 3.28s\n",
      "4:\tlearn: 0.7456461\ttotal: 166ms\tremaining: 3.15s\n",
      "5:\tlearn: 0.6721753\ttotal: 196ms\tremaining: 3.06s\n",
      "6:\tlearn: 0.6079154\ttotal: 227ms\tremaining: 3.01s\n",
      "7:\tlearn: 0.5530668\ttotal: 257ms\tremaining: 2.95s\n",
      "8:\tlearn: 0.5036249\ttotal: 285ms\tremaining: 2.88s\n",
      "9:\tlearn: 0.4606247\ttotal: 314ms\tremaining: 2.82s\n",
      "10:\tlearn: 0.4224851\ttotal: 345ms\tremaining: 2.79s\n",
      "11:\tlearn: 0.3922691\ttotal: 374ms\tremaining: 2.74s\n",
      "12:\tlearn: 0.3626392\ttotal: 404ms\tremaining: 2.71s\n",
      "13:\tlearn: 0.3377352\ttotal: 431ms\tremaining: 2.65s\n",
      "14:\tlearn: 0.3127542\ttotal: 466ms\tremaining: 2.64s\n",
      "15:\tlearn: 0.2926292\ttotal: 494ms\tremaining: 2.59s\n",
      "16:\tlearn: 0.2711228\ttotal: 528ms\tremaining: 2.58s\n",
      "17:\tlearn: 0.2536515\ttotal: 562ms\tremaining: 2.56s\n",
      "18:\tlearn: 0.2374270\ttotal: 593ms\tremaining: 2.53s\n",
      "19:\tlearn: 0.2236047\ttotal: 623ms\tremaining: 2.49s\n",
      "20:\tlearn: 0.2110710\ttotal: 654ms\tremaining: 2.46s\n",
      "21:\tlearn: 0.1995871\ttotal: 681ms\tremaining: 2.42s\n",
      "22:\tlearn: 0.1901821\ttotal: 705ms\tremaining: 2.36s\n",
      "23:\tlearn: 0.1818557\ttotal: 730ms\tremaining: 2.31s\n",
      "24:\tlearn: 0.1725389\ttotal: 761ms\tremaining: 2.28s\n",
      "25:\tlearn: 0.1644106\ttotal: 792ms\tremaining: 2.25s\n",
      "26:\tlearn: 0.1562263\ttotal: 826ms\tremaining: 2.23s\n",
      "27:\tlearn: 0.1502266\ttotal: 851ms\tremaining: 2.19s\n",
      "28:\tlearn: 0.1442572\ttotal: 878ms\tremaining: 2.15s\n",
      "29:\tlearn: 0.1377900\ttotal: 908ms\tremaining: 2.12s\n",
      "30:\tlearn: 0.1331553\ttotal: 935ms\tremaining: 2.08s\n",
      "31:\tlearn: 0.1274396\ttotal: 963ms\tremaining: 2.05s\n",
      "32:\tlearn: 0.1227966\ttotal: 990ms\tremaining: 2.01s\n",
      "33:\tlearn: 0.1182253\ttotal: 1.02s\tremaining: 1.97s\n",
      "34:\tlearn: 0.1144995\ttotal: 1.04s\tremaining: 1.94s\n",
      "35:\tlearn: 0.1107368\ttotal: 1.07s\tremaining: 1.9s\n",
      "36:\tlearn: 0.1072700\ttotal: 1.09s\tremaining: 1.87s\n",
      "37:\tlearn: 0.1040557\ttotal: 1.12s\tremaining: 1.83s\n",
      "38:\tlearn: 0.1005250\ttotal: 1.15s\tremaining: 1.79s\n",
      "39:\tlearn: 0.0977086\ttotal: 1.17s\tremaining: 1.76s\n",
      "40:\tlearn: 0.0943104\ttotal: 1.2s\tremaining: 1.73s\n",
      "41:\tlearn: 0.0915513\ttotal: 1.22s\tremaining: 1.69s\n",
      "42:\tlearn: 0.0891834\ttotal: 1.25s\tremaining: 1.66s\n",
      "43:\tlearn: 0.0874907\ttotal: 1.27s\tremaining: 1.62s\n",
      "44:\tlearn: 0.0851676\ttotal: 1.3s\tremaining: 1.59s\n",
      "45:\tlearn: 0.0833105\ttotal: 1.33s\tremaining: 1.56s\n",
      "46:\tlearn: 0.0820275\ttotal: 1.35s\tremaining: 1.52s\n",
      "47:\tlearn: 0.0800226\ttotal: 1.38s\tremaining: 1.49s\n",
      "48:\tlearn: 0.0784169\ttotal: 1.4s\tremaining: 1.46s\n",
      "49:\tlearn: 0.0764339\ttotal: 1.43s\tremaining: 1.43s\n",
      "50:\tlearn: 0.0744729\ttotal: 1.46s\tremaining: 1.4s\n",
      "51:\tlearn: 0.0732276\ttotal: 1.48s\tremaining: 1.37s\n",
      "52:\tlearn: 0.0716183\ttotal: 1.51s\tremaining: 1.34s\n",
      "53:\tlearn: 0.0699214\ttotal: 1.54s\tremaining: 1.31s\n",
      "54:\tlearn: 0.0688423\ttotal: 1.56s\tremaining: 1.28s\n",
      "55:\tlearn: 0.0674696\ttotal: 1.59s\tremaining: 1.25s\n",
      "56:\tlearn: 0.0663645\ttotal: 1.61s\tremaining: 1.22s\n",
      "57:\tlearn: 0.0651658\ttotal: 1.63s\tremaining: 1.18s\n",
      "58:\tlearn: 0.0634644\ttotal: 1.66s\tremaining: 1.16s\n",
      "59:\tlearn: 0.0625952\ttotal: 1.68s\tremaining: 1.12s\n",
      "60:\tlearn: 0.0616514\ttotal: 1.71s\tremaining: 1.09s\n",
      "61:\tlearn: 0.0608275\ttotal: 1.73s\tremaining: 1.06s\n",
      "62:\tlearn: 0.0601388\ttotal: 1.75s\tremaining: 1.03s\n",
      "63:\tlearn: 0.0595353\ttotal: 1.77s\tremaining: 997ms\n",
      "64:\tlearn: 0.0582630\ttotal: 1.8s\tremaining: 971ms\n",
      "65:\tlearn: 0.0574390\ttotal: 1.83s\tremaining: 941ms\n",
      "66:\tlearn: 0.0565579\ttotal: 1.85s\tremaining: 913ms\n",
      "67:\tlearn: 0.0559672\ttotal: 1.88s\tremaining: 884ms\n",
      "68:\tlearn: 0.0551662\ttotal: 1.9s\tremaining: 854ms\n",
      "69:\tlearn: 0.0544022\ttotal: 1.93s\tremaining: 825ms\n",
      "70:\tlearn: 0.0534630\ttotal: 1.95s\tremaining: 798ms\n",
      "71:\tlearn: 0.0526284\ttotal: 1.98s\tremaining: 769ms\n",
      "72:\tlearn: 0.0521134\ttotal: 2s\tremaining: 741ms\n",
      "73:\tlearn: 0.0517053\ttotal: 2.02s\tremaining: 712ms\n",
      "74:\tlearn: 0.0507011\ttotal: 2.05s\tremaining: 684ms\n",
      "75:\tlearn: 0.0500277\ttotal: 2.07s\tremaining: 655ms\n",
      "76:\tlearn: 0.0495951\ttotal: 2.1s\tremaining: 627ms\n",
      "77:\tlearn: 0.0487741\ttotal: 2.12s\tremaining: 599ms\n",
      "78:\tlearn: 0.0480363\ttotal: 2.15s\tremaining: 571ms\n",
      "79:\tlearn: 0.0472157\ttotal: 2.18s\tremaining: 545ms\n",
      "80:\tlearn: 0.0465866\ttotal: 2.2s\tremaining: 517ms\n",
      "81:\tlearn: 0.0464903\ttotal: 2.22s\tremaining: 488ms\n",
      "82:\tlearn: 0.0460851\ttotal: 2.25s\tremaining: 460ms\n",
      "83:\tlearn: 0.0456806\ttotal: 2.27s\tremaining: 432ms\n",
      "84:\tlearn: 0.0454370\ttotal: 2.29s\tremaining: 404ms\n",
      "85:\tlearn: 0.0449367\ttotal: 2.31s\tremaining: 377ms\n",
      "86:\tlearn: 0.0445915\ttotal: 2.33s\tremaining: 349ms\n",
      "87:\tlearn: 0.0443794\ttotal: 2.35s\tremaining: 321ms\n",
      "88:\tlearn: 0.0438715\ttotal: 2.38s\tremaining: 294ms\n",
      "89:\tlearn: 0.0431989\ttotal: 2.4s\tremaining: 267ms\n",
      "90:\tlearn: 0.0430354\ttotal: 2.42s\tremaining: 240ms\n",
      "91:\tlearn: 0.0426389\ttotal: 2.45s\tremaining: 213ms\n",
      "92:\tlearn: 0.0422896\ttotal: 2.48s\tremaining: 186ms\n",
      "93:\tlearn: 0.0417624\ttotal: 2.5s\tremaining: 160ms\n",
      "94:\tlearn: 0.0412893\ttotal: 2.53s\tremaining: 133ms\n",
      "95:\tlearn: 0.0408513\ttotal: 2.55s\tremaining: 106ms\n",
      "96:\tlearn: 0.0406716\ttotal: 2.58s\tremaining: 79.7ms\n",
      "97:\tlearn: 0.0403459\ttotal: 2.6s\tremaining: 53.1ms\n",
      "98:\tlearn: 0.0399696\ttotal: 2.63s\tremaining: 26.5ms\n",
      "99:\tlearn: 0.0398888\ttotal: 2.65s\tremaining: 0us\n",
      "0:\tlearn: 1.2936176\ttotal: 41.5ms\tremaining: 4.1s\n",
      "1:\tlearn: 1.0940240\ttotal: 71.7ms\tremaining: 3.52s\n",
      "2:\tlearn: 0.9460475\ttotal: 102ms\tremaining: 3.29s\n",
      "3:\tlearn: 0.8308986\ttotal: 132ms\tremaining: 3.17s\n",
      "4:\tlearn: 0.7366854\ttotal: 167ms\tremaining: 3.17s\n",
      "5:\tlearn: 0.6619229\ttotal: 197ms\tremaining: 3.08s\n",
      "6:\tlearn: 0.5959534\ttotal: 230ms\tremaining: 3.05s\n",
      "7:\tlearn: 0.5404037\ttotal: 259ms\tremaining: 2.98s\n",
      "8:\tlearn: 0.4926327\ttotal: 288ms\tremaining: 2.91s\n",
      "9:\tlearn: 0.4515582\ttotal: 316ms\tremaining: 2.84s\n",
      "10:\tlearn: 0.4154880\ttotal: 346ms\tremaining: 2.8s\n",
      "11:\tlearn: 0.3853210\ttotal: 375ms\tremaining: 2.75s\n",
      "12:\tlearn: 0.3568708\ttotal: 404ms\tremaining: 2.7s\n",
      "13:\tlearn: 0.3301530\ttotal: 436ms\tremaining: 2.68s\n",
      "14:\tlearn: 0.3053643\ttotal: 471ms\tremaining: 2.67s\n",
      "15:\tlearn: 0.2840741\ttotal: 498ms\tremaining: 2.61s\n",
      "16:\tlearn: 0.2639029\ttotal: 529ms\tremaining: 2.58s\n",
      "17:\tlearn: 0.2466630\ttotal: 558ms\tremaining: 2.54s\n",
      "18:\tlearn: 0.2318146\ttotal: 590ms\tremaining: 2.52s\n",
      "19:\tlearn: 0.2190821\ttotal: 619ms\tremaining: 2.48s\n",
      "20:\tlearn: 0.2061649\ttotal: 647ms\tremaining: 2.43s\n",
      "21:\tlearn: 0.1950851\ttotal: 672ms\tremaining: 2.38s\n",
      "22:\tlearn: 0.1853857\ttotal: 698ms\tremaining: 2.34s\n",
      "23:\tlearn: 0.1768917\ttotal: 725ms\tremaining: 2.29s\n",
      "24:\tlearn: 0.1688076\ttotal: 752ms\tremaining: 2.26s\n",
      "25:\tlearn: 0.1605900\ttotal: 780ms\tremaining: 2.22s\n",
      "26:\tlearn: 0.1527164\ttotal: 815ms\tremaining: 2.2s\n",
      "27:\tlearn: 0.1451115\ttotal: 849ms\tremaining: 2.18s\n",
      "28:\tlearn: 0.1379956\ttotal: 878ms\tremaining: 2.15s\n",
      "29:\tlearn: 0.1331570\ttotal: 905ms\tremaining: 2.11s\n",
      "30:\tlearn: 0.1281358\ttotal: 934ms\tremaining: 2.08s\n",
      "31:\tlearn: 0.1227698\ttotal: 962ms\tremaining: 2.04s\n",
      "32:\tlearn: 0.1186210\ttotal: 991ms\tremaining: 2.01s\n",
      "33:\tlearn: 0.1144476\ttotal: 1.02s\tremaining: 1.98s\n",
      "34:\tlearn: 0.1104929\ttotal: 1.05s\tremaining: 1.96s\n",
      "35:\tlearn: 0.1067597\ttotal: 1.08s\tremaining: 1.93s\n",
      "36:\tlearn: 0.1033773\ttotal: 1.11s\tremaining: 1.89s\n",
      "37:\tlearn: 0.0997570\ttotal: 1.14s\tremaining: 1.86s\n",
      "38:\tlearn: 0.0974591\ttotal: 1.16s\tremaining: 1.82s\n",
      "39:\tlearn: 0.0950135\ttotal: 1.18s\tremaining: 1.77s\n",
      "40:\tlearn: 0.0926885\ttotal: 1.21s\tremaining: 1.75s\n",
      "41:\tlearn: 0.0899039\ttotal: 1.24s\tremaining: 1.71s\n",
      "42:\tlearn: 0.0882476\ttotal: 1.26s\tremaining: 1.67s\n",
      "43:\tlearn: 0.0858964\ttotal: 1.28s\tremaining: 1.64s\n",
      "44:\tlearn: 0.0833037\ttotal: 1.31s\tremaining: 1.6s\n",
      "45:\tlearn: 0.0815436\ttotal: 1.33s\tremaining: 1.57s\n",
      "46:\tlearn: 0.0798662\ttotal: 1.36s\tremaining: 1.53s\n",
      "47:\tlearn: 0.0779490\ttotal: 1.39s\tremaining: 1.5s\n",
      "48:\tlearn: 0.0762005\ttotal: 1.41s\tremaining: 1.47s\n",
      "49:\tlearn: 0.0744944\ttotal: 1.44s\tremaining: 1.44s\n",
      "50:\tlearn: 0.0728199\ttotal: 1.47s\tremaining: 1.41s\n",
      "51:\tlearn: 0.0711833\ttotal: 1.5s\tremaining: 1.38s\n",
      "52:\tlearn: 0.0698619\ttotal: 1.52s\tremaining: 1.34s\n",
      "53:\tlearn: 0.0683310\ttotal: 1.54s\tremaining: 1.31s\n",
      "54:\tlearn: 0.0674795\ttotal: 1.56s\tremaining: 1.28s\n",
      "55:\tlearn: 0.0661681\ttotal: 1.59s\tremaining: 1.25s\n",
      "56:\tlearn: 0.0650084\ttotal: 1.61s\tremaining: 1.21s\n",
      "57:\tlearn: 0.0637438\ttotal: 1.64s\tremaining: 1.18s\n",
      "58:\tlearn: 0.0619434\ttotal: 1.66s\tremaining: 1.16s\n",
      "59:\tlearn: 0.0610000\ttotal: 1.69s\tremaining: 1.13s\n",
      "60:\tlearn: 0.0599645\ttotal: 1.71s\tremaining: 1.09s\n",
      "61:\tlearn: 0.0590194\ttotal: 1.74s\tremaining: 1.07s\n",
      "62:\tlearn: 0.0580444\ttotal: 1.77s\tremaining: 1.04s\n",
      "63:\tlearn: 0.0572556\ttotal: 1.79s\tremaining: 1s\n",
      "64:\tlearn: 0.0560226\ttotal: 1.81s\tremaining: 976ms\n",
      "65:\tlearn: 0.0552928\ttotal: 1.83s\tremaining: 945ms\n",
      "66:\tlearn: 0.0545883\ttotal: 1.86s\tremaining: 915ms\n",
      "67:\tlearn: 0.0539051\ttotal: 1.88s\tremaining: 884ms\n",
      "68:\tlearn: 0.0533889\ttotal: 1.9s\tremaining: 855ms\n",
      "69:\tlearn: 0.0520036\ttotal: 1.93s\tremaining: 828ms\n",
      "70:\tlearn: 0.0511322\ttotal: 1.96s\tremaining: 799ms\n",
      "71:\tlearn: 0.0501520\ttotal: 1.99s\tremaining: 773ms\n",
      "72:\tlearn: 0.0494442\ttotal: 2.01s\tremaining: 744ms\n",
      "73:\tlearn: 0.0486663\ttotal: 2.03s\tremaining: 714ms\n",
      "74:\tlearn: 0.0479697\ttotal: 2.06s\tremaining: 686ms\n",
      "75:\tlearn: 0.0476826\ttotal: 2.08s\tremaining: 657ms\n",
      "76:\tlearn: 0.0472051\ttotal: 2.1s\tremaining: 628ms\n",
      "77:\tlearn: 0.0467029\ttotal: 2.12s\tremaining: 599ms\n",
      "78:\tlearn: 0.0461350\ttotal: 2.15s\tremaining: 571ms\n",
      "79:\tlearn: 0.0456740\ttotal: 2.17s\tremaining: 543ms\n",
      "80:\tlearn: 0.0453346\ttotal: 2.19s\tremaining: 514ms\n",
      "81:\tlearn: 0.0450743\ttotal: 2.21s\tremaining: 486ms\n",
      "82:\tlearn: 0.0447967\ttotal: 2.23s\tremaining: 457ms\n",
      "83:\tlearn: 0.0442645\ttotal: 2.25s\tremaining: 430ms\n",
      "84:\tlearn: 0.0439653\ttotal: 2.27s\tremaining: 402ms\n",
      "85:\tlearn: 0.0435632\ttotal: 2.3s\tremaining: 374ms\n",
      "86:\tlearn: 0.0432668\ttotal: 2.32s\tremaining: 347ms\n",
      "87:\tlearn: 0.0429502\ttotal: 2.35s\tremaining: 320ms\n",
      "88:\tlearn: 0.0422425\ttotal: 2.38s\tremaining: 294ms\n",
      "89:\tlearn: 0.0417984\ttotal: 2.4s\tremaining: 267ms\n",
      "90:\tlearn: 0.0416235\ttotal: 2.42s\tremaining: 240ms\n",
      "91:\tlearn: 0.0412089\ttotal: 2.45s\tremaining: 213ms\n",
      "92:\tlearn: 0.0405921\ttotal: 2.47s\tremaining: 186ms\n",
      "93:\tlearn: 0.0403048\ttotal: 2.49s\tremaining: 159ms\n",
      "94:\tlearn: 0.0399188\ttotal: 2.52s\tremaining: 133ms\n",
      "95:\tlearn: 0.0396582\ttotal: 2.54s\tremaining: 106ms\n",
      "96:\tlearn: 0.0393477\ttotal: 2.57s\tremaining: 79.4ms\n",
      "97:\tlearn: 0.0391462\ttotal: 2.59s\tremaining: 52.8ms\n",
      "98:\tlearn: 0.0390701\ttotal: 2.6s\tremaining: 26.3ms\n",
      "99:\tlearn: 0.0389116\ttotal: 2.62s\tremaining: 0us\n",
      "0:\tlearn: 1.2940085\ttotal: 42.6ms\tremaining: 4.22s\n",
      "1:\tlearn: 1.0945382\ttotal: 73.8ms\tremaining: 3.62s\n",
      "2:\tlearn: 0.9447208\ttotal: 106ms\tremaining: 3.42s\n",
      "3:\tlearn: 0.8328806\ttotal: 138ms\tremaining: 3.3s\n",
      "4:\tlearn: 0.7395459\ttotal: 169ms\tremaining: 3.21s\n",
      "5:\tlearn: 0.6620083\ttotal: 199ms\tremaining: 3.11s\n",
      "6:\tlearn: 0.5965425\ttotal: 228ms\tremaining: 3.03s\n",
      "7:\tlearn: 0.5420620\ttotal: 258ms\tremaining: 2.96s\n",
      "8:\tlearn: 0.4968625\ttotal: 282ms\tremaining: 2.85s\n",
      "9:\tlearn: 0.4547513\ttotal: 310ms\tremaining: 2.79s\n",
      "10:\tlearn: 0.4191305\ttotal: 338ms\tremaining: 2.74s\n",
      "11:\tlearn: 0.3887025\ttotal: 366ms\tremaining: 2.69s\n",
      "12:\tlearn: 0.3597844\ttotal: 397ms\tremaining: 2.66s\n",
      "13:\tlearn: 0.3315805\ttotal: 432ms\tremaining: 2.65s\n",
      "14:\tlearn: 0.3064657\ttotal: 464ms\tremaining: 2.63s\n",
      "15:\tlearn: 0.2849140\ttotal: 492ms\tremaining: 2.58s\n",
      "16:\tlearn: 0.2675148\ttotal: 519ms\tremaining: 2.53s\n",
      "17:\tlearn: 0.2489777\ttotal: 553ms\tremaining: 2.52s\n",
      "18:\tlearn: 0.2342595\ttotal: 583ms\tremaining: 2.49s\n",
      "19:\tlearn: 0.2208549\ttotal: 611ms\tremaining: 2.44s\n",
      "20:\tlearn: 0.2094768\ttotal: 640ms\tremaining: 2.41s\n",
      "21:\tlearn: 0.1986583\ttotal: 665ms\tremaining: 2.36s\n",
      "22:\tlearn: 0.1889587\ttotal: 691ms\tremaining: 2.31s\n",
      "23:\tlearn: 0.1782978\ttotal: 719ms\tremaining: 2.28s\n",
      "24:\tlearn: 0.1691484\ttotal: 746ms\tremaining: 2.24s\n",
      "25:\tlearn: 0.1610214\ttotal: 774ms\tremaining: 2.2s\n",
      "26:\tlearn: 0.1527995\ttotal: 808ms\tremaining: 2.18s\n",
      "27:\tlearn: 0.1454392\ttotal: 833ms\tremaining: 2.14s\n",
      "28:\tlearn: 0.1394184\ttotal: 857ms\tremaining: 2.1s\n",
      "29:\tlearn: 0.1335229\ttotal: 887ms\tremaining: 2.07s\n",
      "30:\tlearn: 0.1286009\ttotal: 912ms\tremaining: 2.03s\n",
      "31:\tlearn: 0.1235981\ttotal: 939ms\tremaining: 2s\n",
      "32:\tlearn: 0.1187363\ttotal: 968ms\tremaining: 1.97s\n",
      "33:\tlearn: 0.1141724\ttotal: 1000ms\tremaining: 1.94s\n",
      "34:\tlearn: 0.1099198\ttotal: 1.03s\tremaining: 1.91s\n",
      "35:\tlearn: 0.1057160\ttotal: 1.05s\tremaining: 1.88s\n",
      "36:\tlearn: 0.1023996\ttotal: 1.08s\tremaining: 1.84s\n",
      "37:\tlearn: 0.0990759\ttotal: 1.11s\tremaining: 1.81s\n",
      "38:\tlearn: 0.0972081\ttotal: 1.13s\tremaining: 1.77s\n",
      "39:\tlearn: 0.0934538\ttotal: 1.16s\tremaining: 1.74s\n",
      "40:\tlearn: 0.0902101\ttotal: 1.19s\tremaining: 1.71s\n",
      "41:\tlearn: 0.0883725\ttotal: 1.21s\tremaining: 1.67s\n",
      "42:\tlearn: 0.0859594\ttotal: 1.23s\tremaining: 1.63s\n",
      "43:\tlearn: 0.0836451\ttotal: 1.26s\tremaining: 1.6s\n",
      "44:\tlearn: 0.0811318\ttotal: 1.29s\tremaining: 1.57s\n",
      "45:\tlearn: 0.0793636\ttotal: 1.31s\tremaining: 1.54s\n",
      "46:\tlearn: 0.0778465\ttotal: 1.33s\tremaining: 1.5s\n",
      "47:\tlearn: 0.0758405\ttotal: 1.36s\tremaining: 1.47s\n",
      "48:\tlearn: 0.0744677\ttotal: 1.38s\tremaining: 1.44s\n",
      "49:\tlearn: 0.0728957\ttotal: 1.41s\tremaining: 1.41s\n",
      "50:\tlearn: 0.0715563\ttotal: 1.43s\tremaining: 1.38s\n",
      "51:\tlearn: 0.0695957\ttotal: 1.46s\tremaining: 1.35s\n",
      "52:\tlearn: 0.0680940\ttotal: 1.49s\tremaining: 1.32s\n",
      "53:\tlearn: 0.0668637\ttotal: 1.51s\tremaining: 1.29s\n",
      "54:\tlearn: 0.0657936\ttotal: 1.53s\tremaining: 1.25s\n",
      "55:\tlearn: 0.0645158\ttotal: 1.56s\tremaining: 1.23s\n",
      "56:\tlearn: 0.0635814\ttotal: 1.58s\tremaining: 1.19s\n",
      "57:\tlearn: 0.0622653\ttotal: 1.61s\tremaining: 1.16s\n",
      "58:\tlearn: 0.0608526\ttotal: 1.64s\tremaining: 1.14s\n",
      "59:\tlearn: 0.0599804\ttotal: 1.66s\tremaining: 1.1s\n",
      "60:\tlearn: 0.0593513\ttotal: 1.68s\tremaining: 1.07s\n",
      "61:\tlearn: 0.0581098\ttotal: 1.71s\tremaining: 1.04s\n",
      "62:\tlearn: 0.0568541\ttotal: 1.73s\tremaining: 1.02s\n",
      "63:\tlearn: 0.0561446\ttotal: 1.75s\tremaining: 986ms\n",
      "64:\tlearn: 0.0551923\ttotal: 1.78s\tremaining: 958ms\n",
      "65:\tlearn: 0.0546579\ttotal: 1.8s\tremaining: 928ms\n",
      "66:\tlearn: 0.0538086\ttotal: 1.82s\tremaining: 899ms\n",
      "67:\tlearn: 0.0535609\ttotal: 1.84s\tremaining: 868ms\n",
      "68:\tlearn: 0.0527661\ttotal: 1.87s\tremaining: 840ms\n",
      "69:\tlearn: 0.0521131\ttotal: 1.89s\tremaining: 811ms\n",
      "70:\tlearn: 0.0513360\ttotal: 1.92s\tremaining: 783ms\n",
      "71:\tlearn: 0.0510408\ttotal: 1.94s\tremaining: 753ms\n",
      "72:\tlearn: 0.0505689\ttotal: 1.96s\tremaining: 725ms\n",
      "73:\tlearn: 0.0497637\ttotal: 1.98s\tremaining: 697ms\n",
      "74:\tlearn: 0.0492563\ttotal: 2s\tremaining: 669ms\n",
      "75:\tlearn: 0.0488488\ttotal: 2.02s\tremaining: 640ms\n",
      "76:\tlearn: 0.0485749\ttotal: 2.04s\tremaining: 611ms\n",
      "77:\tlearn: 0.0483159\ttotal: 2.06s\tremaining: 582ms\n",
      "78:\tlearn: 0.0475294\ttotal: 2.09s\tremaining: 556ms\n",
      "79:\tlearn: 0.0471393\ttotal: 2.11s\tremaining: 529ms\n",
      "80:\tlearn: 0.0467590\ttotal: 2.14s\tremaining: 501ms\n",
      "81:\tlearn: 0.0462383\ttotal: 2.16s\tremaining: 474ms\n",
      "82:\tlearn: 0.0458345\ttotal: 2.18s\tremaining: 447ms\n",
      "83:\tlearn: 0.0452928\ttotal: 2.21s\tremaining: 420ms\n",
      "84:\tlearn: 0.0449759\ttotal: 2.23s\tremaining: 393ms\n",
      "85:\tlearn: 0.0448004\ttotal: 2.25s\tremaining: 366ms\n",
      "86:\tlearn: 0.0445529\ttotal: 2.27s\tremaining: 339ms\n",
      "87:\tlearn: 0.0442949\ttotal: 2.29s\tremaining: 312ms\n",
      "88:\tlearn: 0.0440839\ttotal: 2.31s\tremaining: 285ms\n",
      "89:\tlearn: 0.0433465\ttotal: 2.33s\tremaining: 259ms\n",
      "90:\tlearn: 0.0430968\ttotal: 2.36s\tremaining: 233ms\n",
      "91:\tlearn: 0.0427252\ttotal: 2.38s\tremaining: 207ms\n",
      "92:\tlearn: 0.0420726\ttotal: 2.4s\tremaining: 181ms\n",
      "93:\tlearn: 0.0415977\ttotal: 2.43s\tremaining: 155ms\n",
      "94:\tlearn: 0.0412550\ttotal: 2.46s\tremaining: 129ms\n",
      "95:\tlearn: 0.0409133\ttotal: 2.48s\tremaining: 103ms\n",
      "96:\tlearn: 0.0406979\ttotal: 2.5s\tremaining: 77.4ms\n",
      "97:\tlearn: 0.0403457\ttotal: 2.52s\tremaining: 51.5ms\n",
      "98:\tlearn: 0.0399820\ttotal: 2.55s\tremaining: 25.7ms\n",
      "99:\tlearn: 0.0397243\ttotal: 2.57s\tremaining: 0us\n",
      "0:\tlearn: 1.2905896\ttotal: 40.6ms\tremaining: 4.02s\n",
      "1:\tlearn: 1.0915651\ttotal: 70.9ms\tremaining: 3.47s\n",
      "2:\tlearn: 0.9425005\ttotal: 103ms\tremaining: 3.34s\n",
      "3:\tlearn: 0.8304788\ttotal: 132ms\tremaining: 3.17s\n",
      "4:\tlearn: 0.7400236\ttotal: 165ms\tremaining: 3.13s\n",
      "5:\tlearn: 0.6671627\ttotal: 195ms\tremaining: 3.05s\n",
      "6:\tlearn: 0.6039823\ttotal: 226ms\tremaining: 3.01s\n",
      "7:\tlearn: 0.5489595\ttotal: 256ms\tremaining: 2.94s\n",
      "8:\tlearn: 0.4995197\ttotal: 284ms\tremaining: 2.87s\n",
      "9:\tlearn: 0.4567615\ttotal: 317ms\tremaining: 2.85s\n",
      "10:\tlearn: 0.4204167\ttotal: 348ms\tremaining: 2.82s\n",
      "11:\tlearn: 0.3901178\ttotal: 376ms\tremaining: 2.76s\n",
      "12:\tlearn: 0.3629352\ttotal: 404ms\tremaining: 2.7s\n",
      "13:\tlearn: 0.3377914\ttotal: 432ms\tremaining: 2.65s\n",
      "14:\tlearn: 0.3125892\ttotal: 463ms\tremaining: 2.62s\n",
      "15:\tlearn: 0.2910303\ttotal: 488ms\tremaining: 2.56s\n",
      "16:\tlearn: 0.2709493\ttotal: 521ms\tremaining: 2.54s\n",
      "17:\tlearn: 0.2523436\ttotal: 556ms\tremaining: 2.53s\n",
      "18:\tlearn: 0.2364134\ttotal: 585ms\tremaining: 2.49s\n",
      "19:\tlearn: 0.2228303\ttotal: 612ms\tremaining: 2.45s\n",
      "20:\tlearn: 0.2115150\ttotal: 638ms\tremaining: 2.4s\n",
      "21:\tlearn: 0.1992801\ttotal: 665ms\tremaining: 2.36s\n",
      "22:\tlearn: 0.1895279\ttotal: 692ms\tremaining: 2.32s\n",
      "23:\tlearn: 0.1789669\ttotal: 723ms\tremaining: 2.29s\n",
      "24:\tlearn: 0.1703751\ttotal: 750ms\tremaining: 2.25s\n",
      "25:\tlearn: 0.1623037\ttotal: 782ms\tremaining: 2.23s\n",
      "26:\tlearn: 0.1548330\ttotal: 814ms\tremaining: 2.2s\n",
      "27:\tlearn: 0.1472309\ttotal: 847ms\tremaining: 2.18s\n",
      "28:\tlearn: 0.1414449\ttotal: 874ms\tremaining: 2.14s\n",
      "29:\tlearn: 0.1350286\ttotal: 904ms\tremaining: 2.11s\n",
      "30:\tlearn: 0.1300778\ttotal: 932ms\tremaining: 2.08s\n",
      "31:\tlearn: 0.1249264\ttotal: 958ms\tremaining: 2.03s\n",
      "32:\tlearn: 0.1200617\ttotal: 987ms\tremaining: 2s\n",
      "33:\tlearn: 0.1163831\ttotal: 1.01s\tremaining: 1.97s\n",
      "34:\tlearn: 0.1121055\ttotal: 1.04s\tremaining: 1.93s\n",
      "35:\tlearn: 0.1080307\ttotal: 1.07s\tremaining: 1.9s\n",
      "36:\tlearn: 0.1046328\ttotal: 1.09s\tremaining: 1.86s\n",
      "37:\tlearn: 0.1009165\ttotal: 1.12s\tremaining: 1.83s\n",
      "38:\tlearn: 0.0974574\ttotal: 1.15s\tremaining: 1.8s\n",
      "39:\tlearn: 0.0945732\ttotal: 1.18s\tremaining: 1.77s\n",
      "40:\tlearn: 0.0911086\ttotal: 1.21s\tremaining: 1.74s\n",
      "41:\tlearn: 0.0892610\ttotal: 1.23s\tremaining: 1.69s\n",
      "42:\tlearn: 0.0872279\ttotal: 1.25s\tremaining: 1.66s\n",
      "43:\tlearn: 0.0849431\ttotal: 1.27s\tremaining: 1.62s\n",
      "44:\tlearn: 0.0826193\ttotal: 1.3s\tremaining: 1.59s\n",
      "45:\tlearn: 0.0803444\ttotal: 1.33s\tremaining: 1.56s\n",
      "46:\tlearn: 0.0778610\ttotal: 1.35s\tremaining: 1.53s\n",
      "47:\tlearn: 0.0761245\ttotal: 1.38s\tremaining: 1.5s\n",
      "48:\tlearn: 0.0744090\ttotal: 1.41s\tremaining: 1.47s\n",
      "49:\tlearn: 0.0725390\ttotal: 1.44s\tremaining: 1.44s\n",
      "50:\tlearn: 0.0712038\ttotal: 1.46s\tremaining: 1.41s\n",
      "51:\tlearn: 0.0696301\ttotal: 1.49s\tremaining: 1.37s\n",
      "52:\tlearn: 0.0678108\ttotal: 1.52s\tremaining: 1.35s\n",
      "53:\tlearn: 0.0662647\ttotal: 1.54s\tremaining: 1.31s\n",
      "54:\tlearn: 0.0649977\ttotal: 1.57s\tremaining: 1.28s\n",
      "55:\tlearn: 0.0637459\ttotal: 1.59s\tremaining: 1.25s\n",
      "56:\tlearn: 0.0626666\ttotal: 1.61s\tremaining: 1.22s\n",
      "57:\tlearn: 0.0617701\ttotal: 1.64s\tremaining: 1.19s\n",
      "58:\tlearn: 0.0606490\ttotal: 1.66s\tremaining: 1.15s\n",
      "59:\tlearn: 0.0597155\ttotal: 1.69s\tremaining: 1.12s\n",
      "60:\tlearn: 0.0589931\ttotal: 1.71s\tremaining: 1.09s\n",
      "61:\tlearn: 0.0582584\ttotal: 1.73s\tremaining: 1.06s\n",
      "62:\tlearn: 0.0570967\ttotal: 1.75s\tremaining: 1.03s\n",
      "63:\tlearn: 0.0558844\ttotal: 1.78s\tremaining: 1s\n",
      "64:\tlearn: 0.0549930\ttotal: 1.81s\tremaining: 973ms\n",
      "65:\tlearn: 0.0544740\ttotal: 1.83s\tremaining: 943ms\n",
      "66:\tlearn: 0.0534789\ttotal: 1.86s\tremaining: 915ms\n",
      "67:\tlearn: 0.0526640\ttotal: 1.88s\tremaining: 885ms\n",
      "68:\tlearn: 0.0516322\ttotal: 1.91s\tremaining: 856ms\n",
      "69:\tlearn: 0.0513942\ttotal: 1.93s\tremaining: 826ms\n",
      "70:\tlearn: 0.0509703\ttotal: 1.95s\tremaining: 795ms\n",
      "71:\tlearn: 0.0499992\ttotal: 1.97s\tremaining: 767ms\n",
      "72:\tlearn: 0.0488467\ttotal: 2s\tremaining: 739ms\n",
      "73:\tlearn: 0.0483650\ttotal: 2.02s\tremaining: 711ms\n",
      "74:\tlearn: 0.0476764\ttotal: 2.05s\tremaining: 683ms\n",
      "75:\tlearn: 0.0473583\ttotal: 2.07s\tremaining: 654ms\n",
      "76:\tlearn: 0.0470609\ttotal: 2.09s\tremaining: 624ms\n",
      "77:\tlearn: 0.0462564\ttotal: 2.11s\tremaining: 596ms\n",
      "78:\tlearn: 0.0454764\ttotal: 2.14s\tremaining: 570ms\n",
      "79:\tlearn: 0.0449971\ttotal: 2.17s\tremaining: 542ms\n",
      "80:\tlearn: 0.0445082\ttotal: 2.19s\tremaining: 514ms\n",
      "81:\tlearn: 0.0443002\ttotal: 2.21s\tremaining: 486ms\n",
      "82:\tlearn: 0.0440945\ttotal: 2.23s\tremaining: 457ms\n",
      "83:\tlearn: 0.0436566\ttotal: 2.25s\tremaining: 429ms\n",
      "84:\tlearn: 0.0435365\ttotal: 2.28s\tremaining: 402ms\n",
      "85:\tlearn: 0.0430132\ttotal: 2.3s\tremaining: 375ms\n",
      "86:\tlearn: 0.0423929\ttotal: 2.33s\tremaining: 348ms\n",
      "87:\tlearn: 0.0420464\ttotal: 2.35s\tremaining: 321ms\n",
      "88:\tlearn: 0.0414882\ttotal: 2.37s\tremaining: 293ms\n",
      "89:\tlearn: 0.0407886\ttotal: 2.4s\tremaining: 267ms\n",
      "90:\tlearn: 0.0405933\ttotal: 2.42s\tremaining: 240ms\n",
      "91:\tlearn: 0.0402167\ttotal: 2.45s\tremaining: 213ms\n",
      "92:\tlearn: 0.0396681\ttotal: 2.47s\tremaining: 186ms\n",
      "93:\tlearn: 0.0394725\ttotal: 2.49s\tremaining: 159ms\n",
      "94:\tlearn: 0.0388928\ttotal: 2.52s\tremaining: 133ms\n",
      "95:\tlearn: 0.0386097\ttotal: 2.55s\tremaining: 106ms\n",
      "96:\tlearn: 0.0381861\ttotal: 2.57s\tremaining: 79.5ms\n",
      "97:\tlearn: 0.0380259\ttotal: 2.59s\tremaining: 52.9ms\n",
      "98:\tlearn: 0.0377404\ttotal: 2.62s\tremaining: 26.4ms\n",
      "99:\tlearn: 0.0375048\ttotal: 2.64s\tremaining: 0us\n",
      "0:\tlearn: 1.3022499\ttotal: 40ms\tremaining: 3.96s\n",
      "1:\tlearn: 1.1024183\ttotal: 70.6ms\tremaining: 3.46s\n",
      "2:\tlearn: 0.9519779\ttotal: 103ms\tremaining: 3.32s\n",
      "3:\tlearn: 0.8384388\ttotal: 131ms\tremaining: 3.14s\n",
      "4:\tlearn: 0.7420122\ttotal: 164ms\tremaining: 3.12s\n",
      "5:\tlearn: 0.6676978\ttotal: 195ms\tremaining: 3.06s\n",
      "6:\tlearn: 0.6021379\ttotal: 226ms\tremaining: 3s\n",
      "7:\tlearn: 0.5471536\ttotal: 257ms\tremaining: 2.95s\n",
      "8:\tlearn: 0.4996019\ttotal: 286ms\tremaining: 2.89s\n",
      "9:\tlearn: 0.4575002\ttotal: 315ms\tremaining: 2.83s\n",
      "10:\tlearn: 0.4210297\ttotal: 347ms\tremaining: 2.81s\n",
      "11:\tlearn: 0.3897618\ttotal: 377ms\tremaining: 2.77s\n",
      "12:\tlearn: 0.3605835\ttotal: 409ms\tremaining: 2.74s\n",
      "13:\tlearn: 0.3352899\ttotal: 437ms\tremaining: 2.68s\n",
      "14:\tlearn: 0.3107561\ttotal: 472ms\tremaining: 2.67s\n",
      "15:\tlearn: 0.2898983\ttotal: 500ms\tremaining: 2.62s\n",
      "16:\tlearn: 0.2707170\ttotal: 531ms\tremaining: 2.59s\n",
      "17:\tlearn: 0.2525026\ttotal: 565ms\tremaining: 2.57s\n",
      "18:\tlearn: 0.2365684\ttotal: 601ms\tremaining: 2.56s\n",
      "19:\tlearn: 0.2223573\ttotal: 633ms\tremaining: 2.53s\n",
      "20:\tlearn: 0.2106872\ttotal: 658ms\tremaining: 2.48s\n",
      "21:\tlearn: 0.1993377\ttotal: 682ms\tremaining: 2.42s\n",
      "22:\tlearn: 0.1897088\ttotal: 709ms\tremaining: 2.37s\n",
      "23:\tlearn: 0.1805807\ttotal: 738ms\tremaining: 2.34s\n",
      "24:\tlearn: 0.1713848\ttotal: 770ms\tremaining: 2.31s\n",
      "25:\tlearn: 0.1618354\ttotal: 800ms\tremaining: 2.27s\n",
      "26:\tlearn: 0.1539801\ttotal: 833ms\tremaining: 2.25s\n",
      "27:\tlearn: 0.1464995\ttotal: 868ms\tremaining: 2.23s\n",
      "28:\tlearn: 0.1401352\ttotal: 899ms\tremaining: 2.2s\n",
      "29:\tlearn: 0.1342372\ttotal: 930ms\tremaining: 2.17s\n",
      "30:\tlearn: 0.1285306\ttotal: 959ms\tremaining: 2.13s\n",
      "31:\tlearn: 0.1232217\ttotal: 985ms\tremaining: 2.09s\n",
      "32:\tlearn: 0.1183155\ttotal: 1.01s\tremaining: 2.06s\n",
      "33:\tlearn: 0.1144198\ttotal: 1.04s\tremaining: 2.02s\n",
      "34:\tlearn: 0.1100191\ttotal: 1.07s\tremaining: 1.99s\n",
      "35:\tlearn: 0.1065884\ttotal: 1.1s\tremaining: 1.95s\n",
      "36:\tlearn: 0.1030223\ttotal: 1.13s\tremaining: 1.92s\n",
      "37:\tlearn: 0.0995723\ttotal: 1.16s\tremaining: 1.89s\n",
      "38:\tlearn: 0.0967193\ttotal: 1.18s\tremaining: 1.85s\n",
      "39:\tlearn: 0.0942877\ttotal: 1.21s\tremaining: 1.81s\n",
      "40:\tlearn: 0.0911812\ttotal: 1.23s\tremaining: 1.78s\n",
      "41:\tlearn: 0.0889734\ttotal: 1.26s\tremaining: 1.74s\n",
      "42:\tlearn: 0.0870218\ttotal: 1.28s\tremaining: 1.7s\n",
      "43:\tlearn: 0.0850329\ttotal: 1.31s\tremaining: 1.67s\n",
      "44:\tlearn: 0.0825542\ttotal: 1.34s\tremaining: 1.63s\n",
      "45:\tlearn: 0.0814173\ttotal: 1.36s\tremaining: 1.59s\n",
      "46:\tlearn: 0.0792020\ttotal: 1.39s\tremaining: 1.56s\n",
      "47:\tlearn: 0.0772303\ttotal: 1.42s\tremaining: 1.53s\n",
      "48:\tlearn: 0.0751661\ttotal: 1.44s\tremaining: 1.5s\n",
      "49:\tlearn: 0.0735393\ttotal: 1.47s\tremaining: 1.47s\n",
      "50:\tlearn: 0.0717074\ttotal: 1.49s\tremaining: 1.43s\n",
      "51:\tlearn: 0.0698060\ttotal: 1.52s\tremaining: 1.4s\n",
      "52:\tlearn: 0.0685312\ttotal: 1.54s\tremaining: 1.37s\n",
      "53:\tlearn: 0.0668582\ttotal: 1.57s\tremaining: 1.34s\n",
      "54:\tlearn: 0.0658103\ttotal: 1.59s\tremaining: 1.3s\n",
      "55:\tlearn: 0.0647051\ttotal: 1.61s\tremaining: 1.27s\n",
      "56:\tlearn: 0.0630324\ttotal: 1.64s\tremaining: 1.24s\n",
      "57:\tlearn: 0.0619252\ttotal: 1.67s\tremaining: 1.21s\n",
      "58:\tlearn: 0.0606049\ttotal: 1.69s\tremaining: 1.17s\n",
      "59:\tlearn: 0.0597431\ttotal: 1.71s\tremaining: 1.14s\n",
      "60:\tlearn: 0.0591322\ttotal: 1.73s\tremaining: 1.11s\n",
      "61:\tlearn: 0.0578366\ttotal: 1.76s\tremaining: 1.08s\n",
      "62:\tlearn: 0.0571035\ttotal: 1.78s\tremaining: 1.04s\n",
      "63:\tlearn: 0.0562920\ttotal: 1.8s\tremaining: 1.01s\n",
      "64:\tlearn: 0.0551025\ttotal: 1.83s\tremaining: 986ms\n",
      "65:\tlearn: 0.0542309\ttotal: 1.86s\tremaining: 957ms\n",
      "66:\tlearn: 0.0536708\ttotal: 1.88s\tremaining: 926ms\n",
      "67:\tlearn: 0.0529139\ttotal: 1.91s\tremaining: 897ms\n",
      "68:\tlearn: 0.0522903\ttotal: 1.93s\tremaining: 866ms\n",
      "69:\tlearn: 0.0517758\ttotal: 1.95s\tremaining: 836ms\n",
      "70:\tlearn: 0.0511255\ttotal: 1.97s\tremaining: 806ms\n",
      "71:\tlearn: 0.0504907\ttotal: 2s\tremaining: 776ms\n",
      "72:\tlearn: 0.0498581\ttotal: 2.02s\tremaining: 746ms\n",
      "73:\tlearn: 0.0487537\ttotal: 2.05s\tremaining: 720ms\n",
      "74:\tlearn: 0.0482313\ttotal: 2.07s\tremaining: 691ms\n",
      "75:\tlearn: 0.0479695\ttotal: 2.09s\tremaining: 660ms\n",
      "76:\tlearn: 0.0475310\ttotal: 2.11s\tremaining: 630ms\n",
      "77:\tlearn: 0.0467773\ttotal: 2.13s\tremaining: 602ms\n",
      "78:\tlearn: 0.0463100\ttotal: 2.16s\tremaining: 574ms\n",
      "79:\tlearn: 0.0453515\ttotal: 2.18s\tremaining: 546ms\n",
      "80:\tlearn: 0.0450096\ttotal: 2.2s\tremaining: 517ms\n",
      "81:\tlearn: 0.0445181\ttotal: 2.23s\tremaining: 489ms\n",
      "82:\tlearn: 0.0442467\ttotal: 2.25s\tremaining: 460ms\n",
      "83:\tlearn: 0.0436829\ttotal: 2.27s\tremaining: 432ms\n",
      "84:\tlearn: 0.0432642\ttotal: 2.29s\tremaining: 405ms\n",
      "85:\tlearn: 0.0430744\ttotal: 2.31s\tremaining: 376ms\n",
      "86:\tlearn: 0.0425884\ttotal: 2.34s\tremaining: 349ms\n",
      "87:\tlearn: 0.0422575\ttotal: 2.36s\tremaining: 322ms\n",
      "88:\tlearn: 0.0419314\ttotal: 2.38s\tremaining: 294ms\n",
      "89:\tlearn: 0.0416370\ttotal: 2.4s\tremaining: 267ms\n",
      "90:\tlearn: 0.0407218\ttotal: 2.43s\tremaining: 240ms\n",
      "91:\tlearn: 0.0404844\ttotal: 2.45s\tremaining: 213ms\n",
      "92:\tlearn: 0.0399081\ttotal: 2.47s\tremaining: 186ms\n",
      "93:\tlearn: 0.0396676\ttotal: 2.49s\tremaining: 159ms\n",
      "94:\tlearn: 0.0393958\ttotal: 2.52s\tremaining: 132ms\n",
      "95:\tlearn: 0.0390689\ttotal: 2.54s\tremaining: 106ms\n",
      "96:\tlearn: 0.0388556\ttotal: 2.56s\tremaining: 79.1ms\n",
      "97:\tlearn: 0.0386784\ttotal: 2.58s\tremaining: 52.6ms\n",
      "98:\tlearn: 0.0383835\ttotal: 2.6s\tremaining: 26.3ms\n",
      "99:\tlearn: 0.0381989\ttotal: 2.62s\tremaining: 0us\n",
      "0:\tlearn: 1.2925588\ttotal: 40.8ms\tremaining: 4.04s\n",
      "1:\tlearn: 1.0921758\ttotal: 69.5ms\tremaining: 3.41s\n",
      "2:\tlearn: 0.9450691\ttotal: 98.8ms\tremaining: 3.19s\n",
      "3:\tlearn: 0.8278819\ttotal: 129ms\tremaining: 3.09s\n",
      "4:\tlearn: 0.7351762\ttotal: 162ms\tremaining: 3.08s\n",
      "5:\tlearn: 0.6602318\ttotal: 191ms\tremaining: 2.99s\n",
      "6:\tlearn: 0.5966143\ttotal: 221ms\tremaining: 2.94s\n",
      "7:\tlearn: 0.5419153\ttotal: 251ms\tremaining: 2.89s\n",
      "8:\tlearn: 0.4942512\ttotal: 280ms\tremaining: 2.83s\n",
      "9:\tlearn: 0.4511327\ttotal: 307ms\tremaining: 2.76s\n",
      "10:\tlearn: 0.4142099\ttotal: 334ms\tremaining: 2.7s\n",
      "11:\tlearn: 0.3840689\ttotal: 362ms\tremaining: 2.65s\n",
      "12:\tlearn: 0.3552899\ttotal: 389ms\tremaining: 2.6s\n",
      "13:\tlearn: 0.3285556\ttotal: 419ms\tremaining: 2.57s\n",
      "14:\tlearn: 0.3036275\ttotal: 450ms\tremaining: 2.55s\n",
      "15:\tlearn: 0.2820680\ttotal: 476ms\tremaining: 2.5s\n",
      "16:\tlearn: 0.2615280\ttotal: 509ms\tremaining: 2.48s\n",
      "17:\tlearn: 0.2448606\ttotal: 538ms\tremaining: 2.45s\n",
      "18:\tlearn: 0.2288770\ttotal: 568ms\tremaining: 2.42s\n",
      "19:\tlearn: 0.2154083\ttotal: 594ms\tremaining: 2.38s\n",
      "20:\tlearn: 0.2034037\ttotal: 625ms\tremaining: 2.35s\n",
      "21:\tlearn: 0.1929062\ttotal: 648ms\tremaining: 2.3s\n",
      "22:\tlearn: 0.1832323\ttotal: 676ms\tremaining: 2.26s\n",
      "23:\tlearn: 0.1741424\ttotal: 704ms\tremaining: 2.23s\n",
      "24:\tlearn: 0.1664280\ttotal: 730ms\tremaining: 2.19s\n",
      "25:\tlearn: 0.1585746\ttotal: 761ms\tremaining: 2.16s\n",
      "26:\tlearn: 0.1503802\ttotal: 794ms\tremaining: 2.15s\n",
      "27:\tlearn: 0.1437673\ttotal: 820ms\tremaining: 2.11s\n",
      "28:\tlearn: 0.1367252\ttotal: 850ms\tremaining: 2.08s\n",
      "29:\tlearn: 0.1307297\ttotal: 879ms\tremaining: 2.05s\n",
      "30:\tlearn: 0.1259369\ttotal: 904ms\tremaining: 2.01s\n",
      "31:\tlearn: 0.1213449\ttotal: 929ms\tremaining: 1.97s\n",
      "32:\tlearn: 0.1161064\ttotal: 957ms\tremaining: 1.94s\n",
      "33:\tlearn: 0.1116534\ttotal: 987ms\tremaining: 1.92s\n",
      "34:\tlearn: 0.1075654\ttotal: 1.01s\tremaining: 1.89s\n",
      "35:\tlearn: 0.1045820\ttotal: 1.04s\tremaining: 1.85s\n",
      "36:\tlearn: 0.1011010\ttotal: 1.06s\tremaining: 1.81s\n",
      "37:\tlearn: 0.0980276\ttotal: 1.09s\tremaining: 1.77s\n",
      "38:\tlearn: 0.0959070\ttotal: 1.11s\tremaining: 1.74s\n",
      "39:\tlearn: 0.0930766\ttotal: 1.13s\tremaining: 1.7s\n",
      "40:\tlearn: 0.0905885\ttotal: 1.16s\tremaining: 1.67s\n",
      "41:\tlearn: 0.0871323\ttotal: 1.2s\tremaining: 1.65s\n",
      "42:\tlearn: 0.0849702\ttotal: 1.22s\tremaining: 1.62s\n",
      "43:\tlearn: 0.0831507\ttotal: 1.24s\tremaining: 1.58s\n",
      "44:\tlearn: 0.0804565\ttotal: 1.27s\tremaining: 1.55s\n",
      "45:\tlearn: 0.0784689\ttotal: 1.29s\tremaining: 1.52s\n",
      "46:\tlearn: 0.0765677\ttotal: 1.32s\tremaining: 1.48s\n",
      "47:\tlearn: 0.0743500\ttotal: 1.34s\tremaining: 1.45s\n",
      "48:\tlearn: 0.0723120\ttotal: 1.37s\tremaining: 1.42s\n",
      "49:\tlearn: 0.0710448\ttotal: 1.39s\tremaining: 1.39s\n",
      "50:\tlearn: 0.0692804\ttotal: 1.42s\tremaining: 1.36s\n",
      "51:\tlearn: 0.0679736\ttotal: 1.44s\tremaining: 1.33s\n",
      "52:\tlearn: 0.0662711\ttotal: 1.46s\tremaining: 1.3s\n",
      "53:\tlearn: 0.0648655\ttotal: 1.49s\tremaining: 1.27s\n",
      "54:\tlearn: 0.0639379\ttotal: 1.51s\tremaining: 1.24s\n",
      "55:\tlearn: 0.0626371\ttotal: 1.54s\tremaining: 1.21s\n",
      "56:\tlearn: 0.0617352\ttotal: 1.56s\tremaining: 1.18s\n",
      "57:\tlearn: 0.0611055\ttotal: 1.58s\tremaining: 1.14s\n",
      "58:\tlearn: 0.0599240\ttotal: 1.6s\tremaining: 1.11s\n",
      "59:\tlearn: 0.0586717\ttotal: 1.63s\tremaining: 1.09s\n",
      "60:\tlearn: 0.0578384\ttotal: 1.65s\tremaining: 1.06s\n",
      "61:\tlearn: 0.0568735\ttotal: 1.68s\tremaining: 1.03s\n",
      "62:\tlearn: 0.0557404\ttotal: 1.7s\tremaining: 1s\n",
      "63:\tlearn: 0.0547818\ttotal: 1.73s\tremaining: 972ms\n",
      "64:\tlearn: 0.0538302\ttotal: 1.75s\tremaining: 945ms\n",
      "65:\tlearn: 0.0529353\ttotal: 1.77s\tremaining: 915ms\n",
      "66:\tlearn: 0.0518245\ttotal: 1.8s\tremaining: 888ms\n",
      "67:\tlearn: 0.0507922\ttotal: 1.83s\tremaining: 861ms\n",
      "68:\tlearn: 0.0503602\ttotal: 1.85s\tremaining: 832ms\n",
      "69:\tlearn: 0.0496037\ttotal: 1.87s\tremaining: 803ms\n",
      "70:\tlearn: 0.0489279\ttotal: 1.9s\tremaining: 775ms\n",
      "71:\tlearn: 0.0481017\ttotal: 1.92s\tremaining: 747ms\n",
      "72:\tlearn: 0.0476261\ttotal: 1.94s\tremaining: 718ms\n",
      "73:\tlearn: 0.0466326\ttotal: 1.97s\tremaining: 691ms\n",
      "74:\tlearn: 0.0462809\ttotal: 1.99s\tremaining: 663ms\n",
      "75:\tlearn: 0.0459802\ttotal: 2.01s\tremaining: 634ms\n",
      "76:\tlearn: 0.0455048\ttotal: 2.03s\tremaining: 606ms\n",
      "77:\tlearn: 0.0451628\ttotal: 2.05s\tremaining: 578ms\n",
      "78:\tlearn: 0.0446152\ttotal: 2.08s\tremaining: 552ms\n",
      "79:\tlearn: 0.0441520\ttotal: 2.1s\tremaining: 526ms\n",
      "80:\tlearn: 0.0438318\ttotal: 2.13s\tremaining: 499ms\n",
      "81:\tlearn: 0.0434173\ttotal: 2.15s\tremaining: 472ms\n",
      "82:\tlearn: 0.0428884\ttotal: 2.17s\tremaining: 445ms\n",
      "83:\tlearn: 0.0426495\ttotal: 2.19s\tremaining: 418ms\n",
      "84:\tlearn: 0.0425237\ttotal: 2.21s\tremaining: 391ms\n",
      "85:\tlearn: 0.0421707\ttotal: 2.23s\tremaining: 364ms\n",
      "86:\tlearn: 0.0419402\ttotal: 2.25s\tremaining: 337ms\n",
      "87:\tlearn: 0.0417143\ttotal: 2.27s\tremaining: 310ms\n",
      "88:\tlearn: 0.0410692\ttotal: 2.29s\tremaining: 284ms\n",
      "89:\tlearn: 0.0407458\ttotal: 2.32s\tremaining: 257ms\n",
      "90:\tlearn: 0.0404674\ttotal: 2.34s\tremaining: 231ms\n",
      "91:\tlearn: 0.0398985\ttotal: 2.36s\tremaining: 205ms\n",
      "92:\tlearn: 0.0396474\ttotal: 2.38s\tremaining: 179ms\n",
      "93:\tlearn: 0.0393737\ttotal: 2.4s\tremaining: 153ms\n",
      "94:\tlearn: 0.0391019\ttotal: 2.42s\tremaining: 127ms\n",
      "95:\tlearn: 0.0388814\ttotal: 2.44s\tremaining: 102ms\n",
      "96:\tlearn: 0.0386237\ttotal: 2.46s\tremaining: 76.2ms\n",
      "97:\tlearn: 0.0383739\ttotal: 2.48s\tremaining: 50.6ms\n",
      "98:\tlearn: 0.0382698\ttotal: 2.5s\tremaining: 25.3ms\n",
      "99:\tlearn: 0.0380434\ttotal: 2.52s\tremaining: 0us\n",
      "0:\tlearn: 1.2918738\ttotal: 38.4ms\tremaining: 3.81s\n",
      "1:\tlearn: 1.0926608\ttotal: 65.6ms\tremaining: 3.21s\n",
      "2:\tlearn: 0.9436932\ttotal: 94.8ms\tremaining: 3.06s\n",
      "3:\tlearn: 0.8322893\ttotal: 121ms\tremaining: 2.91s\n",
      "4:\tlearn: 0.7385567\ttotal: 152ms\tremaining: 2.89s\n",
      "5:\tlearn: 0.6658442\ttotal: 181ms\tremaining: 2.84s\n",
      "6:\tlearn: 0.6012735\ttotal: 211ms\tremaining: 2.8s\n",
      "7:\tlearn: 0.5468855\ttotal: 239ms\tremaining: 2.75s\n",
      "8:\tlearn: 0.5015749\ttotal: 264ms\tremaining: 2.67s\n",
      "9:\tlearn: 0.4600336\ttotal: 294ms\tremaining: 2.65s\n",
      "10:\tlearn: 0.4227890\ttotal: 324ms\tremaining: 2.62s\n",
      "11:\tlearn: 0.3919836\ttotal: 352ms\tremaining: 2.58s\n",
      "12:\tlearn: 0.3620891\ttotal: 383ms\tremaining: 2.56s\n",
      "13:\tlearn: 0.3373968\ttotal: 410ms\tremaining: 2.52s\n",
      "14:\tlearn: 0.3121272\ttotal: 440ms\tremaining: 2.49s\n",
      "15:\tlearn: 0.2919116\ttotal: 464ms\tremaining: 2.44s\n",
      "16:\tlearn: 0.2736594\ttotal: 491ms\tremaining: 2.4s\n",
      "17:\tlearn: 0.2556576\ttotal: 522ms\tremaining: 2.38s\n",
      "18:\tlearn: 0.2393883\ttotal: 550ms\tremaining: 2.34s\n",
      "19:\tlearn: 0.2256700\ttotal: 579ms\tremaining: 2.31s\n",
      "20:\tlearn: 0.2127304\ttotal: 605ms\tremaining: 2.27s\n",
      "21:\tlearn: 0.2010504\ttotal: 630ms\tremaining: 2.23s\n",
      "22:\tlearn: 0.1898850\ttotal: 660ms\tremaining: 2.21s\n",
      "23:\tlearn: 0.1811949\ttotal: 686ms\tremaining: 2.17s\n",
      "24:\tlearn: 0.1717603\ttotal: 713ms\tremaining: 2.14s\n",
      "25:\tlearn: 0.1641186\ttotal: 736ms\tremaining: 2.09s\n",
      "26:\tlearn: 0.1559937\ttotal: 769ms\tremaining: 2.08s\n",
      "27:\tlearn: 0.1487474\ttotal: 797ms\tremaining: 2.05s\n",
      "28:\tlearn: 0.1429226\ttotal: 821ms\tremaining: 2.01s\n",
      "29:\tlearn: 0.1363823\ttotal: 850ms\tremaining: 1.98s\n",
      "30:\tlearn: 0.1316301\ttotal: 874ms\tremaining: 1.94s\n",
      "31:\tlearn: 0.1263136\ttotal: 902ms\tremaining: 1.92s\n",
      "32:\tlearn: 0.1210763\ttotal: 930ms\tremaining: 1.89s\n",
      "33:\tlearn: 0.1165966\ttotal: 958ms\tremaining: 1.86s\n",
      "34:\tlearn: 0.1129685\ttotal: 982ms\tremaining: 1.82s\n",
      "35:\tlearn: 0.1099940\ttotal: 1s\tremaining: 1.79s\n",
      "36:\tlearn: 0.1061850\ttotal: 1.03s\tremaining: 1.76s\n",
      "37:\tlearn: 0.1027215\ttotal: 1.06s\tremaining: 1.73s\n",
      "38:\tlearn: 0.0995317\ttotal: 1.08s\tremaining: 1.7s\n",
      "39:\tlearn: 0.0968168\ttotal: 1.11s\tremaining: 1.67s\n",
      "40:\tlearn: 0.0935590\ttotal: 1.14s\tremaining: 1.64s\n",
      "41:\tlearn: 0.0913641\ttotal: 1.16s\tremaining: 1.6s\n",
      "42:\tlearn: 0.0897569\ttotal: 1.18s\tremaining: 1.56s\n",
      "43:\tlearn: 0.0882721\ttotal: 1.2s\tremaining: 1.53s\n",
      "44:\tlearn: 0.0854098\ttotal: 1.23s\tremaining: 1.5s\n",
      "45:\tlearn: 0.0831270\ttotal: 1.26s\tremaining: 1.48s\n",
      "46:\tlearn: 0.0804062\ttotal: 1.29s\tremaining: 1.45s\n",
      "47:\tlearn: 0.0785160\ttotal: 1.32s\tremaining: 1.43s\n",
      "48:\tlearn: 0.0769956\ttotal: 1.34s\tremaining: 1.4s\n",
      "49:\tlearn: 0.0753339\ttotal: 1.36s\tremaining: 1.36s\n",
      "50:\tlearn: 0.0737308\ttotal: 1.39s\tremaining: 1.34s\n",
      "51:\tlearn: 0.0722534\ttotal: 1.42s\tremaining: 1.31s\n",
      "52:\tlearn: 0.0711617\ttotal: 1.44s\tremaining: 1.27s\n",
      "53:\tlearn: 0.0693974\ttotal: 1.46s\tremaining: 1.25s\n",
      "54:\tlearn: 0.0684846\ttotal: 1.48s\tremaining: 1.21s\n",
      "55:\tlearn: 0.0668126\ttotal: 1.5s\tremaining: 1.18s\n",
      "56:\tlearn: 0.0649829\ttotal: 1.53s\tremaining: 1.16s\n",
      "57:\tlearn: 0.0637784\ttotal: 1.56s\tremaining: 1.13s\n",
      "58:\tlearn: 0.0626465\ttotal: 1.58s\tremaining: 1.1s\n",
      "59:\tlearn: 0.0618539\ttotal: 1.6s\tremaining: 1.07s\n",
      "60:\tlearn: 0.0613060\ttotal: 1.62s\tremaining: 1.04s\n",
      "61:\tlearn: 0.0602356\ttotal: 1.64s\tremaining: 1.01s\n",
      "62:\tlearn: 0.0589062\ttotal: 1.67s\tremaining: 980ms\n",
      "63:\tlearn: 0.0572906\ttotal: 1.7s\tremaining: 953ms\n",
      "64:\tlearn: 0.0563983\ttotal: 1.72s\tremaining: 925ms\n",
      "65:\tlearn: 0.0558617\ttotal: 1.74s\tremaining: 897ms\n",
      "66:\tlearn: 0.0551628\ttotal: 1.76s\tremaining: 869ms\n",
      "67:\tlearn: 0.0545192\ttotal: 1.79s\tremaining: 842ms\n",
      "68:\tlearn: 0.0532528\ttotal: 1.81s\tremaining: 815ms\n",
      "69:\tlearn: 0.0523664\ttotal: 1.84s\tremaining: 787ms\n",
      "70:\tlearn: 0.0516589\ttotal: 1.86s\tremaining: 759ms\n",
      "71:\tlearn: 0.0509093\ttotal: 1.88s\tremaining: 732ms\n",
      "72:\tlearn: 0.0504318\ttotal: 1.91s\tremaining: 705ms\n",
      "73:\tlearn: 0.0494711\ttotal: 1.93s\tremaining: 679ms\n",
      "74:\tlearn: 0.0487066\ttotal: 1.95s\tremaining: 651ms\n",
      "75:\tlearn: 0.0480724\ttotal: 1.98s\tremaining: 625ms\n",
      "76:\tlearn: 0.0477068\ttotal: 2s\tremaining: 597ms\n",
      "77:\tlearn: 0.0472761\ttotal: 2.02s\tremaining: 571ms\n",
      "78:\tlearn: 0.0463486\ttotal: 2.05s\tremaining: 546ms\n",
      "79:\tlearn: 0.0458530\ttotal: 2.07s\tremaining: 518ms\n",
      "80:\tlearn: 0.0455569\ttotal: 2.09s\tremaining: 491ms\n",
      "81:\tlearn: 0.0452026\ttotal: 2.12s\tremaining: 465ms\n",
      "82:\tlearn: 0.0447890\ttotal: 2.14s\tremaining: 439ms\n",
      "83:\tlearn: 0.0441592\ttotal: 2.17s\tremaining: 413ms\n",
      "84:\tlearn: 0.0437127\ttotal: 2.19s\tremaining: 387ms\n",
      "85:\tlearn: 0.0434905\ttotal: 2.21s\tremaining: 360ms\n",
      "86:\tlearn: 0.0429287\ttotal: 2.24s\tremaining: 334ms\n",
      "87:\tlearn: 0.0426733\ttotal: 2.26s\tremaining: 308ms\n",
      "88:\tlearn: 0.0421925\ttotal: 2.28s\tremaining: 282ms\n",
      "89:\tlearn: 0.0413553\ttotal: 2.31s\tremaining: 256ms\n",
      "90:\tlearn: 0.0412135\ttotal: 2.32s\tremaining: 230ms\n",
      "91:\tlearn: 0.0408335\ttotal: 2.35s\tremaining: 205ms\n",
      "92:\tlearn: 0.0405268\ttotal: 2.37s\tremaining: 178ms\n",
      "93:\tlearn: 0.0400822\ttotal: 2.39s\tremaining: 153ms\n",
      "94:\tlearn: 0.0396850\ttotal: 2.42s\tremaining: 127ms\n",
      "95:\tlearn: 0.0392478\ttotal: 2.44s\tremaining: 102ms\n",
      "96:\tlearn: 0.0389379\ttotal: 2.46s\tremaining: 76.2ms\n",
      "97:\tlearn: 0.0385956\ttotal: 2.49s\tremaining: 50.8ms\n",
      "98:\tlearn: 0.0383235\ttotal: 2.51s\tremaining: 25.4ms\n",
      "99:\tlearn: 0.0379333\ttotal: 2.53s\tremaining: 0us\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2       3     4\n",
      "0  22981.0     26.0    34.0    64.0   5.0\n",
      "1     26.0  15997.0     9.0     0.0   0.0\n",
      "2     66.0     10.0  4148.0     2.0   0.0\n",
      "3     83.0      1.0     2.0  1068.0   1.0\n",
      "4      9.0      0.0     0.0    11.0  13.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9921671604273274\n",
      "Precision total:  0.9191957663018202\n",
      "Recall total:  0.8584784831693989\n",
      "F1 total:  0.8810153121413229\n",
      "BACC total:  0.8584784831693989\n",
      "MCC total:  0.9867546568971854\n"
     ]
    }
   ],
   "source": [
    "# --- Bagging ensemble with CatBoost base learners ---\n",
    "# Trains a BaggingClassifier of 10 CatBoost models on (X_train, y_train),\n",
    "# evaluates on X_test, and stores the metrics in module-level globals\n",
    "# named '<name>_<metric>_00' for later aggregation cells.\n",
    "import catboost\n",
    "start = time.time()\n",
    "\n",
    "bag_cat = catboost.CatBoostClassifier(iterations=100, depth=6, learning_rate=0.1, loss_function='MultiClass', custom_metric='Accuracy')\n",
    "\n",
    "base_classifier = bag_cat\n",
    "\n",
    "# Define the BaggingClassifier\n",
    "# NOTE(review): each of the 10 bags refits CatBoost from scratch; the\n",
    "# per-iteration CatBoost log in the recorded output comes from these fits.\n",
    "bagging_classifier = BaggingClassifier(base_classifier, n_estimators=10, random_state=42)\n",
    "\n",
    "# Train the BaggingClassifier\n",
    "bagging_classifier.fit(X_train, y_train)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = bagging_classifier.predict(X_test)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('--------------------------------------------------------------------------', file = f)\n",
    "\n",
    "name = 'bag_cat'\n",
    "\n",
    "pred_label = y_pred\n",
    "\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "# confusion_metrics (defined earlier in the notebook) returns a list\n",
    "# indexed as [acc, precision, recall, f1, bacc, mcc].\n",
    "metrics = confusion_metrics(name, pred_label, y_test, time_taken)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "# Export the results under dynamic global names, e.g. bag_cat_acc_00.\n",
    "globals()[f\"{name}_acc_00\"] = Acc\n",
    "globals()[f\"{name}_pre_00\"] = Precision\n",
    "globals()[f\"{name}_rec_00\"] = Recall\n",
    "globals()[f\"{name}_f1_00\"] = F1\n",
    "globals()[f\"{name}_bacc_00\"] = BACC\n",
    "globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "globals()[f\"{name}_time_00\"] = time_taken\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Bagging Combined"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0:\tlearn: 1.2956596\ttest: 1.2971494\tbest: 1.2971494 (0)\ttotal: 46.4ms\tremaining: 4.6s\n",
      "10:\tlearn: 0.4130708\ttest: 0.4174936\tbest: 0.4174936 (10)\ttotal: 339ms\tremaining: 2.74s\n",
      "20:\tlearn: 0.2047760\ttest: 0.2078961\tbest: 0.2078961 (20)\ttotal: 626ms\tremaining: 2.35s\n",
      "30:\tlearn: 0.1279137\ttest: 0.1301767\tbest: 0.1301767 (30)\ttotal: 895ms\tremaining: 1.99s\n",
      "40:\tlearn: 0.0917756\ttest: 0.0934984\tbest: 0.0934984 (40)\ttotal: 1.16s\tremaining: 1.67s\n",
      "50:\tlearn: 0.0739342\ttest: 0.0754584\tbest: 0.0754584 (50)\ttotal: 1.39s\tremaining: 1.34s\n",
      "60:\tlearn: 0.0610582\ttest: 0.0628873\tbest: 0.0628873 (60)\ttotal: 1.63s\tremaining: 1.04s\n",
      "70:\tlearn: 0.0526105\ttest: 0.0545140\tbest: 0.0545140 (70)\ttotal: 1.86s\tremaining: 760ms\n",
      "80:\tlearn: 0.0471213\ttest: 0.0490687\tbest: 0.0490687 (80)\ttotal: 2.08s\tremaining: 488ms\n",
      "90:\tlearn: 0.0439232\ttest: 0.0461529\tbest: 0.0461529 (90)\ttotal: 2.29s\tremaining: 226ms\n",
      "99:\tlearn: 0.0401535\ttest: 0.0424004\tbest: 0.0424004 (99)\ttotal: 2.48s\tremaining: 0us\n",
      "\n",
      "bestTest = 0.04240041501\n",
      "bestIteration = 99\n",
      "\n",
      "Epoch 1/100\n",
      "650/650 [==============================] - 3s 3ms/step - loss: 0.8301 - accuracy: 0.7601 - val_loss: 0.3923 - val_accuracy: 0.8566\n",
      "Epoch 2/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.5415 - accuracy: 0.8311 - val_loss: 0.3733 - val_accuracy: 0.8592\n",
      "Epoch 3/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4855 - accuracy: 0.8393 - val_loss: 0.3691 - val_accuracy: 0.8600\n",
      "Epoch 4/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4654 - accuracy: 0.8425 - val_loss: 0.3682 - val_accuracy: 0.8584\n",
      "Epoch 5/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4538 - accuracy: 0.8426 - val_loss: 0.3726 - val_accuracy: 0.8579\n",
      "Epoch 6/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4492 - accuracy: 0.8427 - val_loss: 0.3799 - val_accuracy: 0.8597\n",
      "Epoch 7/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4414 - accuracy: 0.8427 - val_loss: 0.3874 - val_accuracy: 0.8585\n",
      "Epoch 8/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4386 - accuracy: 0.8439 - val_loss: 0.3891 - val_accuracy: 0.8594\n",
      "Epoch 9/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4396 - accuracy: 0.8428 - val_loss: 0.3899 - val_accuracy: 0.8575\n",
      "Epoch 10/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4355 - accuracy: 0.8447 - val_loss: 0.3992 - val_accuracy: 0.8588\n",
      "Epoch 11/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4357 - accuracy: 0.8435 - val_loss: 0.3955 - val_accuracy: 0.8575\n",
      "Epoch 12/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4347 - accuracy: 0.8428 - val_loss: 0.4086 - val_accuracy: 0.8575\n",
      "Epoch 13/100\n",
      "650/650 [==============================] - 2s 3ms/step - loss: 0.4323 - accuracy: 0.8444 - val_loss: 0.4111 - val_accuracy: 0.8566\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "       model_0  model_1  model_2  model_3  model_4  model_5  model_6  model_7  \\\n",
      "0            0        0        0        0        0        0        0        0   \n",
      "1            1        2        1        1        1        1        1        1   \n",
      "2            0        0        0        0        0        0        0        0   \n",
      "3            1        2        1        1        1        1        1        1   \n",
      "4            0        0        0        0        0        0        0        0   \n",
      "...        ...      ...      ...      ...      ...      ...      ...      ...   \n",
      "44551        0        0        0        0        0        0        0        0   \n",
      "44552        1        1        1        1        1        1        1        1   \n",
      "44553        0        0        0        0        0        0        0        0   \n",
      "44554        2        2        2        2        2        2        2        1   \n",
      "44555        0        0        0        0        0        0        0        0   \n",
      "\n",
      "       model_8  model_9  \n",
      "0            0      0.0  \n",
      "1            1      1.0  \n",
      "2            0      0.0  \n",
      "3            1      1.0  \n",
      "4            0      0.0  \n",
      "...        ...      ...  \n",
      "44551        0      0.0  \n",
      "44552        1      1.0  \n",
      "44553        0      0.0  \n",
      "44554        2      2.0  \n",
      "44555        0      0.0  \n",
      "\n",
      "[44556 rows x 10 columns]\n",
      "       model_0  model_1  model_2  model_3  model_4  model_5  model_6  model_7  \\\n",
      "0            0        0        0        0        0        0        0        0   \n",
      "1            1        2        1        1        1        1        1        1   \n",
      "2            0        0        0        0        0        0        0        0   \n",
      "3            1        2        1        1        1        1        1        1   \n",
      "4            0        0        0        0        0        0        0        0   \n",
      "...        ...      ...      ...      ...      ...      ...      ...      ...   \n",
      "44551        0        0        0        0        0        0        0        0   \n",
      "44552        1        1        1        1        1        1        1        1   \n",
      "44553        0        0        0        0        0        0        0        0   \n",
      "44554        2        2        2        2        2        2        2        1   \n",
      "44555        0        0        0        0        0        0        0        0   \n",
      "\n",
      "       model_8  model_9  ensemble  \n",
      "0            0      0.0         0  \n",
      "1            1      1.0         1  \n",
      "2            0      0.0         0  \n",
      "3            1      1.0         1  \n",
      "4            0      0.0         0  \n",
      "...        ...      ...       ...  \n",
      "44551        0      0.0         0  \n",
      "44552        1      1.0         1  \n",
      "44553        0      0.0         0  \n",
      "44554        2      2.0         2  \n",
      "44555        0      0.0         0  \n",
      "\n",
      "[44556 rows x 11 columns]\n",
      "---------------------------------------------------------------------------------\n",
      "CONFUSION MATRIX\n",
      "---------------------------------------------------------------------------------\n",
      "         0        1       2      3     4\n",
      "0  22992.0     25.0    51.0   40.0   2.0\n",
      "1    111.0  15920.0     1.0    0.0   0.0\n",
      "2     95.0      9.0  4118.0    4.0   0.0\n",
      "3    160.0      1.0     0.0  991.0   3.0\n",
      "4     12.0      1.0     0.0    3.0  17.0\n",
      "---------------------------------------------------------------------------------\n",
      "METRICS\n",
      "---------------------------------------------------------------------------------\n",
      "Accuracy total:  0.9883741808061764\n",
      "Precision total:  0.9393094155911557\n",
      "Recall total:  0.8671024098205928\n",
      "F1 total:  0.8975223805519974\n",
      "BACC total:  0.8671024098205928\n",
      "MCC total:  0.98032734176107\n"
     ]
    }
   ],
   "source": [
    "### Bagging with many models\n",
    "##### do bootstrapping \n",
    "##### 1. Multiple subsets are created from the original dataset, selecting observations with replacement.\n",
    "\n",
    "start = time.time()\n",
    "\n",
    "num_bootstraps = 10  # Adjust the number of bootstraps as needed\n",
    "\n",
    "# Each bootstrap is a same-sized resample (with replacement) of the training\n",
    "# set, with the target re-attached as a 'label' column.\n",
    "original_data_df = X_train.assign(label = y_train)\n",
    "boot_df = []\n",
    "for i in range(0,num_bootstraps): \n",
    "    boot_df.append(original_data_df.sample(frac = 1, replace=True).reset_index(drop=True))\n",
    "\n",
    "# boot_df[5]\n",
    "\n",
    "#### 2.A base model (weak model) is created on each of these subsets.\n",
    "bag_comb_pred = []\n",
    "\n",
    "# SVM (linear SVM trained with SGD) on bootstrap 0\n",
    "from sklearn.linear_model import SGDClassifier\n",
    "clf = SGDClassifier(\n",
    "    loss='hinge',           # hinge loss for linear SVM\n",
    "    penalty='l2',           # L2 regularization to prevent overfitting\n",
    "    alpha=1e-4,             # Learning rate (small value for fine-grained updates)\n",
    "    max_iter=1000,          # Number of passes over the training data\n",
    "    random_state=42,        # Seed for reproducible results\n",
    "    learning_rate='optimal' # Automatically adjusts the learning rate based on the training data\n",
    ")\n",
    "# NOTE: pop('label') mutates boot_df[i] in place, so this cell is not\n",
    "# idempotent -- re-running it requires rebuilding boot_df first.\n",
    "y_train_boot = boot_df[0].pop('label')\n",
    "X_train_boot = boot_df[0]\n",
    "clf.fit(X_train_boot, y_train_boot)\n",
    "preds_svm_00 = clf.predict(X_test)\n",
    "bag_comb_pred.append(preds_svm_00)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# ADA on bootstrap 1\n",
    "from sklearn.ensemble import AdaBoostClassifier\n",
    "abc = AdaBoostClassifier(n_estimators=50, learning_rate=1.0)\n",
    "# FIX: train AdaBoost on its bootstrap sample (boot_df[1]) instead of the\n",
    "# full training set; previously X_train_boot/y_train_boot were popped but\n",
    "# never used here, which defeated the bagging scheme for this learner.\n",
    "y_train_boot = boot_df[1].pop('label')\n",
    "X_train_boot = boot_df[1]\n",
    "ada = abc.fit(X_train_boot, y_train_boot)\n",
    "preds_ada_00 = ada.predict(X_test)\n",
    "bag_comb_pred.append(preds_ada_00)\n",
    "\n",
    "# Catboost on bootstrap 2\n",
    "import catboost\n",
    "cat_00 = catboost.CatBoostClassifier(iterations=100, depth=6, learning_rate=0.1, loss_function='MultiClass', custom_metric='Accuracy')\n",
    "y_train_boot = boot_df[2].pop('label')\n",
    "X_train_boot = boot_df[2]\n",
    "cat_00.fit(X_train_boot, y_train_boot, eval_set=(X_test, y_test), verbose=10)\n",
    "preds_cat = cat_00.predict(X_test)\n",
    "preds_cat = np.squeeze(preds_cat)  # CatBoost returns shape (n, 1); flatten to (n,)\n",
    "pred_label = preds_cat\n",
    "bag_comb_pred.append(preds_cat)\n",
    "\n",
    "# MLP on bootstrap 3\n",
    "from sklearn.neural_network import MLPClassifier\n",
    "mlp = MLPClassifier(hidden_layer_sizes=(100,), max_iter=200, random_state=1)\n",
    "y_train_boot = boot_df[3].pop('label')\n",
    "X_train_boot = boot_df[3]\n",
    "# FIX: removed the degenerate 'if 1 == 1 and 0 == 0:' guard (always True).\n",
    "MLP = mlp.fit(X_train_boot, y_train_boot)\n",
    "y_pred = MLP.predict_proba(X_test)\n",
    "preds_mlp_00 = np.argmax(y_pred,axis = 1)\n",
    "\n",
    "bag_comb_pred.append(preds_mlp_00)\n",
    "\n",
    "# LGBM on bootstrap 4\n",
    "from lightgbm import LGBMClassifier\n",
    "lgbm = LGBMClassifier()\n",
    "y_train_boot = boot_df[4].pop('label')\n",
    "X_train_boot = boot_df[4]\n",
    "\n",
    "# FIX: removed another always-true guard; behavior unchanged.\n",
    "lgbm.fit(X_train_boot, y_train_boot)\n",
    "preds_lgbm_00 = lgbm.predict(X_test)\n",
    "bag_comb_pred.append(preds_lgbm_00)\n",
    "\n",
    "# KNN on bootstrap 5\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "knn_clf_00=KNeighborsClassifier(n_neighbors = 5)\n",
    "y_train_boot = boot_df[5].pop('label')\n",
    "X_train_boot = boot_df[5]\n",
    "\n",
    "# Fit unconditionally (original guard was always True); only predict/vote\n",
    "# when KNN is enabled in the configuration cell.\n",
    "knn_clf_00.fit(X_train_boot,y_train_boot)\n",
    "if use_model_knn == 1:\n",
    "    preds_knn =knn_clf_00.predict(X_test)\n",
    "    bag_comb_pred.append(preds_knn)\n",
    "\n",
    "# Random Forest on bootstrap 6\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "rf = RandomForestClassifier(max_depth = 5,  n_estimators = 10, min_samples_split = 2, n_jobs = -1)\n",
    "y_train_boot = boot_df[6].pop('label')\n",
    "X_train_boot = boot_df[6]\n",
    "\n",
    "# FIX: removed 'if True == True:' tautology; behavior unchanged.\n",
    "model_rf_00 = rf.fit(X_train_boot,y_train_boot)\n",
    "preds_rf_00 = model_rf_00.predict(X_test)\n",
    "bag_comb_pred.append(preds_rf_00)\n",
    "#DNN\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Dense\n",
    "#Model Parameters\n",
    "y_train_boot = boot_df[7].pop('label')\n",
    "X_train_boot = boot_df[7]\n",
    "\n",
    "\n",
    "# Hyperparameters for the small feed-forward network (bootstrap 7).\n",
    "# NOTE(review): nodes=3 gives very narrow hidden layers -- confirm this is\n",
    "# intentional and not a leftover from debugging.\n",
    "dropout_rate = 0.2\n",
    "nodes = 3\n",
    "out_layer = 5\n",
    "optimizer='adam'\n",
    "loss='sparse_categorical_crossentropy'\n",
    "epochs=100\n",
    "batch_size=128\n",
    "num_columns = X_train_boot.shape[1]\n",
    "dnn_00 = tf.keras.Sequential()\n",
    "# Input layer\n",
    "dnn_00.add(tf.keras.Input(shape=(num_columns,)))\n",
    "# Dense layers with dropout\n",
    "dnn_00.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_00.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "dnn_00.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_00.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "dnn_00.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_00.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "dnn_00.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_00.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "dnn_00.add(tf.keras.layers.Dense(nodes))\n",
    "dnn_00.add(tf.keras.layers.Dropout(dropout_rate))\n",
    "# Output layer\n",
    "# dnn_00.add(tf.keras.layers.Dense(out_layer))\n",
    "dnn_00.add(tf.keras.layers.Dense(out_layer, activation='softmax'))\n",
    "dnn_00.compile(optimizer=optimizer, loss=loss,metrics=['accuracy'])\n",
    "from keras.callbacks import EarlyStopping\n",
    "# Define EarlyStopping callback\n",
    "# Stops once val_accuracy has not improved for 10 epochs and restores the\n",
    "# best weights seen so far.\n",
    "early_stopping = EarlyStopping(monitor='val_accuracy', patience=10, restore_best_weights=True)\n",
    "dnn_00.fit(X_train_boot, y_train_boot, epochs=epochs, batch_size=batch_size,validation_split=0.2, callbacks=[early_stopping])\n",
    "# predict() returns per-class probabilities; argmax converts to class labels.\n",
    "pred_dnn = dnn_00.predict(X_test)\n",
    "preds_dnn_00 = np.argmax(pred_dnn,axis = 1)\n",
    "bag_comb_pred.append(preds_dnn_00)\n",
    "# Logistic Regression on bootstrap 8\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "# FIX: lbfgs hit its default iteration cap (see the ConvergenceWarning in\n",
    "# the recorded stderr); raise max_iter so the solver can converge.\n",
    "logreg_00 = LogisticRegression(max_iter=1000)\n",
    "y_train_boot = boot_df[8].pop('label')\n",
    "X_train_boot = boot_df[8]\n",
    "\n",
    "logreg_00.fit(X_train_boot,y_train_boot)\n",
    "preds_logreg =logreg_00.predict(X_test)\n",
    "bag_comb_pred.append(preds_logreg)\n",
    "\n",
    "# XGBoost on bootstrap 9\n",
    "import xgboost as xgb\n",
    "y_train_boot = boot_df[9].pop('label')\n",
    "X_train_boot = boot_df[9]\n",
    "\n",
    "# Create a DMatrix for XGBoost\n",
    "dtrain = xgb.DMatrix(X_train_boot, label=y_train_boot)\n",
    "dtest = xgb.DMatrix(X_test, label=y_test)\n",
    "# Set XGBoost parameters\n",
    "params = {\n",
    "    'objective': 'multi:softmax',  # for multi-class classification\n",
    "    'num_class': 5,  # specify the number of classes\n",
    "    'max_depth': 3,\n",
    "    'learning_rate': 0.1,\n",
    "    'eval_metric': 'mlogloss'  # metric for multi-class classification\n",
    "}\n",
    "# Train the XGBoost model\n",
    "num_round = 100\n",
    "xgb_00 = xgb.train(params, dtrain, num_round)\n",
    "# FIX: multi:softmax returns float class ids (the recorded voting table\n",
    "# shows model_9 as 0.0/1.0/...); cast to int so the vote matrix stays\n",
    "# integer-typed like the other models' predictions.\n",
    "preds_xgb_00 = xgb_00.predict(dtest).astype(int)\n",
    "bag_comb_pred.append(preds_xgb_00)\n",
    "### 3. The models run in parallel and are independent of each other.\n",
    "# Collect every base learner's test-set predictions into one DataFrame,\n",
    "# one column per model.\n",
    "bag_vot_df = pd.DataFrame()\n",
    "for i in range(0,len(bag_comb_pred)):\n",
    "    bag_vot_df[f'model_{i}'] =  bag_comb_pred[i]\n",
    "print(bag_vot_df)\n",
    "# Voting start\n",
    "from scipy.stats import mode\n",
    "# bag_comb_pred_df = pd.DataFrame(bag_comb_pred)\n",
    "# Extract predictions columns\n",
    "\n",
    "# predictions = df[['dnn', 'rf', 'lgbm', 'ada', 'knn', 'mlp', 'svm','cat','xgb']]\n",
    "    # selected_columns = df.loc[:, ~df.columns.isin(['rf'])]\n",
    "predictions = bag_vot_df \n",
    "\n",
    "# predictions = bag_comb_pred_df.loc[:, ~df.columns.isin(['label'])] #df[column_features]\n",
    "\n",
    "# Use the mode function along axis 1 to get the most common prediction for each row\n",
    "# (majority vote across the base learners; scipy's mode breaks ties by\n",
    "# returning the smallest value).\n",
    "ensemble_predictions, _ = mode(predictions.values, axis=1)\n",
    "\n",
    "# Add the ensemble predictions to the DataFrame\n",
    "bag_vot_df['ensemble'] = ensemble_predictions.astype(int)\n",
    "\n",
    "# Display the DataFrame with ensemble predictions\n",
    "print(bag_vot_df)\n",
    "\n",
    "# Keep the vote as the ensemble's label vector, then drop the helper column.\n",
    "pred_label = bag_vot_df ['ensemble'].values\n",
    "bag_vot_df.pop('ensemble')\n",
    "\n",
    "\n",
    "name='bag_comb'\n",
    "end = time.time()\n",
    "time_taken = end - start\n",
    "# confusion_metrics returns [acc, precision, recall, f1, bacc, mcc].\n",
    "metrics = confusion_metrics(name, pred_label, y_test,time_taken)\n",
    "\n",
    "Acc = metrics[0]\n",
    "Precision = metrics[1]\n",
    "Recall = metrics[2]\n",
    "F1 = metrics[3]\n",
    "BACC = metrics[4]\n",
    "MCC = metrics[5]    \n",
    "\n",
    "\n",
    "# Export the metrics under dynamic global names, e.g. bag_comb_acc_00.\n",
    "globals()[f\"{name}_acc_00\"] = Acc\n",
    "globals()[f\"{name}_pre_00\"] = Precision\n",
    "globals()[f\"{name}_rec_00\"] = Recall\n",
    "globals()[f\"{name}_f1_00\"] = F1\n",
    "globals()[f\"{name}_bacc_00\"] = BACC\n",
    "globals()[f\"{name}_mcc_00\"] = MCC\n",
    "\n",
    "globals()[f\"{name}_time_00\"] = time_taken\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Creating new dataset for level 01"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "44556 44556\n"
     ]
    }
   ],
   "source": [
    "# Sanity check: one probability row per test sample.\n",
    "print(len(preds_dnn_prob), len(y_test))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "45823     0\n",
      "33694     1\n",
      "86676     0\n",
      "15752     1\n",
      "79342     0\n",
      "         ..\n",
      "6366      0\n",
      "62927     1\n",
      "113180    0\n",
      "51321     2\n",
      "147982    0\n",
      "Name: Label, Length: 44556, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "# Inspect the test labels (the Series still carries the original row index).\n",
    "print(y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "        index  Label\n",
      "0       45823      0\n",
      "1       33694      1\n",
      "2       86676      0\n",
      "3       15752      1\n",
      "4       79342      0\n",
      "...       ...    ...\n",
      "44551    6366      0\n",
      "44552   62927      1\n",
      "44553  113180      0\n",
      "44554   51321      2\n",
      "44555  147982      0\n",
      "\n",
      "[44556 rows x 2 columns]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0         45823\n",
       "1         33694\n",
       "2         86676\n",
       "3         15752\n",
       "4         79342\n",
       "          ...  \n",
       "44551      6366\n",
       "44552     62927\n",
       "44553    113180\n",
       "44554     51321\n",
       "44555    147982\n",
       "Name: index, Length: 44556, dtype: int64"
      ]
     },
     "execution_count": 61,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Move y_test into a positionally-indexed frame so it can be aligned with\n",
    "# the 0..n-1 positions of the prediction arrays built below.\n",
    "df_from_series = y_test.to_frame()\n",
    "y_test_reset_index = df_from_series.reset_index()\n",
    "# y_test2 = y_test.reset_index(inplace=True)\n",
    "print(y_test_reset_index)\n",
    "# pop removes the old-index column; its repr is this cell's displayed output.\n",
    "y_test_reset_index.pop('index')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 62,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Spot-check: label of the first test sample after the index reset.\n",
    "y_test_reset_index.values[0][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build level-01 features: for each test sample, take every base model's\n",
    "# predicted probability of that sample's TRUE class.\n",
    "# NOTE(review): the probability vectors are indexed with the ground-truth\n",
    "# label (y_test_reset_index), so these meta-features depend on the test\n",
    "# labels. Confirm this is intended; a deployable stacking model must not\n",
    "# build its features from the true labels.\n",
    "preds_dnn_2 = []\n",
    "preds_svm_2 = []\n",
    "preds_rf_2 = []\n",
    "preds_mlp_2 = []\n",
    "preds_ada_2 = []\n",
    "preds_knn_2 = []\n",
    "preds_lgbm_2 = []\n",
    "preds_cat_2 = []\n",
    "preds_xgb_2 = []\n",
    "\n",
    "preds_lr_2 = []\n",
    "preds_dt_2 = []\n",
    "\n",
    "for i in range(0,len(preds_dnn_prob)):  \n",
    "    # print(i)\n",
    "    # print(preds_dnn_prob[i][y_test_reset_index.values[i][0]])\n",
    "    preds_dnn_2.append(preds_dnn_prob[i][y_test_reset_index.values[i][0]])\n",
    "    preds_svm_2.append(preds_svm_prob[i][y_test_reset_index.values[i][0]])\n",
    "    preds_rf_2.append(preds_rf_prob[i][y_test_reset_index.values[i][0]])\n",
    "    preds_mlp_2.append(preds_mlp_prob[i][y_test_reset_index.values[i][0]])\n",
    "    preds_ada_2.append(preds_ada_prob[i][y_test_reset_index.values[i][0]])\n",
    "    preds_knn_2.append(preds_knn_prob[i][y_test_reset_index.values[i][0]])\n",
    "    preds_lgbm_2.append(preds_lgbm_prob[i][y_test_reset_index.values[i][0]])\n",
    "    preds_cat_2.append(preds_cat_prob[i][y_test_reset_index.values[i][0]])\n",
    "    preds_xgb_2.append(preds_xgb_prob[i][y_test_reset_index.values[i][0]])\n",
    "    preds_lr_2.append(preds_lr_prob[i][y_test_reset_index.values[i][0]])\n",
    "    preds_dt_2.append(preds_dt_prob[i][y_test_reset_index.values[i][0]])\n",
    "\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[0. 0. 0. ... 0. 0. 0.]\n",
      " [1. 1. 1. ... 1. 1. 1.]\n",
      " [0. 0. 0. ... 0. 0. 0.]\n",
      " ...\n",
      " [0. 0. 0. ... 0. 0. 0.]\n",
      " [1. 2. 2. ... 2. 2. 2.]\n",
      " [0. 0. 0. ... 0. 0. 0.]]\n",
      "[[0.99392456 0.95346196 0.99998955 ... 0.9999914  1.         0.        ]\n",
      " [0.96875101 0.75538063 1.         ... 0.97482324 1.         1.        ]\n",
      " [0.98000962 0.93182577 0.99993634 ... 0.9960457  1.         0.        ]\n",
      " ...\n",
      " [0.98407745 0.76730863 0.99980734 ... 0.95672259 1.         0.        ]\n",
      " [0.34318045 0.55928096 0.99990814 ... 0.99999128 1.         2.        ]\n",
      " [0.99998724 0.65764796 0.99989923 ... 0.99981526 1.         0.        ]]\n"
     ]
    }
   ],
   "source": [
    "with open(output_file_name, \"a\") as f: print('------------------------------------------------------------------', file = f)\n",
    "with open(output_file_name, \"a\") as f: print('------------------------------------------------------------------', file = f)\n",
    "with open(output_file_name, \"a\") as f: print('------------------------------------------------------------------', file = f)\n",
    "\n",
    "with open(output_file_name, \"a\") as f: print('------------START of STRONGER LEARNER - STACK 01 -----------------', file = f)\n",
    "\n",
    "\n",
    "# Stack the vectors horizontally to create a matrix\n",
    "# Column names must match the stacking order used in np.column_stack below.\n",
    "column_features = ['dnn','rf','lgbm','ada','knn','mlp','svm','cat','xgb','lr','dt','label']\n",
    "# training_matrix2: per-model probability assigned to the true class\n",
    "# (one row per test sample), with the true label as the final column.\n",
    "training_matrix2 = np.column_stack((\n",
    "                          preds_dnn_2,\n",
    "                          preds_rf_2,\n",
    "                          preds_lgbm_2,\n",
    "                          preds_ada_2,\n",
    "                          preds_knn_2, \n",
    "                          preds_mlp_2,\n",
    "                          preds_svm_2,\n",
    "                          preds_cat_2,\n",
    "                          preds_xgb_2,\n",
    "                          preds_lr_2,\n",
    "                          preds_dt_2,\n",
    "                          y_test\n",
    "                          ))\n",
    "\n",
    "# training_matrix: per-model hard class predictions, same column layout.\n",
    "training_matrix = np.column_stack((\n",
    "                          preds_dnn,\n",
    "                          preds_rf,\n",
    "                          preds_lgbm,\n",
    "                          preds_ada,\n",
    "                          preds_knn, \n",
    "                          preds_mlp,\n",
    "                          preds_svm,\n",
    "                          preds_cat,\n",
    "                          preds_xgb,\n",
    "                          preds_lr,\n",
    "                          preds_dt,\n",
    "                        #   preds\n",
    "                          y_test\n",
    "                          ))\n",
    "# Print the resulting matrix\n",
    "print(training_matrix)\n",
    "print(training_matrix2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Level-00 outputs as DataFrames: hard class predictions (df_level_00_0)\n",
    "# and true-class probabilities (df_level_00_1), sharing one column schema.\n",
    "df_level_00_0 = pd.DataFrame(training_matrix, columns=column_features)\n",
    "df_level_00_1 = pd.DataFrame(training_matrix2, columns=column_features)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the level-00 datasets; the filename suffix records whether\n",
    "# feature selection was applied upstream.\n",
    "if feature_selection_bit == 0:\n",
    "    df_level_00_1.to_csv('base_models_prob_all_features.csv', index=False)\n",
    "    df_level_00_0.to_csv('base_models_class_all_features.csv', index=False)\n",
    "elif feature_selection_bit == 1:\n",
    "    df_level_00_1.to_csv('base_models_prob_feature_selection.csv', index=False)\n",
    "    df_level_00_0.to_csv('base_models_class_feature_selection.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>dnn</th>\n",
       "      <th>rf</th>\n",
       "      <th>lgbm</th>\n",
       "      <th>ada</th>\n",
       "      <th>knn</th>\n",
       "      <th>mlp</th>\n",
       "      <th>svm</th>\n",
       "      <th>cat</th>\n",
       "      <th>xgb</th>\n",
       "      <th>lr</th>\n",
       "      <th>dt</th>\n",
       "      <th>label</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.993925</td>\n",
       "      <td>0.953462</td>\n",
       "      <td>0.999990</td>\n",
       "      <td>0.241649</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.872094</td>\n",
       "      <td>0.996421</td>\n",
       "      <td>0.993838</td>\n",
       "      <td>0.999991</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.968751</td>\n",
       "      <td>0.755381</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.229428</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000205</td>\n",
       "      <td>0.972762</td>\n",
       "      <td>0.987883</td>\n",
       "      <td>0.974823</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.980010</td>\n",
       "      <td>0.931826</td>\n",
       "      <td>0.999936</td>\n",
       "      <td>0.247061</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.999999</td>\n",
       "      <td>0.125468</td>\n",
       "      <td>0.990564</td>\n",
       "      <td>0.979741</td>\n",
       "      <td>0.996046</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0.698289</td>\n",
       "      <td>0.847230</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.242380</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.028396</td>\n",
       "      <td>0.988092</td>\n",
       "      <td>0.993120</td>\n",
       "      <td>0.966385</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.991085</td>\n",
       "      <td>0.966415</td>\n",
       "      <td>0.999992</td>\n",
       "      <td>0.255779</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.916044</td>\n",
       "      <td>0.996865</td>\n",
       "      <td>0.994128</td>\n",
       "      <td>0.999776</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44551</th>\n",
       "      <td>0.999963</td>\n",
       "      <td>0.883479</td>\n",
       "      <td>0.999994</td>\n",
       "      <td>0.259007</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.968749</td>\n",
       "      <td>0.994279</td>\n",
       "      <td>0.996916</td>\n",
       "      <td>0.999781</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44552</th>\n",
       "      <td>0.983328</td>\n",
       "      <td>0.983763</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.316873</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.009048</td>\n",
       "      <td>0.994402</td>\n",
       "      <td>0.997916</td>\n",
       "      <td>0.999954</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44553</th>\n",
       "      <td>0.984077</td>\n",
       "      <td>0.767309</td>\n",
       "      <td>0.999807</td>\n",
       "      <td>0.254683</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.944438</td>\n",
       "      <td>0.983802</td>\n",
       "      <td>0.976063</td>\n",
       "      <td>0.956723</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44554</th>\n",
       "      <td>0.343180</td>\n",
       "      <td>0.559281</td>\n",
       "      <td>0.999908</td>\n",
       "      <td>0.292847</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000832</td>\n",
       "      <td>0.962144</td>\n",
       "      <td>0.976610</td>\n",
       "      <td>0.999991</td>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44555</th>\n",
       "      <td>0.999987</td>\n",
       "      <td>0.657648</td>\n",
       "      <td>0.999899</td>\n",
       "      <td>0.237328</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.997933</td>\n",
       "      <td>0.977548</td>\n",
       "      <td>0.988660</td>\n",
       "      <td>0.999815</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>44556 rows × 12 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "            dnn        rf      lgbm       ada  knn       mlp       svm  \\\n",
       "0      0.993925  0.953462  0.999990  0.241649  1.0  1.000000  0.872094   \n",
       "1      0.968751  0.755381  1.000000  0.229428  1.0  1.000000  0.000205   \n",
       "2      0.980010  0.931826  0.999936  0.247061  1.0  0.999999  0.125468   \n",
       "3      0.698289  0.847230  1.000000  0.242380  1.0  1.000000  0.028396   \n",
       "4      0.991085  0.966415  0.999992  0.255779  1.0  1.000000  0.916044   \n",
       "...         ...       ...       ...       ...  ...       ...       ...   \n",
       "44551  0.999963  0.883479  0.999994  0.259007  1.0  1.000000  0.968749   \n",
       "44552  0.983328  0.983763  1.000000  0.316873  1.0  1.000000  0.009048   \n",
       "44553  0.984077  0.767309  0.999807  0.254683  1.0  1.000000  0.944438   \n",
       "44554  0.343180  0.559281  0.999908  0.292847  1.0  1.000000  0.000832   \n",
       "44555  0.999987  0.657648  0.999899  0.237328  1.0  1.000000  0.997933   \n",
       "\n",
       "            cat       xgb        lr   dt  label  \n",
       "0      0.996421  0.993838  0.999991  1.0    0.0  \n",
       "1      0.972762  0.987883  0.974823  1.0    1.0  \n",
       "2      0.990564  0.979741  0.996046  1.0    0.0  \n",
       "3      0.988092  0.993120  0.966385  1.0    1.0  \n",
       "4      0.996865  0.994128  0.999776  1.0    0.0  \n",
       "...         ...       ...       ...  ...    ...  \n",
       "44551  0.994279  0.996916  0.999781  1.0    0.0  \n",
       "44552  0.994402  0.997916  0.999954  1.0    1.0  \n",
       "44553  0.983802  0.976063  0.956723  1.0    0.0  \n",
       "44554  0.962144  0.976610  0.999991  1.0    2.0  \n",
       "44555  0.977548  0.988660  0.999815  1.0    0.0  \n",
       "\n",
       "[44556 rows x 12 columns]"
      ]
     },
     "execution_count": 67,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# level-00 base-model outputs stacked next to the true label\n",
     "# NOTE(review): values look like per-model scores/probabilities -- confirm\n",
     "# against the cell that builds df_level_00_1\n",
     "df_level_00_1\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>dnn</th>\n",
       "      <th>rf</th>\n",
       "      <th>lgbm</th>\n",
       "      <th>ada</th>\n",
       "      <th>knn</th>\n",
       "      <th>mlp</th>\n",
       "      <th>svm</th>\n",
       "      <th>cat</th>\n",
       "      <th>xgb</th>\n",
       "      <th>lr</th>\n",
       "      <th>dt</th>\n",
       "      <th>label</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44551</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44552</th>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44553</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44554</th>\n",
       "      <td>1.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44555</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>44556 rows × 12 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "       dnn   rf  lgbm  ada  knn  mlp  svm  cat  xgb   lr   dt  label\n",
       "0      0.0  0.0   0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0    0.0\n",
       "1      1.0  1.0   1.0  2.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0    1.0\n",
       "2      0.0  0.0   0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0    0.0\n",
       "3      1.0  1.0   1.0  2.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0    1.0\n",
       "4      0.0  0.0   0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0    0.0\n",
       "...    ...  ...   ...  ...  ...  ...  ...  ...  ...  ...  ...    ...\n",
       "44551  0.0  0.0   0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0    0.0\n",
       "44552  1.0  1.0   1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0  1.0    1.0\n",
       "44553  0.0  0.0   0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0    0.0\n",
       "44554  1.0  2.0   2.0  2.0  2.0  2.0  2.0  2.0  2.0  2.0  2.0    2.0\n",
       "44555  0.0  0.0   0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0    0.0\n",
       "\n",
       "[44556 rows x 12 columns]"
      ]
     },
     "execution_count": 68,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# level-00 base-model outputs per sample with the true label\n",
     "# NOTE(review): appears to hold hard class predictions (0/1/2) -- confirm\n",
     "# against the cell that builds df_level_00_0\n",
     "df_level_00_0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# y_01 = df_level_01.pop('label')\n",
    "# X_01 = df_level_01\n",
    "# df_level_01 = df_level_01.assign(label = y_01)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# X_01"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# y_01"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# df_level_01"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# split = 0.7\n",
    "\n",
    "# X_train_01,X_test_01, y_train_01, y_test_01 = sklearn.model_selection.train_test_split(X_01, y_01, train_size=split)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from tabulate import tabulate\n",
    "\n",
    "# # Assuming data is a 110x4 list, where each row is a sublist\n",
    "# # data =  [[\"Row {} Col {}\".format(i + 1, j + 1) for j in range(4)] for i in range(110)]\n",
    "# data = [[\"\" for _ in range(3)] for _ in range(12)]\n",
    "\n",
    "# # Manually insert data at specific row and column\n",
    "# # data[0][0] = \"ADA\"\n",
    "# # data[1][0] = \"DNN\"\n",
    "# # data[2][0] = \"SVM\"\n",
    "# # data[3][0] = \"ADA\"\n",
    "# # data[4][0] = \"DNN\"\n",
    "# # data[2][0] = \"SVM\"\n",
    "\n",
    "\n",
    "# names_models = ['ADA',\n",
    "#                 'SVM',\n",
    "#                 'DNN',\n",
    "#                 'MLP',\n",
    "#                 'KNN',\n",
    "#                 'CAT',\n",
    "#                 'XGB',\n",
    "#                 'LGBM',\n",
    "#                 'RF',\n",
    "#                 'LR',\n",
    "#                 'VOTING'\n",
    "#                 ]\n",
    "# level_00_f1 = [ada_f1_00,\n",
    "#                 svm_f1_00,\n",
    "#                 dnn_f1_00,\n",
    "#                 mlp_f1_00,\n",
    "#                 knn_f1_00,\n",
    "#                 cat_f1_00,\n",
    "#                 xgb_f1_00,\n",
    "#                 lgbm_f1_00,\n",
    "#                 rf_f1_00,\n",
    "#                 lr_f1_00,\n",
    "#                 voting_f1_00]  \n",
    "\n",
    "                 \n",
    "\n",
    "# for i in range(0,len(names_models)):\n",
    "#     data[i][0] =  names_models[i]\n",
    "#     data[i][1] = level_00_f1[i]\n",
    "#     data[i][2] = level_01_f1[i]\n",
    "\n",
    "\n",
    " \n",
    "# # data[0][1] = ada_acc_00\n",
    "# # data\n",
    "\n",
    "# # Define column headers\n",
    "# headers = [\"F1\", \"Level 00\", \"Level 01\"]\n",
    "\n",
    "# # Print the table\n",
    "# table = tabulate(data, headers=headers, tablefmt=\"grid\")\n",
    "# print(table)\n",
    "# with open(output_file_name, \"a\") as f: print(table, file = f)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# lr_acc_00 = 0 \n",
    "# voting_acc_00 = 0\n",
    "\n",
    "# lr_pre_00 = 0 \n",
    "# voting_pre_00 = 0\n",
    "\n",
    "# lr_rec_00 = 0 \n",
    "# voting_rec_00 = 0\n",
    "\n",
    "# lr_f1_00 = 0 \n",
    "# voting_f1_00 = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Models   | ACC-00             | PRE-00              | REC-00             | F1-00               |\n",
      "+==========+====================+=====================+====================+=====================+\n",
      "| ADA      | 0.8397746655893706 | 0.6839200512420371  | 0.6442847056071805 | 0.6581826833025187  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| SVM      | 0.9686686417093097 | 0.9028531429506866  | 0.7798472302216962 | 0.8196285935045303  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| DNN      | 0.858919113026304  | 0.34125096267087296 | 0.3904399282352647 | 0.36406088439634254 |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| MLP      | 0.9928180267528504 | 0.8966192390354735  | 0.9067550250823111 | 0.9015674671251788  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| KNN      | 0.99037166711554   | 0.9308264988646778  | 0.8839890861999631 | 0.9041412250206184  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| CAT      | 0.991942723763354  | 0.9272633251565002  | 0.8667802665360602 | 0.8911656860485214  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| XGB      | 0.9922120477601221 | 0.9118816127614935  | 0.8909545879022922 | 0.900195095871814   |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| LGBM     | 0.9810799892270401 | 0.8061670854344731  | 0.897063118260298  | 0.8344309304949139  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| RF       | 0.9447436933297424 | 0.7708185365724234  | 0.6023789911204002 | 0.6403908180408727  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| LR       | 0.9770176856091211 | 0.8831549325128254  | 0.8673773306973656 | 0.8749641466329727  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| DT       | 0.9949052877278032 | 0.8788185568532312  | 0.8955102915207653 | 0.8955102915207653  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_svm  | 0.9664467187359727 | 0.894907476770992   | 0.7693770391390485 | 0.8082703847582711  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_knn  | 0.9902594487835533 | 0.9290398520065928  | 0.8849039971931741 | 0.9037188293825078  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_DT   | 0.9955785977197235 | 0.9114785694270487  | 0.9026695025444156 | 0.9069542681210585  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_LR   | 0.9779827632642069 | 0.8935577318874394  | 0.8740736890020081 | 0.8832650386482424  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_mlp  | 0.9930424634168238 | 0.88796620134592    | 0.869342792282491  | 0.8772761155746978  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_rf   | 0.9475267079630129 | 0.5785773231049737  | 0.5739562471453103 | 0.5755721001018624  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_ada  | 0.8487072448155131 | 0.7405070945703415  | 0.679679978243031  | 0.7014942990114014  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_lgbm | 0.9953990483885448 | 0.9326970315620029  | 0.8785166516397181 | 0.8998265968885905  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_cat  | 0.9921671604273274 | 0.9191957663018202  | 0.8584784831693989 | 0.8810153121413229  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_comb | 0.9883741808061764 | 0.9393094155911557  | 0.8671024098205928 | 0.8975223805519974  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "|          |                    |                     |                    |                     |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "|          |                    |                     |                    |                     |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "|          |                    |                     |                    |                     |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n"
     ]
    }
   ],
   "source": [
    "from tabulate import tabulate\n",
    "\n",
    "# Assuming data is a 110x4 list, where each row is a sublist\n",
    "# data =  [[\"Row {} Col {}\".format(i + 1, j + 1) for j in range(4)] for i in range(110)]\n",
    "data = [[\"\" for _ in range(5)] for _ in range(24)]\n",
    "\n",
    "# Manually insert data at specific row and column\n",
    "# data[0][0] = \"ADA\"\n",
    "# data[1][0] = \"DNN\"\n",
    "# data[2][0] = \"SVM\"\n",
    "# data[3][0] = \"ADA\"\n",
    "# data[4][0] = \"DNN\"\n",
    "# data[2][0] = \"SVM\"\n",
    "\n",
    "\n",
    "# names_models = ['ADA',\n",
    "#                 'SVM',\n",
    "#                 'DNN',\n",
    "#                 'MLP',\n",
    "#                 'KNN',\n",
    "#                 'CAT',\n",
    "#                 'XGB',\n",
    "#                 'LGBM',\n",
    "#                 'RF',\n",
    "#                 'LR',\n",
    "#                 'VOTING',\n",
    "#                 '   '\n",
    "#                 ]\n",
    "\n",
    "# names_models = ['ADA',\n",
    "#                 'SVM',\n",
    "#                 'DNN',\n",
    "#                 'MLP',\n",
    "#                 'KNN',\n",
    "#                 'CAT',\n",
    "#                 'XGB',\n",
    "#                 'LGBM',\n",
    "#                 'RF',\n",
    "#                 'LR',\n",
    "#                 'DT',\n",
    "#                 # 'VOTING',\n",
    "#                 'Bag_svm',\n",
    "#                 'Bag_knn',\n",
    "#                 'Bag_DT',\n",
    "#                 'Bag_LR',\n",
    "#                 'Bag_mlp',\n",
    "#                 # 'avg',\n",
    "#                 # 'weighed_avg'\n",
    "#                 ]\n",
    "\n",
    "names_models = ['ADA',\n",
    "                'SVM',\n",
    "                'DNN',\n",
    "                'MLP',\n",
    "                'KNN',\n",
    "                'CAT',\n",
    "                'XGB',\n",
    "                'LGBM',\n",
    "                'RF',\n",
    "                'LR',\n",
    "                'DT',\n",
    "                # 'VOTING',\n",
    "                'Bag_svm',\n",
    "                'Bag_knn',\n",
    "                'Bag_DT',\n",
    "                'Bag_LR',\n",
    "                'Bag_mlp',\n",
    "\n",
    "                'Bag_rf',\n",
    "                'Bag_ada',\n",
    "                'Bag_lgbm',\n",
    "                # 'Bag_xgb',\n",
    "                'Bag_cat',\n",
    "                'Bag_comb',\n",
    "\n",
    "                # 'avg',\n",
    "                # 'weighed_avg'\n",
    "                ]\n",
    "\n",
    "\n",
    "level_00_acc = [ada_acc_00,\n",
    "                svm_acc_00,\n",
    "                dnn_acc_00,\n",
    "                mlp_acc_00,\n",
    "                knn_acc_00,\n",
    "                cat_acc_00,\n",
    "                xgb_acc_00,\n",
    "                lgbm_acc_00,\n",
    "                rf_acc_00,\n",
    "                lr_acc_00,\n",
    "                dt_acc_00,\n",
    "                # voting_acc_00,\n",
    "                bag_svm_acc_00,\n",
    "                bag_knn_acc_00,\n",
    "                bag_dt_acc_00,\n",
    "                bag_lr_acc_00,\n",
    "                bag_mlp_acc_00,\n",
    "               \n",
    "                bag_rf_acc_00,\n",
    "                bag_ada_acc_00,\n",
    "                bag_lgbm_acc_00,\n",
    "\n",
    "                bag_cat_acc_00,\n",
    "                bag_comb_acc_00,\n",
    "               \n",
    "               \n",
    "                \n",
    "                # avg_acc_00,\n",
    "                # weighed_avg_acc_00\n",
    "                ]  \n",
    "\n",
    "                # ]  \n",
    "\n",
    "level_00_pre = [ada_pre_00,\n",
    "                svm_pre_00,\n",
    "                dnn_pre_00,\n",
    "                mlp_pre_00,\n",
    "                knn_pre_00,\n",
    "                cat_pre_00,\n",
    "                xgb_pre_00,\n",
    "                lgbm_pre_00,\n",
    "                rf_pre_00,\n",
    "                lr_pre_00,\n",
    "                dt_pre_00,\n",
    "                # voting_pre_00,\n",
    "                bag_svm_pre_00,\n",
    "                bag_knn_pre_00,\n",
    "                bag_dt_pre_00,\n",
    "                bag_lr_pre_00,\n",
    "                bag_mlp_pre_00,\n",
    "\n",
    "                bag_rf_pre_00,\n",
    "                bag_ada_pre_00,\n",
    "                bag_lgbm_pre_00,\n",
    "\n",
    "                bag_cat_pre_00,\n",
    "                bag_comb_pre_00,\n",
    "               \n",
    "                # avg_pre_00,\n",
    "                # weighed_avg_pre_00\n",
    "                ]  \n",
    "\n",
    "level_00_rec = [ada_rec_00,\n",
    "                svm_rec_00,\n",
    "                dnn_rec_00,\n",
    "                mlp_rec_00,\n",
    "                knn_rec_00,\n",
    "                cat_rec_00,\n",
    "                xgb_rec_00,\n",
    "                lgbm_rec_00,\n",
    "                rf_rec_00,\n",
    "                lr_rec_00,\n",
    "                dt_rec_00,\n",
    "                # voting_rec_00,\n",
    "                bag_svm_rec_00,\n",
    "                bag_knn_rec_00,\n",
    "                bag_dt_rec_00,\n",
    "                bag_lr_rec_00,\n",
    "                bag_mlp_rec_00,\n",
    "\n",
    "                bag_rf_rec_00,\n",
    "                bag_ada_rec_00,\n",
    "                bag_lgbm_rec_00,\n",
    "\n",
    "                bag_cat_rec_00,\n",
    "                bag_comb_rec_00,\n",
    "               \n",
    "                # avg_rec_00,\n",
    "                # weighed_avg_rec_00\n",
    "                ]  \n",
    "\n",
    "level_00_f1 = [ada_f1_00,\n",
    "                svm_f1_00,\n",
    "                dnn_f1_00,\n",
    "                mlp_f1_00,\n",
    "                knn_f1_00,\n",
    "                cat_f1_00,\n",
    "                xgb_f1_00,\n",
    "                lgbm_f1_00,\n",
    "                rf_f1_00,\n",
    "                lr_f1_00,\n",
    "                dt_rec_00,\n",
    "                # voting_f1_00,\n",
    "                bag_svm_f1_00,\n",
    "                bag_knn_f1_00,\n",
    "                bag_dt_f1_00,\n",
    "                bag_lr_f1_00,\n",
    "                bag_mlp_f1_00,\n",
    "\n",
    "                bag_rf_f1_00,\n",
    "                bag_ada_f1_00,\n",
    "                bag_lgbm_f1_00,\n",
    "\n",
    "                bag_cat_f1_00,\n",
    "                bag_comb_f1_00,\n",
    "               \n",
    "                # avg_f1_00,\n",
    "                # weighed_avg_f1_00\n",
    "                ]                   \n",
    "\n",
    "for i in range(0,len(names_models)):\n",
    "    data[i][0] =  names_models[i]\n",
    "\n",
    "    data[i][1] = level_00_acc[i]\n",
    "    # data[i][2] = level_01_acc[i]\n",
    "\n",
    "    data[i][2] = level_00_pre[i] \n",
    "    # data[i][4] = level_01_pre[i]\n",
    "\n",
    "    data[i][3] = level_00_rec[i] \n",
    "    # data[i][6] = level_01_rec[i]\n",
    "\n",
    "    data[i][4] = level_00_f1[i]\n",
    "    # data[i][8] = level_01_f1[i]\n",
    "\n",
    "\n",
    "\n",
    "\n",
    " \n",
    "# data[0][1] = ada_acc_00\n",
    "# data\n",
    "\n",
    "# Define column headers\n",
    "# headers = [\"Models\", \"ACC-00\", \" ACC-01\",\"PRE-00\", \" PRE-01\",\"REC-00\", \" REC-01\",\"F1-00\", \" F1-01\",]\n",
    "headers = [\"Models\", \"ACC-00\",\"PRE-00\",\"REC-00\",\"F1-00\"]\n",
    "\n",
    "\n",
    "# Print the table\n",
    "table = tabulate(data, headers=headers, tablefmt=\"grid\")\n",
    "print(table)\n",
    "# with open(output_file_name, \"a\") as f: print(table, file = f)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Models   | ACC-00             | PRE-00              | REC-00             | F1-00               |\n",
      "+==========+====================+=====================+====================+=====================+\n",
      "| Bag_DT   | 0.9955785977197235 | 0.9114785694270487  | 0.9026695025444156 | 0.9069542681210585  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| KNN      | 0.99037166711554   | 0.9308264988646778  | 0.8839890861999631 | 0.9041412250206184  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_knn  | 0.9902594487835533 | 0.9290398520065928  | 0.8849039971931741 | 0.9037188293825078  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| MLP      | 0.9928180267528504 | 0.8966192390354735  | 0.9067550250823111 | 0.9015674671251788  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| XGB      | 0.9922120477601221 | 0.9118816127614935  | 0.8909545879022922 | 0.900195095871814   |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_lgbm | 0.9953990483885448 | 0.9326970315620029  | 0.8785166516397181 | 0.8998265968885905  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_comb | 0.9883741808061764 | 0.9393094155911557  | 0.8671024098205928 | 0.8975223805519974  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| DT       | 0.9949052877278032 | 0.8788185568532312  | 0.8955102915207653 | 0.8955102915207653  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| CAT      | 0.991942723763354  | 0.9272633251565002  | 0.8667802665360602 | 0.8911656860485214  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_LR   | 0.9779827632642069 | 0.8935577318874394  | 0.8740736890020081 | 0.8832650386482424  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_cat  | 0.9921671604273274 | 0.9191957663018202  | 0.8584784831693989 | 0.8810153121413229  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_mlp  | 0.9930424634168238 | 0.88796620134592    | 0.869342792282491  | 0.8772761155746978  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| LR       | 0.9770176856091211 | 0.8831549325128254  | 0.8673773306973656 | 0.8749641466329727  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| LGBM     | 0.9810799892270401 | 0.8061670854344731  | 0.897063118260298  | 0.8344309304949139  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| SVM      | 0.9686686417093097 | 0.9028531429506866  | 0.7798472302216962 | 0.8196285935045303  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_svm  | 0.9664467187359727 | 0.894907476770992   | 0.7693770391390485 | 0.8082703847582711  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_ada  | 0.8487072448155131 | 0.7405070945703415  | 0.679679978243031  | 0.7014942990114014  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| ADA      | 0.8397746655893706 | 0.6839200512420371  | 0.6442847056071805 | 0.6581826833025187  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| RF       | 0.9447436933297424 | 0.7708185365724234  | 0.6023789911204002 | 0.6403908180408727  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| Bag_rf   | 0.9475267079630129 | 0.5785773231049737  | 0.5739562471453103 | 0.5755721001018624  |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "| DNN      | 0.858919113026304  | 0.34125096267087296 | 0.3904399282352647 | 0.36406088439634254 |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "|          |                    |                     |                    |                     |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "|          |                    |                     |                    |                     |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n",
      "|          |                    |                     |                    |                     |\n",
      "+----------+--------------------+---------------------+--------------------+---------------------+\n"
     ]
    }
   ],
   "source": [
    "# Build and report the Level-00 summary table, sorted by F1 score (best first).\n",
    "\n",
    "# Combine the per-model metrics into tuples so they can be sorted together.\n",
    "model_data = list(zip(names_models, level_00_acc, level_00_pre, level_00_rec, level_00_f1))\n",
    "\n",
    "# Sort by F1-00 score (tuple index 4) in descending order\n",
    "model_data_sorted = sorted(model_data, key=lambda x: x[4], reverse=True)\n",
    "\n",
    "# Keep the unzipped sorted lists available for any downstream cells.\n",
    "sorted_names_models, sorted_level_00_acc, sorted_level_00_pre, sorted_level_00_rec, sorted_level_00_f1 = zip(*model_data_sorted)\n",
    "\n",
    "# Rebuild the table rows directly from the sorted tuples. Overwriting a\n",
    "# pre-sized `data` in place left stale/empty trailing rows whenever `data`\n",
    "# was longer than the number of models (visible as blank grid rows).\n",
    "data = [list(row) for row in model_data_sorted]\n",
    "\n",
    "# Define column headers\n",
    "headers = [\"Models\", \"ACC-00\", \"PRE-00\", \"REC-00\", \"F1-00\"]\n",
    "\n",
    "# Render the table and append it to the report file.\n",
    "table = tabulate(data, headers=headers, tablefmt=\"grid\")\n",
    "with open(output_file_name, \"a\") as f: print('Summary table - LEVEL 00', file = f)\n",
    "\n",
    "# Record whether feature selection was applied for this run.\n",
    "if feature_selection_bit == 1: \n",
    "    with open(output_file_name, \"a\") as f: print('Feature Selection was applied', file = f)\n",
    "else:\n",
    "    with open(output_file_name, \"a\") as f: print('All features were used', file = f)\n",
    "\n",
    "print(table)\n",
    "with open(output_file_name, \"a\") as f: print(table, file = f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----------+----------------+\n",
      "| Models   |   time-00(sec) |\n",
      "+==========+================+\n",
      "| RF       |       0.492887 |\n",
      "+----------+----------------+\n",
      "| DT       |       1.69256  |\n",
      "+----------+----------------+\n",
      "| LGBM     |       2.76506  |\n",
      "+----------+----------------+\n",
      "| CAT      |       3.03572  |\n",
      "+----------+----------------+\n",
      "| Bag_rf   |       8.13415  |\n",
      "+----------+----------------+\n",
      "| ADA      |       8.38519  |\n",
      "+----------+----------------+\n",
      "| LR       |       9.03887  |\n",
      "+----------+----------------+\n",
      "| Bag_DT   |      19.8727   |\n",
      "+----------+----------------+\n",
      "| Bag_cat  |      28.73     |\n",
      "+----------+----------------+\n",
      "| SVM      |      35.0649   |\n",
      "+----------+----------------+\n",
      "| DNN      |      50.9278   |\n",
      "+----------+----------------+\n",
      "| XGB      |      73.9083   |\n",
      "+----------+----------------+\n",
      "| Bag_ada  |      80.312    |\n",
      "+----------+----------------+\n",
      "| Bag_svm  |      95.9546   |\n",
      "+----------+----------------+\n",
      "| MLP      |     107.966    |\n",
      "+----------+----------------+\n",
      "| Bag_lgbm |     368.351    |\n",
      "+----------+----------------+\n",
      "| Bag_comb |     748.475    |\n",
      "+----------+----------------+\n",
      "| Bag_LR   |     831.574    |\n",
      "+----------+----------------+\n",
      "| KNN      |     919.402    |\n",
      "+----------+----------------+\n",
      "| Bag_mlp  |    1214.87     |\n",
      "+----------+----------------+\n",
      "| Bag_knn  |    5142.25     |\n",
      "+----------+----------------+\n"
     ]
    }
   ],
   "source": [
    "# Build and report the Level-00 training-time table, sorted fastest-first.\n",
    "from tabulate import tabulate\n",
    "\n",
    "names_models = ['ADA',\n",
    "                'SVM',\n",
    "                'DNN',\n",
    "                'MLP',\n",
    "                'KNN',\n",
    "                'CAT',\n",
    "                'XGB',\n",
    "                'LGBM',\n",
    "                'RF',\n",
    "                'LR',\n",
    "                'DT',\n",
    "                # 'VOTING',\n",
    "                'Bag_svm',\n",
    "                'Bag_knn',\n",
    "                'Bag_DT',\n",
    "                'Bag_LR',\n",
    "                'Bag_mlp',\n",
    "\n",
    "                'Bag_rf',\n",
    "                'Bag_ada',\n",
    "                'Bag_lgbm',\n",
    "                # 'Bag_xgb',\n",
    "                'Bag_cat',\n",
    "                'Bag_comb',\n",
    "                # 'avg',\n",
    "                # 'weighed_avg'\n",
    "                ]\n",
    "\n",
    "# Per-model training times (seconds), in the same order as names_models.\n",
    "level_00_time = [\n",
    "                ada_time_00,\n",
    "                svm_time_00,\n",
    "                dnn_time_00,\n",
    "                mlp_time_00,\n",
    "                knn_time_00,\n",
    "                cat_time_00,\n",
    "                xgb_time_00,\n",
    "                lgbm_time_00,\n",
    "                rf_time_00,\n",
    "                lr_time_00,\n",
    "                dt_time_00,\n",
    "                # voting_time_00,\n",
    "                bag_svm_time_00,\n",
    "                bag_knn_time_00,\n",
    "                bag_dt_time_00,\n",
    "                bag_lr_time_00,\n",
    "                bag_mlp_time_00,\n",
    "\n",
    "                bag_rf_time_00,\n",
    "                bag_ada_time_00,\n",
    "                bag_lgbm_time_00,\n",
    "                # bag_xgb_time_00,\n",
    "                bag_cat_time_00,\n",
    "                bag_comb_time_00,\n",
    "\n",
    "                # avg_time_00,\n",
    "                # weighed_avg_time_00\n",
    "                ]  \n",
    "\n",
    "\n",
    "# Combine names and times into tuples so they can be sorted together.\n",
    "model_data = list(zip(names_models, level_00_time))\n",
    "\n",
    "# Sort by training time (tuple index 1) in ascending order: fastest model first.\n",
    "model_data_sorted = sorted(model_data, key=lambda x: x[1], reverse=False)\n",
    "\n",
    "# Keep the unzipped sorted lists available for any downstream cells.\n",
    "sorted_names_models, sorted_level_00_time = zip(*model_data_sorted)\n",
    "\n",
    "# Build the table rows directly from the sorted tuples (no pre-sized buffer needed).\n",
    "data = [list(row) for row in model_data_sorted]\n",
    "\n",
    "# Define column headers\n",
    "headers = [\"Models\", \"time-00(sec)\"]\n",
    "\n",
    "\n",
    "# Render the table, append it to the report, and record total program runtime.\n",
    "table = tabulate(data, headers=headers, tablefmt=\"grid\")\n",
    "with open(output_file_name, \"a\") as f: print('Time is counted in seconds', file = f)\n",
    "print(table)\n",
    "with open(output_file_name, \"a\") as f: print(table, file = f)\n",
    "end_program = time.time()\n",
    "time_program = end_program - start_program\n",
    "with open(output_file_name, \"a\") as f: print('Running time of entire program is:', time_program ,' seconds',file = f)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
