{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports: third-party libraries first, then local project utilities.\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import random\n",
    "import matplotlib\n",
    "matplotlib.use('Agg')  # NOTE(review): overridden by %matplotlib inline below in a notebook context\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "import pandas_profiling\n",
    "from tqdm import tqdm\n",
    "from sklearn.metrics import roc_auc_score,confusion_matrix,accuracy_score,classification_report,roc_curve\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from sklearn import preprocessing\n",
    "import sklearn\n",
    "import torch\n",
    "from torch.autograd import Variable\n",
    "from torch.utils.data import DataLoader\n",
    "\n",
    "from utils.autoencoder import autoencoder\n",
    "from utils.Data_gen import Data_gen\n",
    "from utils.get_result import get_result\n",
    "\n",
    "# Seed every RNG this notebook uses. torch was previously unseeded, so\n",
    "# weight initialization and DataLoader shuffling varied from run to run.\n",
    "SEED = 123\n",
    "np.random.seed(SEED)\n",
    "random.seed(SEED)\n",
    "torch.manual_seed(SEED)\n",
    "\n",
    "train_data_df = pd.read_csv(\"./data/unsw/UNSW_NB15_training-set.csv\")\n",
    "test_data_df = pd.read_csv(\"./data/unsw/UNSW_NB15_testing-set.csv\")\n",
    "\n",
    "## label 0: normal, 1: anomaly\n",
    "data_df = pd.concat([train_data_df, test_data_df], axis=0)\n",
    "# 'id' is a row identifier; 'attack_cat' is the multi-class attack type,\n",
    "# dropped so only the binary 'label' remains as the target.\n",
    "data_df = data_df.drop([\"id\", \"attack_cat\"], axis=1)\n",
    "\n",
    "# One-hot encode the categorical columns; the rest are treated as numeric.\n",
    "cat_vars = [\"proto\", \"service\", \"state\", \"is_ftp_login\", \"is_sm_ips_ports\"]\n",
    "cat_data = pd.get_dummies(data_df[cat_vars])\n",
    "\n",
    "numeric_vars = list(set(data_df.columns.values.tolist()) - set(cat_vars))\n",
    "numeric_vars.remove('label')\n",
    "numeric_data = data_df[numeric_vars].copy()\n",
    "\n",
    "label_data = data_df['label']\n",
    "\n",
    "# Final frame: numeric features, one-hot features, label last; fresh RangeIndex.\n",
    "final_data_df = pd.concat([numeric_data, cat_data, label_data], axis=1).reset_index(drop=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "normal_ratio(%) 36.09225646458884 93000\n",
      "number of anomalies =  5000 number of normal =  93000\n",
      "anomal_data shape =  (5000, 197)\n",
      "normal_data shape =  (93000, 197)\n",
      "reduced_data shape =  (98000, 197)\n"
     ]
    }
   ],
   "source": [
    "labels = final_data_df['label'].copy()\n",
    "num_data = len(labels)\n",
    "num_normal = np.sum(labels == 0)\n",
    "normal_ratio = num_normal / num_data\n",
    "print('normal_ratio(%)', normal_ratio*100, num_normal)\n",
    "\n",
    "def reduce_data(df, ano_cnt):\n",
    "    \"\"\"Keep every normal row of df plus a random sample of ano_cnt anomalies.\n",
    "\n",
    "    Relies on the cell-level `labels` / `num_normal` globals, and on df\n",
    "    having a clean RangeIndex so label indices and .iloc positions agree.\n",
    "    \"\"\"\n",
    "    num_anomalies = ano_cnt\n",
    "    print('number of anomalies = ', num_anomalies, 'number of normal = ', num_normal)\n",
    "    anomal_labels = labels[labels != 0]\n",
    "    anomal_idx = np.random.choice(anomal_labels.index, size=num_anomalies, replace=False)\n",
    "    anomal_data = df.iloc[anomal_idx].copy()\n",
    "    normal_data = df[labels == 0].copy()\n",
    "    print('anomal_data shape = ', anomal_data.shape)\n",
    "    print('normal_data shape = ', normal_data.shape)\n",
    "    return pd.concat([normal_data, anomal_data], axis=0).reset_index(drop=True)\n",
    "\n",
    "ano_cnt = 5000\n",
    "final_data_reduce_df = reduce_data(final_data_df, ano_cnt)\n",
    "print('reduced_data shape = ', final_data_reduce_df.shape)\n",
    "\n",
    "# Number of feature columns = everything except the trailing 'label' column.\n",
    "input_size = len(final_data_reduce_df.columns) - 1\n",
    "\n",
    "# Test set = all sampled anomalies + an equal-sized slice of shuffled normals;\n",
    "# train set = the remaining normals (the autoencoder trains on normal traffic only).\n",
    "test_an_df = final_data_reduce_df.loc[final_data_reduce_df[\"label\"] == 1].reset_index(drop=True)\n",
    "n_df = final_data_reduce_df.loc[final_data_reduce_df[\"label\"] == 0].reset_index(drop=True)\n",
    "n_df = sklearn.utils.shuffle(n_df).reset_index(drop=True)\n",
    "test_n_df = n_df.iloc[-ano_cnt:].reset_index(drop=True)\n",
    "test_df = pd.concat([test_an_df, test_n_df], axis=0).reset_index(drop=True)\n",
    "train_df = n_df.iloc[:-ano_cnt].reset_index(drop=True)\n",
    "\n",
    "# NOTE(review): the scaler is fit on the full frame INCLUDING the label column.\n",
    "# Because training labels are constant (all 0), MinMaxScaler leaves the test\n",
    "# labels at 0/1, but the label should ideally be excluded before scaling -- confirm.\n",
    "min_max_scaler = preprocessing.MinMaxScaler()\n",
    "train_df = min_max_scaler.fit_transform(train_df)\n",
    "test_df = min_max_scaler.transform(test_df)\n",
    "\n",
    "train_df = pd.DataFrame(train_df)\n",
    "test_df = pd.DataFrame(test_df)\n",
    "\n",
    "# fix: label column position was the magic number 196; input_size is that same\n",
    "# value computed from the data, so it stays correct if the feature count changes.\n",
    "train_label = train_df[input_size]\n",
    "train_df = train_df.drop([input_size], axis=1)\n",
    "\n",
    "y_test = test_df[input_size]\n",
    "x_test = test_df.drop([input_size], axis=1)\n",
    "\n",
    "x_train, x_val, y_train, y_val = train_test_split(train_df,\n",
    "                                                  train_label,\n",
    "                                                  test_size=0.2,\n",
    "                                                  random_state=1029)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      ">>>> @ 1, traing_loss = 0.03419903293251991, val_loss = 0.02820383571088314\n",
      ">>>> @ 2, traing_loss = 0.028260204941034317, val_loss = 0.023678477853536606\n",
      ">>>> @ 3, traing_loss = 0.023726824671030045, val_loss = 0.020842963829636574\n",
      ">>>> @ 4, traing_loss = 0.02088170312345028, val_loss = 0.017485348507761955\n",
      ">>>> @ 5, traing_loss = 0.017520131543278694, val_loss = 0.016042882576584816\n",
      ">>>> @ 6, traing_loss = 0.016072766855359077, val_loss = 0.015605912543833256\n",
      ">>>> @ 7, traing_loss = 0.015626614913344383, val_loss = 0.01522442139685154\n",
      ">>>> @ 8, traing_loss = 0.015242310240864754, val_loss = 0.014436888508498669\n",
      ">>>> @ 9, traing_loss = 0.014461543411016464, val_loss = 0.013787755742669106\n",
      ">>>> @ 10, traing_loss = 0.013821247965097427, val_loss = 0.01310693845152855\n",
      ">>>> @ 11, traing_loss = 0.013148360885679722, val_loss = 0.012485438026487827\n",
      ">>>> @ 12, traing_loss = 0.01253407821059227, val_loss = 0.011708720587193966\n",
      ">>>> @ 13, traing_loss = 0.011765974573791027, val_loss = 0.011187820695340633\n",
      ">>>> @ 14, traing_loss = 0.011251424439251423, val_loss = 0.010864452458918095\n",
      ">>>> @ 15, traing_loss = 0.010933690704405308, val_loss = 0.011718519032001495\n",
      ">>>> @ 16, traing_loss = 0.011793795973062515, val_loss = 0.010094995610415936\n",
      ">>>> @ 17, traing_loss = 0.01016746461391449, val_loss = 0.010925834998488426\n",
      ">>>> @ 18, traing_loss = 0.010996423661708832, val_loss = 0.009755098260939121\n",
      ">>>> @ 19, traing_loss = 0.009829960763454437, val_loss = 0.010153153911232948\n",
      ">>>> @ 20, traing_loss = 0.010228119790554047, val_loss = 0.009004691615700722\n",
      ">>>> @ 21, traing_loss = 0.00907529890537262, val_loss = 0.008577846921980381\n",
      ">>>> @ 22, traing_loss = 0.008645115420222282, val_loss = 0.00881939847022295\n",
      ">>>> @ 23, traing_loss = 0.008883307687938213, val_loss = 0.007975200191140175\n",
      ">>>> @ 24, traing_loss = 0.00803406536579132, val_loss = 0.008096136152744293\n",
      ">>>> @ 25, traing_loss = 0.008150234818458557, val_loss = 0.008215929381549358\n",
      ">>>> @ 26, traing_loss = 0.008267735131084919, val_loss = 0.00794112216681242\n",
      ">>>> @ 27, traing_loss = 0.00799017958343029, val_loss = 0.007759119383990765\n",
      ">>>> @ 28, traing_loss = 0.007806293200701475, val_loss = 0.007926610298454762\n",
      ">>>> @ 29, traing_loss = 0.00797252170741558, val_loss = 0.007824915461242199\n",
      ">>>> @ 30, traing_loss = 0.007870379835367203, val_loss = 0.007593805901706219\n",
      ">>>> @ 31, traing_loss = 0.007640718016773462, val_loss = 0.0076596480794250965\n",
      ">>>> @ 32, traing_loss = 0.007709549739956856, val_loss = 0.0076600536704063416\n",
      ">>>> @ 33, traing_loss = 0.007709112949669361, val_loss = 0.007561270613223314\n",
      ">>>> @ 34, traing_loss = 0.007610219065099955, val_loss = 0.007522155996412039\n",
      ">>>> @ 35, traing_loss = 0.007571011316031218, val_loss = 0.00754860695451498\n",
      ">>>> @ 36, traing_loss = 0.0075979153625667095, val_loss = 0.007500768173485994\n",
      ">>>> @ 37, traing_loss = 0.007550485897809267, val_loss = 0.007441445253789425\n",
      ">>>> @ 38, traing_loss = 0.007489281706511974, val_loss = 0.00740676699206233\n",
      ">>>> @ 39, traing_loss = 0.0074553280137479305, val_loss = 0.007410361897200346\n",
      ">>>> @ 40, traing_loss = 0.007458845153450966, val_loss = 0.007345903664827347\n",
      ">>>> @ 41, traing_loss = 0.007394576445221901, val_loss = 0.00729965977370739\n",
      ">>>> @ 42, traing_loss = 0.007348688319325447, val_loss = 0.007300275377929211\n",
      ">>>> @ 43, traing_loss = 0.007350452709943056, val_loss = 0.007231228984892368\n",
      ">>>> @ 44, traing_loss = 0.007282535545527935, val_loss = 0.007178086321800947\n",
      ">>>> @ 45, traing_loss = 0.007231113500893116, val_loss = 0.0071504125371575356\n",
      ">>>> @ 46, traing_loss = 0.007204957772046328, val_loss = 0.007073290646076202\n",
      ">>>> @ 47, traing_loss = 0.007128814235329628, val_loss = 0.00700457813218236\n",
      ">>>> @ 48, traing_loss = 0.007061575539410114, val_loss = 0.0069369422271847725\n",
      ">>>> @ 49, traing_loss = 0.006995134521275759, val_loss = 0.006840546149760485\n",
      ">>>> @ 50, traing_loss = 0.006899077445268631, val_loss = 0.006749381776899099\n",
      ">>>> @ 51, traing_loss = 0.0068086786195635796, val_loss = 0.006658434867858887\n",
      ">>>> @ 52, traing_loss = 0.00671814102679491, val_loss = 0.006550394929945469\n",
      ">>>> @ 53, traing_loss = 0.006610027048736811, val_loss = 0.00644702510908246\n",
      ">>>> @ 54, traing_loss = 0.006505994126200676, val_loss = 0.006333896424621344\n",
      ">>>> @ 55, traing_loss = 0.0063927751034498215, val_loss = 0.0062128216959536076\n",
      ">>>> @ 56, traing_loss = 0.006270912010222673, val_loss = 0.006108029279857874\n",
      ">>>> @ 57, traing_loss = 0.006165324244648218, val_loss = 0.006000069435685873\n",
      ">>>> @ 58, traing_loss = 0.006056536454707384, val_loss = 0.005920755211263895\n",
      ">>>> @ 59, traing_loss = 0.00597483990713954, val_loss = 0.005870690569281578\n",
      ">>>> @ 60, traing_loss = 0.0059220981784164906, val_loss = 0.005666751880198717\n",
      ">>>> @ 61, traing_loss = 0.005716281943023205, val_loss = 0.005564742721617222\n",
      ">>>> @ 62, traing_loss = 0.00561103317886591, val_loss = 0.005488210823386908\n",
      ">>>> @ 63, traing_loss = 0.005532171111553907, val_loss = 0.005325542762875557\n",
      ">>>> @ 64, traing_loss = 0.005365514196455479, val_loss = 0.005318342242389917\n",
      ">>>> @ 65, traing_loss = 0.005354557652026415, val_loss = 0.005126556847244501\n",
      ">>>> @ 66, traing_loss = 0.005159588530659676, val_loss = 0.005043394863605499\n",
      ">>>> @ 67, traing_loss = 0.005073862615972757, val_loss = 0.004965674132108688\n",
      ">>>> @ 68, traing_loss = 0.004993734415620565, val_loss = 0.0048759630881249905\n",
      ">>>> @ 69, traing_loss = 0.004901145584881306, val_loss = 0.0049025677144527435\n",
      ">>>> @ 70, traing_loss = 0.004927884321659803, val_loss = 0.004991729743778706\n",
      ">>>> @ 71, traing_loss = 0.0050137401558458805, val_loss = 0.00505366874858737\n",
      ">>>> @ 72, traing_loss = 0.005081200506538153, val_loss = 0.004887186922132969\n",
      ">>>> @ 73, traing_loss = 0.004909769631922245, val_loss = 0.004705031868070364\n",
      ">>>> @ 74, traing_loss = 0.004728799220174551, val_loss = 0.004887544550001621\n",
      ">>>> @ 75, traing_loss = 0.0049124909564852715, val_loss = 0.004700465127825737\n",
      ">>>> @ 76, traing_loss = 0.004723267629742622, val_loss = 0.004605526104569435\n",
      ">>>> @ 77, traing_loss = 0.004627366550266743, val_loss = 0.004605505615472794\n",
      ">>>> @ 78, traing_loss = 0.004629568196833134, val_loss = 0.004428761545568705\n",
      ">>>> @ 79, traing_loss = 0.004451606422662735, val_loss = 0.004513355437666178\n",
      ">>>> @ 80, traing_loss = 0.004535116255283356, val_loss = 0.004357937723398209\n",
      ">>>> @ 81, traing_loss = 0.0043812645599246025, val_loss = 0.0044175367802381516\n",
      ">>>> @ 82, traing_loss = 0.004444612190127373, val_loss = 0.004298187326639891\n",
      ">>>> @ 83, traing_loss = 0.004322963301092386, val_loss = 0.0042710560373961926\n",
      ">>>> @ 84, traing_loss = 0.004297018516808748, val_loss = 0.004214318469166756\n",
      ">>>> @ 85, traing_loss = 0.004241066053509712, val_loss = 0.004158366937190294\n",
      ">>>> @ 86, traing_loss = 0.004185076802968979, val_loss = 0.004167447332292795\n",
      ">>>> @ 87, traing_loss = 0.004193441942334175, val_loss = 0.00405722064897418\n",
      ">>>> @ 88, traing_loss = 0.004084387794137001, val_loss = 0.004082379397004843\n",
      ">>>> @ 89, traing_loss = 0.004110756795853376, val_loss = 0.00398238655179739\n",
      ">>>> @ 90, traing_loss = 0.004010747652500868, val_loss = 0.003953250125050545\n",
      ">>>> @ 91, traing_loss = 0.003982172813266516, val_loss = 0.003913328051567078\n",
      ">>>> @ 92, traing_loss = 0.003944210708141327, val_loss = 0.003854496870189905\n",
      ">>>> @ 93, traing_loss = 0.0038840286433696747, val_loss = 0.0038093840703368187\n",
      ">>>> @ 94, traing_loss = 0.003839117242023349, val_loss = 0.0037763090804219246\n",
      ">>>> @ 95, traing_loss = 0.0038069761358201504, val_loss = 0.003734497120603919\n",
      ">>>> @ 96, traing_loss = 0.003764603054150939, val_loss = 0.0036806981079280376\n",
      ">>>> @ 97, traing_loss = 0.0037108897231519222, val_loss = 0.0036599787417799234\n",
      ">>>> @ 98, traing_loss = 0.00369082554243505, val_loss = 0.0036462859716266394\n",
      ">>>> @ 99, traing_loss = 0.0036761092487722635, val_loss = 0.003597908653318882\n",
      ">>>> @ 100, traing_loss = 0.003628089325502515, val_loss = 0.003525931853801012\n"
     ]
    }
   ],
   "source": [
    "# Full-batch training: one batch is the entire training set.\n",
    "batch_size = int(len(x_train))\n",
    "\n",
    "x_train_dataset = Data_gen(x_train)\n",
    "x_val_dataset = Data_gen(x_val)\n",
    "x_test_dataset = Data_gen(x_test)\n",
    "\n",
    "train_dataloader = DataLoader(x_train_dataset, batch_size=batch_size, shuffle=True, num_workers=0)\n",
    "val_dataloader = DataLoader(x_val_dataset, batch_size=batch_size, shuffle=True, num_workers=0)\n",
    "test_dataloader = DataLoader(x_test_dataset, batch_size=batch_size, shuffle=True, num_workers=0)\n",
    "\n",
    "model = autoencoder(input_size)\n",
    "model = model.cuda()\n",
    "criterion = torch.nn.MSELoss()\n",
    "learning_rate = .01\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n",
    "\n",
    "save_model_path = 'ae_model.pth'\n",
    "FULL_TRAIN = True  # set False to load the saved checkpoint instead of retraining\n",
    "num_epochs = 100\n",
    "if FULL_TRAIN:\n",
    "    for epoch in range(num_epochs):\n",
    "        running_training_loss = 0.0\n",
    "        running_val_loss = 0.0\n",
    "\n",
    "        for i_batch, sample_batched in enumerate(train_dataloader):\n",
    "            sample_batched = sample_batched.cuda()\n",
    "            en_out, de_out, latant_v, output = model(sample_batched)\n",
    "            train_loss = criterion(output, sample_batched)\n",
    "            optimizer.zero_grad()\n",
    "            train_loss.backward()\n",
    "            optimizer.step()\n",
    "            running_training_loss += train_loss.item()\n",
    "\n",
    "        # fix: validation previously built autograd graphs it never used;\n",
    "        # no_grad() leaves the loss values identical while saving GPU memory.\n",
    "        with torch.no_grad():\n",
    "            for i_batch, sample_batched in enumerate(val_dataloader):\n",
    "                sample_batched = sample_batched.cuda()\n",
    "                en_out, de_out, latant_v, val_output = model(sample_batched)\n",
    "                val_loss = criterion(val_output, sample_batched)\n",
    "                running_val_loss += val_loss.item()\n",
    "        print(\">>>> @ {}, traing_loss = {}, val_loss = {}\".format(epoch+1, \n",
    "                                                                   running_training_loss/len(train_dataloader),\n",
    "                                                                   running_val_loss/len(val_dataloader)))\n",
    "\n",
    "        # Checkpoint every epoch so a partial run is still usable.\n",
    "        torch.save(model.state_dict(), save_model_path)\n",
    "else:\n",
    "    print(\"Load previous trained model\")\n",
    "    model.load_state_dict(torch.load(save_model_path))\n",
    "\n",
    "from sklearn.metrics import roc_curve, auc, confusion_matrix\n",
    "from sklearn.metrics import roc_auc_score,confusion_matrix,accuracy_score,classification_report,roc_curve\n",
    "\n",
    "# Run the trained model over the train and test sets once.\n",
    "# fix: inference also wrapped in no_grad() -- no gradients are needed here.\n",
    "x_test_array = x_test.values\n",
    "x_train_array = x_train.values\n",
    "with torch.no_grad():\n",
    "    x_test_variable = torch.from_numpy(x_test_array).type(torch.FloatTensor).cuda()\n",
    "    test_en_out, test_de_out, test_latant_v, x_test_recon = model(x_test_variable)\n",
    "\n",
    "    x_train_variable = torch.from_numpy(x_train_array).type(torch.FloatTensor).cuda()\n",
    "    train_en_out, train_de_out, train_latant_v, x_train_recon = model(x_train_variable)\n",
    "\n",
    "# Move everything back to numpy on the CPU for the scoring cells below.\n",
    "x_train_recon = x_train_recon.detach().cpu().numpy()\n",
    "train_latant_v = train_latant_v.detach().cpu().numpy()\n",
    "x_test_recon = x_test_recon.detach().cpu().numpy()\n",
    "test_latant_v = test_latant_v.detach().cpu().numpy()\n",
    "\n",
    "test_en_out = [x.detach().cpu().numpy() for x in test_en_out]\n",
    "train_en_out = [x.detach().cpu().numpy() for x in train_en_out]\n",
    "test_de_out = [x.detach().cpu().numpy() for x in test_de_out]\n",
    "train_de_out = [x.detach().cpu().numpy() for x in train_de_out]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.ensemble import IsolationForest\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from scipy.spatial.distance import pdist, cdist, squareform\n",
    "from sklearn import covariance\n",
    "from scipy.spatial import distance\n",
    "import warnings\n",
    "warnings.filterwarnings(action='ignore')\n",
    "\n",
    "def mahalanobis_scores(fit_data, score_data):\n",
    "    \"\"\"Mahalanobis distance of every row of score_data from fit_data.\n",
    "\n",
    "    Fits an empirical covariance (mean + precision matrix) on fit_data,\n",
    "    then scores each row of score_data against it. Returns a 1-D array\n",
    "    with one distance per row of score_data.\n",
    "    \"\"\"\n",
    "    est = sklearn.covariance.EmpiricalCovariance(assume_centered=False)\n",
    "    est.fit(fit_data)\n",
    "    incov = est.precision_\n",
    "    mean = np.mean(fit_data, axis=0)\n",
    "    return np.array([distance.mahalanobis(mean, row, incov) for row in score_data])\n",
    "\n",
    "# Baseline 1: Mahalanobis distance in the raw input space.\n",
    "ma_scores = mahalanobis_scores(x_train_array, x_test_array)\n",
    "\n",
    "# Baseline 2: Isolation Forest fit on (normal-only) training data.\n",
    "iso_Forest = IsolationForest(n_estimators=100, random_state=123)\n",
    "iso_Forest.fit(x_train_array)\n",
    "if_scores = iso_Forest.decision_function(x_test_array)\n",
    "\n",
    "# Baseline 3: autoencoder reconstruction error (per-row MSE).\n",
    "ae_scores = np.mean((x_test_array - x_test_recon)**2, axis=1)\n",
    "\n",
    "# Mahalanobis scores on intermediate representations: the 4 encoder\n",
    "# activations, the latent vector, then the 5 decoder activations.\n",
    "# fix: refactored from ten copy-pasted blocks (which also shadowed the\n",
    "# builtin `input`) into one loop over (train_features, test_features) pairs.\n",
    "layer_pairs = (\n",
    "    [(train_en_out[i], test_en_out[i]) for i in range(4)]\n",
    "    + [(train_latant_v, test_latant_v)]\n",
    "    + [(train_de_out[i], test_de_out[i]) for i in range(5)]\n",
    ")\n",
    "layer_scores = [mahalanobis_scores(tr, te) for tr, te in layer_pairs]\n",
    "\n",
    "efe_scores = layer_scores[:5]   # encoder-side feature scores (incl. latent)\n",
    "dfe_scores = layer_scores[5:]   # decoder-side feature scores\n",
    "\n",
    "scores = [ma_scores] + [if_scores] + [ae_scores] + efe_scores + dfe_scores\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ma_scores\n",
      "0.796\n",
      "0.8931987247608927\n",
      "0.6724\n",
      "0.8401023800000001\n",
      "0.869775326573813\n",
      "0.7672295755362848\n",
      "0.0804\n",
      "\n",
      "\n",
      "if_scores\n",
      "0.7301\n",
      "0.7660115606936416\n",
      "0.6626\n",
      "0.79362918\n",
      "0.8057846527418319\n",
      "0.7105630026809652\n",
      "0.2024\n",
      "\n",
      "\n",
      "ae_scores\n",
      "0.8295\n",
      "0.88039713691988\n",
      "0.7626\n",
      "0.8415485400000001\n",
      "0.8910613406954397\n",
      "0.8172757475083057\n",
      "0.1036\n",
      "\n",
      "\n",
      "efe_scores\n",
      "0.853\n",
      "0.9486527707168276\n",
      "0.7464\n",
      "0.8885054600000001\n",
      "0.9202248030108623\n",
      "0.8354600402955003\n",
      "0.0404\n",
      "\n",
      "\n",
      "dfe_scores\n",
      "0.83\n",
      "0.9239465570400822\n",
      "0.7192\n",
      "0.8878753\n",
      "0.916553344147357\n",
      "0.808816914080072\n",
      "0.0592\n",
      "\n",
      "\n",
      "mahal+efe_scores\n",
      "0.8568\n",
      "0.9493702770780856\n",
      "0.7538\n",
      "0.90141666\n",
      "0.9267794694792562\n",
      "0.8403567447045708\n",
      "0.0402\n",
      "\n",
      "\n",
      "mahal+dfe_scores\n",
      "0.8363\n",
      "0.9273189326556543\n",
      "0.7298\n",
      "0.8897139000000001\n",
      "0.9185980110121231\n",
      "0.8167879127028539\n",
      "0.0572\n",
      "\n",
      "\n",
      "mahal+ae_scores\n",
      "0.7981\n",
      "0.8946253640455388\n",
      "0.6758\n",
      "0.84152106\n",
      "0.871444050396915\n",
      "0.7699669590976415\n",
      "0.0796\n",
      "\n",
      "\n",
      "efe_scores+dfe_scores\n",
      "0.8592\n",
      "0.9463220675944334\n",
      "0.7616\n",
      "0.9149979\n",
      "0.9355900929714418\n",
      "0.8439716312056738\n",
      "0.0432\n",
      "\n",
      "\n",
      "ae_scores+efe_scores\n",
      "0.853\n",
      "0.9486527707168276\n",
      "0.7464\n",
      "0.88850922\n",
      "0.9202261379282715\n",
      "0.8354600402955003\n",
      "0.0404\n",
      "\n",
      "\n",
      "ae_scores+dfe_scores\n",
      "0.8303\n",
      "0.9242229642948883\n",
      "0.7196\n",
      "0.88803278\n",
      "0.9166962780890596\n",
      "0.8091757562127516\n",
      "0.059\n",
      "\n",
      "\n",
      "ma_scores+efe_scores+dfe_scores\n",
      "0.8599\n",
      "0.9490890940853506\n",
      "0.7606\n",
      "0.9207194599999999\n",
      "0.9385274564876805\n",
      "0.8444543133118686\n",
      "0.0408\n",
      "\n",
      "\n",
      "ma_scores+ae_scores+efe_scores\n",
      "0.8568\n",
      "0.9493702770780856\n",
      "0.7538\n",
      "0.9014175400000001\n",
      "0.9267799941767989\n",
      "0.8403567447045708\n",
      "0.0402\n",
      "\n",
      "\n",
      "ma_scores+ae_scores+dfe_scores\n",
      "0.8363\n",
      "0.9273189326556543\n",
      "0.7298\n",
      "0.8896904200000001\n",
      "0.9185922262994026\n",
      "0.8167879127028539\n",
      "0.0572\n",
      "\n",
      "\n",
      "ae_scores+efe_scores+dfe_scores\n",
      "0.8592\n",
      "0.9463220675944334\n",
      "0.7616\n",
      "0.9150015\n",
      "0.9356054412239493\n",
      "0.8439716312056738\n",
      "0.0432\n",
      "\n",
      "\n",
      "ma_scores+ae_scores+efe_scores+dfe_scores\n",
      "0.8596\n",
      "0.9207146599999999\n",
      "0.9385163724115043\n",
      "0.949050949050949\n",
      "0.76\n",
      "0.844069302532208\n",
      "0.0408\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from sklearn.datasets import load_iris\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import precision_recall_curve, auc, roc_auc_score, average_precision_score\n",
    "from sklearn.metrics import precision_score, recall_score, f1_score\n",
    "\n",
    "scores_df=pd.DataFrame({\"ma_scores\":scores[0],\n",
    "                        \"if_scores\":scores[1],\n",
    "                        \"ae_scores\":scores[2],\n",
    "                        \"efe_scores_1\":scores[3],\n",
    "                        \"efe_scores_2\":scores[4],\n",
    "                        \"efe_scores_3\":scores[5],\n",
    "                        \"efe_scores_4\":scores[6],\n",
    "                        \"efe_scores_5\":scores[7],\n",
    "                        \"dfe_scores_1\":scores[8],\n",
    "                        \"dfe_scores_2\":scores[9],\n",
    "                        \"dfe_scores_3\":scores[10],\n",
    "                        \"dfe_scores_4\":scores[11],\n",
    "                        \"dfe_scores_5\":scores[12],\n",
    "                        \"label\":y_test})\n",
    "\n",
    "x=scores_df.drop(\"label\",axis=1)\n",
    "x=scores_df[[\"ma_scores\"]]\n",
    "y=scores_df[\"label\"].values\n",
    "clf = LogisticRegression(random_state=0).fit(x, y)\n",
    "model=clf\n",
    "test_acc = model.score(x, y)\n",
    "AUROC = roc_auc_score(y,model.predict_proba(x)[:,1])\n",
    "AUPRC = average_precision_score(y,model.predict_proba(x)[:,1])\n",
    "recall = recall_score(y,  model.predict(x))\n",
    "precision = precision_score(y, model.predict(x))\n",
    "f1_score_=f1_score(y_true=y, y_pred =model.predict(x))\n",
    "fpr, tpr, thresholds = roc_curve(y,model.predict_proba(x)[:,1])\n",
    "tn, fp, fn, tp = confusion_matrix(scores_df[\"label\"].values, model.predict(x)).ravel()\n",
    "\n",
    "print(\"ma_scores\") #only mahal of input\n",
    "print(test_acc)\n",
    "print(precision)\n",
    "print(recall)\n",
    "print(AUROC)\n",
    "print(AUPRC)\n",
    "print(f1_score_)\n",
    "print(fp/(fp+tn))\n",
    "print(\"\\n\")\n",
    "\n",
    "## Fit a logistic regression on each anomaly-score feature set and print\n",
    "## train-set metrics. The original cell repeated this ~23-line block 15\n",
    "## times; it is now one helper driven by a config list.\n",
    "## NOTE(review): metrics are computed on the same data the model was fit\n",
    "## on (as in the original) -- these are train-set numbers, not held-out.\n",
    "\n",
    "EFE_COLS = [\"efe_scores_\" + str(i) for i in range(1, 6)]  # encoder Mahalanobis scores\n",
    "DFE_COLS = [\"dfe_scores_\" + str(i) for i in range(1, 6)]  # decoder Mahalanobis scores\n",
    "\n",
    "# (header printed before the metrics, feature columns fed to the model)\n",
    "SCORE_CONFIGS = [\n",
    "    (\"if_scores\", [\"if_scores\"]),                # isolation forest\n",
    "    (\"ae_scores\", [\"ae_scores\"]),                # reconstruction loss\n",
    "    (\"efe_scores\", EFE_COLS),                    # encoder mahal\n",
    "    (\"dfe_scores\", DFE_COLS),                    # decoder mahal\n",
    "    (\"mahal+efe_scores\", [\"ma_scores\"] + EFE_COLS),\n",
    "    (\"mahal+dfe_scores\", [\"ma_scores\"] + DFE_COLS),\n",
    "    (\"mahal+ae_scores\", [\"ma_scores\", \"ae_scores\"]),\n",
    "    (\"efe_scores+dfe_scores\", EFE_COLS + DFE_COLS),\n",
    "    (\"ae_scores+efe_scores\", [\"ae_scores\"] + EFE_COLS),\n",
    "    (\"ae_scores+dfe_scores\", [\"ae_scores\"] + DFE_COLS),\n",
    "    (\"ma_scores+efe_scores+dfe_scores\", [\"ma_scores\"] + EFE_COLS + DFE_COLS),\n",
    "    (\"ma_scores+ae_scores+efe_scores\", [\"ma_scores\", \"ae_scores\"] + EFE_COLS),\n",
    "    (\"ma_scores+ae_scores+dfe_scores\", [\"ma_scores\", \"ae_scores\"] + DFE_COLS),\n",
    "    (\"ae_scores+efe_scores+dfe_scores\", [\"ae_scores\"] + EFE_COLS + DFE_COLS),\n",
    "    (\"ma_scores+ae_scores+efe_scores+dfe_scores\",\n",
    "     [\"ma_scores\", \"ae_scores\"] + EFE_COLS + DFE_COLS),\n",
    "]\n",
    "\n",
    "\n",
    "def evaluate_feature_set(df, name, cols):\n",
    "    \"\"\"Fit LogisticRegression(random_state=0) on df[cols] vs df['label'],\n",
    "    then print: header, accuracy, precision, recall, AUROC, AUPRC, F1, FPR.\n",
    "\n",
    "    The print order is normalized to acc/precision/recall/AUROC/AUPRC/F1/FPR;\n",
    "    the last section of the original cell deviated from this order, which is\n",
    "    treated here as a copy-paste mistake and fixed.\n",
    "    \"\"\"\n",
    "    x = df[cols]\n",
    "    y = df[\"label\"].values\n",
    "    model = LogisticRegression(random_state=0).fit(x, y)\n",
    "    # Predict once and reuse; the original recomputed predict/predict_proba\n",
    "    # per metric. The unused roc_curve call was dropped as dead code.\n",
    "    proba = model.predict_proba(x)[:, 1]\n",
    "    pred = model.predict(x)\n",
    "    tn, fp, fn, tp = confusion_matrix(y, pred).ravel()\n",
    "    print(name)\n",
    "    print(model.score(x, y))       # accuracy\n",
    "    print(precision_score(y, pred))\n",
    "    print(recall_score(y, pred))\n",
    "    print(roc_auc_score(y, proba))\n",
    "    print(average_precision_score(y, proba))\n",
    "    print(f1_score(y_true=y, y_pred=pred))\n",
    "    print(fp / (fp + tn))          # false positive rate\n",
    "    print(\"\\n\")\n",
    "\n",
    "\n",
    "for name, cols in SCORE_CONFIGS:\n",
    "    evaluate_feature_set(scores_df, name, cols)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "spesis",
   "language": "python",
   "name": "spesis"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
