{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-08-12T09:43:37.042804Z",
     "start_time": "2024-08-12T09:43:35.651982Z"
    }
   },
   "outputs": [],
   "source": [
    "#!/usr/bin/env python\n",
    "# -*- coding: utf-8 -*-\n",
    "\n",
    "# Make the sibling `loglizer` package importable from this notebook's location.\n",
    "import sys\n",
    "sys.path.append('../')\n",
    "\n",
    "import numpy as np\n",
    "# NOTE(review): GridSearchCV, svm and metrics are referenced only by the\n",
    "# commented-out experiment cells further down; kept so those cells run if re-enabled.\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "from sklearn import svm\n",
    "\n",
    "from loglizer.models import PCA, IsolationForest, LogClustering, OneClassSVM\n",
    "from loglizer import dataloader, preprocessing\n",
    "from loglizer.utils import metrics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-08-12T09:43:43.776537Z",
     "start_time": "2024-08-12T09:43:37.059551Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train normal size: 334933\n",
      "Train abnormal size: 6735\n",
      "Test normal size: 223290\n",
      "Test abnormal size: 10103\n",
      "====== Transformed train data summary ======\n",
      "Train data shape: 341668-by-43\n",
      "\n",
      "====== Transformed test data summary ======\n",
      "Test data shape: 233393-by-43\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Load the HDFS train/test split and turn event sequences into feature matrices.\n",
    "# Fix: variable was misspelled `ouput_dir`, and it is actually the *input* data\n",
    "# directory, so it is renamed to `data_dir` (local to this cell; no other cell uses it).\n",
    "data_dir = \"../../../dataset/full_dataset/HDFS/\"\n",
    "(x_train, y_train), (x_test, y_test) = dataloader.load_data(data_dir=data_dir)\n",
    "feature_extractor = preprocessing.FeatureExtractor()\n",
    "# Fit the extractor on the training split only, then reuse it on the test split.\n",
    "x_train = feature_extractor.fit_transform(x_train)\n",
    "x_test = feature_extractor.transform(x_test)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-08-12T09:43:45.290981Z",
     "start_time": "2024-08-12T09:43:43.777520Z"
    },
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "==================== Model: PCA ====================\n",
      "theshold 0\n",
      "====== Model summary ======\n",
      "n_components: 1\n",
      "Project matrix shape: 43-by-43\n",
      "SPE threshold: 1\n",
      "\n",
      "Train validation:\n",
      "====== Evaluation summary ======\n",
      "Confusion Matrix: TP: 6735, FP: 333646, TN: 1287, FN: 0\n",
      "Precision: 1.979%, recall: 100.000%, F1-measure: 3.881%\n",
      "\n",
      "Test validation:\n",
      "====== Evaluation summary ======\n",
      "Confusion Matrix: TP: 10103, FP: 222368, TN: 922, FN: 0\n",
      "Precision: 4.346%, recall: 100.000%, F1-measure: 8.330%\n",
      "\n",
      "CPU times: user 2.71 s, sys: 4.15 s, total: 6.85 s\n",
      "Wall time: 1.48 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "print(\"=\"*20 + \" Model: PCA \" + \"=\"*20)\n",
    "# Fix: the original `for th in np.arange(1):` loop executed exactly once and\n",
    "# never fed `th` into the model (threshold was hard-coded to 1), so the dead\n",
    "# loop is removed; also fixes the \"theshold\" typo in the printed message.\n",
    "threshold = 1  # SPE threshold used to flag anomalies\n",
    "print(\"threshold\", threshold)\n",
    "# c_alpha=1.9600 - presumably the z-value for a 95% confidence level; confirm\n",
    "# against loglizer.models.PCA.\n",
    "model = PCA(n_components=0.8, threshold=threshold, c_alpha=1.9600)\n",
    "model.fit(x_train)\n",
    "print('Train validation:')\n",
    "precision, recall, f1 = model.evaluate(x_train, y_train)\n",
    "print('Test validation:')\n",
    "precision, recall, f1 = model.evaluate(x_test, y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-08-12T09:44:04.795845Z",
     "start_time": "2024-08-12T09:43:45.292955Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "==================== Model: IsolationForest ====================\n",
      "====== Model summary ======\n",
      "Train validation:\n",
      "====== Evaluation summary ======\n",
      "Confusion Matrix: TP: 5937, FP: 26915, TN: 308018, FN: 798\n",
      "Precision: 18.072, recall: 88.151, F1-measure: 29.995\n",
      "\n",
      "Test validation:\n",
      "====== Evaluation summary ======\n",
      "Confusion Matrix: TP: 8969, FP: 17670, TN: 205620, FN: 1134\n",
      "Precision: 33.669, recall: 88.776, F1-measure: 48.822\n",
      "\n",
      "CPU times: user 2.51 s, sys: 33.3 ms, total: 2.54 s\n",
      "Wall time: 2.54 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "print(\"=\"*20 + \" Model: IsolationForest \" + \"=\"*20)\n",
    "# Unsupervised fit on the raw training matrix; labels are only consumed by\n",
    "# evaluate() below. random_state pins the forest for reproducible scores.\n",
    "model = IsolationForest(n_estimators=100, max_samples='auto', contamination='auto', random_state=19)\n",
    "model.fit(x_train)\n",
    "print('Train validation:')\n",
    "precision, recall, f1 = model.evaluate(x_train, y_train)\n",
    "print('Test validation:')\n",
    "precision, recall, f1 = model.evaluate(x_test, y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-08-12T09:46:39.483018Z",
     "start_time": "2024-08-12T09:44:04.798827Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "==================== Model: one class SVM ====================\n",
      "====== Model summary ======\n",
      "Train validation:\n",
      "====== Evaluation summary ======\n",
      "Confusion Matrix: TP: 223, FP: 334933, TN: 0, FN: 6512\n",
      "Precision: 0.067, recall: 3.311, F1-measure: 0.131\n",
      "\n",
      "Test validation:\n",
      "====== Evaluation summary ======\n",
      "Confusion Matrix: TP: 321, FP: 223290, TN: 0, FN: 9782\n",
      "Precision: 0.144, recall: 3.177, F1-measure: 0.275\n",
      "\n",
      "CPU times: user 4h 58min 5s, sys: 11.6 s, total: 4h 58min 17s\n",
      "Wall time: 4h 58min 52s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "print(\"=\"*20 + \" Model: one class SVM \" + \"=\"*20)\n",
    "# NOTE(review): y_train is passed to fit() even though one-class SVM is\n",
    "# unsupervised - presumably the loglizer wrapper uses it to select normal\n",
    "# samples or simply ignores it; confirm against loglizer.models.OneClassSVM.\n",
    "# WARNING: this cell took ~5 hours of wall time on the full HDFS split\n",
    "# (see recorded output below) - budget accordingly before re-running.\n",
    "model = OneClassSVM(kernel='rbf')\n",
    "model.fit(x_train, y_train)\n",
    "\n",
    "print('Train validation:')\n",
    "precision, recall, f1 = model.evaluate(x_train, y_train)\n",
    "print('Test validation:')\n",
    "precision, recall, f1 = model.evaluate(x_test, y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-08-12T09:46:39.498017Z",
     "start_time": "2024-08-12T09:46:39.485025Z"
    }
   },
   "outputs": [],
   "source": [
    "# Kept for reference: hyperparameter grid search over sklearn's OneClassSVM.\n",
    "# Disabled (fully commented out) - presumably because of runtime cost: the\n",
    "# single rbf fit above already took ~5 h, and a 4x4x4 grid with 5-fold CV\n",
    "# would multiply that. Re-enable deliberately, not by default.\n",
    "\n",
    "# %%time\n",
    "# print(\"=\"*20 + \" Model: one class SVM \" + \"=\"*20)\n",
    "\n",
    "# nus = [0.001, 0.01, 0.1, 1]\n",
    "# gammas = [0.001, 0.01, 0.1, 1]\n",
    "# tuned_parameters = {'kernel' : ['rbf','poly','linear','sigmoid'], 'gamma' : gammas, 'nu': nus}\n",
    "\n",
    "# ocsvm = svm.OneClassSVM()\n",
    "# model = GridSearchCV(ocsvm, tuned_parameters, cv=5, scoring=\"f1_micro\")\n",
    "\n",
    "# model.fit(x_train, y_train.astype(int))\n",
    "\n",
    "# # print('Train validation:')\n",
    "# # precision, recall, f1 = model.predict(x_train, y_train.astype(int))\n",
    "# # print('Test validation:')\n",
    "# # precision, recall, f1 = model.predict(x_test, y_test.astype(int))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-08-12T09:46:39.513026Z",
     "start_time": "2024-08-12T09:46:39.500001Z"
    }
   },
   "outputs": [],
   "source": [
    "# Kept for reference: manual evaluation of the (disabled) GridSearchCV model\n",
    "# above via raw predict() + loglizer.utils.metrics. Only meaningful if the\n",
    "# previous cell is re-enabled first, since it defines `model`.\n",
    "\n",
    "# print('Train validation:')\n",
    "# y_eval = model.predict(x_train)\n",
    "# precision, recall, f1 = metrics(y_eval, y_train)\n",
    "# print('Precision: {:.3f}, recall: {:.3f}, F1-measure: {:.3f}\\n'.format(precision, recall, f1))\n",
    "    \n",
    "# print('Test validation:')\n",
    "# y_pred = model.predict(x_test)\n",
    "# precision, recall, f1 = metrics(y_pred, y_test)\n",
    "# print('Precision: {:.3f}, recall: {:.3f}, F1-measure: {:.3f}\\n'.format(precision, recall, f1))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-08-12T09:46:39.527667Z",
     "start_time": "2024-08-12T09:46:39.515135Z"
    }
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-08-12T09:47:04.281047Z",
     "start_time": "2024-08-12T09:46:39.530666Z"
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "==================== Model: LogClustering ====================\n",
      "====== Model summary ======\n",
      "Starting offline clustering...\n",
      "Processed 1000 instances.\n",
      "Found 4 clusters offline.\n",
      "\n",
      "Starting online clustering...\n",
      "Processed 2000 instances.\n",
      "Processed 4000 instances.\n",
      "Processed 6000 instances.\n",
      "Processed 8000 instances.\n",
      "Processed 10000 instances.\n",
      "Processed 12000 instances.\n",
      "Processed 14000 instances.\n",
      "Processed 16000 instances.\n",
      "Processed 18000 instances.\n",
      "Processed 20000 instances.\n",
      "Processed 22000 instances.\n",
      "Processed 24000 instances.\n",
      "Processed 26000 instances.\n",
      "Processed 28000 instances.\n",
      "Processed 30000 instances.\n",
      "Processed 32000 instances.\n",
      "Processed 34000 instances.\n",
      "Processed 36000 instances.\n",
      "Processed 38000 instances.\n",
      "Processed 40000 instances.\n",
      "Processed 42000 instances.\n",
      "Processed 44000 instances.\n",
      "Processed 46000 instances.\n",
      "Processed 48000 instances.\n",
      "Processed 50000 instances.\n",
      "Processed 52000 instances.\n",
      "Processed 54000 instances.\n",
      "Processed 56000 instances.\n",
      "Processed 58000 instances.\n",
      "Processed 60000 instances.\n",
      "Processed 62000 instances.\n",
      "Processed 64000 instances.\n",
      "Processed 66000 instances.\n",
      "Processed 68000 instances.\n",
      "Processed 70000 instances.\n",
      "Processed 72000 instances.\n",
      "Processed 74000 instances.\n",
      "Processed 76000 instances.\n",
      "Processed 78000 instances.\n",
      "Processed 80000 instances.\n",
      "Processed 82000 instances.\n",
      "Processed 84000 instances.\n",
      "Processed 86000 instances.\n",
      "Processed 88000 instances.\n",
      "Processed 90000 instances.\n",
      "Processed 92000 instances.\n",
      "Processed 94000 instances.\n",
      "Processed 96000 instances.\n",
      "Processed 98000 instances.\n",
      "Processed 100000 instances.\n",
      "Processed 102000 instances.\n",
      "Processed 104000 instances.\n",
      "Processed 106000 instances.\n",
      "Processed 108000 instances.\n",
      "Processed 110000 instances.\n",
      "Processed 112000 instances.\n",
      "Processed 114000 instances.\n",
      "Processed 116000 instances.\n",
      "Processed 118000 instances.\n",
      "Processed 120000 instances.\n",
      "Processed 122000 instances.\n",
      "Processed 124000 instances.\n",
      "Processed 126000 instances.\n",
      "Processed 128000 instances.\n",
      "Processed 130000 instances.\n",
      "Processed 132000 instances.\n",
      "Processed 134000 instances.\n",
      "Processed 136000 instances.\n",
      "Processed 138000 instances.\n",
      "Processed 140000 instances.\n",
      "Processed 142000 instances.\n",
      "Processed 144000 instances.\n",
      "Processed 146000 instances.\n",
      "Processed 148000 instances.\n",
      "Processed 150000 instances.\n",
      "Processed 152000 instances.\n",
      "Processed 154000 instances.\n",
      "Processed 156000 instances.\n",
      "Processed 158000 instances.\n",
      "Processed 160000 instances.\n",
      "Processed 162000 instances.\n",
      "Processed 164000 instances.\n",
      "Processed 166000 instances.\n",
      "Processed 168000 instances.\n",
      "Processed 170000 instances.\n",
      "Processed 172000 instances.\n",
      "Processed 174000 instances.\n",
      "Processed 176000 instances.\n",
      "Processed 178000 instances.\n",
      "Processed 180000 instances.\n",
      "Processed 182000 instances.\n",
      "Processed 184000 instances.\n",
      "Processed 186000 instances.\n",
      "Processed 188000 instances.\n",
      "Processed 190000 instances.\n",
      "Processed 192000 instances.\n",
      "Processed 194000 instances.\n",
      "Processed 196000 instances.\n",
      "Processed 198000 instances.\n",
      "Processed 200000 instances.\n",
      "Processed 202000 instances.\n",
      "Processed 204000 instances.\n",
      "Processed 206000 instances.\n",
      "Processed 208000 instances.\n",
      "Processed 210000 instances.\n",
      "Processed 212000 instances.\n",
      "Processed 214000 instances.\n",
      "Processed 216000 instances.\n",
      "Processed 218000 instances.\n",
      "Processed 220000 instances.\n",
      "Processed 222000 instances.\n",
      "Processed 224000 instances.\n",
      "Processed 226000 instances.\n",
      "Processed 228000 instances.\n",
      "Processed 230000 instances.\n",
      "Processed 232000 instances.\n",
      "Processed 234000 instances.\n",
      "Processed 236000 instances.\n",
      "Processed 238000 instances.\n",
      "Processed 240000 instances.\n",
      "Processed 242000 instances.\n",
      "Processed 244000 instances.\n",
      "Processed 246000 instances.\n",
      "Processed 248000 instances.\n",
      "Processed 250000 instances.\n",
      "Processed 252000 instances.\n",
      "Processed 254000 instances.\n",
      "Processed 256000 instances.\n",
      "Processed 258000 instances.\n",
      "Processed 260000 instances.\n",
      "Processed 262000 instances.\n",
      "Processed 264000 instances.\n",
      "Processed 266000 instances.\n",
      "Processed 268000 instances.\n",
      "Processed 270000 instances.\n",
      "Processed 272000 instances.\n",
      "Processed 274000 instances.\n",
      "Processed 276000 instances.\n",
      "Processed 278000 instances.\n",
      "Processed 280000 instances.\n",
      "Processed 282000 instances.\n",
      "Processed 284000 instances.\n",
      "Processed 286000 instances.\n",
      "Processed 288000 instances.\n",
      "Processed 290000 instances.\n",
      "Processed 292000 instances.\n",
      "Processed 294000 instances.\n",
      "Processed 296000 instances.\n",
      "Processed 298000 instances.\n",
      "Processed 300000 instances.\n",
      "Processed 302000 instances.\n",
      "Processed 304000 instances.\n",
      "Processed 306000 instances.\n",
      "Processed 308000 instances.\n",
      "Processed 310000 instances.\n",
      "Processed 312000 instances.\n",
      "Processed 314000 instances.\n",
      "Processed 316000 instances.\n",
      "Processed 318000 instances.\n",
      "Processed 320000 instances.\n",
      "Processed 322000 instances.\n",
      "Processed 324000 instances.\n",
      "Processed 326000 instances.\n",
      "Processed 328000 instances.\n",
      "Processed 330000 instances.\n",
      "Processed 332000 instances.\n",
      "Processed 334000 instances.\n",
      "Processed 334933 instances.\n",
      "Found 4 clusters online.\n",
      "\n",
      "Train validation:\n",
      "====== Evaluation summary ======\n",
      "Confusion Matrix: TP: 2530, FP: 0, TN: 334933, FN: 4205\n",
      "Precision: 100.000, recall: 37.565, F1-measure: 54.614\n",
      "\n",
      "Test validation:\n",
      "====== Evaluation summary ======\n",
      "Confusion Matrix: TP: 3678, FP: 0, TN: 223290, FN: 6425\n",
      "Precision: 100.000, recall: 36.405, F1-measure: 53.378\n",
      "\n",
      "CPU times: user 29.2 s, sys: 46 ms, total: 29.3 s\n",
      "Wall time: 29.3 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "print(\"=\"*20 + \" Model: LogClustering \" + \"=\"*20)\n",
    "# Semi-supervised: clusters are built from normal training traffic only\n",
    "# (see the fit() call below); at evaluation time instances are presumably\n",
    "# flagged by distance to the learned clusters - confirm against\n",
    "# loglizer.models.LogClustering.\n",
    "max_dist = 0.3  # the threshold to stop the clustering process\n",
    "anomaly_threshold = 0.3  # the threshold for anomaly detection\n",
    "model = LogClustering(max_dist=max_dist, anomaly_threshold=anomaly_threshold)\n",
    "model.fit(x_train[y_train == 0, :])  # Use only normal samples for training\n",
    "print('Train validation:')\n",
    "precision, recall, f1 = model.evaluate(x_train, y_train)\n",
    "print('Test validation:')\n",
    "precision, recall, f1 = model.evaluate(x_test, y_test)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-08-12T09:47:04.296533Z",
     "start_time": "2024-08-12T09:47:04.283022Z"
    }
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "experiment",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  },
  "pycharm": {
   "stem_cell": {
    "cell_type": "raw",
    "metadata": {
     "collapsed": false
    },
    "source": []
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
