{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "sys.path.append('../../common/')\n",
    "from evaluator import *\n",
    "import warnings\n",
    "import os\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from lstmvae import VAE_LSTM\n",
    "warnings.filterwarnings(\"ignore\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_datase(dataset):\n",
    "    \"\"\"Load preprocessed train/test/label arrays for ``dataset``.\n",
    "\n",
    "    Reads train.npy, test.npy and labels.npy from ../../processed/<dataset>.\n",
    "    Only the first label column is kept, reshaped to (n, 1).\n",
    "    NOTE: the name keeps the original typo ('load_datase') because the\n",
    "    cells below call it under this name.\n",
    "    \"\"\"\n",
    "    folder = os.path.join(\"../../processed\", dataset)\n",
    "    if not os.path.exists(folder):\n",
    "        # Specific OSError subclass instead of bare Exception, so callers\n",
    "        # can distinguish missing data from other failures (still caught\n",
    "        # by any existing `except Exception`).\n",
    "        raise FileNotFoundError(\"Processed Data not found.\")\n",
    "    loader = []\n",
    "    for file in [\"train\", \"test\", \"labels\"]:\n",
    "        loader.append(np.load(os.path.join(folder, f\"{file}.npy\")))\n",
    "    ## Prepare the data\n",
    "    train_data = loader[0]\n",
    "    test_data = loader[1]\n",
    "    labels = loader[2][:,0].reshape(-1,1)\n",
    "    return train_data, test_data, labels\n",
    "def convert_to_windows(data, window_size):\n",
    "    \"\"\"Return one window of ``window_size`` rows per row of ``data``.\n",
    "\n",
    "    While enough rows remain, the window looks forward from i; near the\n",
    "    end it is taken backward (rows i-window_size+1 .. i) so every window\n",
    "    has exactly ``window_size`` rows. (Currently unused in this notebook.)\n",
    "    \"\"\"\n",
    "    windows = []\n",
    "    length  = data.shape[0]\n",
    "    for i in range(length):\n",
    "        if length - i >= window_size:\n",
    "            window = data[i:i+window_size]\n",
    "        else:\n",
    "            window = data[i-window_size+1:i+1]\n",
    "        windows.append(window)\n",
    "    return np.array(windows)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## SWaT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 131,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the preprocessed SWaT arrays and wrap the features as DataFrames.\n",
    "train_data, test_data, labels = load_datase(\"SWaT\")\n",
    "train_df, test_df = map(pd.DataFrame, (train_data, test_data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 143,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:`tf.nn.rnn_cell.MultiRNNCell` is deprecated. This class is equivalent as `tf.keras.layers.StackedRNNCells`, and will be replaced by that in Tensorflow 2.0.\n",
      "\n",
      "\n",
      "Training...\n",
      "\n",
      "\n",
      "Epoch   1    Loss 5435.25066\n",
      "\n",
      "\n",
      "Training time 1.02 minutes\n"
     ]
    }
   ],
   "source": [
    "# SWaT hyper-parameters: short 12-step windows, small latent bottleneck.\n",
    "algo = VAE_LSTM(\n",
    "    sequence_length=12,\n",
    "    intermediate_dim=5,\n",
    "    z_dim=40,\n",
    "    n_dim=train_df.shape[1],\n",
    "    num_epochs=5,\n",
    "    lr=1e-3,\n",
    "    batch_size=6020,\n",
    "    verbose=False,\n",
    ")\n",
    "algo.fit(train_df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 152,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-timestep reconstruction errors, averaged across features -> 1-D score.\n",
    "errors = algo.predict(test_df)['error_tc']\n",
    "anomaly_score = np.mean(errors, axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 153,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "without adjust\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'f1-score': 0.8103714856367935,\n",
       " 'precision': 0.9663531020141602,\n",
       " 'recall': 0.6977536111206764,\n",
       " 'TP': 38112.0,\n",
       " 'TN': 393971.0,\n",
       " 'FP': 1327.0,\n",
       " 'FN': 16509.0,\n",
       " 'threshold': 0.286147478179861}"
      ]
     },
     "execution_count": 153,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): the cached output below still reads \"without adjust\" --\n",
    "# outputs are stale relative to this source; re-run the notebook to refresh.\n",
    "print(\"point adjust\")\n",
    "y_true, y_score = labels.squeeze(), anomaly_score.squeeze()\n",
    "bf_search(y_true, y_score, is_adjust=True, verbose=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 154,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "point adjust\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'f1-score': 0.7421235439956759,\n",
       " 'precision': 0.9412840420489297,\n",
       " 'recall': 0.6125299791998443,\n",
       " 'TP': 33457.0,\n",
       " 'TN': 393211.0,\n",
       " 'FP': 2087.0,\n",
       " 'FN': 21164.0,\n",
       " 'threshold': 0.28049156562924704}"
      ]
     },
     "execution_count": 154,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): the cached output below still reads \"point adjust\" --\n",
    "# outputs are stale relative to this source; re-run the notebook to refresh.\n",
    "print(\"without adjust\")\n",
    "y_true, y_score = labels.squeeze(), anomaly_score.squeeze()\n",
    "bf_search(y_true, y_score, is_adjust=False, verbose=0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## WADI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the preprocessed WADI arrays and wrap the features as DataFrames.\n",
    "train_data, test_data, labels = load_datase(\"WADI\")\n",
    "train_df, test_df = map(pd.DataFrame, (train_data, test_data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/user/yjq/usad-ml/compared-algorithms/lstmvae/lstm_vae/lstm_vae_model.py:46: DatasetV1.make_initializable_iterator (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "This is a deprecated API that should only be used in TF 1 graph mode and legacy TF 2 graph mode available through `tf.compat.v1`. In all other situations -- namely, eager mode and inside `tf.function` -- you can consume dataset elements using `for elem in dataset: ...` or by explicitly creating iterator via `iterator = iter(dataset)` and fetching its elements via `values = next(iterator)`. Furthermore, this API is not available in TF 2. During the transition from TF 1 to TF 2 you can use `tf.compat.v1.data.make_initializable_iterator(dataset)` to create a TF 1 graph mode style iterator for a dataset created through TF 2 APIs. Note that this should be a transient state of your code base as there are in general no guarantees about the interoperability of TF 1 and TF 2 code.\n",
      "WARNING:tensorflow:From /home/user/yjq/usad-ml/compared-algorithms/lstmvae/lstm_vae/lstm_vae_model.py:120: dynamic_rnn (from tensorflow.python.ops.rnn) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `keras.layers.RNN(cell)`, which is equivalent to this API\n",
      "WARNING:tensorflow:From /home/user/miniconda3/envs/yjq-3.6/lib/python3.6/site-packages/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py:981: calling Zeros.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Call initializer instance with the dtype argument instead of passing it to the constructor\n",
      "WARNING:tensorflow:`tf.nn.rnn_cell.MultiRNNCell` is deprecated. This class is equivalent as `tf.keras.layers.StackedRNNCells`, and will be replaced by that in Tensorflow 2.0.\n",
      "\n",
      "\n",
      "Training...\n",
      "\n",
      "\n",
      "Epoch   1    Loss 6485.59011\n",
      "\n",
      "\n",
      "Training time 2.13 minutes\n"
     ]
    }
   ],
   "source": [
    "# WADI hyper-parameters: longer 100-step windows sampled with stride 10;\n",
    "# the learning rate is left at the class default here.\n",
    "algo = VAE_LSTM(\n",
    "    sequence_length=100,\n",
    "    intermediate_dim=20,\n",
    "    z_dim=50,\n",
    "    n_dim=train_df.shape[1],\n",
    "    num_epochs=5,\n",
    "    batch_size=8000,\n",
    "    verbose=False,\n",
    "    stride=10,\n",
    ")\n",
    "algo.fit(train_df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-timestep reconstruction errors, averaged across features -> 1-D score.\n",
    "errors = algo.predict(test_df)['error_tc']\n",
    "anomaly_score = np.mean(errors, axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "without adjust\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'f1-score': 0.268407234419442,\n",
       " 'precision': 0.9871794555391201,\n",
       " 'recall': 0.15532022110277247,\n",
       " 'TP': 308.0,\n",
       " 'TN': 32574.0,\n",
       " 'FP': 4.0,\n",
       " 'FN': 1675.0,\n",
       " 'threshold': 2.478954431047747}"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Best-F1 threshold search without the point-adjust protocol.\n",
    "print(\"without adjust\")\n",
    "y_true, y_score = labels.squeeze(), anomaly_score.squeeze()\n",
    "bf_search(y_true, y_score, is_adjust=False, verbose=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "point adjust\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'f1-score': 0.3325804452486329,\n",
       " 'precision': 0.6462882001995895,\n",
       " 'recall': 0.22390317587542524,\n",
       " 'TP': 444.0,\n",
       " 'TN': 32335.0,\n",
       " 'FP': 243.0,\n",
       " 'FN': 1539.0,\n",
       " 'threshold': 2.4624872759880274}"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Best-F1 threshold search with the point-adjust protocol.\n",
    "print(\"point adjust\")\n",
    "y_true, y_score = labels.squeeze(), anomaly_score.squeeze()\n",
    "bf_search(y_true, y_score, is_adjust=True, verbose=0)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "yjq-3.6",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
