{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "strange-cartoon",
   "metadata": {},
   "source": [
    "# Poorly performing algorithms\n",
    "\n",
    "Analysis of poorly performing algorithms using the default parameters on the benchmark datasets. Quality/performance is evaluated using the ROC_AUC scores."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "elegant-fellow",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Automatically reload packages:\n",
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "directed-instruction",
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports\n",
    "import warnings\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import scipy as sp\n",
    "import plotly.offline as py\n",
    "import plotly.graph_objects as go\n",
    "import plotly.figure_factory as ff\n",
    "import plotly.express as px\n",
    "from plotly.subplots import make_subplots\n",
    "from pathlib import Path\n",
    "from timeeval import Datasets"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "synthetic-motivation",
   "metadata": {},
   "source": [
    "## Configuration"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "spiritual-emergency",
   "metadata": {},
   "outputs": [],
   "source": [
    "# constants and configuration\n",
    "data_path = Path(\"../data\") / \"test-cases\"\n",
    "result_path = Path(\"../results\") / \"2021-08-22_default-params-merged\"\n",
    "\n",
    "# load results\n",
    "# result_path = result_path / \"results\"\n",
    "print(f\"Reading results from {result_path.resolve()}\")\n",
    "\n",
    "df = pd.read_csv(result_path / \"results.csv\")\n",
    "df[\"dataset_name\"] = df[\"dataset\"].str.split(\".\").str[0]\n",
    "\n",
    "def load_scores_df(algorithm_name, dataset_id, repetition=1):\n",
    "    params_id = df.loc[(df[\"algorithm\"] == algorithm_name) & (df[\"collection\"] == dataset_id[0]) & (df[\"dataset\"] == dataset_id[1]) & (df[\"status\"] == \"Status.OK\"), \"hyper_params_id\"].item()\n",
    "    path = (\n",
    "        result_path /\n",
    "        algorithm_name /\n",
    "        params_id /\n",
    "        dataset_id[0] /\n",
    "        dataset_id[1] /\n",
    "        str(repetition) /\n",
    "        \"anomaly_scores.ts\"\n",
    "    )\n",
    "    return pd.read_csv(path, header=None)\n",
    "\n",
    "# load dataset metadata\n",
    "dmgr = Datasets(data_path)\n",
    "\n",
    "def plot_scores(algorithm_name, dataset_name):\n",
    "    if isinstance(algorithm_name, str):\n",
    "        algorithms = [algorithm_name]\n",
    "    else:\n",
    "        algorithms = algorithm_name\n",
    "    # construct dataset ID\n",
    "    dataset_id = (\"GutenTAG\", f\"{dataset_name}.unsupervised\")\n",
    "\n",
    "    # load dataset details\n",
    "    df_dataset = dmgr.get_dataset_df(dataset_id)\n",
    "\n",
    "    # check if dataset is multivariate\n",
    "    dataset_dim = df.loc[df[\"dataset_name\"] == dataset_name, \"dataset_input_dimensionality\"].unique().item()\n",
    "    dataset_dim = dataset_dim.lower()\n",
    "    \n",
    "    auroc = {}\n",
    "    df_scores = pd.DataFrame(index=df_dataset.index)\n",
    "    skip_algos = []\n",
    "    for algo in algorithms:\n",
    "        # get algorithm metric results\n",
    "        try:\n",
    "            auroc[algo] = df.loc[(df[\"algorithm\"] == algo) & (df[\"dataset_name\"] == dataset_name) & (df[\"status\"] == \"Status.OK\"), \"ROC_AUC\"].item()\n",
    "        except ValueError as e:\n",
    "            warnings.warn(f\"No ROC_AUC score found! Probably {algo} was not executed on {dataset_name}: {repr(e)}\")\n",
    "            auroc[algo] = -1\n",
    "            skip_algos.append(algo)\n",
    "            continue\n",
    "\n",
    "        # load scores\n",
    "        training_type = df.loc[df[\"algorithm\"] == algo, \"algo_training_type\"].values[0].lower().replace(\"_\", \"-\")\n",
    "        try:\n",
    "            df_scores[algo] = load_scores_df(algo, (\"GutenTAG\", f\"{dataset_name}.{training_type}\")).iloc[:, 0]\n",
    "        except (ValueError, FileNotFoundError) as e:\n",
    "            warnings.warn(f\"No scores found! Probably {algo} was not executed on {dataset_name}: {repr(e)}\")\n",
    "            df_scores[algo] = np.nan\n",
    "            skip_algos.append(algo)\n",
    "    algorithms = [a for a in algorithms if a not in skip_algos]\n",
    "\n",
    "    # Create plot\n",
    "    fig = make_subplots(2, 1)\n",
    "    if dataset_dim == \"multivariate\":\n",
    "        for i in range(1, df_dataset.shape[1]-1):\n",
    "            fig.add_trace(go.Scatter(x=df_dataset.index, y=df_dataset.iloc[:, i], name=f\"channel-{i}\"), 1, 1)\n",
    "    else:\n",
    "        fig.add_trace(go.Scatter(x=df_dataset.index, y=df_dataset.iloc[:, 1], name=\"timeseries\"), 1, 1)\n",
    "    fig.add_trace(go.Scatter(x=df_dataset.index, y=df_dataset[\"is_anomaly\"], name=\"label\"), 2, 1)\n",
    "    for algo in algorithms:\n",
    "        fig.add_trace(go.Scatter(x=df_scores.index, y=df_scores[algo], name=f\"{algo}={auroc[algo]:.4f}\"), 2, 1)\n",
    "    fig.update_xaxes(matches=\"x\")\n",
    "    fig.update_layout(\n",
    "        title=f\"Results of {','.join(algorithms)} on {dataset_name}\",\n",
    "        height=400\n",
    "    )\n",
    "    return py.iplot(fig)\n",
    "\n",
    "def plot_datasets(datasets, max_channels = 20):\n",
    "    if isinstance(datasets, str):\n",
    "        datasets = [datasets]\n",
    "    else:\n",
    "        datasets = datasets\n",
    "    n_datasets = len(datasets)\n",
    "    \n",
    "    # Create plot\n",
    "    fig = make_subplots(n_datasets, 1)\n",
    "    for i, d in enumerate(datasets):\n",
    "        # construct dataset ID\n",
    "        dataset_id = (\"GutenTAG\", f\"{d}.unsupervised\")\n",
    "        \n",
    "        # load dataset details\n",
    "        try:\n",
    "            df_dataset = dmgr.get_dataset_df(dataset_id)\n",
    "        except Exception as e:\n",
    "            warnings.warn(f\"Could not load dataset {d}, because {repr(e)}\")\n",
    "            continue\n",
    "\n",
    "        # get algorithm metric results\n",
    "        try:\n",
    "            auroc = df.loc[df[\"dataset_name\"] == d, \"ROC_AUC\"].median()\n",
    "        except ValueError:\n",
    "            warnings.warn(f\"No scores found for dataset {d} found!\")\n",
    "            auroc = -1\n",
    "            continue\n",
    "\n",
    "        for j in range(1, min(df_dataset.shape[1]-1, max_channels+1)):\n",
    "            fig.add_trace(go.Scatter(\n",
    "                x=df_dataset.index,\n",
    "                y=df_dataset.iloc[:, j],\n",
    "                name=f\"{d} channel {j} ({auroc:.4f})\",\n",
    "            ), i+1, 1)\n",
    "\n",
    "        # mark anomaly regions\n",
    "        s = df_dataset[\"is_anomaly\"].diff()\n",
    "        anomaly_regions = zip(s[s== 1].index, s[s == -1].index)\n",
    "        for s, e in anomaly_regions:\n",
    "            fig.add_vrect(x0=s-1, x1=e,\n",
    "                          exclude_empty_subplots=True,\n",
    "                          line_width=0,\n",
    "                          fillcolor=\"red\",\n",
    "                          opacity=0.3,\n",
    "                          annotation_text=\"anomaly\",\n",
    "                          annotation_position=\"top left\",\n",
    "                          row=i+1,\n",
    "                          col=1)\n",
    "\n",
    "#     fig.update_xaxes(matches=\"x\")\n",
    "    fig.update_layout(\n",
    "        title=f\"Datasets and ground truth of {','.join(datasets)} datasets\",\n",
    "        height=200*n_datasets if n_datasets > 1 else 400\n",
    "    )\n",
    "    return py.iplot(fig)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dominican-coupon",
   "metadata": {},
   "outputs": [],
   "source": [
    "def select(algo, dataset, column):\n",
    "    record = df[(df[\"algorithm\"] == algo) & (df[\"dataset_name\"] == dataset) & (df[\"status\"] == \"Status.OK\")]\n",
    "    return record[column], record"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "julian-produce",
   "metadata": {},
   "source": [
    "## Overview over bad performing algorithms"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "native-connectivity",
   "metadata": {},
   "source": [
    "#### Overall algorithm performance based on ROC_AUC"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "indonesian-button",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "aggregations = [\"min\", \"mean\", \"median\", \"max\"]\n",
    "df_overall_scores = df.pivot_table(index=\"algorithm\", values=\"ROC_AUC\", aggfunc=aggregations)\n",
    "df_overall_scores.columns = aggregations\n",
    "df_overall_scores = df_overall_scores.sort_values(by=\"median\", ascending=False)\n",
    "\n",
    "df_overall_scores.tail(10)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "unnecessary-strain",
   "metadata": {},
   "source": [
    "#### Algorithms that are bad on any dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "sought-lending",
   "metadata": {},
   "outputs": [],
   "source": [
    "threshold = 0.9\n",
    "df_tmp = df_overall_scores[df_overall_scores[\"max\"] < threshold]\n",
    "df_tmp.sort_values(by=\"max\", ascending=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "promotional-arlington",
   "metadata": {},
   "source": [
    "#### Algorithms that are bad on a simple point anomaly dataset \"sinus-type-extremum\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "possible-strand",
   "metadata": {},
   "outputs": [],
   "source": [
    "threshold = 0.6\n",
    "dataset = \"sinus-type-extremum\"\n",
    "df_tmp = df[(df[\"dataset_name\"] == dataset) & (df[\"ROC_AUC\"] < threshold)][[\"algorithm\", \"ROC_AUC\"]]\n",
    "df_tmp.sort_values(by=\"ROC_AUC\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "hybrid-glucose",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(df_tmp[\"algorithm\"].values, dataset)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "advised-relaxation",
   "metadata": {},
   "source": [
    "#### Algorithms that are bad on a simple subsequence anomaly dataset \"sinus-type-platform\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "silver-liechtenstein",
   "metadata": {},
   "outputs": [],
   "source": [
    "threshold = 0.6\n",
    "dataset = \"sinus-type-platform\"\n",
    "df_tmp = df[(df[\"dataset_name\"] == dataset) & (df[\"ROC_AUC\"] < threshold)][[\"algorithm\", \"ROC_AUC\"]]\n",
    "df_tmp.sort_values(by=\"ROC_AUC\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "decent-butter",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "plot_scores(df_tmp[\"algorithm\"].values, dataset)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "offshore-transport",
   "metadata": {},
   "source": [
    "## Detailed inspection of poorly performing algorithms\n",
    "\n",
    "List of algorithms to inspect (based on above criteria):\n",
    "\n",
    "- [KMeans](#KMeans)\n",
    "- [AutoEncoder](#(Denoising-)AutoEncoder)\n",
    "- [**Bagel**](#Bagel)\n",
    "- [**DBStream**](#DBStream)\n",
    "- [DenoisingAutoEncoder](#(Denoising-)AutoEncoder)\n",
    "- [DSPOT](#DSPOT)\n",
    "- [FFT](#FFT)\n",
    "- [**HOT SAX**](#HOT-SAX)\n",
    "- [Isolation Forest - Local Outlier Factor](#Isolation-Forest---Local-Outier-Factor)\n",
    "- [LOF](#LOF)\n",
    "- [MedianMethod](#MedianMethod)\n",
    "- [**MultiHMM**](#MultiHMM)\n",
    "- [NormA](#NormA)\n",
    "- [NumentaHTM](#NumentaHTM)\n",
    "- [PCC](#PCC)\n",
    "- [PCI](#PCI)\n",
    "- [PST](#PST)\n",
    "- [Robust PCA](#Robust-PCA)\n",
    "- [SR-CNN](#SR-CNN)\n",
    "- [SSA](#SSA)\n",
    "- [Subsequence LOF](#Subsequence-LOF)\n",
    "- [**TARZAN**](#TARZAN)\n",
    "- [**TripleES**](#TripleES)\n",
    "- [TSBitmap](#TSBitmap)\n",
    "- [XGBoost Regressor](#XGBoost-Regressor)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "informed-diversity",
   "metadata": {},
   "source": [
    "### COF, LOF, CBLOF"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "armed-calgary",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"CBLOF\"\n",
    "df_overall_scores.loc[algo]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "australian-potato",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_tmp = df.pivot_table(index=\"algorithm\", values=\"RANGE_PR_AUC\", aggfunc=aggregations)\n",
    "df_tmp.columns = aggregations\n",
    "df_tmp.loc[algo]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ordinary-calvin",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"sinus-type-extremum\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "color-tobago",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"ecg-type-mean\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "failing-nirvana",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"sinus-noise-01%\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "external-season",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"rw-diff-count-6\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "religious-diesel",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores([algo, \"KMeans\", \"DWT-MLEAD\"], \"poly-combined-diff-2\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "published-murder",
   "metadata": {},
   "source": [
    "### HBOS\n",
    "\n",
    "- same as for LOF"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "herbal-request",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"HBOS\"\n",
    "plot_scores(algo, \"poly-combined-diff-2\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "blocked-desire",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"sinus-noise-10%\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "victorian-solomon",
   "metadata": {},
   "outputs": [],
   "source": [
    "select(algo, \"sinus-noise-10%\", [\"ROC_AUC\", \"RANGE_PR_AUC\"])[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "different-repeat",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"sinus-diff-count-5\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "advisory-survivor",
   "metadata": {},
   "outputs": [],
   "source": [
    "select(algo, \"sinus-diff-count-5\", \"RANGE_PR_AUC\")[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bearing-motion",
   "metadata": {},
   "outputs": [],
   "source": [
    "select(\"Random\", \"sinus-diff-count-5\", \"RANGE_PR_AUC\")[0]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7632f5a8",
   "metadata": {},
   "source": [
    "### KMeans\n",
    "\n",
    "- `window_size` should actually be `anomaly_window_size`, because if `window_size` is too small (for small period sizes), then the anomaly is not correctly detected"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2b35a52d",
   "metadata": {},
   "outputs": [],
   "source": [
    "df[(df[\"algorithm\"] == \"KMeans\") & (df[\"dataset_name\"] == \"sinus-type-platform\")]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "practical-bottom",
   "metadata": {},
   "source": [
    "### DBStream\n",
    "\n",
    "- sometimes the scores seem to be inverted, other times the scores are correct\n",
    "- score inversion does not depend on dataset, but can be seen within a single dataset containing multiple anomalies (e.g. `ecg-diff-count-4`)\n",
    "- Scores are higher at the beginning and decrease over time. This is likely due to the streaming character. (non-issue)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "coupled-basic",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"DBStream\", \"sinus-type-mean\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "greek-postage",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"DBStream\", \"ecg-diff-count-4\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "alpine-territory",
   "metadata": {},
   "outputs": [],
   "source": [
    "df.loc[(df[\"algorithm\"] == \"DBStream\") & (df[\"dataset_name\"] == \"ecg-diff-count-6\"), \"hyper_params\"].item()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "protective-gravity",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_path(algorithm_name, dataset_id, repetition=1):\n",
    "    params_id = df.loc[(df[\"algorithm\"] == algorithm_name) & (df[\"collection\"] == dataset_id[0]) & (df[\"dataset\"] == dataset_id[1]), \"hyper_params_id\"].item()\n",
    "    path = (\n",
    "        result_path /\n",
    "        algorithm_name /\n",
    "        params_id /\n",
    "        dataset_id[0] /\n",
    "        dataset_id[1] /\n",
    "        str(repetition)\n",
    "    )\n",
    "    return path\n",
    "dd = pd.read_csv(get_path(\"DBStream\", (\"GutenTAG\", \"ecg-diff-count-6.unsupervised\")) / \"docker-algorithm-scores.csv\", header=None)\n",
    "fig = go.Figure()\n",
    "fig.add_trace(go.Scatter(\n",
    "    x=dd.index,\n",
    "    y=dd.iloc[:, 0],\n",
    "    name=\"scores\",\n",
    "))\n",
    "fig.update_layout(\n",
    "    title={\"text\":\"DBStream original scores\", \"xanchor\": \"center\", \"x\": 0.5},\n",
    "    xaxis_title=\"AUC_ROC score\",\n",
    "    legend_title=\"Algorithms\"\n",
    ")\n",
    "py.iplot(fig)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "intensive-gamma",
   "metadata": {},
   "source": [
    "### TARZAN\n",
    "\n",
    "- Anomaly scores seem to be shifted far to the left.\n",
    "- Additional noise confuses algorithm (almost all of our datasets have noise)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "greenhouse-clock",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"TARZAN\", \"sinus-type-mean\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "imposed-combat",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"TARZAN\", \"sinus-noise-00%\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "imposed-entertainment",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"TARZAN\", \"sinus-noise-10%\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "portable-planning",
   "metadata": {},
   "outputs": [],
   "source": [
    "res, rec = select(\"TARZAN\", \"sinus-noise-10%\", \"ROC_AUC\")\n",
    "rec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "blind-mediterranean",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"TARZAN\", \"sinus-diff-count-5\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "infectious-dining",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"TARZAN\", \"sinus-type-frequency\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "hydraulic-programming",
   "metadata": {},
   "source": [
    "### Bagel\n",
    "\n",
    "- seems to work better on non-periodic datasets (poly, rw) than the sinus and ecg base oscillations\n",
    "- maybe parameters are bad or training data is not enough, so that algorithm cannot correctly learn the recurring patterns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5b9fe7b0",
   "metadata": {},
   "outputs": [],
   "source": [
    "df[(df[\"algorithm\"] == \"Bagel\") & (df[\"dataset_name\"] == \"sinus-type-mean\")][\"hyper_params\"].item()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "neither-apache",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"Bagel\", \"sinus-type-mean\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "unauthorized-syndrome",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "plot_scores(\"Bagel\", \"rw-type-variance\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "higher-constitution",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"Bagel\", \"poly-type-variance\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "overall-toronto",
   "metadata": {},
   "source": [
    "### (Denoising-)AutoEncoder\n",
    "\n",
    "- They are just very bad?!\n",
    "- I guess, we could exclude them. They don't have a very good implementation and there is no real paper behind them!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "supported-transsexual",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores([\"AutoEncoder\", \"DenoisingAutoEncoder\"], \"sinus-type-mean\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "clear-farmer",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores([\"AutoEncoder\", \"DenoisingAutoEncoder\"], \"poly-type-mean\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "downtown-brazilian",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores([\"AutoEncoder\", \"DenoisingAutoEncoder\"], \"ecg-type-platform\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "thermal-union",
   "metadata": {},
   "source": [
    "### DSPOT\n",
    "\n",
    "- has binary output for each point: anomaly or no anomaly\n",
    "- the current metrics do not capture this correctly\n",
    "- does DSPOT fit into our evaluation scheme?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "promotional-superior",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"hyper params:\")\n",
    "df.loc[(df[\"algorithm\"] == \"DSPOT\") & (df[\"dataset_name\"] == \"sinus-type-mean\"), \"hyper_params\"].item()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "quantitative-clerk",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"DSPOT\", \"sinus-type-mean\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "charming-relation",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"DSPOT\", \"poly-type-mean\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "social-albania",
   "metadata": {},
   "source": [
    "### FFT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "featured-joseph",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"FFT\"\n",
    "dataset = \"ecg-diff-count-5\"\n",
    "plot_scores(algo, dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "acute-shepherd",
   "metadata": {},
   "outputs": [],
   "source": [
    "df[(df[\"algorithm\"] == algo) & (df[\"dataset_name\"] == dataset)][\"hyper_params\"].item()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "pressing-indian",
   "metadata": {},
   "outputs": [],
   "source": [
    "param_id = df[(df[\"algorithm\"] == algo) & (df[\"dataset_name\"] == dataset)][\"hyper_params_id\"].item()\n",
    "path = result_path / algo / param_id / \"GutenTAG\" / (dataset + \".unsupervised\") / \"1\" / \"execution.log\"\n",
    "with path.open() as fh:\n",
    "    print(\"\".join(fh.readlines()))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "clean-kuwait",
   "metadata": {},
   "source": [
    "### LOF"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "composite-tablet",
   "metadata": {},
   "source": [
    "### MedianMethod"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "civic-harris",
   "metadata": {},
   "source": [
    "### MultiHMM"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "falling-oklahoma",
   "metadata": {},
   "source": [
    "### NormA\n",
    "\n",
    "- scores look very broken --> **INVESTIGATE**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "general-cathedral",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"NormA\"\n",
    "dataset = \"sinus-diff-count-5\"\n",
    "print(f\"Execution Status: {select(algo, dataset, 'status')[0].item()}\")\n",
    "print(f\"Params: {select(algo, dataset, 'hyper_params')[0].item()}\")\n",
    "print(f\"Time:  {select(algo, dataset, 'train_main_time')[0].item()}/{select(algo, dataset, 'execute_main_time')[0].item()} seconds\")\n",
    "plot_scores(algo, dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "directed-substance",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"ecg-type-mean\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "primary-anaheim",
   "metadata": {},
   "source": [
    "### NumentaHTM"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "round-infrared",
   "metadata": {},
   "source": [
    "### PCC"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "certain-spanish",
   "metadata": {},
   "source": [
    "### PCI"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "affecting-dynamics",
   "metadata": {},
   "source": [
    "### PST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "golden-pocket",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_overall_scores.loc[\"PST\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "stretch-space",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_tmp = df[(df[\"algorithm\"] == \"PST\") & (df[\"status\"] == \"Status.OK\")][[\"dataset_name\", \"execute_main_time\", \"RANGE_PR_AUC\", \"ROC_AUC\"]]\n",
    "df_tmp = df_tmp[(df_tmp[\"ROC_AUC\"] < 0.8) & (~df[\"dataset_name\"].str.startswith(\"rw\"))]\n",
    "df_tmp.sort_values(by=\"ROC_AUC\", ascending=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "wrong-coast",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"PST\"\n",
    "dataset = \"sinus-diff-count-9\"\n",
    "print(f\"Execution Status: {select(algo, dataset, 'status')[0].item()}\")\n",
    "print(f\"Params: {select(algo, dataset, 'hyper_params')[0].item()}\")\n",
    "print(f\"Time:  {select(algo, dataset, 'train_main_time')[0].item()}/{select(algo, dataset, 'execute_main_time')[0].item()} seconds\")\n",
    "plot_scores(algo, dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dynamic-belize",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"ecg-type-amplitude\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "third-stereo",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"sinus-type-extremum\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "crude-chocolate",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"sinus-type-platform\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "frank-subscriber",
   "metadata": {},
   "source": [
    "### Robust PCA"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "collect-discount",
   "metadata": {},
   "source": [
    "### SR-CNN"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "biblical-script",
   "metadata": {},
   "source": [
    "### SSA\n",
    "\n",
    "- very sensitive to `window_size` parameter (should be set to `2x period`)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "substantial-copying",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"SSA\"\n",
    "dataset = \"sinus-diff-count-5\"\n",
    "print(f\"Execution Status: {select(algo, dataset, 'status')[0].item()}\")\n",
    "print(f\"Params: {select(algo, dataset, 'hyper_params')[0].item()}\")\n",
    "print(f\"Time:  {select(algo, dataset, 'train_main_time')[0].item()}/{select(algo, dataset, 'execute_main_time')[0].item()} seconds\")\n",
    "plot_scores(algo, dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "given-country",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(f\"Execution Status: {select(algo, 'ecg-diff-count-5', 'hyper_params')[0].item()}\")\n",
    "plot_scores(algo, \"ecg-diff-count-5\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "rising-rescue",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(f\"Execution Status: {select(algo, 'poly-diff-count-5', 'hyper_params')[0].item()}\")\n",
    "plot_scores(algo, \"poly-diff-count-5\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "instrumental-enzyme",
   "metadata": {},
   "source": [
    "### Subsequence LOF"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fundamental-australia",
   "metadata": {},
   "source": [
    "### TripleES"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "marine-thailand",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"TripleES\"\n",
    "dataset = \"sinus-diff-count-9\"\n",
    "print(f\"Execution Status: {select(algo, dataset, 'status')[0].item()}\")\n",
    "print(f\"Params: {select(algo, dataset, 'hyper_params')[0].item()}\")\n",
    "print(f\"Time:  {select(algo, dataset, 'train_main_time')[0].item()}/{select(algo, dataset, 'execute_main_time')[0].item()} seconds\")\n",
    "plot_scores(algo, dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "addressed-transcript",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "print(select(algo, \"sinus-type-mean\", \"hyper_params\")[0].item())\n",
    "plot_scores(algo, \"sinus-type-mean\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "comparative-click",
   "metadata": {},
   "source": [
    "### TSBitmap"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "deadly-preservation",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"TSBitmap\"\n",
    "dataset = \"sinus-diff-count-9\"\n",
    "print(f\"Execution Status: {select(algo, dataset, 'status')[0].item()}\")\n",
    "print(f\"Params: {select(algo, dataset, 'hyper_params')[0].item()}\")\n",
    "print(f\"Time:  {select(algo, dataset, 'train_main_time')[0].item()}/{select(algo, dataset, 'execute_main_time')[0].item()} seconds\")\n",
    "plot_scores(algo, dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "reduced-mission",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"sinus-type-variance\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "liable-andorra",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"ecg-noise-01%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "plain-logic",
   "metadata": {},
   "source": [
    "### XGBoost Regressor\n",
    "\n",
    "- learning rate was too small, so that algorithm could not really learn the training data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "impressed-value",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"XGBoost Regressor\"\n",
    "dataset = \"ecg-diff-count-5\"\n",
    "plot_scores(algo, dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "opened-contractor",
   "metadata": {},
   "outputs": [],
   "source": [
    "df[(df[\"algorithm\"] == algo) & (df[\"dataset_name\"] == dataset)][\"hyper_params\"].item()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "august-victoria",
   "metadata": {},
   "source": [
    "### Fast-MCD\n",
    "\n",
    "- regards every point as a (multidimensional in the case of multivariate data) single object and estimates the covariance matrix from a clean training dataset\n",
    "- comparing just the different points does not work well\n",
    "- we introduced a variant that works on univariate data and regards a subsequence as an object: `Subsequence Fast-MCD`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "verified-geography",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"FastMCD\", \"sinus-type-mean\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "capable-parish",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"FastMCD\", \"sinus-type-frequency\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "forty-motel",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(\"FastMCD\", \"poly-type-mean\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "naked-arbitration",
   "metadata": {},
   "outputs": [],
   "source": [
    "df[(df[\"algorithm\"] == \"FastMCD\")].iloc[0][\"hyper_params\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "brilliant-ukraine",
   "metadata": {},
   "source": [
    "### HOT-SAX\n",
    "\n",
    "- in the implementation of HOT-SAX (the code, where we call it from), we only mark the starting index instead of the whole window as anomalous\n",
    "- runtime comparison might not be fair, because we let HOT-SAX search for all discords\n",
    "  - acutally, we know the number of anomalies in the dataset\n",
    "  - just letting HOT-SAX search for a specific number of anomalies result in bad scores, because two discords could sit within the same anomaly window"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "african-sacramento",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"HOT SAX\"\n",
    "dataset = \"sinus-diff-count-5\"\n",
    "print(f\"Execution Status: {select(algo, dataset, 'status')[0].item()}\")\n",
    "print(f\"Params: {select(algo, dataset, 'hyper_params')[0].item()}\")\n",
    "print(f\"Time:  {select(algo, dataset, 'train_main_time')[0].item()}/{select(algo, dataset, 'execute_main_time')[0].item()} seconds\")\n",
    "plot_scores(algo, dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "pharmaceutical-rings",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(f\"Status {select(algo, 'rw-type-extremum', 'status')[0].item()}\")\n",
    "plot_scores(algo, \"rw-type-extremum\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "marine-polymer",
   "metadata": {},
   "source": [
    "### KNN\n",
    "\n",
    "- same issues as all point-based methods"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "american-operations",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"KNN\"\n",
    "dataset = \"sinus-diff-count-5\"\n",
    "print(f\"Execution Status: {select(algo, dataset, 'status')[0].item()}\")\n",
    "print(f\"Params: {select(algo, dataset, 'hyper_params')[0].item()}\")\n",
    "print(f\"Time:  {select(algo, dataset, 'train_main_time')[0].item()}/{select(algo, dataset, 'execute_main_time')[0].item()} seconds\")\n",
    "plot_scores(algo, dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aquatic-spokesman",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"ecg-type-variance\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "alleged-badge",
   "metadata": {},
   "source": [
    "### OmniAnomaly"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "biological-right",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"OmniAnomaly\"\n",
    "dataset = \"sinus-type-platform\"\n",
    "print(f\"Execution Status: {select(algo, dataset, 'status')[0].item()}\")\n",
    "print(f\"Params: {select(algo, dataset, 'hyper_params')[0].item()}\")\n",
    "print(f\"Time:  {select(algo, dataset, 'train_main_time')[0].item()}/{select(algo, dataset, 'execute_main_time')[0].item()} seconds\")\n",
    "plot_scores(algo, dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "artistic-consideration",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"sinus-type-mean\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "statistical-persian",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"sinus-type-mean\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "alien-beginning",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"sinus-type-variance\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "neural-heading",
   "metadata": {},
   "source": [
    "### Median method\n",
    "\n",
    "- is good for poly and rw based datasets\n",
    "- has no way to capture a seasonal aspect and therefore fails for those datasets and anomalies that break the cycles\n",
    "- smaller window sizes (smaller contextes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "certain-harmony",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"MedianMethod\"\n",
    "dataset = \"sinus-type-platform\"\n",
    "print(f\"Execution Status: {select(algo, dataset, 'status')[0].item()}\")\n",
    "print(f\"Params: {select(algo, dataset, 'hyper_params')[0].item()}\")\n",
    "print(f\"Time:  {select(algo, dataset, 'train_main_time')[0].item()}/{select(algo, dataset, 'execute_main_time')[0].item()} seconds\")\n",
    "plot_scores(algo, dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "herbal-buddy",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"ecg-diff-count-5\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fourth-replacement",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"poly-diff-count-5\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "commercial-winter",
   "metadata": {},
   "outputs": [],
   "source": [
    "df_tmp = df[(df[\"algorithm\"] == algo) & (df[\"dataset_name\"].str.startswith(\"poly\"))][[\"dataset_name\", \"execute_main_time\", \"RANGE_PR_AUC\", \"ROC_AUC\"]]\n",
    "df_tmp.sort_values(by=\"ROC_AUC\", ascending=False, inplace=True)\n",
    "df_tmp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "heavy-valve",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores([algo, \"STOMP\", \"Subsequence LOF\"], \"poly-diff-count-5\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "successful-penetration",
   "metadata": {},
   "source": [
    "### S-H-ESD\n",
    "\n",
    "- Annotates points\n",
    "- Cannot deal with non-periodic data (it even assumes timestamps)\n",
    "- Cannot deal with ECG data despite its periodicity\n",
    "- Cannot deal with trends in the signal"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "compatible-webcam",
   "metadata": {},
   "outputs": [],
   "source": [
    "algo = \"S-H-ESD\"\n",
    "dataset = \"sinus-diff-count-5\"\n",
    "print(f\"Execution Status: {select(algo, dataset, 'status')[0].item()}\")\n",
    "print(f\"Params: {select(algo, dataset, 'hyper_params')[0].item()}\")\n",
    "print(f\"Time:  {select(algo, dataset, 'train_main_time')[0].item()}/{select(algo, dataset, 'execute_main_time')[0].item()} seconds\")\n",
    "plot_scores(algo, dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "afraid-cookbook",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"sinus-type-mean\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "imposed-daniel",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"sinus-position-middle\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "modified-boston",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"ecg-same-count-1\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "greater-racing",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_scores(algo, \"poly-type-extremum\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "human-reply",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
