{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# MIT-BIH Arrhythmia Database (_mitdb_)\n",
    "\n",
    "Part of the ECG Database Collection:\n",
    "\n",
    "| Short Name | Long Name |\n",
    "| :--- | :--- |\n",
    "| _mitdb_ | MIT-BIH Arrhythmia Database |\n",
    "| _svdb_ | MIT-BIH Supraventricular Arrhythmia Database |\n",
    "| _ltdb_ | MIT-BIH Long-Term ECG Database |\n",
    "\n",
    "[Documentation](https://wfdb.readthedocs.io/en/latest) for the `wfdb` package."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import wfdb\n",
    "import os\n",
    "from typing import Final\n",
    "from collections.abc import Callable\n",
    "import matplotlib.pyplot as plt\n",
    "from config import data_raw_folder, data_processed_folder\n",
    "from timeeval import Datasets\n",
    "from IPython.display import display, Markdown, Latex"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking for source datasets in /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB and\n",
      "saving processed datasets in /home/projects/akita/data/benchmark-data/data-processed\n"
     ]
    }
   ],
   "source": [
    "from pathlib import Path\n",
    "\n",
    "# Collection configuration: where the raw WFDB records live and where the\n",
    "# processed CSV datasets are written.\n",
    "dataset_collection_name = \"MITDB\"\n",
    "source_folder = os.path.join(data_raw_folder, \"MIT-BIH Arrhythmia DB\")\n",
    "target_folder = data_processed_folder\n",
    "\n",
    "print(f\"Looking for source datasets in {Path(source_folder).absolute()} and\\nsaving processed datasets in {Path(target_folder).absolute()}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_dataset_names() -> list[str]:\n",
    "    \"\"\"Read the record names of this collection from the RECORDS index file.\"\"\"\n",
    "    records_path = os.path.join(source_folder, \"RECORDS\")\n",
    "    with open(records_path, 'r') as records_file:\n",
    "        return [line.rstrip('\\n') for line in records_file]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# WFDB annotation symbols, grouped by how they are treated during labeling\n",
    "ann_normal = [\"N\", \"/\", \"L\", \"R\"]\n",
    "ann_beat = [\"F\", \"f\", \"S\", \"A\", \"a\", \"V\", \"J\", \"j\", \"E\", \"e\"]\n",
    "ann_no_beat = [\"x\"]\n",
    "ann_fibr_start = \"[\"\n",
    "ann_fibr_end = \"]\"\n",
    "ann_fibr = [ann_fibr_start, \"!\", ann_fibr_end]\n",
    "ann_ext = [\"Q\", \"|\"]\n",
    "ann_ignore = [\"+\", \"~\", '\"']\n",
    "\n",
    "def transform_and_label(source_file: str, target: str) -> int:\n",
    "    \"\"\"Transform a WFDB record into a labeled CSV time series.\n",
    "\n",
    "    Loads the signal and its beat annotations, derives anomaly windows\n",
    "    around the anomalous annotations, marks every sample inside such a\n",
    "    window with ``is_anomaly = 1``, and writes the result to `target`.\n",
    "    Returns the signal length (number of samples) of the record.\n",
    "    \"\"\"\n",
    "    print(f\"Transforming {os.path.basename(source_file)}\")\n",
    "    # load dataset\n",
    "    record = wfdb.rdrecord(source_file)\n",
    "    df_record = pd.DataFrame(record.p_signal, columns=record.sig_name)\n",
    "    print(f\"  record {record.file_name[0]} loaded\")\n",
    "\n",
    "    # load annotation file\n",
    "    atr = wfdb.rdann(source_file, \"atr\")\n",
    "    assert record.fs == atr.fs, \"Sample frequency of records and annotations does not match!\"\n",
    "    df_annotation = pd.DataFrame({\"position\": atr.sample, \"label\": atr.symbol})\n",
    "    # remove ignored annotations\n",
    "    df_annotation = df_annotation[~df_annotation[\"label\"].isin(ann_ignore)]\n",
    "    df_annotation = df_annotation.reset_index(drop=True)\n",
    "    print(f\"  {len(df_annotation)}/{atr.ann_len} beat annotations for {source_file} loaded (others were ignored)\")\n",
    "\n",
    "    # calculate normal beat length\n",
    "    print(\"  preparing windows for labeling...\")\n",
    "    df_normal_beat = df_annotation.copy()\n",
    "    df_normal_beat[\"prev_position\"] = df_annotation[\"position\"].shift()\n",
    "    df_normal_beat[\"prev_label\"] = df_annotation[\"label\"].shift()\n",
    "    df_normal_beat = df_normal_beat[(df_normal_beat[\"label\"].isin(ann_normal)) & (df_normal_beat[\"prev_label\"].isin(ann_normal))]\n",
    "    df_normal_beat = df_normal_beat.drop(columns=[\"label\", \"prev_label\"])\n",
    "    s_normal_beat_lengths = df_normal_beat[\"position\"] - df_normal_beat[\"prev_position\"]\n",
    "    print(f\"    normal beat distance samples = {len(s_normal_beat_lengths)}\")\n",
    "    # use the median distance between consecutive normal beats as window size\n",
    "    # (forced to an odd number so the margin is symmetric)\n",
    "    normal_beat_length = s_normal_beat_lengths.median()\n",
    "    if (normal_beat_length % 2) == 0:\n",
    "        normal_beat_length += 1\n",
    "    beat_window_size = int(normal_beat_length)\n",
    "    beat_window_margin = (beat_window_size - 1)//2\n",
    "    del df_normal_beat\n",
    "    del s_normal_beat_lengths\n",
    "    print(f\"    window size = {beat_window_size}\")\n",
    "    print(f\"    window margins (left and right) = {beat_window_margin}\")\n",
    "\n",
    "    # calculate beat windows\n",
    "    ## ~ and other annotations are ignored!\n",
    "    ## for fibrillation\n",
    "    # we only need start and end marked with `[` and `]` respectively\n",
    "    # NOTE(review): assumes start and end markers are balanced and in order;\n",
    "    # an unbalanced record would yield NaN window bounds (skipped in labeling)\n",
    "    s_fibr_start = df_annotation.loc[df_annotation[\"label\"] == ann_fibr_start, \"position\"]\n",
    "    s_index = s_fibr_start.index\n",
    "    s_fibr_start = s_fibr_start.reset_index(drop=True)\n",
    "    s_fibr_end = df_annotation.loc[df_annotation[\"label\"] == ann_fibr_end, \"position\"]\n",
    "    s_fibr_end = s_fibr_end.reset_index(drop=True)\n",
    "    df_fibr = pd.DataFrame({\"index\": s_index, \"window_start\": s_fibr_start, \"window_end\": s_fibr_end})\n",
    "    df_fibr = df_fibr.set_index(\"index\")\n",
    "    df_fibr[\"position\"] = df_fibr[\"window_start\"]\n",
    "    print(f\"    {len(df_fibr)} windows for fibrillation anomalies ({','.join(ann_fibr)})\")\n",
    "    ## for external anomalies\n",
    "    df_ext = df_annotation[df_annotation[\"label\"].isin(ann_ext)].copy()\n",
    "    df_ext[\"window_start\"] = np.maximum(0, df_ext[\"position\"]-beat_window_margin)\n",
    "    df_ext[\"window_end\"] = np.minimum(record.sig_len - 1, df_ext[\"position\"]+beat_window_margin)\n",
    "    df_ext = df_ext[[\"position\", \"window_start\", \"window_end\"]]\n",
    "    print(f\"    {len(df_ext)} windows for external anomalies ({','.join(ann_ext)})\")\n",
    "    ## anomalous beats\n",
    "    # exclude additional non-beat annotations\n",
    "    df_svf = df_annotation[~df_annotation[\"label\"].isin([\"|\", ann_fibr_start, ann_fibr_end])].copy()\n",
    "    df_svf[\"position_next\"] = df_svf[\"position\"].shift(-1)\n",
    "    df_svf[\"position_prev\"] = df_svf[\"position\"].shift(1)\n",
    "    #df_svf = df_svf[(df_svf[\"position_prev\"].notnull()) & (df_svf[\"position_next\"].notnull())]\n",
    "    df_svf = df_svf[df_svf[\"label\"].isin(ann_beat)]\n",
    "    df_svf[\"window_start\"] = np.maximum(0, np.minimum(df_svf[\"position\"].values-beat_window_margin, df_svf[\"position_prev\"].values+beat_window_margin))\n",
    "    df_svf[\"window_end\"] = np.minimum(record.sig_len - 1, np.maximum(df_svf[\"position\"].values+beat_window_margin, df_svf[\"position_next\"].values-beat_window_margin))\n",
    "    df_svf = df_svf[[\"position\", \"window_start\", \"window_end\"]]\n",
    "    print(f\"    {len(df_svf)} windows for anomalous beats ({','.join(ann_beat)})\")\n",
    "    # missing beats\n",
    "    df_no_beat = df_annotation[df_annotation[\"label\"].isin(ann_no_beat)].drop(columns=[\"label\"]).copy()\n",
    "    df_no_beat[\"window_start\"] = df_no_beat[\"position\"]\n",
    "    if not df_no_beat.empty:\n",
    "        df_normal_windows = df_annotation[df_annotation[\"label\"].isin(ann_normal)].copy()\n",
    "        df_normal_windows = df_normal_windows.drop(columns=[\"label\"])\n",
    "        df_normal_windows[\"window_start\"] = np.maximum(0, df_normal_windows[\"position\"]-beat_window_margin)\n",
    "        df_normal_windows[\"window_end\"] = np.minimum(record.sig_len - 1, df_normal_windows[\"position\"]+beat_window_margin)\n",
    "        df_lut = df_annotation[~df_annotation[\"label\"].isin(ann_no_beat)].merge(pd.concat([df_ext, df_svf, df_fibr, df_normal_windows]), on=\"position\", how=\"left\")\n",
    "        def find_next_window_start(pos: int):\n",
    "            # a missing-beat window extends until the next annotation's window starts\n",
    "            next_window_start = df_lut.loc[df_lut[\"position\"] > pos, \"window_start\"].iloc[0]\n",
    "            return max(pos, next_window_start)\n",
    "        df_no_beat[\"window_end\"] = df_no_beat[\"position\"].transform(find_next_window_start)\n",
    "        del df_normal_windows\n",
    "        del df_lut\n",
    "    else:\n",
    "        df_no_beat[\"window_end\"] = df_no_beat[\"position\"]\n",
    "    print(f\"    {len(df_no_beat)} windows for missing beats ({','.join(ann_no_beat)})\")\n",
    "    ## merge\n",
    "    df_windows = pd.concat([df_ext, df_svf, df_fibr, df_no_beat])\n",
    "    df_windows.sort_index(inplace=True)\n",
    "    print(f\"  ...done.\")\n",
    "\n",
    "    # add labels based on anomaly windows\n",
    "    print(\"  labeling\")\n",
    "    df_record[\"is_anomaly\"] = 0\n",
    "    for _, (_, t1, t2) in df_windows.iterrows():\n",
    "        # NaN bounds (introduced by shift() at the series edges) yield an\n",
    "        # all-False mask, so such windows are skipped\n",
    "        mask = (df_record.index >= t1) & (df_record.index <= t2)\n",
    "        df_record.loc[mask, \"is_anomaly\"] = 1\n",
    "\n",
    "    # reconstruct timestamps and set as index\n",
    "    print(\"  reconstructing timestamps\")\n",
    "    df_record[\"timestamp\"] = pd.to_datetime(df_record.index.values * 1e+9/record.fs, unit='ns')\n",
    "    df_record = df_record.set_index(\"timestamp\")\n",
    "    df_record.to_csv(target)\n",
    "    print(f\"Dataset {os.path.basename(source_file)} transformed and saved!\")\n",
    "    \n",
    "    # return dataset length\n",
    "    return record.sig_len"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Directories /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB already exist\n"
     ]
    }
   ],
   "source": [
    "# metadata shared by all datasets of this collection\n",
    "dataset_type = \"real\"\n",
    "input_type = \"multivariate\"\n",
    "datetime_index = True\n",
    "train_type = \"unsupervised\"\n",
    "train_is_normal = False\n",
    "\n",
    "# create target directory (report whether it already existed)\n",
    "dataset_subfolder = os.path.join(input_type, dataset_collection_name)\n",
    "target_subfolder = os.path.join(target_folder, dataset_subfolder)\n",
    "try:\n",
    "    os.makedirs(target_subfolder)\n",
    "    print(f\"Created directories {target_subfolder}\")\n",
    "except FileExistsError:\n",
    "    print(f\"Directories {target_subfolder} already exist\")\n",
    "\n",
    "dm = Datasets(target_folder)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Transforming 100\n",
      "  record 100.dat loaded\n",
      "  2273/2274 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/100 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2204\n",
      "    window size = 287\n",
      "    window margins (left and right) = 143\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    34 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 100 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/100 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/100.test.csv\n",
      "Transforming 101\n",
      "  record 101.dat loaded\n",
      "  1869/1874 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/101 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1851\n",
      "    window size = 353\n",
      "    window margins (left and right) = 176\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    6 windows for external anomalies (Q,|)\n",
      "    3 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 101 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/101 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/101.test.csv\n",
      "Transforming 102\n",
      "  record 102.dat loaded\n",
      "  2187/2192 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/102 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2099\n",
      "    window size = 297\n",
      "    window margins (left and right) = 148\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    60 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 102 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/102 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/102.test.csv\n",
      "Transforming 103\n",
      "  record 103.dat loaded\n",
      "  2084/2091 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/103 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2079\n",
      "    window size = 313\n",
      "    window margins (left and right) = 156\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    2 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 103 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/103 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/103.test.csv\n",
      "Transforming 104\n",
      "  record 104.dat loaded\n",
      "  2229/2311 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/104 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1324\n",
      "    window size = 291\n",
      "    window margins (left and right) = 145\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    18 windows for external anomalies (Q,|)\n",
      "    668 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 104 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/104 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/104.test.csv\n",
      "Transforming 105\n",
      "  record 105.dat loaded\n",
      "  2602/2691 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/105 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2450\n",
      "    window size = 253\n",
      "    window margins (left and right) = 126\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    35 windows for external anomalies (Q,|)\n",
      "    41 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 105 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/105 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/105.test.csv\n",
      "Transforming 106\n",
      "  record 106.dat loaded\n",
      "  2027/2098 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/106 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1083\n",
      "    window size = 345\n",
      "    window margins (left and right) = 172\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    520 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 106 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/106 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/106.test.csv\n",
      "Transforming 107\n",
      "  record 107.dat loaded\n",
      "  2137/2140 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/107 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2018\n",
      "    window size = 307\n",
      "    window margins (left and right) = 153\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    59 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 107 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/107 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/107.test.csv\n",
      "Transforming 108\n",
      "  record 108.dat loaded\n",
      "  1782/1824 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/108 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1708\n",
      "    window size = 371\n",
      "    window margins (left and right) = 185\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    8 windows for external anomalies (Q,|)\n",
      "    24 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    11 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 108 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/108 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/108.test.csv\n",
      "Transforming 109\n",
      "  record 109.dat loaded\n",
      "  2532/2535 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/109 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2451\n",
      "    window size = 257\n",
      "    window margins (left and right) = 128\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    40 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 109 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/109 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/109.test.csv\n",
      "Transforming 111\n",
      "  record 111.dat loaded\n",
      "  2124/2133 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/111 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2121\n",
      "    window size = 307\n",
      "    window margins (left and right) = 153\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    1 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 111 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/111 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/111.test.csv\n",
      "Transforming 112\n",
      "  record 112.dat loaded\n",
      "  2539/2550 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/112 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2534\n",
      "    window size = 257\n",
      "    window margins (left and right) = 128\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    2 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 112 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/112 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/112.test.csv\n",
      "Transforming 113\n",
      "  record 113.dat loaded\n",
      "  1795/1796 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/113 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1782\n",
      "    window size = 365\n",
      "    window margins (left and right) = 182\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    6 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 113 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/113 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/113.test.csv\n",
      "Transforming 114\n",
      "  record 114.dat loaded\n",
      "  1880/1890 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/114 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1767\n",
      "    window size = 341\n",
      "    window margins (left and right) = 170\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    1 windows for external anomalies (Q,|)\n",
      "    59 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 114 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/114 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/114.test.csv\n",
      "Transforming 115\n",
      "  record 115.dat loaded\n",
      "  1959/1962 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/115 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1947\n",
      "    window size = 329\n",
      "    window margins (left and right) = 164\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    6 windows for external anomalies (Q,|)\n",
      "    0 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 115 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/115 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/115.test.csv\n",
      "Transforming 116\n",
      "  record 116.dat loaded\n",
      "  2412/2421 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/116 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2193\n",
      "    window size = 269\n",
      "    window margins (left and right) = 134\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    110 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 116 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/116 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/116.test.csv\n",
      "Transforming 117\n",
      "  record 117.dat loaded\n",
      "  1535/1539 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/117 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1532\n",
      "    window size = 425\n",
      "    window margins (left and right) = 212\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    1 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 117 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/117 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/117.test.csv\n",
      "Transforming 118\n",
      "  record 118.dat loaded\n",
      "  2288/2301 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/118 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2047\n",
      "    window size = 291\n",
      "    window margins (left and right) = 145\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    112 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    10 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 118 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/118 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/118.test.csv\n",
      "Transforming 119\n",
      "  record 119.dat loaded\n",
      "  1987/2094 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/119 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1098\n",
      "    window size = 325\n",
      "    window margins (left and right) = 162\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    444 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 119 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/119 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/119.test.csv\n",
      "Transforming 121\n",
      "  record 121.dat loaded\n",
      "  1863/1876 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/121 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1858\n",
      "    window size = 359\n",
      "    window margins (left and right) = 179\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    2 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 121 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/121 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/121.test.csv\n",
      "Transforming 122\n",
      "  record 122.dat loaded\n",
      "  2478/2479 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/122 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2473\n",
      "    window size = 263\n",
      "    window margins (left and right) = 131\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    2 windows for external anomalies (Q,|)\n",
      "    0 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 122 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/122 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/122.test.csv\n",
      "Transforming 123\n",
      "  record 123.dat loaded\n",
      "  1518/1519 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/123 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1511\n",
      "    window size = 433\n",
      "    window margins (left and right) = 216\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    3 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 123 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/123 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/123.test.csv\n",
      "Transforming 124\n",
      "  record 124.dat loaded\n",
      "  1619/1634 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/124 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1509\n",
      "    window size = 405\n",
      "    window margins (left and right) = 202\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    88 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 124 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/124 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/124.test.csv\n",
      "Transforming 200\n",
      "  record 200.dat loaded\n",
      "  2601/2792 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/200 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 945\n",
      "    window size = 243\n",
      "    window margins (left and right) = 121\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    858 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 200 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/200 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/200.test.csv\n",
      "Transforming 201\n",
      "  record 201.dat loaded\n",
      "  2000/2039 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/201 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1295\n",
      "    window size = 267\n",
      "    window margins (left and right) = 133\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    338 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    37 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 201 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/201 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/201.test.csv\n",
      "Transforming 202\n",
      "  record 202.dat loaded\n",
      "  2138/2146 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/202 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2001\n",
      "    window size = 323\n",
      "    window margins (left and right) = 161\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    2 windows for external anomalies (Q,|)\n",
      "    75 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 202 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/202 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/202.test.csv\n",
      "Transforming 203\n",
      "  record 203.dat loaded\n",
      "  3006/3108 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/203 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2184\n",
      "    window size = 233\n",
      "    window margins (left and right) = 116\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    30 windows for external anomalies (Q,|)\n",
      "    447 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 203 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/203 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/203.test.csv\n",
      "Transforming 205\n",
      "  record 205.dat loaded\n",
      "  2657/2672 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/205 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2528\n",
      "    window size = 243\n",
      "    window margins (left and right) = 121\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    1 windows for external anomalies (Q,|)\n",
      "    85 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 205 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/205 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/205.test.csv\n",
      "Transforming 207\n",
      "  record 207.dat loaded\n",
      "  2346/2385 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/207 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1438\n",
      "    window size = 337\n",
      "    window margins (left and right) = 168\n",
      "    6 windows for fibrillation anomalies ([,!,])\n",
      "    2 windows for external anomalies (Q,|)\n",
      "    317 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 207 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/207 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/207.test.csv\n",
      "Transforming 208\n",
      "  record 208.dat loaded\n",
      "  2963/3040 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/208 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 689\n",
      "    window size = 209\n",
      "    window margins (left and right) = 104\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    10 windows for external anomalies (Q,|)\n",
      "    1367 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 208 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/208 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/208.test.csv\n",
      "Transforming 209\n",
      "  record 209.dat loaded\n",
      "  3012/3052 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/209 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2481\n",
      "    window size = 227\n",
      "    window margins (left and right) = 113\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    7 windows for external anomalies (Q,|)\n",
      "    384 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 209 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/209 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/209.test.csv\n",
      "Transforming 210\n",
      "  record 210.dat loaded\n",
      "  2651/2685 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/210 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2226\n",
      "    window size = 249\n",
      "    window margins (left and right) = 124\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    1 windows for external anomalies (Q,|)\n",
      "    227 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 210 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/210 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/210.test.csv\n",
      "Transforming 212\n",
      "  record 212.dat loaded\n",
      "  2749/2763 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/212 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2746\n",
      "    window size = 237\n",
      "    window margins (left and right) = 118\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    1 windows for external anomalies (Q,|)\n",
      "    0 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 212 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/212 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/212.test.csv\n",
      "Transforming 213\n",
      "  record 213.dat loaded\n",
      "  3251/3294 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/213 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2051\n",
      "    window size = 201\n",
      "    window margins (left and right) = 100\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    610 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 213 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/213 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/213.test.csv\n",
      "Transforming 214\n",
      "  record 214.dat loaded\n",
      "  2267/2297 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/214 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1753\n",
      "    window size = 285\n",
      "    window margins (left and right) = 142\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    7 windows for external anomalies (Q,|)\n",
      "    257 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 214 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/214 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/214.test.csv\n",
      "Transforming 215\n",
      "  record 215.dat loaded\n",
      "  3363/3400 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/215 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 3050\n",
      "    window size = 193\n",
      "    window margins (left and right) = 96\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    168 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 215 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/215 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/215.test.csv\n",
      "Transforming 217\n",
      "  record 217.dat loaded\n",
      "  2209/2280 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/217 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1561\n",
      "    window size = 301\n",
      "    window margins (left and right) = 150\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    1 windows for external anomalies (Q,|)\n",
      "    422 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 217 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/217 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/217.test.csv\n",
      "Transforming 219\n",
      "  record 219.dat loaded\n",
      "  2287/2312 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/219 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1882\n",
      "    window size = 287\n",
      "    window margins (left and right) = 143\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    72 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    133 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 219 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/219 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/219.test.csv\n",
      "Transforming 220\n",
      "  record 220.dat loaded\n",
      "  2048/2069 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/220 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1893\n",
      "    window size = 323\n",
      "    window margins (left and right) = 161\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    94 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 220 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/220 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/220.test.csv\n",
      "Transforming 221\n",
      "  record 221.dat loaded\n",
      "  2427/2462 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/221 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1641\n",
      "    window size = 257\n",
      "    window margins (left and right) = 128\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    396 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 221 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/221 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/221.test.csv\n",
      "Transforming 222\n",
      "  record 222.dat loaded\n",
      "  2483/2634 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/222 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1896\n",
      "    window size = 271\n",
      "    window margins (left and right) = 135\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    421 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 222 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/222 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/222.test.csv\n",
      "Transforming 223\n",
      "  record 223.dat loaded\n",
      "  2605/2643 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/223 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1684\n",
      "    window size = 253\n",
      "    window margins (left and right) = 126\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    576 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 223 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/223 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/223.test.csv\n",
      "Transforming 228\n",
      "  record 228.dat loaded\n",
      "  2077/2141 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/228 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1310\n",
      "    window size = 315\n",
      "    window margins (left and right) = 157\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    24 windows for external anomalies (Q,|)\n",
      "    365 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 228 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/228 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/228.test.csv\n",
      "Transforming 230\n",
      "  record 230.dat loaded\n",
      "  2257/2466 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/230 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2252\n",
      "    window size = 287\n",
      "    window margins (left and right) = 143\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    1 windows for external anomalies (Q,|)\n",
      "    1 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 230 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/230 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/230.test.csv\n",
      "Transforming 231\n",
      "  record 231.dat loaded\n",
      "  1573/2011 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/231 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1565\n",
      "    window size = 353\n",
      "    window margins (left and right) = 176\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    3 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    2 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 231 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/231 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/231.test.csv\n",
      "Transforming 232\n",
      "  record 232.dat loaded\n",
      "  1780/1816 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/232 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 121\n",
      "    window size = 505\n",
      "    window margins (left and right) = 252\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    1383 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 232 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/232 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/232.test.csv\n",
      "Transforming 233\n",
      "  record 233.dat loaded\n",
      "  3081/3152 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/233 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 1454\n",
      "    window size = 213\n",
      "    window margins (left and right) = 106\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    2 windows for external anomalies (Q,|)\n",
      "    849 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 233 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/233 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/233.test.csv\n",
      "Transforming 234\n",
      "  record 234.dat loaded\n",
      "  2753/2764 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/234 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 2695\n",
      "    window size = 237\n",
      "    window margins (left and right) = 118\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    53 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 234 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Arrhythmia DB/234 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/MITDB/234.test.csv\n"
     ]
    }
   ],
   "source": [
    "# Dataset transformation: convert each raw MIT-BIH record into a labeled\n",
    "# anomaly-detection test CSV and register it in the dataset index `dm`.\n",
    "# NOTE(review): `transform_and_label`, `load_dataset_names`, `dataset_subfolder`,\n",
    "# `target_subfolder`, `dm`, and the metadata constants (`dataset_type`,\n",
    "# `datetime_index`, `train_type`, `train_is_normal`, `input_type`) are all\n",
    "# defined in earlier cells of this notebook.\n",
    "transform_file: Callable[[str, str], int] = transform_and_label\n",
    "\n",
    "for dataset_name in load_dataset_names():\n",
    "    # intentionally no file suffix (.dat): the record loader resolves it\n",
    "    source_file = os.path.join(source_folder, dataset_name)\n",
    "    filename = f\"{dataset_name}.test.csv\"\n",
    "    # `path` is the value stored in the metadata index, while `target_filepath`\n",
    "    # is where the CSV is actually written -- presumably relative vs. absolute;\n",
    "    # the subfolder variables come from an earlier cell (confirm there)\n",
    "    path = os.path.join(dataset_subfolder, filename)\n",
    "    target_filepath = os.path.join(target_subfolder, filename)\n",
    "            \n",
    "    # transform file and label it; the returned int is stored as the\n",
    "    # dataset length below\n",
    "    dataset_length = transform_file(source_file, target_filepath)\n",
    "    print(f\"Processed source dataset {source_file} -> {target_filepath}\")\n",
    "\n",
    "    # save metadata for this dataset: test-only (no train split), so\n",
    "    # train_path and split_at are None\n",
    "    dm.add_dataset((dataset_collection_name, dataset_name),\n",
    "        train_path = None,\n",
    "        test_path = path,\n",
    "        dataset_type = dataset_type,\n",
    "        datetime_index = datetime_index,\n",
    "        split_at = None,\n",
    "        train_type = train_type,\n",
    "        train_is_normal = train_is_normal,\n",
    "        input_type = input_type,\n",
    "        dataset_length = dataset_length\n",
    "    )\n",
    "\n",
    "# persist the collected benchmark metadata index to disk\n",
    "dm.save()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th>train_path</th>\n",
       "      <th>test_path</th>\n",
       "      <th>dataset_type</th>\n",
       "      <th>datetime_index</th>\n",
       "      <th>split_at</th>\n",
       "      <th>train_type</th>\n",
       "      <th>train_is_normal</th>\n",
       "      <th>input_type</th>\n",
       "      <th>length</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>collection_name</th>\n",
       "      <th>dataset_name</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th rowspan=\"48\" valign=\"top\">MITDB</th>\n",
       "      <th>100</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/100.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>101</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/101.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>102</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/102.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>103</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/103.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>104</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/104.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>105</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/105.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>106</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/106.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>107</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/107.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>108</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/108.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>109</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/109.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>111</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/111.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>112</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/112.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>113</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/113.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>114</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/114.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>115</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/115.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>116</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/116.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>117</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/117.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>118</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/118.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>119</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/119.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>121</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/121.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>122</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/122.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>123</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/123.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>124</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/124.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>200</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/200.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>201</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/201.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>202</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/202.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>203</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/203.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>205</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/205.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>207</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/207.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>208</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/208.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>209</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/209.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>210</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/210.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>212</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/212.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>213</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/213.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/214.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>215</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/215.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>217</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/217.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>219</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/219.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>220</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/220.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>221</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/221.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>222</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/222.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>223</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/223.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>228</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/228.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>230</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/230.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>231</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/231.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>232</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/232.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>233</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/233.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>234</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/MITDB/234.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>650000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                             train_path                        test_path  \\\n",
       "collection_name dataset_name                                               \n",
       "MITDB           100                 NaN  multivariate/MITDB/100.test.csv   \n",
       "                101                 NaN  multivariate/MITDB/101.test.csv   \n",
       "                102                 NaN  multivariate/MITDB/102.test.csv   \n",
       "                103                 NaN  multivariate/MITDB/103.test.csv   \n",
       "                104                 NaN  multivariate/MITDB/104.test.csv   \n",
       "                105                 NaN  multivariate/MITDB/105.test.csv   \n",
       "                106                 NaN  multivariate/MITDB/106.test.csv   \n",
       "                107                 NaN  multivariate/MITDB/107.test.csv   \n",
       "                108                 NaN  multivariate/MITDB/108.test.csv   \n",
       "                109                 NaN  multivariate/MITDB/109.test.csv   \n",
       "                111                 NaN  multivariate/MITDB/111.test.csv   \n",
       "                112                 NaN  multivariate/MITDB/112.test.csv   \n",
       "                113                 NaN  multivariate/MITDB/113.test.csv   \n",
       "                114                 NaN  multivariate/MITDB/114.test.csv   \n",
       "                115                 NaN  multivariate/MITDB/115.test.csv   \n",
       "                116                 NaN  multivariate/MITDB/116.test.csv   \n",
       "                117                 NaN  multivariate/MITDB/117.test.csv   \n",
       "                118                 NaN  multivariate/MITDB/118.test.csv   \n",
       "                119                 NaN  multivariate/MITDB/119.test.csv   \n",
       "                121                 NaN  multivariate/MITDB/121.test.csv   \n",
       "                122                 NaN  multivariate/MITDB/122.test.csv   \n",
       "                123                 NaN  multivariate/MITDB/123.test.csv   \n",
       "                124                 NaN  multivariate/MITDB/124.test.csv   \n",
       "                200                 NaN  multivariate/MITDB/200.test.csv   \n",
       "                201                 NaN  multivariate/MITDB/201.test.csv   \n",
       "                202                 NaN  multivariate/MITDB/202.test.csv   \n",
       "                203                 NaN  multivariate/MITDB/203.test.csv   \n",
       "                205                 NaN  multivariate/MITDB/205.test.csv   \n",
       "                207                 NaN  multivariate/MITDB/207.test.csv   \n",
       "                208                 NaN  multivariate/MITDB/208.test.csv   \n",
       "                209                 NaN  multivariate/MITDB/209.test.csv   \n",
       "                210                 NaN  multivariate/MITDB/210.test.csv   \n",
       "                212                 NaN  multivariate/MITDB/212.test.csv   \n",
       "                213                 NaN  multivariate/MITDB/213.test.csv   \n",
       "                214                 NaN  multivariate/MITDB/214.test.csv   \n",
       "                215                 NaN  multivariate/MITDB/215.test.csv   \n",
       "                217                 NaN  multivariate/MITDB/217.test.csv   \n",
       "                219                 NaN  multivariate/MITDB/219.test.csv   \n",
       "                220                 NaN  multivariate/MITDB/220.test.csv   \n",
       "                221                 NaN  multivariate/MITDB/221.test.csv   \n",
       "                222                 NaN  multivariate/MITDB/222.test.csv   \n",
       "                223                 NaN  multivariate/MITDB/223.test.csv   \n",
       "                228                 NaN  multivariate/MITDB/228.test.csv   \n",
       "                230                 NaN  multivariate/MITDB/230.test.csv   \n",
       "                231                 NaN  multivariate/MITDB/231.test.csv   \n",
       "                232                 NaN  multivariate/MITDB/232.test.csv   \n",
       "                233                 NaN  multivariate/MITDB/233.test.csv   \n",
       "                234                 NaN  multivariate/MITDB/234.test.csv   \n",
       "\n",
       "                             dataset_type  datetime_index  split_at  \\\n",
       "collection_name dataset_name                                          \n",
       "MITDB           100                  real            True       NaN   \n",
       "                101                  real            True       NaN   \n",
       "                102                  real            True       NaN   \n",
       "                103                  real            True       NaN   \n",
       "                104                  real            True       NaN   \n",
       "                105                  real            True       NaN   \n",
       "                106                  real            True       NaN   \n",
       "                107                  real            True       NaN   \n",
       "                108                  real            True       NaN   \n",
       "                109                  real            True       NaN   \n",
       "                111                  real            True       NaN   \n",
       "                112                  real            True       NaN   \n",
       "                113                  real            True       NaN   \n",
       "                114                  real            True       NaN   \n",
       "                115                  real            True       NaN   \n",
       "                116                  real            True       NaN   \n",
       "                117                  real            True       NaN   \n",
       "                118                  real            True       NaN   \n",
       "                119                  real            True       NaN   \n",
       "                121                  real            True       NaN   \n",
       "                122                  real            True       NaN   \n",
       "                123                  real            True       NaN   \n",
       "                124                  real            True       NaN   \n",
       "                200                  real            True       NaN   \n",
       "                201                  real            True       NaN   \n",
       "                202                  real            True       NaN   \n",
       "                203                  real            True       NaN   \n",
       "                205                  real            True       NaN   \n",
       "                207                  real            True       NaN   \n",
       "                208                  real            True       NaN   \n",
       "                209                  real            True       NaN   \n",
       "                210                  real            True       NaN   \n",
       "                212                  real            True       NaN   \n",
       "                213                  real            True       NaN   \n",
       "                214                  real            True       NaN   \n",
       "                215                  real            True       NaN   \n",
       "                217                  real            True       NaN   \n",
       "                219                  real            True       NaN   \n",
       "                220                  real            True       NaN   \n",
       "                221                  real            True       NaN   \n",
       "                222                  real            True       NaN   \n",
       "                223                  real            True       NaN   \n",
       "                228                  real            True       NaN   \n",
       "                230                  real            True       NaN   \n",
       "                231                  real            True       NaN   \n",
       "                232                  real            True       NaN   \n",
       "                233                  real            True       NaN   \n",
       "                234                  real            True       NaN   \n",
       "\n",
       "                                train_type  train_is_normal    input_type  \\\n",
       "collection_name dataset_name                                                \n",
       "MITDB           100           unsupervised            False  multivariate   \n",
       "                101           unsupervised            False  multivariate   \n",
       "                102           unsupervised            False  multivariate   \n",
       "                103           unsupervised            False  multivariate   \n",
       "                104           unsupervised            False  multivariate   \n",
       "                105           unsupervised            False  multivariate   \n",
       "                106           unsupervised            False  multivariate   \n",
       "                107           unsupervised            False  multivariate   \n",
       "                108           unsupervised            False  multivariate   \n",
       "                109           unsupervised            False  multivariate   \n",
       "                111           unsupervised            False  multivariate   \n",
       "                112           unsupervised            False  multivariate   \n",
       "                113           unsupervised            False  multivariate   \n",
       "                114           unsupervised            False  multivariate   \n",
       "                115           unsupervised            False  multivariate   \n",
       "                116           unsupervised            False  multivariate   \n",
       "                117           unsupervised            False  multivariate   \n",
       "                118           unsupervised            False  multivariate   \n",
       "                119           unsupervised            False  multivariate   \n",
       "                121           unsupervised            False  multivariate   \n",
       "                122           unsupervised            False  multivariate   \n",
       "                123           unsupervised            False  multivariate   \n",
       "                124           unsupervised            False  multivariate   \n",
       "                200           unsupervised            False  multivariate   \n",
       "                201           unsupervised            False  multivariate   \n",
       "                202           unsupervised            False  multivariate   \n",
       "                203           unsupervised            False  multivariate   \n",
       "                205           unsupervised            False  multivariate   \n",
       "                207           unsupervised            False  multivariate   \n",
       "                208           unsupervised            False  multivariate   \n",
       "                209           unsupervised            False  multivariate   \n",
       "                210           unsupervised            False  multivariate   \n",
       "                212           unsupervised            False  multivariate   \n",
       "                213           unsupervised            False  multivariate   \n",
       "                214           unsupervised            False  multivariate   \n",
       "                215           unsupervised            False  multivariate   \n",
       "                217           unsupervised            False  multivariate   \n",
       "                219           unsupervised            False  multivariate   \n",
       "                220           unsupervised            False  multivariate   \n",
       "                221           unsupervised            False  multivariate   \n",
       "                222           unsupervised            False  multivariate   \n",
       "                223           unsupervised            False  multivariate   \n",
       "                228           unsupervised            False  multivariate   \n",
       "                230           unsupervised            False  multivariate   \n",
       "                231           unsupervised            False  multivariate   \n",
       "                232           unsupervised            False  multivariate   \n",
       "                233           unsupervised            False  multivariate   \n",
       "                234           unsupervised            False  multivariate   \n",
       "\n",
       "                              length  \n",
       "collection_name dataset_name          \n",
       "MITDB           100           650000  \n",
       "                101           650000  \n",
       "                102           650000  \n",
       "                103           650000  \n",
       "                104           650000  \n",
       "                105           650000  \n",
       "                106           650000  \n",
       "                107           650000  \n",
       "                108           650000  \n",
       "                109           650000  \n",
       "                111           650000  \n",
       "                112           650000  \n",
       "                113           650000  \n",
       "                114           650000  \n",
       "                115           650000  \n",
       "                116           650000  \n",
       "                117           650000  \n",
       "                118           650000  \n",
       "                119           650000  \n",
       "                121           650000  \n",
       "                122           650000  \n",
       "                123           650000  \n",
       "                124           650000  \n",
       "                200           650000  \n",
       "                201           650000  \n",
       "                202           650000  \n",
       "                203           650000  \n",
       "                205           650000  \n",
       "                207           650000  \n",
       "                208           650000  \n",
       "                209           650000  \n",
       "                210           650000  \n",
       "                212           650000  \n",
       "                213           650000  \n",
       "                214           650000  \n",
       "                215           650000  \n",
       "                217           650000  \n",
       "                219           650000  \n",
       "                220           650000  \n",
       "                221           650000  \n",
       "                222           650000  \n",
       "                223           650000  \n",
       "                228           650000  \n",
       "                230           650000  \n",
       "                231           650000  \n",
       "                232           650000  \n",
       "                233           650000  \n",
       "                234           650000  "
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Re-read the index file and show only this collection's rows (index is (collection_name, dataset_name))\n",
    "dm.refresh()\n",
    "dm.df().loc[pd.IndexSlice[dataset_collection_name:dataset_collection_name, :]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>timestamp</th>\n",
       "      <th>MLII</th>\n",
       "      <th>V1</th>\n",
       "      <th>is_anomaly</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1970-01-01 00:00:00.000000000</td>\n",
       "      <td>-0.215</td>\n",
       "      <td>0.095</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1970-01-01 00:00:00.002777777</td>\n",
       "      <td>-0.215</td>\n",
       "      <td>0.095</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1970-01-01 00:00:00.005555555</td>\n",
       "      <td>-0.215</td>\n",
       "      <td>0.095</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1970-01-01 00:00:00.008333333</td>\n",
       "      <td>-0.215</td>\n",
       "      <td>0.095</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1970-01-01 00:00:00.011111111</td>\n",
       "      <td>-0.215</td>\n",
       "      <td>0.095</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>649995</th>\n",
       "      <td>1970-01-01 00:30:05.541666666</td>\n",
       "      <td>-1.245</td>\n",
       "      <td>-0.540</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>649996</th>\n",
       "      <td>1970-01-01 00:30:05.544444444</td>\n",
       "      <td>-1.230</td>\n",
       "      <td>-0.525</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>649997</th>\n",
       "      <td>1970-01-01 00:30:05.547222222</td>\n",
       "      <td>-1.190</td>\n",
       "      <td>-0.465</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>649998</th>\n",
       "      <td>1970-01-01 00:30:05.550000000</td>\n",
       "      <td>-1.135</td>\n",
       "      <td>-0.400</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>649999</th>\n",
       "      <td>1970-01-01 00:30:05.552777777</td>\n",
       "      <td>-1.280</td>\n",
       "      <td>0.000</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>650000 rows × 4 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "                           timestamp   MLII     V1  is_anomaly\n",
       "0      1970-01-01 00:00:00.000000000 -0.215  0.095           0\n",
       "1      1970-01-01 00:00:00.002777777 -0.215  0.095           0\n",
       "2      1970-01-01 00:00:00.005555555 -0.215  0.095           0\n",
       "3      1970-01-01 00:00:00.008333333 -0.215  0.095           0\n",
       "4      1970-01-01 00:00:00.011111111 -0.215  0.095           0\n",
       "...                              ...    ...    ...         ...\n",
       "649995 1970-01-01 00:30:05.541666666 -1.245 -0.540           0\n",
       "649996 1970-01-01 00:30:05.544444444 -1.230 -0.525           0\n",
       "649997 1970-01-01 00:30:05.547222222 -1.190 -0.465           0\n",
       "649998 1970-01-01 00:30:05.550000000 -1.135 -0.400           0\n",
       "649999 1970-01-01 00:30:05.552777777 -1.280  0.000           0\n",
       "\n",
       "[650000 rows x 4 columns]"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dm.get_dataset_df((dataset_collection_name, \"207\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Dataset transformation walk-through"
   ]
  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "def print_obj_attr(obj, name=\"Object\"):\n",
     "    \"\"\"Debug helper: print a heading followed by every attribute of `obj`, one per line.\"\"\"\n",
     "    print(name)\n",
     "    tmp = vars(obj)\n",
     "    for key in tmp:\n",
     "        print(key, tmp[key])\n",
     "    print(\"\")\n",
     "records = load_dataset_names()"
    ]
   },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load and parse dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "records.index(\"219\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# dataset\n",
    "record = wfdb.rdrecord(os.path.join(source_folder, records[37]))\n",
    "#print_obj_attr(record, \"Record object\")\n",
    "\n",
    "df_record = pd.DataFrame(record.p_signal, columns=record.sig_name)\n",
    "df_record"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Add timestamp information based on sample interval ($$[fs] = samples/second$$):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "display(Latex(f\"Samples per second: $$fs = {record.fs} \\\\frac{{1}}{{s}}$$\"))\n",
    "display(Markdown(f\"This gives a sample interval of {1e+9/record.fs} nanoseconds\"))\n",
    "df_record[\"timestamp\"] = pd.to_datetime(df_record.index.values * 1e+9/record.fs, unit='ns')\n",
    "df_record"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load and parse annotations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# find all annotations\n",
    "annotations = {}\n",
    "for r in records:\n",
    "    atr = wfdb.rdann(os.path.join(source_folder, r), \"atr\")\n",
    "    df_annotation = pd.DataFrame(atr.symbol, index=atr.sample, columns=[\"Label\"])\n",
    "    for an in df_annotation[\"Label\"].unique():\n",
    "        if an not in annotations:\n",
    "            annotations[an] = set()\n",
    "        annotations[an].add(atr.record_name)\n",
    "\n",
    "for an in annotations:\n",
    "    annotations[an] = \", \".join(annotations[an])\n",
    "annotations"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Annotations\n",
    "\n",
    "| Annotation | Description |\n",
    "| :--------- | :---------- |\n",
    "|| **Considered normal** |\n",
    "| `N` | Normal beat |\n",
    "| `/` | Paced beat (normal beat if pacemaker is used) |\n",
    "| `L` | Left bundle branch block beat (also normal?) |\n",
    "| `R` | Right bundle branch block beat (also normal?)  |\n",
    "|| **Anomalous beats** (use double-window labeling) |\n",
    "| `F` | Fusion of ventricular and normal beat |\n",
    "| `f` | Fusion of paced and normal beat |\n",
    "| `S` | Supraventricular premature or ectopic beat |\n",
    "| `A` | Atrial premature beat |\n",
    "| `a` | Aberrated atrial premature beat |\n",
    "| `V` | Premature ventricular contraction |\n",
    "| `J` | Nodal (junctional) premature beat |\n",
    "| `j` | Nodal (junctional) escape beat |\n",
    "| `E` | Ventricular escape beat |\n",
    "| `e` | Atrial escape beat |\n",
    "|| **Anomaly from `x` until next beat window start** |\n",
    "| `x` | Non-conducted P-wave (blocked APC) (no beat follows annotation) |\n",
    "|| **Entire section of fibrillation is regarded anomalous** (a single window from `[` to `]`) |\n",
    "| `[` | Start of ventricular flutter/fibrillation |\n",
    "| `!` | Ventricular flutter wave |\n",
    "| `]` | End of ventricular flutter/fibrillation |\n",
    "|| **External anomalies** (single window labeling) |\n",
    "| `Q` | Unclassifiable beat |\n",
    "| `\\|` | Isolated QRS-like artifact |\n",
     "|| **Ignored, because they are hard to parse and to label** |\n",
     "| `+` | Rhythm change |\n",
    "| `~` | Change in signal quality (usually noise level changes) |\n",
    "| `\"` | Tape slippage (unknown; variable length) |"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ann_normal = [\"N\", \"/\", \"L\", \"R\"]\n",
    "ann_beat = [\"F\", \"f\", \"S\", \"A\", \"a\", \"V\", \"J\", \"j\", \"E\", \"e\"]\n",
    "ann_no_beat = [\"x\"]\n",
    "ann_fibr_start = \"[\"\n",
    "ann_fibr_end = \"]\"\n",
    "ann_fibr = [ann_fibr_start, \"!\", ann_fibr_end]\n",
    "ann_ext = [\"Q\", \"|\"]\n",
    "ann_ignore = [\"+\", \"~\", '\"']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "atr = wfdb.rdann(os.path.join(source_folder, records[37]), \"atr\")\n",
    "#print_obj_attr(atr, \"Annotation object\")\n",
    "assert record.fs == atr.fs, \"Sample frequency of records and annotations does not match!\"\n",
    "\n",
    "df_annotation = pd.DataFrame(atr.symbol, index=atr.sample, columns=[\"Label\"])\n",
    "df_annotation = df_annotation.reset_index()\n",
    "df_annotation.columns = [\"position\", \"label\"]\n",
    "df_annotation.groupby(\"label\").count()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Remove ignored annotations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_annotation = df_annotation[~df_annotation[\"label\"].isin(ann_ignore)]\n",
    "df_annotation = df_annotation.reset_index(drop=True)\n",
    "df_annotation.groupby(\"label\").count()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Calculate beat window\n",
    "\n",
    "We assume that the normal beats (e.g. annotated with `N`) occur in a regular interval and that the expert annotations (from the dataset) are directly in the middle of a beat window.\n",
    "A beat window is a fixed length subsequence of the time series and shows a heart beat in its direct (local) context.\n",
    "\n",
    "We calculate the beat window length for each dataset based on the median distance between normal beats.\n",
    "The index (autoincrementing integers) serves as the measurement unit.\n",
    "\n",
    "Shifted-by-one self-join and filter out all beat-pairs that contain anomalous beats.\n",
    "We want to calculate the beat windows only based on the normal beats.\n",
    "We then calculate the distance between two neighboring heart beats:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_normal_beat = df_annotation.copy()\n",
    "df_normal_beat[\"prev_position\"] = df_annotation[\"position\"].shift()\n",
    "df_normal_beat[\"prev_label\"] = df_annotation[\"label\"].shift()\n",
    "df_normal_beat = df_normal_beat[(df_normal_beat[\"label\"].isin(ann_normal)) & (df_normal_beat[\"prev_label\"].isin(ann_normal))]\n",
    "df_normal_beat = df_normal_beat.drop(columns=[\"label\", \"prev_label\"])\n",
    "df_normal_beat[\"length\"] = df_normal_beat[\"position\"] - df_normal_beat[\"prev_position\"]\n",
    "df_normal_beat.describe()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The median of all normal beat lengths is the beat window size.\n",
    "We require the beat window size to be odd.\n",
    "This allows us to center the window at the beat annotation."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "normal_beat_length = df_normal_beat[\"length\"].median()\n",
    "if (normal_beat_length%2) == 0:\n",
    "    normal_beat_length += 1\n",
    "beat_window_size = int(normal_beat_length)\n",
    "beat_window_margin = (beat_window_size - 1)//2\n",
    "print(f\"window size = {beat_window_size}\\nwindow margins (left and right) = {beat_window_margin}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Calculate anomalous windows\n",
    "\n",
     "The experts from PhysioNet annotated only the beats themselves with a label, but the actual anomaly also comprises the beat's surroundings.\n",
    "\n",
    "We assume that anomalous beats (such as `V` or `F`; see table above) require looking at a window around the actual beat as being anomalous.\n",
    "External anomalies (such as `|`; see table above) also mark a window around it as anomalous, because those artefacts comprise multiple points.\n",
    "\n",
     "We completely ignore `~`, `\"`, and `+`-annotations that indicate signal quality or rhythm changes, because they are not relevant for our analysis.\n",
    "\n",
    "We automatically label a variable-sized window around an annotated beat as an anomalous subsequence using the following technique:\n",
    "\n",
    "1. For anomalous annotations (such as `S`, `V`, or `F` annotations):\n",
    "   - Remove `\"`, `~`, `+`, `[`, `]`, and `|` annotations\n",
    "   - Calculate anomaly window using `beat_window_size` aligned with its center on the beat annotation.\n",
    "   - Calculate end of previous beat window _e_ and beginning of next beat window _b_.\n",
    "     Use _e_ as beginning and _b_ as end for a second anomaly window.\n",
    "   - Mark the union of both anomaly windows' points as anomalous.\n",
    "2. For `|` and `Q` annotations, mark all points of an anomaly window centered on the annotation as anomalous.\n",
    "3. For `[`, `!`, and `]` annotations, mark all points within the region from `[` until `]` as anomalous.\n",
    "4. For `x` annotations, mark the annotated and all following points until the beginning of the next beat window as anomalous.\n",
    "5. Mark all other points as normal.\n",
    "\n",
    "> **Explain, why we used the combined windows for anomalous beats!!**\n",
    ">\n",
    "> - pattern/shape of signal may be ok\n",
    "> - but we consider distance to other beats also\n",
    "> - if too narrow or too far away, it's also anomalous\n",
    "\n",
    "The figure shows an anomalous beat with its anomaly window (in red) and the windows of its previous and subsequent normal beats (in green).\n",
     "We mark all points in the interval $$[\\min(W_{end}, X_{start}), \\max(X_{end}, Y_{start})]$$ as anomalous."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "name = df_annotation[df_annotation[\"label\"] == ann_fibr_start].iloc[0].name\n",
    "df_annotation[df_annotation.index >= name]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# reverse lookup from timestamp to annotation index in df_beat\n",
    "p = df_record[df_record[\"timestamp\"] == \"1970-01-01 00:11:03.000\"].index.values[0]\n",
    "df_annotation[df_annotation[\"position\"] >= p].index[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_window(pos, color=\"blue\", **kvs):\n",
    "    start = pos - beat_window_margin\n",
    "    end = pos + beat_window_margin\n",
    "    plt.axvspan(start, end, color=color, alpha=0.5, **kvs)\n",
    "\n",
    "\n",
    "index = 39\n",
    "\n",
    "beat_n = df_annotation.loc[index, \"position\"]\n",
    "print(\"Selected beat is annotated as\", df_annotation.loc[index, \"label\"])\n",
    "print(\"with timestamp\", df_record.loc[beat_n, \"timestamp\"])\n",
    "ax = df_record.iloc[beat_n-1000:beat_n+1000].plot(kind='line', y=[\"MLII\", \"V1\"], use_index=True, figsize=(20,10))\n",
    "plot_window(df_annotation.loc[index-1, \"position\"], label=\"$W$\")\n",
    "plot_window(beat_n, color=\"orange\", label=\"$X$\")\n",
    "plot_window(df_annotation.loc[index+1, \"position\"], label=\"$Y$\")\n",
    "\n",
    "labels = df_annotation[(df_annotation[\"position\"] > beat_n-1000) & (df_annotation[\"position\"] < beat_n+1000)]\n",
    "for i, (position, label) in labels.iterrows():\n",
    "    plt.text(position, -1.2, label)\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Windows for fibrillation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# we only need start and end marked with `[` and `]` respectively\n",
    "s_fibr_start = df_annotation.loc[df_annotation[\"label\"] == ann_fibr_start, \"position\"]\n",
    "s_index = s_fibr_start.index\n",
    "s_fibr_start = s_fibr_start.reset_index(drop=True)\n",
    "\n",
    "s_fibr_end = df_annotation.loc[df_annotation[\"label\"] == ann_fibr_end, \"position\"]\n",
    "s_fibr_end = s_fibr_end.reset_index(drop=True)\n",
    "\n",
    "df_fibr = pd.DataFrame({\"index\": s_index, \"window_start\": s_fibr_start, \"window_end\": s_fibr_end})\n",
    "df_fibr = df_fibr.set_index(\"index\")\n",
    "df_fibr[\"position\"] = df_fibr[\"window_start\"]\n",
    "df_fibr"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Windows for external anomalies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_ext = df_annotation[df_annotation[\"label\"].isin(ann_ext)].copy()\n",
    "df_ext[\"window_start\"] = df_ext[\"position\"]-beat_window_margin\n",
    "df_ext[\"window_end\"] = df_ext[\"position\"]+beat_window_margin\n",
    "df_ext = df_ext[[\"position\", \"window_start\", \"window_end\"]]\n",
    "df_ext.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Windows for anomalous beats"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# exclude additional non-beat annotations\n",
    "df_tmp = df_annotation[~df_annotation[\"label\"].isin([\"|\", ann_fibr_start, ann_fibr_end])].copy()\n",
    "df_tmp[\"position_next\"] = df_tmp[\"position\"].shift(-1)\n",
    "df_tmp[\"position_prev\"] = df_tmp[\"position\"].shift(1)\n",
    "#df_tmp = df_tmp[(df_tmp[\"position_prev\"].notnull()) & (df_tmp[\"position_next\"].notnull())]\n",
    "df_tmp = df_tmp[df_tmp[\"label\"].isin(ann_beat)]\n",
    "df_tmp[\"window_start\"] = np.minimum(df_tmp[\"position\"].values-beat_window_margin, df_tmp[\"position_prev\"].values+beat_window_margin)\n",
    "df_tmp[\"window_end\"] = np.maximum(df_tmp[\"position\"].values+beat_window_margin, df_tmp[\"position_next\"].values-beat_window_margin)\n",
    "df_svf = df_tmp[[\"position\", \"window_start\", \"window_end\"]]\n",
    "df_tmp.groupby(\"label\").count()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Windows for missing beats (such as `x` annotation)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_normal_windows = df_annotation[df_annotation[\"label\"].isin(ann_normal)].copy()\n",
    "df_normal_windows = df_normal_windows.drop(columns=[\"label\"])\n",
    "df_normal_windows[\"window_start\"] = df_normal_windows[\"position\"]-beat_window_margin\n",
    "df_normal_windows[\"window_end\"] = df_normal_windows[\"position\"]+beat_window_margin\n",
    "\n",
    "df_lut = df_annotation[~df_annotation[\"label\"].isin(ann_no_beat)].merge(pd.concat([df_ext, df_svf, df_fibr, df_normal_windows]), on=\"position\", how=\"left\")\n",
    "df_lut"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def find_next_window_start(pos: int):\n",
    "    next_window_start = df_lut.loc[df_lut[\"position\"] > pos, \"window_start\"].iloc[0]\n",
    "    return max(pos, next_window_start)\n",
    "\n",
    "df_no_beat = df_annotation[df_annotation[\"label\"].isin(ann_no_beat)].drop(columns=[\"label\"]).copy()\n",
    "df_no_beat[\"window_start\"] = df_no_beat[\"position\"]\n",
    "df_no_beat[\"window_end\"] = df_no_beat[\"position\"].transform(find_next_window_start)\n",
    "df_no_beat.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Merge everything together"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_windows = pd.concat([df_ext, df_svf, df_fibr, df_no_beat])\n",
    "df_windows.sort_index(inplace=True)\n",
    "df_windows"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "index = 798\n",
    "\n",
    "beat = df_windows.loc[index, \"position\"]\n",
    "start = df_windows.loc[index, \"window_start\"]\n",
    "end = df_windows.loc[index, \"window_end\"]\n",
    "print(\"Selected beat is annotated as\", df_beat.loc[index, \"label\"])\n",
    "print(\"with timestamp\", df_record.loc[beat, \"timestamp\"])\n",
    "ax = df_record.iloc[beat-500:beat+500].plot(kind='line', y=['ECG1', 'ECG2'], use_index=True, figsize=(20,10))\n",
    "plt.axvspan(beat-500, start-1, color=\"green\", alpha=0.5, label=\"normal region 1\", ymin=.5)\n",
    "plt.axvspan(start, end, color=\"red\", alpha=0.5, label=\"anomalous region\", ymin=.5)\n",
    "plt.axvspan(end+1, beat+500, color=\"green\", alpha=0.5, label=\"normal region 2\", ymin=.5)\n",
    "plot_window(df_beat.loc[index-1, \"position\"], label=\"$W$\", ymax=.5)\n",
    "plot_window(beat_n, color=\"orange\", label=\"$X$\", ymax=.5)\n",
    "plot_window(df_beat.loc[index+1, \"position\"], label=\"$Y$\", ymax=.5)\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Add labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df = df_record.copy()\n",
    "df[\"is_anomaly\"] = 0\n",
    "\n",
    "for _, (_, t1, t2) in df_windows.iterrows():\n",
    "    tmp = df[df.index >= t1]\n",
    "    tmp = tmp[tmp.index <= t2]\n",
    "    df[\"is_anomaly\"].values[tmp.index] = 1\n",
    "\n",
    "#df = df.set_index(\"timestamp\")\n",
    "df[df[\"is_anomaly\"] == 1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "index = 370176\n",
    "snippet_size = 1500\n",
    "\n",
    "start = max(0, index - snippet_size//2)\n",
    "end = min(len(df), index + snippet_size//2)\n",
    "df_show = df.loc[start:end]\n",
    "df_show.plot(kind='line', y=[\"MLII\", \"V1\", \"is_anomaly\"], use_index=True, figsize=(20,10))\n",
    "\n",
    "labels = df_annotation[(df_annotation[\"position\"] >= start) & (df_annotation[\"position\"] <= end)]\n",
    "for i, (position, label) in labels.iterrows():\n",
    "    plt.text(position, -2, label)\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Experimentation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df = pd.merge(df_record, df_annotation, left_index=True, right_index=True, how=\"outer\")\n",
    "#df = df.fillna(value={\"Label\": \".\", \"is_anomaly\": 0})\n",
    "df.groupby([\"is_anomaly\"]).count()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df[df[\"Label\"].notna()]"
   ]
  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "# NOTE(review): leftover experimentation cell -- presumably copied from a\n",
     "# notebook for a different record set. This record's columns are 'MLII'/'V1',\n",
     "# not 'ECG1'/'ECG2', and `df` was just overwritten by the merge above, so it\n",
     "# has no 'is_anomaly' column either; confirm intent or remove the cell.\n",
     "import matplotlib.pyplot as plt\n",
     "df_show = df.loc[27000:28000]\n",
     "df_show.plot(kind='line', y=['ECG1', 'ECG2', 'is_anomaly'], use_index=True, figsize=(20,10))\n",
     "plt.show()"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "# NOTE(review): `dataset_subfolder` is not defined anywhere in this notebook;\n",
     "# this cell only runs if that path variable is set manually beforehand.\n",
     "df = pd.read_csv(os.path.join(dataset_subfolder, \"800.test.csv\"), index_col=\"timestamp\")\n",
     "df.loc[\"1970-01-01 00:21:20\":\"1970-01-01 00:21:40\"].plot(figsize=(20,10))\n",
     "plt.show()"
    ]
   },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "timeeval",
   "language": "python",
   "name": "timeeval"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
