{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# MIT-BIH Long-Term ECG Database (_ltdb_)\n",
    "\n",
    "Part of the ECG Database Collection:\n",
    "\n",
    "| Short Name | Long Name |\n",
    "| :--- | :--- |\n",
    "| _mitdb_ | MIT-BIH Arrhythmia Database |\n",
    "| _svdb_ | MIT-BIH Supraventricular Arrhythmia Database |\n",
    "| _ltdb_ | MIT-BIH Long-Term ECG Database |\n",
    "\n",
    "[Documentation](https://wfdb.readthedocs.io/en/latest) of the `wfdb` package."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import wfdb\n",
    "import os\n",
    "from typing import Final\n",
    "from collections.abc import Callable\n",
    "from config import data_raw_folder, data_processed_folder\n",
    "from timeeval import Datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking for source datasets in /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database and\n",
      "saving processed datasets in /home/projects/akita/data/benchmark-data/data-processed\n"
     ]
    }
   ],
   "source": [
    "# dataset collection configuration: where the raw WFDB records live and\n",
    "# where the processed CSV datasets are written to\n",
    "dataset_collection_name = \"LTDB\"\n",
    "source_folder = os.path.join(data_raw_folder, \"MIT-BIH Long-Term ECG Database\")\n",
    "target_folder = data_processed_folder\n",
    "\n",
    "from pathlib import Path\n",
    "print(f\"Looking for source datasets in {Path(source_folder).absolute()} and\\nsaving processed datasets in {Path(target_folder).absolute()}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_dataset_names() -> list[str]:\n",
    "    \"\"\"Read the record names listed in the RECORDS index file of the dataset.\"\"\"\n",
    "    records_path = os.path.join(source_folder, \"RECORDS\")\n",
    "    with open(records_path, 'r') as records_file:\n",
    "        return [line.rstrip('\\n') for line in records_file]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For the explanation of the following transformation function `transform_and_label`, see the transformation walk-through in the _MIT-BIH Arrhythmia Database.ipynb_ notebook.\n",
    "\n",
    "The following annotations are present in this dataset:\n",
    "\n",
    "| Annotation | Description |\n",
    "| :--------- | :---------- |\n",
    "|| **Considered normal** |\n",
    "| `N` | Normal beat |\n",
    "| `/` | Paced beat |\n",
    "| `L` | Left bundle branch block beat |\n",
    "| `R` | Right bundle branch block beat |\n",
    "|| **Anomalous beats** (use double-window labeling) |\n",
    "| `F` | Fusion of ventricular and normal beat |\n",
    "| `f` | Fusion of paced and normal beat |\n",
    "| `S` | Supraventricular premature or ectopic beat |\n",
    "| `A` | Atrial premature beat |\n",
    "| `a` | Aberrated atrial premature beat |\n",
    "| `V` | Premature ventricular contraction |\n",
    "| `J` | Nodal (junctional) premature beat |\n",
    "| `j` | Nodal (junctional) escape beat |\n",
    "| `E` | Ventricular escape beat |\n",
    "| `e` | Atrial escape beat |\n",
    "|| **Anomaly from `x` until next beat window start** |\n",
    "| `x` | Non-conducted P-wave (blocked APC) |\n",
    "|| **Entire section of fibrillation is regarded anomalous** (a single window from `[` to `]`) |\n",
    "| `[` | Start of ventricular flutter/fibrillation |\n",
    "| `!` | Ventricular flutter wave |\n",
    "| `]` | End of ventricular flutter/fibrillation |\n",
    "|| **External anomalies** (single window labeling) |\n",
    "| `Q` | Unclassifiable beat |\n",
    "| `\\|` | Isolated QRS-like artifact |\n",
    "|| **Ignored, because hard to parse and to label** |\n",
    "| `+` | Rhythm change |\n",
    "| `~` | Change in signal quality (usually noise level changes) |\n",
    "| `\"` | Comment annotation |"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Annotation symbol groups (PhysioNet annotation codes) used to derive the labels.\n",
    "ann_normal = [\"N\", \"/\", \"L\", \"R\"]\n",
    "ann_beat = [\"F\", \"f\", \"S\", \"A\", \"a\", \"V\", \"J\", \"j\", \"E\", \"e\"]\n",
    "ann_no_beat = [\"x\"]\n",
    "ann_fibr_start = \"[\"\n",
    "ann_fibr_end = \"]\"\n",
    "ann_fibr = [ann_fibr_start, \"!\", ann_fibr_end]\n",
    "ann_ext = [\"Q\", \"|\"]\n",
    "ann_ignore = [\"+\", \"~\", '\"']\n",
    "\n",
    "def transform_and_label(source_file: str, target: str) -> int:\n",
    "    \"\"\"Transform a WFDB record into a labeled CSV time series.\n",
    "\n",
    "    Loads the signal and its .atr annotations from ``source_file`` (path\n",
    "    without file suffix), derives anomaly windows from the annotation\n",
    "    symbols (see the groups above), writes the labeled dataset with a\n",
    "    datetime index and an ``is_anomaly`` column to ``target``, and\n",
    "    returns the number of samples of the record.\n",
    "    \"\"\"\n",
    "    print(f\"Transforming {os.path.basename(source_file)}\")\n",
    "    # load dataset\n",
    "    record = wfdb.rdrecord(source_file)\n",
    "    df_record = pd.DataFrame(record.p_signal, columns=record.sig_name)\n",
    "    print(f\"  record {record.file_name[0]} loaded\")\n",
    "\n",
    "    # load annotation file\n",
    "    atr = wfdb.rdann(source_file, \"atr\")\n",
    "    assert record.fs == atr.fs, \"Sample frequency of records and annotations does not match!\"\n",
    "    df_annotation = pd.DataFrame({\"position\": atr.sample, \"label\": atr.symbol})\n",
    "    # remove ignored annotations\n",
    "    df_annotation = df_annotation[~df_annotation[\"label\"].isin(ann_ignore)]\n",
    "    df_annotation = df_annotation.reset_index(drop=True)\n",
    "    print(f\"  {len(df_annotation)}/{atr.ann_len} beat annotations for {source_file} loaded (others were ignored)\")\n",
    "\n",
    "    # calculate normal beat length (median distance between adjacent normal beats)\n",
    "    print(\"  preparing windows for labeling...\")\n",
    "    df_normal_beat = df_annotation.copy()\n",
    "    df_normal_beat[\"prev_position\"] = df_annotation[\"position\"].shift()\n",
    "    df_normal_beat[\"prev_label\"] = df_annotation[\"label\"].shift()\n",
    "    df_normal_beat = df_normal_beat[(df_normal_beat[\"label\"].isin(ann_normal)) & (df_normal_beat[\"prev_label\"].isin(ann_normal))]\n",
    "    df_normal_beat = df_normal_beat.drop(columns=[\"label\", \"prev_label\"])\n",
    "    s_normal_beat_lengths = df_normal_beat[\"position\"] - df_normal_beat[\"prev_position\"]\n",
    "    print(f\"    normal beat distance samples = {len(s_normal_beat_lengths)}\")\n",
    "    normal_beat_length = s_normal_beat_lengths.median()\n",
    "    # force an odd window size so both margins around a beat are equal\n",
    "    if (normal_beat_length % 2) == 0:\n",
    "        normal_beat_length += 1\n",
    "    beat_window_size = int(normal_beat_length)\n",
    "    beat_window_margin = (beat_window_size - 1)//2\n",
    "    del df_normal_beat\n",
    "    del s_normal_beat_lengths\n",
    "    print(f\"    window size = {beat_window_size}\")\n",
    "    print(f\"    window margins (left and right) = {beat_window_margin}\")\n",
    "\n",
    "    # calculate beat windows\n",
    "    ## ~ and other annotations are ignored!\n",
    "    ## for fibrillation\n",
    "    # we only need start and end marked with `[` and `]` respectively\n",
    "    s_fibr_start = df_annotation.loc[df_annotation[\"label\"] == ann_fibr_start, \"position\"]\n",
    "    s_index = s_fibr_start.index\n",
    "    s_fibr_start = s_fibr_start.reset_index(drop=True)\n",
    "    s_fibr_end = df_annotation.loc[df_annotation[\"label\"] == ann_fibr_end, \"position\"]\n",
    "    s_fibr_end = s_fibr_end.reset_index(drop=True)\n",
    "    # NOTE(review): assumes `[`/`]` annotations are balanced and in order -- confirm for new datasets\n",
    "    df_fibr = pd.DataFrame({\"index\": s_index, \"window_start\": s_fibr_start, \"window_end\": s_fibr_end})\n",
    "    df_fibr = df_fibr.set_index(\"index\")\n",
    "    df_fibr[\"position\"] = df_fibr[\"window_start\"]\n",
    "    print(f\"    {len(df_fibr)} windows for fibrillation anomalies ({','.join(ann_fibr)})\")\n",
    "    ## for external anomalies\n",
    "    df_ext = df_annotation[df_annotation[\"label\"].isin(ann_ext)].copy()\n",
    "    df_ext[\"window_start\"] = np.maximum(0, df_ext[\"position\"]-beat_window_margin)\n",
    "    df_ext[\"window_end\"] = np.minimum(record.sig_len - 1, df_ext[\"position\"]+beat_window_margin)\n",
    "    df_ext = df_ext[[\"position\", \"window_start\", \"window_end\"]]\n",
    "    print(f\"    {len(df_ext)} windows for external anomalies ({','.join(ann_ext)})\")\n",
    "    ## anomalous beats\n",
    "    # exclude additional non-beat annotations\n",
    "    df_svf = df_annotation[~df_annotation[\"label\"].isin([\"|\", ann_fibr_start, ann_fibr_end])].copy()\n",
    "    df_svf[\"position_next\"] = df_svf[\"position\"].shift(-1)\n",
    "    df_svf[\"position_prev\"] = df_svf[\"position\"].shift(1)\n",
    "    df_svf = df_svf[df_svf[\"label\"].isin(ann_beat)]\n",
    "    # window spans from the previous to the next beat window edge; if the very\n",
    "    # first/last annotation is anomalous, its shifted neighbor is NaN and the\n",
    "    # resulting window is skipped during labeling below\n",
    "    df_svf[\"window_start\"] = np.maximum(0, np.minimum(df_svf[\"position\"].values-beat_window_margin, df_svf[\"position_prev\"].values+beat_window_margin))\n",
    "    df_svf[\"window_end\"] = np.minimum(record.sig_len - 1, np.maximum(df_svf[\"position\"].values+beat_window_margin, df_svf[\"position_next\"].values-beat_window_margin))\n",
    "    df_svf = df_svf[[\"position\", \"window_start\", \"window_end\"]]\n",
    "    print(f\"    {len(df_svf)} windows for anomalous beats ({','.join(ann_beat)})\")\n",
    "    # missing beats\n",
    "    df_no_beat = df_annotation[df_annotation[\"label\"].isin(ann_no_beat)].drop(columns=[\"label\"]).copy()\n",
    "    df_no_beat[\"window_start\"] = df_no_beat[\"position\"]\n",
    "    if not df_no_beat.empty:\n",
    "        df_normal_windows = df_annotation[df_annotation[\"label\"].isin(ann_normal)].copy()\n",
    "        df_normal_windows = df_normal_windows.drop(columns=[\"label\"])\n",
    "        df_normal_windows[\"window_start\"] = np.maximum(0, df_normal_windows[\"position\"]-beat_window_margin)\n",
    "        df_normal_windows[\"window_end\"] = np.minimum(record.sig_len - 1, df_normal_windows[\"position\"]+beat_window_margin)\n",
    "        df_lut = df_annotation[~df_annotation[\"label\"].isin(ann_no_beat)].merge(pd.concat([df_ext, df_svf, df_fibr, df_normal_windows]), on=\"position\", how=\"left\")\n",
    "        def find_next_window_start(pos: int):\n",
    "            # an `x` anomaly lasts until the start of the next annotated window\n",
    "            next_window_start = df_lut.loc[df_lut[\"position\"] > pos, \"window_start\"].iloc[0]\n",
    "            return max(pos, next_window_start)\n",
    "        df_no_beat[\"window_end\"] = df_no_beat[\"position\"].transform(find_next_window_start)\n",
    "        del df_normal_windows\n",
    "        del df_lut\n",
    "    else:\n",
    "        df_no_beat[\"window_end\"] = df_no_beat[\"position\"]\n",
    "    print(f\"    {len(df_no_beat)} windows for missing beats ({','.join(ann_no_beat)})\")\n",
    "    ## merge\n",
    "    df_windows = pd.concat([df_ext, df_svf, df_fibr, df_no_beat])\n",
    "    df_windows.sort_index(inplace=True)\n",
    "    print(f\"  ...done.\")\n",
    "\n",
    "    # add labels based on anomaly windows\n",
    "    print(\"  labeling\")\n",
    "    # write the labels into a flat array instead of slicing df_record twice per\n",
    "    # window: there can be >20k windows on a >10M sample record, and this also\n",
    "    # avoids a NameError when a record has no anomaly windows at all\n",
    "    labels = np.zeros(len(df_record), dtype=np.int64)\n",
    "    for t1, t2 in df_windows[[\"window_start\", \"window_end\"]].itertuples(index=False):\n",
    "        # skip windows with undefined bounds (NaN from shifted neighbors at the\n",
    "        # record boundaries); these were silently dropped before as well\n",
    "        if pd.isna(t1) or pd.isna(t2):\n",
    "            continue\n",
    "        labels[int(t1):int(t2) + 1] = 1\n",
    "    df_record[\"is_anomaly\"] = labels\n",
    "\n",
    "    # reconstruct timestamps and set as index\n",
    "    print(\"  reconstructing timestamps\")\n",
    "    df_record[\"timestamp\"] = pd.to_datetime(df_record.index.values * 1e+9/record.fs, unit='ns')\n",
    "    df_record = df_record.set_index(\"timestamp\")\n",
    "    df_record.to_csv(target)\n",
    "    print(f\"Dataset {os.path.basename(source_file)} transformed and saved!\")\n",
    "\n",
    "    # return dataset length\n",
    "    return record.sig_len"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Directories /home/projects/akita/data/benchmark-data/data-processed/multivariate/LTDB already exist\n"
     ]
    }
   ],
   "source": [
    "# metadata shared by all datasets of this collection\n",
    "dataset_type = \"real\"\n",
    "input_type = \"multivariate\"\n",
    "datetime_index = True\n",
    "train_type = \"unsupervised\"\n",
    "train_is_normal = False\n",
    "\n",
    "# create target directory\n",
    "dataset_subfolder = os.path.join(input_type, dataset_collection_name)\n",
    "target_subfolder = os.path.join(target_folder, dataset_subfolder)\n",
    "try:\n",
    "    os.makedirs(target_subfolder)\n",
    "    print(f\"Created directories {target_subfolder}\")\n",
    "except FileExistsError:\n",
    "    # reuse the existing directory; nothing to clean up\n",
    "    print(f\"Directories {target_subfolder} already exist\")\n",
    "\n",
    "dm = Datasets(target_folder)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Transforming 14046\n",
      "  record 14046.dat loaded\n",
      "  115278/115278 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/14046 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 96721\n",
      "    window size = 95\n",
      "    window margins (left and right) = 47\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    9864 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 14046 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/14046 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/LTDB/14046.test.csv\n",
      "Transforming 14134\n",
      "  record 14134.dat loaded\n",
      "  49632/49769 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/14134 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 27925\n",
      "    window size = 125\n",
      "    window margins (left and right) = 62\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    10859 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 14134 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/14134 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/LTDB/14134.test.csv\n",
      "Transforming 14149\n",
      "  record 14149.dat loaded\n",
      "  144818/145139 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/14149 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 144327\n",
      "    window size = 71\n",
      "    window margins (left and right) = 35\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    264 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 14149 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/14149 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/LTDB/14149.test.csv\n",
      "Transforming 14157\n",
      "  record 14157.dat loaded\n",
      "  88104/88298 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/14157 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 79233\n",
      "    window size = 107\n",
      "    window margins (left and right) = 53\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    4676 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 14157 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/14157 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/LTDB/14157.test.csv\n",
      "Transforming 14172\n",
      "  record 14172.dat loaded\n",
      "  66006/66413 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/14172 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 50908\n",
      "    window size = 159\n",
      "    window margins (left and right) = 79\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    7683 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 14172 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/14172 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/LTDB/14172.test.csv\n",
      "Transforming 14184\n",
      "  record 14184.dat loaded\n",
      "  101543/101567 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/14184 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 59377\n",
      "    window size = 103\n",
      "    window margins (left and right) = 51\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    23436 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 14184 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/14184 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/LTDB/14184.test.csv\n",
      "Transforming 15814\n",
      "  record 15814.dat loaded\n",
      "  103354/103388 beat annotations for /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/15814 loaded (others were ignored)\n",
      "  preparing windows for labeling...\n",
      "    normal beat distance samples = 80604\n",
      "    window size = 99\n",
      "    window margins (left and right) = 49\n",
      "    0 windows for fibrillation anomalies ([,!,])\n",
      "    0 windows for external anomalies (Q,|)\n",
      "    11721 windows for anomalous beats (F,f,S,A,a,V,J,j,E,e)\n",
      "    0 windows for missing beats (x)\n",
      "  ...done.\n",
      "  labeling\n",
      "  reconstructing timestamps\n",
      "Dataset 15814 transformed and saved!\n",
      "Processed source dataset /home/projects/akita/data/benchmark-data/data-raw/MIT-BIH Long-Term ECG Database/15814 -> /home/projects/akita/data/benchmark-data/data-processed/multivariate/LTDB/15814.test.csv\n"
     ]
    }
   ],
   "source": [
    "# dataset transformation\n",
    "transform_file: Callable[[str, str], int] = transform_and_label\n",
    "\n",
    "for dataset_name in load_dataset_names():\n",
    "    # intentionally no file suffix (.dat)\n",
    "    source_file = os.path.join(source_folder, dataset_name)\n",
    "    csv_name = f\"{dataset_name}.test.csv\"\n",
    "    relative_test_path = os.path.join(dataset_subfolder, csv_name)\n",
    "    target_filepath = os.path.join(target_subfolder, csv_name)\n",
    "\n",
    "    # transform the record and label it\n",
    "    dataset_length = transform_file(source_file, target_filepath)\n",
    "    print(f\"Processed source dataset {source_file} -> {target_filepath}\")\n",
    "\n",
    "    # register the dataset in the metadata index\n",
    "    dm.add_dataset((dataset_collection_name, dataset_name),\n",
    "        train_path = None,\n",
    "        test_path = relative_test_path,\n",
    "        dataset_type = dataset_type,\n",
    "        datetime_index = datetime_index,\n",
    "        split_at = None,\n",
    "        train_type = train_type,\n",
    "        train_is_normal = train_is_normal,\n",
    "        input_type = input_type,\n",
    "        dataset_length = dataset_length\n",
    "    )\n",
    "\n",
    "# persist the collected metadata of the benchmark\n",
    "dm.save()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th>train_path</th>\n",
       "      <th>test_path</th>\n",
       "      <th>dataset_type</th>\n",
       "      <th>datetime_index</th>\n",
       "      <th>split_at</th>\n",
       "      <th>train_type</th>\n",
       "      <th>train_is_normal</th>\n",
       "      <th>input_type</th>\n",
       "      <th>length</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>collection_name</th>\n",
       "      <th>dataset_name</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th rowspan=\"7\" valign=\"top\">LTDB</th>\n",
       "      <th>14046</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/LTDB/14046.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>10828800</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14134</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/LTDB/14134.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>6420480</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14149</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/LTDB/14149.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>10997760</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14157</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/LTDB/14157.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>9454080</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14172</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/LTDB/14172.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>9753600</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14184</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/LTDB/14184.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>10252800</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>15814</th>\n",
       "      <td>NaN</td>\n",
       "      <td>multivariate/LTDB/15814.test.csv</td>\n",
       "      <td>real</td>\n",
       "      <td>True</td>\n",
       "      <td>NaN</td>\n",
       "      <td>unsupervised</td>\n",
       "      <td>False</td>\n",
       "      <td>multivariate</td>\n",
       "      <td>10237440</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                             train_path                         test_path  \\\n",
       "collection_name dataset_name                                                \n",
       "LTDB            14046               NaN  multivariate/LTDB/14046.test.csv   \n",
       "                14134               NaN  multivariate/LTDB/14134.test.csv   \n",
       "                14149               NaN  multivariate/LTDB/14149.test.csv   \n",
       "                14157               NaN  multivariate/LTDB/14157.test.csv   \n",
       "                14172               NaN  multivariate/LTDB/14172.test.csv   \n",
       "                14184               NaN  multivariate/LTDB/14184.test.csv   \n",
       "                15814               NaN  multivariate/LTDB/15814.test.csv   \n",
       "\n",
       "                             dataset_type  datetime_index  split_at  \\\n",
       "collection_name dataset_name                                          \n",
       "LTDB            14046                real            True       NaN   \n",
       "                14134                real            True       NaN   \n",
       "                14149                real            True       NaN   \n",
       "                14157                real            True       NaN   \n",
       "                14172                real            True       NaN   \n",
       "                14184                real            True       NaN   \n",
       "                15814                real            True       NaN   \n",
       "\n",
       "                                train_type  train_is_normal    input_type  \\\n",
       "collection_name dataset_name                                                \n",
       "LTDB            14046         unsupervised            False  multivariate   \n",
       "                14134         unsupervised            False  multivariate   \n",
       "                14149         unsupervised            False  multivariate   \n",
       "                14157         unsupervised            False  multivariate   \n",
       "                14172         unsupervised            False  multivariate   \n",
       "                14184         unsupervised            False  multivariate   \n",
       "                15814         unsupervised            False  multivariate   \n",
       "\n",
       "                                length  \n",
       "collection_name dataset_name            \n",
       "LTDB            14046         10828800  \n",
       "                14134          6420480  \n",
       "                14149         10997760  \n",
       "                14157          9454080  \n",
       "                14172          9753600  \n",
       "                14184         10252800  \n",
       "                15814         10237440  "
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# reload the on-disk metadata index and display this collection's entries\n",
    "dm.refresh()\n",
    "dm.df().loc[(slice(dataset_collection_name,dataset_collection_name), slice(None))]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Experimentation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# names of all records of this dataset (read from the RECORDS index file)\n",
    "records = load_dataset_names()\n",
    "records"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# collect every annotation symbol and the set of records it appears in\n",
    "annotations = {}\n",
    "for r in records:\n",
    "    atr = wfdb.rdann(os.path.join(source_folder, r), \"atr\")\n",
    "    df_annotation = pd.DataFrame(atr.symbol, index=atr.sample, columns=[\"Label\"])\n",
    "    for an in df_annotation[\"Label\"].unique():\n",
    "        annotations.setdefault(an, set()).add(atr.record_name)\n",
    "\n",
    "# render the record sets as comma-separated strings for display\n",
    "annotations = {an: \", \".join(names) for an, names in annotations.items()}\n",
    "annotations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "timeeval",
   "language": "python",
   "name": "timeeval"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
