{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/Ucchwas/Apnea-ECG/blob/main/Apnea_ECG.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "RF3B0pZBNx_S",
        "outputId": "8b5e0942-304c-40e9-be51-bdbd6eb9ebb0"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Collecting wfdb\n",
            "  Downloading wfdb-4.1.2-py3-none-any.whl (159 kB)\n",
            "\u001b[?25l     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/160.0 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K     \u001b[91m━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m71.7/160.0 kB\u001b[0m \u001b[31m2.0 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m160.0/160.0 kB\u001b[0m \u001b[31m2.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: SoundFile>=0.10.0 in /usr/local/lib/python3.10/dist-packages (from wfdb) (0.12.1)\n",
            "Requirement already satisfied: matplotlib>=3.2.2 in /usr/local/lib/python3.10/dist-packages (from wfdb) (3.7.1)\n",
            "Requirement already satisfied: numpy>=1.10.1 in /usr/local/lib/python3.10/dist-packages (from wfdb) (1.25.2)\n",
            "Requirement already satisfied: pandas>=1.3.0 in /usr/local/lib/python3.10/dist-packages (from wfdb) (2.0.3)\n",
            "Requirement already satisfied: requests>=2.8.1 in /usr/local/lib/python3.10/dist-packages (from wfdb) (2.31.0)\n",
            "Requirement already satisfied: scipy>=1.0.0 in /usr/local/lib/python3.10/dist-packages (from wfdb) (1.11.4)\n",
            "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.2.2->wfdb) (1.2.1)\n",
            "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.2.2->wfdb) (0.12.1)\n",
            "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.2.2->wfdb) (4.51.0)\n",
            "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.2.2->wfdb) (1.4.5)\n",
            "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.2.2->wfdb) (24.0)\n",
            "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.2.2->wfdb) (9.4.0)\n",
            "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.2.2->wfdb) (3.1.2)\n",
            "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.2.2->wfdb) (2.8.2)\n",
            "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.3.0->wfdb) (2023.4)\n",
            "Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.3.0->wfdb) (2024.1)\n",
            "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.8.1->wfdb) (3.3.2)\n",
            "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.8.1->wfdb) (3.7)\n",
            "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.8.1->wfdb) (2.0.7)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.8.1->wfdb) (2024.2.2)\n",
            "Requirement already satisfied: cffi>=1.0 in /usr/local/lib/python3.10/dist-packages (from SoundFile>=0.10.0->wfdb) (1.16.0)\n",
            "Requirement already satisfied: pycparser in /usr/local/lib/python3.10/dist-packages (from cffi>=1.0->SoundFile>=0.10.0->wfdb) (2.22)\n",
            "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib>=3.2.2->wfdb) (1.16.0)\n",
            "Installing collected packages: wfdb\n",
            "Successfully installed wfdb-4.1.2\n",
            "Collecting tsfel\n",
            "  Downloading tsfel-0.1.7-py3-none-any.whl (55 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m55.8/55.8 kB\u001b[0m \u001b[31m1.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: ipython>=7.4.0 in /usr/local/lib/python3.10/dist-packages (from tsfel) (7.34.0)\n",
            "Requirement already satisfied: numpy>=1.18.5 in /usr/local/lib/python3.10/dist-packages (from tsfel) (1.25.2)\n",
            "Requirement already satisfied: pandas>=1.5.3 in /usr/local/lib/python3.10/dist-packages (from tsfel) (2.0.3)\n",
            "Requirement already satisfied: scikit-learn>=0.21.3 in /usr/local/lib/python3.10/dist-packages (from tsfel) (1.2.2)\n",
            "Requirement already satisfied: scipy>=1.7.3 in /usr/local/lib/python3.10/dist-packages (from tsfel) (1.11.4)\n",
            "Requirement already satisfied: setuptools>=47.1.1 in /usr/local/lib/python3.10/dist-packages (from tsfel) (67.7.2)\n",
            "Requirement already satisfied: statsmodels>=0.12.0 in /usr/local/lib/python3.10/dist-packages (from tsfel) (0.14.2)\n",
            "Collecting jedi>=0.16 (from ipython>=7.4.0->tsfel)\n",
            "  Downloading jedi-0.19.1-py2.py3-none-any.whl (1.6 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m21.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: decorator in /usr/local/lib/python3.10/dist-packages (from ipython>=7.4.0->tsfel) (4.4.2)\n",
            "Requirement already satisfied: pickleshare in /usr/local/lib/python3.10/dist-packages (from ipython>=7.4.0->tsfel) (0.7.5)\n",
            "Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.10/dist-packages (from ipython>=7.4.0->tsfel) (5.7.1)\n",
            "Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from ipython>=7.4.0->tsfel) (3.0.43)\n",
            "Requirement already satisfied: pygments in /usr/local/lib/python3.10/dist-packages (from ipython>=7.4.0->tsfel) (2.16.1)\n",
            "Requirement already satisfied: backcall in /usr/local/lib/python3.10/dist-packages (from ipython>=7.4.0->tsfel) (0.2.0)\n",
            "Requirement already satisfied: matplotlib-inline in /usr/local/lib/python3.10/dist-packages (from ipython>=7.4.0->tsfel) (0.1.7)\n",
            "Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.10/dist-packages (from ipython>=7.4.0->tsfel) (4.9.0)\n",
            "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.5.3->tsfel) (2.8.2)\n",
            "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.5.3->tsfel) (2023.4)\n",
            "Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.5.3->tsfel) (2024.1)\n",
            "Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21.3->tsfel) (1.4.2)\n",
            "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21.3->tsfel) (3.5.0)\n",
            "Requirement already satisfied: patsy>=0.5.6 in /usr/local/lib/python3.10/dist-packages (from statsmodels>=0.12.0->tsfel) (0.5.6)\n",
            "Requirement already satisfied: packaging>=21.3 in /usr/local/lib/python3.10/dist-packages (from statsmodels>=0.12.0->tsfel) (24.0)\n",
            "Requirement already satisfied: parso<0.9.0,>=0.8.3 in /usr/local/lib/python3.10/dist-packages (from jedi>=0.16->ipython>=7.4.0->tsfel) (0.8.4)\n",
            "Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from patsy>=0.5.6->statsmodels>=0.12.0->tsfel) (1.16.0)\n",
            "Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.10/dist-packages (from pexpect>4.3->ipython>=7.4.0->tsfel) (0.7.0)\n",
            "Requirement already satisfied: wcwidth in /usr/local/lib/python3.10/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython>=7.4.0->tsfel) (0.2.13)\n",
            "Installing collected packages: jedi, tsfel\n",
            "Successfully installed jedi-0.19.1 tsfel-0.1.7\n",
            "Collecting biosppy\n",
            "  Downloading biosppy-2.2.2-py2.py3-none-any.whl (149 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m149.4/149.4 kB\u001b[0m \u001b[31m1.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: bidict in /usr/local/lib/python3.10/dist-packages (from biosppy) (0.23.1)\n",
            "Requirement already satisfied: h5py in /usr/local/lib/python3.10/dist-packages (from biosppy) (3.9.0)\n",
            "Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from biosppy) (3.7.1)\n",
            "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from biosppy) (1.25.2)\n",
            "Requirement already satisfied: scikit-learn in /usr/local/lib/python3.10/dist-packages (from biosppy) (1.2.2)\n",
            "Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from biosppy) (1.11.4)\n",
            "Collecting shortuuid (from biosppy)\n",
            "  Downloading shortuuid-1.0.13-py3-none-any.whl (10 kB)\n",
            "Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from biosppy) (1.16.0)\n",
            "Requirement already satisfied: joblib in /usr/local/lib/python3.10/dist-packages (from biosppy) (1.4.2)\n",
            "Requirement already satisfied: opencv-python in /usr/local/lib/python3.10/dist-packages (from biosppy) (4.8.0.76)\n",
            "Requirement already satisfied: pywavelets in /usr/local/lib/python3.10/dist-packages (from biosppy) (1.6.0)\n",
            "Collecting mock (from biosppy)\n",
            "  Downloading mock-5.1.0-py3-none-any.whl (30 kB)\n",
            "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->biosppy) (1.2.1)\n",
            "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->biosppy) (0.12.1)\n",
            "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->biosppy) (4.51.0)\n",
            "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->biosppy) (1.4.5)\n",
            "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->biosppy) (24.0)\n",
            "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->biosppy) (9.4.0)\n",
            "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->biosppy) (3.1.2)\n",
            "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->biosppy) (2.8.2)\n",
            "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn->biosppy) (3.5.0)\n",
            "Installing collected packages: shortuuid, mock, biosppy\n",
            "Successfully installed biosppy-2.2.2 mock-5.1.0 shortuuid-1.0.13\n"
          ]
        }
      ],
      "source": [
        "# Install dependencies with %pip (not !pip) so packages are installed\n",
        "# into the environment of the running kernel.\n",
        "%pip install wfdb\n",
        "%pip install tsfel\n",
        "%pip install biosppy\n",
        "\n",
        "# Signal I/O, numerics, and plotting\n",
        "import wfdb\n",
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "from biosppy.signals import ecg\n",
        "from biosppy.signals import resp\n",
        "import tsfel\n",
        "import pandas as pd\n",
        "\n",
        "# Model training / evaluation utilities\n",
        "from sklearn.model_selection import train_test_split\n",
        "from sklearn.preprocessing import StandardScaler\n",
        "from sklearn.preprocessing import LabelEncoder\n",
        "from sklearn.metrics import classification_report, confusion_matrix\n",
        "from tensorflow.keras.models import Sequential\n",
        "from tensorflow.keras.layers import Dense, Dropout\n",
        "from tensorflow.keras.optimizers import Adam\n",
        "from tensorflow.keras.callbacks import EarlyStopping\n",
        "from tensorflow.keras.utils import to_categorical\n",
        "from imblearn.over_sampling import RandomOverSampler\n",
        "from tensorflow import keras"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "iVgci3PEN83Q"
      },
      "outputs": [],
      "source": [
        "# Records from the PhysioNet 'apnea-ecg' database; the 'r' suffix marks the\n",
        "# versions carrying the extra respiration/SpO2 channels.\n",
        "record_names = ['a01r', 'a02r', 'a03r', 'a04r', 'b01r', 'c01r', 'c02r', 'c03r']\n",
        "\n",
        "# Download each record (all four channels) straight from PhysioNet.\n",
        "records = [\n",
        "    wfdb.rdrecord(name, channels=[0, 1, 2, 3], pn_dir='apnea-ecg')\n",
        "    for name in record_names\n",
        "]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "KGP3CNKTOtyD",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "0cfe17d5-c4fc-4d30-c183-a74806bc8188"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Record 1 - a01r\n",
            "record_name: a01r\n",
            "n_sig: 4\n",
            "fs: 100\n",
            "counter_freq: None\n",
            "base_counter: None\n",
            "sig_len: 2956796\n",
            "base_time: None\n",
            "base_date: None\n",
            "comments: []\n",
            "sig_name: ['Resp C', 'Resp A', 'Resp N', 'SpO2']\n",
            "p_signal: [[-0.1015  -0.3446   0.23545 98.     ]\n",
            " [-0.1025  -0.3455   0.2364  98.     ]\n",
            " [-0.1035  -0.3464   0.23735 98.     ]\n",
            " ...\n",
            " [ 0.       0.       0.       0.     ]\n",
            " [ 0.       0.       0.       0.     ]\n",
            " [ 0.       0.       0.       0.     ]]\n",
            "d_signal: None\n",
            "e_p_signal: None\n",
            "e_d_signal: None\n",
            "file_name: ['a01r.dat', 'a01r.dat', 'a01r.dat', 'a01r.dat']\n",
            "fmt: ['16', '16', '16', '16']\n",
            "samps_per_frame: [1, 1, 1, 1]\n",
            "skew: [None, None, None, None]\n",
            "byte_offset: [None, None, None, None]\n",
            "adc_gain: [20000.0, 20000.0, 20000.0, 1.0]\n",
            "baseline: [0, 0, 0, 0]\n",
            "units: ['mV', 'mV', 'mV', 'mV']\n",
            "adc_res: [16, 16, 16, 16]\n",
            "adc_zero: [0, 0, 0, 0]\n",
            "init_value: [-2030, -6892, 4709, 98]\n",
            "checksum: [-21303, -27013, -10435, -4510]\n",
            "block_size: [0, 0, 0, 0]\n",
            "\n",
            "\n",
            "Record 2 - a02r\n",
            "record_name: a02r\n",
            "n_sig: 4\n",
            "fs: 100\n",
            "counter_freq: None\n",
            "base_counter: None\n",
            "sig_len: 3181796\n",
            "base_time: None\n",
            "base_date: None\n",
            "comments: []\n",
            "sig_name: ['Resp C', 'Resp A', 'Resp N', 'SpO2']\n",
            "p_signal: [[-8.5250e-02  2.2005e-01 -4.1895e-01  9.6000e+01]\n",
            " [-8.4350e-02  2.0915e-01 -4.0935e-01  9.6000e+01]\n",
            " [-8.3450e-02  1.9830e-01 -3.9980e-01  9.6000e+01]\n",
            " ...\n",
            " [ 0.0000e+00  0.0000e+00  0.0000e+00  0.0000e+00]\n",
            " [ 0.0000e+00  0.0000e+00  0.0000e+00  0.0000e+00]\n",
            " [ 0.0000e+00  0.0000e+00  0.0000e+00  0.0000e+00]]\n",
            "d_signal: None\n",
            "e_p_signal: None\n",
            "e_d_signal: None\n",
            "file_name: ['a02r.dat', 'a02r.dat', 'a02r.dat', 'a02r.dat']\n",
            "fmt: ['16', '16', '16', '16']\n",
            "samps_per_frame: [1, 1, 1, 1]\n",
            "skew: [None, None, None, None]\n",
            "byte_offset: [None, None, None, None]\n",
            "adc_gain: [20000.0, 20000.0, 20000.0, 1.0]\n",
            "baseline: [0, 0, 0, 0]\n",
            "units: ['mV', 'mV', 'mV', 'mV']\n",
            "adc_res: [16, 16, 16, 16]\n",
            "adc_zero: [0, 0, 0, 0]\n",
            "init_value: [-1705, 4401, -8379, 96]\n",
            "checksum: [-15209, 2086, -6766, -31524]\n",
            "block_size: [0, 0, 0, 0]\n",
            "\n",
            "\n",
            "Record 3 - a03r\n",
            "record_name: a03r\n",
            "n_sig: 4\n",
            "fs: 100\n",
            "counter_freq: None\n",
            "base_counter: None\n",
            "sig_len: 3134796\n",
            "base_time: None\n",
            "base_date: None\n",
            "comments: []\n",
            "sig_name: ['Resp C', 'Resp A', 'Resp N', 'SpO2']\n",
            "p_signal: [[-6.7900e-02 -6.1850e-02 -5.0170e-01  9.2000e+01]\n",
            " [-6.6950e-02 -6.1050e-02 -5.0270e-01  9.2000e+01]\n",
            " [-6.6050e-02 -6.0300e-02 -5.0375e-01  9.2000e+01]\n",
            " ...\n",
            " [ 0.0000e+00  0.0000e+00  0.0000e+00  0.0000e+00]\n",
            " [ 0.0000e+00  0.0000e+00  0.0000e+00  0.0000e+00]\n",
            " [ 0.0000e+00  0.0000e+00  0.0000e+00  0.0000e+00]]\n",
            "d_signal: None\n",
            "e_p_signal: None\n",
            "e_d_signal: None\n",
            "file_name: ['a03r.dat', 'a03r.dat', 'a03r.dat', 'a03r.dat']\n",
            "fmt: ['16', '16', '16', '16']\n",
            "samps_per_frame: [1, 1, 1, 1]\n",
            "skew: [None, None, None, None]\n",
            "byte_offset: [None, None, None, None]\n",
            "adc_gain: [20000.0, 20000.0, 20000.0, 1.0]\n",
            "baseline: [0, 0, 0, 0]\n",
            "units: ['mV', 'mV', 'mV', 'mV']\n",
            "adc_res: [16, 16, 16, 16]\n",
            "adc_zero: [0, 0, 0, 0]\n",
            "init_value: [-1358, -1237, -10034, 92]\n",
            "checksum: [-10034, 27032, -4788, -5589]\n",
            "block_size: [0, 0, 0, 0]\n",
            "\n",
            "\n",
            "Record 4 - a04r\n",
            "record_name: a04r\n",
            "n_sig: 4\n",
            "fs: 100\n",
            "counter_freq: None\n",
            "base_counter: None\n",
            "sig_len: 2979796\n",
            "base_time: None\n",
            "base_date: None\n",
            "comments: []\n",
            "sig_name: ['Resp C', 'Resp A', 'Resp N', 'SpO2']\n",
            "p_signal: [[-0.14035 -0.144    0.10895 93.     ]\n",
            " [-0.1406  -0.1465   0.11435 93.     ]\n",
            " [-0.1409  -0.149    0.1198  93.     ]\n",
            " ...\n",
            " [ 0.       0.       0.       0.     ]\n",
            " [ 0.       0.       0.       0.     ]\n",
            " [ 0.       0.       0.       0.     ]]\n",
            "d_signal: None\n",
            "e_p_signal: None\n",
            "e_d_signal: None\n",
            "file_name: ['a04r.dat', 'a04r.dat', 'a04r.dat', 'a04r.dat']\n",
            "fmt: ['16', '16', '16', '16']\n",
            "samps_per_frame: [1, 1, 1, 1]\n",
            "skew: [None, None, None, None]\n",
            "byte_offset: [None, None, None, None]\n",
            "adc_gain: [20000.0, 20000.0, 20000.0, 1.0]\n",
            "baseline: [0, 0, 0, 0]\n",
            "units: ['mV', 'mV', 'mV', 'mV']\n",
            "adc_res: [16, 16, 16, 16]\n",
            "adc_zero: [0, 0, 0, 0]\n",
            "init_value: [-2807, -2880, 2179, 93]\n",
            "checksum: [-17832, -26050, 4098, -8020]\n",
            "block_size: [0, 0, 0, 0]\n",
            "\n",
            "\n",
            "Record 5 - b01r\n",
            "record_name: b01r\n",
            "n_sig: 4\n",
            "fs: 100\n",
            "counter_freq: None\n",
            "base_counter: None\n",
            "sig_len: 2916796\n",
            "base_time: None\n",
            "base_date: None\n",
            "comments: []\n",
            "sig_name: ['Resp C', 'Resp A', 'Resp N', 'SpO2']\n",
            "p_signal: [[-9.5150e-02 -8.0150e-02 -4.7650e-02  0.0000e+00]\n",
            " [-9.4800e-02 -7.9950e-02 -4.7700e-02  0.0000e+00]\n",
            " [-9.4450e-02 -7.9750e-02 -4.7750e-02  0.0000e+00]\n",
            " ...\n",
            " [-3.4170e-01 -2.9835e-01 -7.8050e-02  9.6000e+01]\n",
            " [-3.3970e-01 -2.9545e-01 -7.6550e-02  9.6000e+01]\n",
            " [-3.3770e-01 -2.9255e-01 -7.5100e-02  9.7000e+01]]\n",
            "d_signal: None\n",
            "e_p_signal: None\n",
            "e_d_signal: None\n",
            "file_name: ['b01r.dat', 'b01r.dat', 'b01r.dat', 'b01r.dat']\n",
            "fmt: ['16', '16', '16', '16']\n",
            "samps_per_frame: [1, 1, 1, 1]\n",
            "skew: [None, None, None, None]\n",
            "byte_offset: [None, None, None, None]\n",
            "adc_gain: [20000.0, 20000.0, 20000.0, 1.0]\n",
            "baseline: [0, 0, 0, 0]\n",
            "units: ['mV', 'mV', 'mV', 'mV']\n",
            "adc_res: [16, 16, 16, 16]\n",
            "adc_zero: [0, 0, 0, 0]\n",
            "init_value: [-1903, -1603, -953, 0]\n",
            "checksum: [31184, -29611, -19875, -15249]\n",
            "block_size: [0, 0, 0, 0]\n",
            "\n",
            "\n",
            "Record 6 - c01r\n",
            "record_name: c01r\n",
            "n_sig: 4\n",
            "fs: 100\n",
            "counter_freq: None\n",
            "base_counter: None\n",
            "sig_len: 2898796\n",
            "base_time: None\n",
            "base_date: None\n",
            "comments: []\n",
            "sig_name: ['Resp C', 'Resp A', 'Resp N', 'SpO2']\n",
            "p_signal: [[-0.1135  -0.9447  -0.0344   0.     ]\n",
            " [-0.11345 -0.9452  -0.0344   0.     ]\n",
            " [-0.1134  -0.94575 -0.0344   0.     ]\n",
            " ...\n",
            " [ 0.       0.       0.       0.     ]\n",
            " [ 0.       0.       0.       0.     ]\n",
            " [ 0.       0.       0.       0.     ]]\n",
            "d_signal: None\n",
            "e_p_signal: None\n",
            "e_d_signal: None\n",
            "file_name: ['c01r.dat', 'c01r.dat', 'c01r.dat', 'c01r.dat']\n",
            "fmt: ['16', '16', '16', '16']\n",
            "samps_per_frame: [1, 1, 1, 1]\n",
            "skew: [None, None, None, None]\n",
            "byte_offset: [None, None, None, None]\n",
            "adc_gain: [20000.0, 20000.0, 20000.0, 1.0]\n",
            "baseline: [0, 0, 0, 0]\n",
            "units: ['mV', 'mV', 'mV', 'mV']\n",
            "adc_res: [16, 16, 16, 16]\n",
            "adc_zero: [0, 0, 0, 0]\n",
            "init_value: [-2270, -18894, -688, 0]\n",
            "checksum: [-9731, 14240, 20510, -30576]\n",
            "block_size: [0, 0, 0, 0]\n",
            "\n",
            "\n",
            "Record 7 - c02r\n",
            "record_name: c02r\n",
            "n_sig: 4\n",
            "fs: 100\n",
            "counter_freq: None\n",
            "base_counter: None\n",
            "sig_len: 3006796\n",
            "base_time: None\n",
            "base_date: None\n",
            "comments: []\n",
            "sig_name: ['Resp C', 'Resp A', 'Resp N', 'SpO2']\n",
            "p_signal: [[ 0.6526   0.64235 -0.039    0.     ]\n",
            " [ 0.6564   0.6461  -0.03905  0.     ]\n",
            " [ 0.66025  0.6499  -0.03915  0.     ]\n",
            " ...\n",
            " [ 0.       0.       0.       0.     ]\n",
            " [ 0.       0.       0.       0.     ]\n",
            " [ 0.       0.       0.       0.     ]]\n",
            "d_signal: None\n",
            "e_p_signal: None\n",
            "e_d_signal: None\n",
            "file_name: ['c02r.dat', 'c02r.dat', 'c02r.dat', 'c02r.dat']\n",
            "fmt: ['16', '16', '16', '16']\n",
            "samps_per_frame: [1, 1, 1, 1]\n",
            "skew: [None, None, None, None]\n",
            "byte_offset: [None, None, None, None]\n",
            "adc_gain: [20000.0, 20000.0, 20000.0, 1.0]\n",
            "baseline: [0, 0, 0, 0]\n",
            "units: ['mV', 'mV', 'mV', 'mV']\n",
            "adc_res: [16, 16, 16, 16]\n",
            "adc_zero: [0, 0, 0, 0]\n",
            "init_value: [13052, 12847, -780, 0]\n",
            "checksum: [29503, -22109, 1364, 26896]\n",
            "block_size: [0, 0, 0, 0]\n",
            "\n",
            "\n",
            "Record 8 - c03r\n",
            "record_name: c03r\n",
            "n_sig: 4\n",
            "fs: 100\n",
            "counter_freq: None\n",
            "base_counter: None\n",
            "sig_len: 2719796\n",
            "base_time: None\n",
            "base_date: None\n",
            "comments: []\n",
            "sig_name: ['Resp C', 'Resp A', 'Resp N', 'SpO2']\n",
            "p_signal: [[-1.3890e-01 -6.3440e-01 -6.4400e-02  9.7000e+01]\n",
            " [-1.3850e-01 -6.3560e-01 -6.4800e-02  9.7000e+01]\n",
            " [-1.3815e-01 -6.3685e-01 -6.5200e-02  9.7000e+01]\n",
            " ...\n",
            " [ 0.0000e+00  0.0000e+00  0.0000e+00  0.0000e+00]\n",
            " [ 0.0000e+00  0.0000e+00  0.0000e+00  0.0000e+00]\n",
            " [ 0.0000e+00  0.0000e+00  0.0000e+00  0.0000e+00]]\n",
            "d_signal: None\n",
            "e_p_signal: None\n",
            "e_d_signal: None\n",
            "file_name: ['c03r.dat', 'c03r.dat', 'c03r.dat', 'c03r.dat']\n",
            "fmt: ['16', '16', '16', '16']\n",
            "samps_per_frame: [1, 1, 1, 1]\n",
            "skew: [None, None, None, None]\n",
            "byte_offset: [None, None, None, None]\n",
            "adc_gain: [20000.0, 20000.0, 20000.0, 1.0]\n",
            "baseline: [0, 0, 0, 0]\n",
            "units: ['mV', 'mV', 'mV', 'mV']\n",
            "adc_res: [16, 16, 16, 16]\n",
            "adc_zero: [0, 0, 0, 0]\n",
            "init_value: [-2778, -12688, -1288, 97]\n",
            "checksum: [10621, -20382, 15656, 13580]\n",
            "block_size: [0, 0, 0, 0]\n",
            "\n",
            "\n"
          ]
        }
      ],
      "source": [
        "# Dump every attribute of each loaded record for inspection.\n",
        "for idx, record in enumerate(records, start=1):\n",
        "    print(f\"Record {idx} - {record.record_name}\")\n",
        "    for key, value in vars(record).items():\n",
        "        print(f\"{key}: {value}\")\n",
        "    print(\"\\n\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "_Hir40UgRFjv"
      },
      "outputs": [],
      "source": [
        "# Gather each physiological channel across all records.\n",
        "# Channel order follows sig_name: 0 = Resp C, 1 = Resp A, 2 = Resp N, 3 = SpO2.\n",
        "resp_signals_C = [record.p_signal[:, 0] for record in records]\n",
        "resp_signals_A = [record.p_signal[:, 1] for record in records]\n",
        "resp_signals_N = [record.p_signal[:, 2] for record in records]\n",
        "spo2_signals = [record.p_signal[:, 3] for record in records]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "MZN1Rw3RQRon"
      },
      "outputs": [],
      "source": [
        "# Fetch the apnea annotations ('.apn' extension) for every record name.\n",
        "annotations_list = [\n",
        "    wfdb.rdann(name, extension='apn', pn_dir='apnea-ecg')\n",
        "    for name in record_names\n",
        "]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "v9atMfTZBdjU",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "1240eb96-d30a-46a2-f6cb-0b8e34ce5e31"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Annotations for Record 1 - a01r\n",
            "Symbol Labels: ['N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 
'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A']\n",
            "\n",
            "Annotations for Record 2 - a02r\n",
            "Symbol Labels: ['N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'A', 'A', 
'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A']\n",
            "\n",
            "Annotations for Record 3 - a03r\n",
            "Symbol Labels: ['N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 
'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A']\n",
            "\n",
            "Annotations for Record 4 - a04r\n",
            "Symbol Labels: ['N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 
'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A']\n",
            "\n",
            "Annotations for Record 5 - b01r\n",
            "Symbol Labels: ['N', 'N', 'N', 'N', 'N', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 
'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'A', 'N', 'N', 'N', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N']\n",
            "\n",
            "Annotations for Record 6 - c01r\n",
            "Symbol Labels: ['N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 
'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N']\n",
            "\n",
            "Annotations for Record 7 - c02r\n",
            "Symbol Labels: ['N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'A', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 
'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N']\n",
            "\n",
            "Annotations for Record 8 - c03r\n",
            "Symbol Labels: ['N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 
'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N']\n",
            "\n"
          ]
        }
      ],
      "source": [
        "# Display the per-record annotation symbols ('A' / 'N'; counted as apnea vs. non-apnea in a later cell).\n",
        "for idx, annotations in enumerate(annotations_list):\n",
        "    print(f\"Annotations for Record {idx + 1} - {annotations.record_name}\")\n",
        "    #print(f\"Sample Labels: {annotations.sample}\")\n",
        "    print(f\"Symbol Labels: {annotations.symbol}\\n\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "s_FUJo9iR3fe",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "a6dc7079-0cdd-4d05-8690-cd7f703ec39f"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Total Number of Apnea signals: 1609\n",
            "Total Number of Non-apnea signals: 2346\n"
          ]
        }
      ],
      "source": [
        "# Tally apnea ('A') and non-apnea ('N') annotation symbols over every record.\n",
        "total_apnea_count = 0\n",
        "total_non_apnea_count = 0\n",
        "\n",
        "for annotations in annotations_list:\n",
        "    # list.count gives the per-record occurrences of each symbol\n",
        "    symbols = annotations.symbol\n",
        "    total_apnea_count += symbols.count('A')\n",
        "    total_non_apnea_count += symbols.count('N')\n",
        "\n",
        "# Report the aggregate class balance across all records.\n",
        "print(f\"Total Number of Apnea signals: {total_apnea_count}\")\n",
        "print(f\"Total Number of Non-apnea signals: {total_non_apnea_count}\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "5rEvW1L8DzpQ",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "0104fb8d-1119-41d5-9147-e4d0d6bd03ee"
      },
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "3955"
            ]
          },
          "metadata": {},
          "execution_count": 8
        }
      ],
      "source": [
        "# Build a minimal 2-feature dataset: one (mean, std, label) tuple per annotated segment.\n",
        "# Initialize empty list to store features\n",
        "two_features = []\n",
        "\n",
        "# Loop through each record and corresponding annotations\n",
        "for record, annotations in zip(records, annotations_list):\n",
        "    # Extract signals\n",
        "    # NOTE(review): channel index 3 is assumed to be the respiratory signal (per the variable name) — confirm against record.sig_name.\n",
        "    resp_signals_C = record.p_signal[:, 3]\n",
        "\n",
        "    # Extract features and labels\n",
        "    features = []\n",
        "    for i in range(len(annotations.sample)):\n",
        "        # Each segment spans from one annotation sample to the next; the last one runs to the end of the signal.\n",
        "        start = annotations.sample[i]\n",
        "        end = annotations.sample[i + 1] if i < len(annotations.sample) - 1 else len(resp_signals_C)\n",
        "        segment = resp_signals_C[start:end]\n",
        "        # NOTE(review): if start == end the segment is empty and mean/std become NaN\n",
        "        # (empty-slice RuntimeWarnings were recorded when the expanded feature cell below ran).\n",
        "        mean = np.mean(segment)\n",
        "        std = np.std(segment)\n",
        "        label = annotations.symbol[i]  # Assuming labels are in annotations.symbol\n",
        "        features.append((mean, std, label))\n",
        "\n",
        "    # Append features for the current record to the overall list\n",
        "    two_features.extend(features)\n",
        "# Last expression: total number of extracted segments (displayed as the cell result).\n",
        "len(two_features)"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# NOTE: legacy draft of the feature-extraction loop, intentionally kept commented out;\n",
        "# an expanded version with additional time/frequency-domain features runs in the next cell.\n",
        "# import numpy as np\n",
        "\n",
        "# # Initialize empty list to store all features\n",
        "# extracted_features_list = []\n",
        "\n",
        "# # Loop through each record and corresponding annotations\n",
        "# for record, annotations in zip(records, annotations_list):\n",
        "#     # Extract respiratory signals\n",
        "#     resp_signals_C = record.p_signal[:, 0]\n",
        "\n",
        "#     # Extract features and labels\n",
        "#     for i in range(len(annotations.sample)):\n",
        "#         start = annotations.sample[i]\n",
        "#         end = annotations.sample[i + 1] if i < len(annotations.sample) - 1 else len(resp_signals_C)\n",
        "#         segment = resp_signals_C[start:end]\n",
        "\n",
        "#         # Time domain features\n",
        "#         mean = np.mean(segment)\n",
        "#         std = np.std(segment)\n",
        "#         skewness = np.mean((segment - mean) ** 3) / (np.std(segment) ** 3)\n",
        "#         area_abs = np.sum(np.abs(segment))\n",
        "\n",
        "#         # Peak-related features\n",
        "#         peak_heights = segment - np.min(segment)\n",
        "#         mean_peak_height = np.mean(peak_heights)\n",
        "#         std_peak_height = np.std(peak_heights)\n",
        "#         skewness_peak_height = np.mean((peak_heights - mean_peak_height) ** 3) / (np.std(peak_heights) ** 3)\n",
        "#         num_peaks = len(segment)\n",
        "#         mean_inter_peak_distance = len(segment) / np.sum(np.diff(np.where(peak_heights > 0)[0]))\n",
        "#         std_peak_distance = np.std(np.diff(np.where(peak_heights > 0)[0]))\n",
        "#         skewness_inter_peak_distance = np.mean((np.diff(np.where(peak_heights > 0)[0]) - mean_inter_peak_distance) ** 3) / (np.std(np.diff(np.where(peak_heights > 0)[0])) ** 3)\n",
        "#         sum_peak_heights = np.sum(peak_heights)\n",
        "#         peak_frequency = len(segment) / np.sum(segment > (np.max(segment) / 2))\n",
        "\n",
        "#         # Frequency domain features (example using FFT)\n",
        "#         fft_result = np.fft.fft(segment)\n",
        "#         magnitude_spectrum = np.abs(fft_result)\n",
        "#         dominant_frequency = np.argmax(magnitude_spectrum)  # Dominant frequency component\n",
        "#         power_spectrum = magnitude_spectrum ** 2  # Power spectrum\n",
        "#         total_power = np.sum(power_spectrum)  # Total power\n",
        "#         mean_frequency = np.sum(magnitude_spectrum * np.arange(len(magnitude_spectrum))) / np.sum(magnitude_spectrum)\n",
        "#         central_frequency = np.argmax(magnitude_spectrum[len(magnitude_spectrum) // 2:]) + len(magnitude_spectrum) // 2\n",
        "\n",
        "#         # Define low and high frequency indices for computing band power\n",
        "#         # Adjust these values according to your requirements\n",
        "#         low_frequency_index = 0\n",
        "#         high_frequency_index = len(power_spectrum) // 2\n",
        "\n",
        "#         band_power = np.sum(power_spectrum[low_frequency_index:high_frequency_index])  # Adjust low and high frequency index according to your requirements\n",
        "\n",
        "#         # Store features along with label\n",
        "#         label = 1 if annotations.symbol[i] == 'A' else 0  # Assuming labels are in annotations.symbol\n",
        "#         features = [mean, std, skewness, area_abs, mean_peak_height, std_peak_height, skewness_peak_height,\n",
        "#                     num_peaks, mean_inter_peak_distance, std_peak_distance, skewness_inter_peak_distance,\n",
        "#                     sum_peak_heights, peak_frequency, mean_frequency, central_frequency, band_power, label]\n",
        "#         extracted_features_list.append(features)\n",
        "\n",
        "# # Print length of extracted features list\n",
        "# print(\"Total number of extracted features:\", len(extracted_features_list))"
      ],
      "metadata": {
        "id": "JmW5unLllFHK"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Extract a 33-dimensional feature vector (+ binary label) per annotated segment of channel 3.\n",
        "# NOTE(review): unpinned '!pip install' inside an analysis cell; prefer a pinned '%pip install pyentrp==<version>' in a dedicated top cell.\n",
        "!pip install pyentrp\n",
        "import numpy as np\n",
        "from scipy.stats import entropy\n",
        "from scipy.signal import welch\n",
        "# NOTE(review): 'ent' (pyentrp) is imported but never used in this cell — remove or use it.\n",
        "from pyentrp import entropy as ent\n",
        "\n",
        "# Initialize empty list to store all features\n",
        "extracted_features_list = []\n",
        "\n",
        "# Loop through each record and corresponding annotations\n",
        "for record, annotations in zip(records, annotations_list):\n",
        "    # Extract respiratory signals\n",
        "    # NOTE(review): assumes channel index 3 is the respiratory channel — confirm against record.sig_name.\n",
        "    resp_signals_C = record.p_signal[:, 3]\n",
        "\n",
        "    # Extract features and labels\n",
        "    for i in range(len(annotations.sample)):\n",
        "        # Segment spans consecutive annotation samples; the last segment runs to the end of the signal.\n",
        "        start = annotations.sample[i]\n",
        "        end = annotations.sample[i + 1] if i < len(annotations.sample) - 1 else len(resp_signals_C)\n",
        "        segment = resp_signals_C[start:end]\n",
        "\n",
        "        # Time domain features\n",
        "        mean = np.mean(segment)\n",
        "        std = np.std(segment)\n",
        "        # NOTE(review): for constant (std == 0) or empty segments these divisions yield NaN/inf —\n",
        "        # the saved run emitted 'invalid value encountered in scalar divide' RuntimeWarnings here.\n",
        "        skewness = np.mean((segment - mean) ** 3) / (np.std(segment) ** 3)\n",
        "        area_abs = np.sum(np.abs(segment))\n",
        "        kurtosis = np.mean((segment - mean) ** 4) / (np.std(segment) ** 4)  # Kurtosis\n",
        "        min_value = np.min(segment)  # Minimum value\n",
        "        max_value = np.max(segment)  # Maximum value\n",
        "        rms = np.sqrt(np.mean(segment ** 2))  # Root Mean Square\n",
        "        zcr = (np.diff(np.sign(segment)) != 0).sum()  # Zero Crossing Rate\n",
        "        signal_energy = np.sum(segment ** 2)\n",
        "        # NOTE(review): scipy.stats.entropy normalizes its input as a probability vector;\n",
        "        # raw amplitudes can be negative, producing NaN (an 'invalid value' warning from\n",
        "        # scipy/stats/_entropy.py was recorded in the saved run). Consider a histogram-based entropy instead.\n",
        "        entropy_signal = entropy(segment)  # Shannon entropy\n",
        "\n",
        "        # Peak-related features\n",
        "        # 'peak_heights' is the segment shifted so its minimum is 0 — not detected peaks.\n",
        "        peak_heights = segment - np.min(segment)\n",
        "        mean_peak_height = np.mean(peak_heights)\n",
        "        std_peak_height = np.std(peak_heights)\n",
        "        skewness_peak_height = np.mean((peak_heights - mean_peak_height) ** 3) / (np.std(peak_heights) ** 3)\n",
        "        # NOTE(review): this is the segment length, not a count of detected peaks — TODO confirm intent.\n",
        "        num_peaks = len(segment)\n",
        "        # NOTE(review): divide-by-zero when fewer than two samples exceed the minimum\n",
        "        # ('divide by zero encountered in scalar divide' warnings recorded for this line and the skewness below).\n",
        "        mean_inter_peak_distance = len(segment) / np.sum(np.diff(np.where(peak_heights > 0)[0]))\n",
        "        std_peak_distance = np.std(np.diff(np.where(peak_heights > 0)[0]))\n",
        "        skewness_inter_peak_distance = np.mean((np.diff(np.where(peak_heights > 0)[0]) - mean_inter_peak_distance) ** 3) / (np.std(np.diff(np.where(peak_heights > 0)[0])) ** 3)\n",
        "        sum_peak_heights = np.sum(peak_heights)\n",
        "        # NOTE(review): divide-by-zero when no sample exceeds half the maximum (warning recorded for this line).\n",
        "        peak_frequency = len(segment) / np.sum(segment > (np.max(segment) / 2))\n",
        "        peak_indices = np.where(peak_heights > 0)[0]\n",
        "        max_peak_height = np.max(peak_heights) if len(peak_indices) > 0 else 0  # Maximum peak height\n",
        "        min_peak_height = np.min(peak_heights) if len(peak_indices) > 0 else 0  # Minimum peak height\n",
        "\n",
        "        # Frequency domain features (example using FFT)\n",
        "        fft_result = np.fft.fft(segment)\n",
        "        magnitude_spectrum = np.abs(fft_result)\n",
        "        dominant_frequency = np.argmax(magnitude_spectrum)  # Dominant frequency component\n",
        "        power_spectrum = magnitude_spectrum ** 2  # Power spectrum\n",
        "        # NOTE(review): total_power is computed but never added to the feature vector below.\n",
        "        total_power = np.sum(power_spectrum)  # Total power\n",
        "        # NOTE(review): mean_frequency is computed but never added to the feature vector below;\n",
        "        # its divide also warned on empty/zero spectra in the saved run.\n",
        "        mean_frequency = np.sum(magnitude_spectrum * np.arange(len(magnitude_spectrum))) / np.sum(magnitude_spectrum)\n",
        "        central_frequency = np.argmax(magnitude_spectrum[len(magnitude_spectrum) // 2:]) + len(magnitude_spectrum) // 2\n",
        "        # Define low and high frequency indices for computing band power\n",
        "        # Adjust these values according to your requirements\n",
        "        low_frequency_index = 0\n",
        "        high_frequency_index = len(power_spectrum) // 2\n",
        "        band_power = np.sum(power_spectrum[low_frequency_index:high_frequency_index])  # Adjust low and high frequency index according to your requirements\n",
        "        f, pxx = welch(segment)  # Power spectral density estimation\n",
        "        spectral_centroid = np.sum(f * pxx) / np.sum(pxx)  # Spectral Centroid\n",
        "        spectral_spread = np.sqrt(np.sum(((f - spectral_centroid) ** 2) * pxx) / np.sum(pxx))  # Spectral Spread\n",
        "        spectral_skewness = np.sum(((f - spectral_centroid) ** 3) * pxx) / (spectral_spread ** 3)  # Spectral Skewness\n",
        "        spectral_kurtosis = np.sum(((f - spectral_centroid) ** 4) * pxx) / (spectral_spread ** 4)  # Spectral Kurtosis\n",
        "\n",
        "        # Statistical Features\n",
        "        median = np.median(segment)\n",
        "        percentile_25 = np.percentile(segment, 25)\n",
        "        percentile_75 = np.percentile(segment, 75)\n",
        "        data_range = np.max(segment) - np.min(segment)\n",
        "\n",
        "        # Store features along with label\n",
        "        label = 1 if annotations.symbol[i] == 'A' else 0  # Assuming labels are in annotations.symbol\n",
        "        features = [mean, std, skewness, area_abs, kurtosis, min_value, max_value,\n",
        "            rms, zcr, signal_energy, entropy_signal,\n",
        "            mean_peak_height, std_peak_height, skewness_peak_height,\n",
        "            num_peaks, mean_inter_peak_distance, std_peak_distance, skewness_inter_peak_distance,\n",
        "            sum_peak_heights, peak_frequency, max_peak_height, min_peak_height,\n",
        "            dominant_frequency, central_frequency, band_power,\n",
        "            spectral_centroid, spectral_spread, spectral_skewness, spectral_kurtosis,\n",
        "            median, percentile_25, percentile_75, data_range,\n",
        "            label]\n",
        "\n",
        "        extracted_features_list.append(features)\n",
        "\n",
        "# Print length of extracted features list\n",
        "print(\"Total number of extracted features:\", len(extracted_features_list))"
      ],
      "metadata": {
        "id": "bwXRJysPJtFw",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "55f65dca-0269-486f-c5ca-6a0e0160bee5"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Collecting pyentrp\n",
            "  Downloading pyentrp-0.9.0-py3-none-any.whl (10 kB)\n",
            "Collecting numpy<2.0,>=1.26 (from pyentrp)\n",
            "  Downloading numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (18.2 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m18.2/18.2 MB\u001b[0m \u001b[31m12.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hInstalling collected packages: numpy, pyentrp\n",
            "  Attempting uninstall: numpy\n",
            "    Found existing installation: numpy 1.25.2\n",
            "    Uninstalling numpy-1.25.2:\n",
            "      Successfully uninstalled numpy-1.25.2\n",
            "Successfully installed numpy-1.26.4 pyentrp-0.9.0\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "<ipython-input-10-a6d04b419c1d>:42: RuntimeWarning: divide by zero encountered in scalar divide\n",
            "  skewness_inter_peak_distance = np.mean((np.diff(np.where(peak_heights > 0)[0]) - mean_inter_peak_distance) ** 3) / (np.std(np.diff(np.where(peak_heights > 0)[0])) ** 3)\n",
            "<ipython-input-10-a6d04b419c1d>:24: RuntimeWarning: invalid value encountered in scalar divide\n",
            "  skewness = np.mean((segment - mean) ** 3) / (np.std(segment) ** 3)\n",
            "<ipython-input-10-a6d04b419c1d>:26: RuntimeWarning: invalid value encountered in scalar divide\n",
            "  kurtosis = np.mean((segment - mean) ** 4) / (np.std(segment) ** 4)  # Kurtosis\n",
            "<ipython-input-10-a6d04b419c1d>:38: RuntimeWarning: invalid value encountered in scalar divide\n",
            "  skewness_peak_height = np.mean((peak_heights - mean_peak_height) ** 3) / (np.std(peak_heights) ** 3)\n",
            "<ipython-input-10-a6d04b419c1d>:40: RuntimeWarning: divide by zero encountered in scalar divide\n",
            "  mean_inter_peak_distance = len(segment) / np.sum(np.diff(np.where(peak_heights > 0)[0]))\n",
            "/usr/local/lib/python3.10/dist-packages/numpy/core/_methods.py:206: RuntimeWarning: Degrees of freedom <= 0 for slice\n",
            "  ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,\n",
            "/usr/local/lib/python3.10/dist-packages/numpy/core/_methods.py:163: RuntimeWarning: invalid value encountered in divide\n",
            "  arrmean = um.true_divide(arrmean, div, out=arrmean,\n",
            "/usr/local/lib/python3.10/dist-packages/numpy/core/_methods.py:198: RuntimeWarning: invalid value encountered in scalar divide\n",
            "  ret = ret.dtype.type(ret / rcount)\n",
            "/usr/local/lib/python3.10/dist-packages/numpy/core/fromnumeric.py:3504: RuntimeWarning: Mean of empty slice.\n",
            "  return _methods._mean(a, axis=axis, dtype=dtype,\n",
            "/usr/local/lib/python3.10/dist-packages/numpy/core/_methods.py:129: RuntimeWarning: invalid value encountered in scalar divide\n",
            "  ret = ret.dtype.type(ret / rcount)\n",
            "<ipython-input-10-a6d04b419c1d>:63: RuntimeWarning: invalid value encountered in scalar divide\n",
            "  spectral_centroid = np.sum(f * pxx) / np.sum(pxx)  # Spectral Centroid\n",
            "/usr/local/lib/python3.10/dist-packages/scipy/stats/_entropy.py:133: RuntimeWarning: invalid value encountered in divide\n",
            "  pk = 1.0*pk / np.sum(pk, axis=axis, keepdims=True)\n",
            "<ipython-input-10-a6d04b419c1d>:44: RuntimeWarning: divide by zero encountered in scalar divide\n",
            "  peak_frequency = len(segment) / np.sum(segment > (np.max(segment) / 2))\n",
            "<ipython-input-10-a6d04b419c1d>:55: RuntimeWarning: invalid value encountered in scalar divide\n",
            "  mean_frequency = np.sum(magnitude_spectrum * np.arange(len(magnitude_spectrum))) / np.sum(magnitude_spectrum)\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Total number of extracted features: 3955\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Z-Rkw6qsz5B5"
      },
      "outputs": [],
      "source": [
        "# cfg = tsfel.get_features_by_domain()\n",
        "# # Initialize an empty list to store features and labels\n",
        "# extracted_features_list = []\n",
        "\n",
        "# # Loop through each record and corresponding annotations\n",
        "# for record, annotations in zip(records, annotations_list):\n",
        "#     # Extract signals\n",
        "#     resp_signals_A = record.p_signal[:, 1]\n",
        "#     print(\"Record Name \", record.record_name)\n",
        "#     # Extract features and labels\n",
        "#     features = []\n",
        "#     for i in range(len(annotations.sample)):\n",
        "#         start = annotations.sample[i]\n",
        "#         end = annotations.sample[i + 1] if i < len(annotations.sample) - 1 else len(resp_signals_A)\n",
        "#         segment = resp_signals_A[start:end]\n",
        "#         try:\n",
        "#           segment_features = tsfel.time_series_features_extractor(cfg, segment, fs=100)\n",
        "#           features_dict = segment_features.to_dict('records')[0]  # Convert the first row to a dict\n",
        "#         except Exception as e:\n",
        "#           print(f\"Feature extraction failed for segment {i}: {e}\")\n",
        "#           features_dict = {}\n",
        "\n",
        "#         label = 1 if annotations.symbol[i] == 'A' else 0\n",
        "#         features_dict['label'] = label\n",
        "#         features.append(features_dict)\n",
        "\n",
        "#     # Append features for the current record to the overall list\n",
        "#     extracted_features_list.extend(features)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "o7D8YP7hhJXv",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "501a2899-8f54-442e-d111-8166a60046a5"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Shape of the DataFrame: (3955, 34)\n"
          ]
        }
      ],
      "source": [
        "import pandas as pd\n",
        "\n",
        "# Assemble the per-segment feature dicts (built by the extraction cell\n",
        "# above) into one DataFrame: one row per annotated segment.\n",
        "df = pd.DataFrame(extracted_features_list)\n",
        "\n",
        "# Fill NaN values with 0 (degenerate segments yield NaN features)\n",
        "df = df.fillna(0)\n",
        "\n",
        "# Assign column names including the label column\n",
        "column_names = ['Mean', 'Std', 'Skewness', 'Area_Abs', 'Kurtosis', 'Min_Value', 'Max_Value',\n",
        "                'RMS', 'ZCR', 'Signal_Energy', 'Entropy_Signal',\n",
        "                'Mean_Peak_Height', 'Std_Peak_Height', 'Skewness_Peak_Height',\n",
        "                'Num_Peaks', 'Mean_Inter_Peak_Distance', 'Std_Peak_Distance', 'Skewness_Inter_Peak_Distance',\n",
        "                'Sum_Peak_Heights', 'Peak_Frequency', 'Max_Peak_Height', 'Min_Peak_Height',\n",
        "                'Dominant_Frequency', 'Central_Frequency', 'Band_Power',\n",
        "                'Spectral_Centroid', 'Spectral_Spread', 'Spectral_Skewness', 'Spectral_Kurtosis',\n",
        "                'Median', 'Percentile_25', 'Percentile_75', 'Data_Range',\n",
        "                'Label']\n",
        "\n",
        "# Guard: blindly renaming would silently mis-label every feature if the\n",
        "# extraction cell ever adds or drops a column.\n",
        "assert df.shape[1] == len(column_names), (\n",
        "    f'Expected {len(column_names)} columns, got {df.shape[1]}')\n",
        "\n",
        "# Rename the columns of the DataFrame\n",
        "df.columns = column_names\n",
        "\n",
        "# Save the DataFrame to a CSV file with labeled columns and excluding the index\n",
        "df.to_csv('Apnea_ECG_Signals_Dataset.csv', index=False)\n",
        "\n",
        "# Print the shape of the DataFrame\n",
        "print(\"Shape of the DataFrame:\", df.shape)"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "import pandas as pd\n",
        "import numpy as np\n",
        "from sklearn.feature_selection import SelectKBest, f_classif\n",
        "\n",
        "# Convert the extracted features list into a DataFrame\n",
        "df_features = pd.DataFrame(extracted_features_list)\n",
        "\n",
        "# +/-inf (from the divide-by-zero warnings in the feature extractor) and\n",
        "# NaN would crash SelectKBest, so map inf -> NaN first, then zero-fill\n",
        "# everything in a single pass (the original double fillna was redundant).\n",
        "df_features = df_features.replace([np.inf, -np.inf], np.nan)\n",
        "df_features = df_features.fillna(0)\n",
        "\n",
        "# Separate features and labels. Columns are still integer-indexed here;\n",
        "# 33 is the label column appended by the extraction loop.\n",
        "X = df_features.drop(columns=[33])\n",
        "y = df_features[33]\n",
        "\n",
        "# Perform feature selection using SelectKBest with ANOVA F-value as the scoring function\n",
        "k = 33  # Number of top features to select (== all 33 here, so this only ranks)\n",
        "selector = SelectKBest(score_func=f_classif, k=k)\n",
        "X_selected = selector.fit_transform(X, y)\n",
        "\n",
        "# Get indices of selected features\n",
        "selected_feature_indices = selector.get_support(indices=True)\n",
        "\n",
        "# Get the names of selected features\n",
        "selected_feature_names = X.columns[selected_feature_indices]\n",
        "\n",
        "# .copy() takes an explicit copy of the slice so the label assignment\n",
        "# below does not trigger SettingWithCopyWarning / a silent no-op write.\n",
        "df_selected_features = df_features[selected_feature_names].copy()\n",
        "df_selected_features['label'] = y\n",
        "\n",
        "# Save the DataFrame to a CSV file\n",
        "df_selected_features.to_csv('Top_Features.csv', index=False)"
      ],
      "metadata": {
        "id": "9Hbe04vCv26P",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "419ceb8b-2fb0-4841-852c-6bbcc03238b9"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "/usr/local/lib/python3.10/dist-packages/sklearn/feature_selection/_univariate_selection.py:112: UserWarning: Features [21 22] are constant.\n",
            "  warnings.warn(\"Features %s are constant.\" % constant_features_idx, UserWarning)\n",
            "/usr/local/lib/python3.10/dist-packages/sklearn/feature_selection/_univariate_selection.py:113: RuntimeWarning: invalid value encountered in divide\n",
            "  f = msb / msw\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "import pandas as pd\n",
        "from sklearn.model_selection import train_test_split\n",
        "from sklearn.preprocessing import StandardScaler\n",
        "\n",
        "# Load the DataFrame containing the selected features and labels\n",
        "df_selected_features = pd.read_csv('Top_Features.csv')\n",
        "\n",
        "# Separate features and labels\n",
        "X = df_selected_features.drop(columns=['label'])\n",
        "y = df_selected_features['label']\n",
        "\n",
        "# Split the dataset randomly (70% train / 30% test). train_test_split is\n",
        "# imported in this cell so it runs on a fresh kernel; previously it was\n",
        "# only imported in a cell further down the notebook.\n",
        "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n",
        "\n",
        "# Standardize features; fit on the training split only so no test-set\n",
        "# statistics leak into the scaling.\n",
        "scaler = StandardScaler()\n",
        "X_train_scaled = scaler.fit_transform(X_train)\n",
        "X_test_scaled = scaler.transform(X_test)\n",
        "\n",
        "# Convert scaled features back to DataFrame\n",
        "X_train_scaled_df = pd.DataFrame(X_train_scaled, columns=X_train.columns)\n",
        "X_test_scaled_df = pd.DataFrame(X_test_scaled, columns=X_test.columns)\n",
        "\n",
        "# Create DataFrames for training and testing sets\n",
        "df_train = pd.concat([X_train_scaled_df, y_train.reset_index(drop=True)], axis=1)\n",
        "df_test = pd.concat([X_test_scaled_df, y_test.reset_index(drop=True)], axis=1)\n",
        "\n",
        "# Save the training and testing DataFrames to CSV files\n",
        "df_train.to_csv('features_train.csv', index=False)\n",
        "df_test.to_csv('features_test.csv', index=False)"
      ],
      "metadata": {
        "id": "P4pHXpUDQLQz"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from tensorflow import keras\n",
        "def ANN(X_train, y_train, X_test, y_test, loss, weights):\n",
        "    \"\"\"Train and evaluate a small dense network for apnea classification.\n",
        "\n",
        "    Parameters\n",
        "    ----------\n",
        "    X_train, X_test : scaled feature matrices\n",
        "    y_train, y_test : one-hot encoded labels (2 classes)\n",
        "    loss : loss name passed straight to model.compile\n",
        "    weights : unused; kept only so existing call sites keep working\n",
        "\n",
        "    Returns\n",
        "    -------\n",
        "    (model, y_pred) : the fitted model and its argmax class predictions\n",
        "    \"\"\"\n",
        "    model = keras.Sequential([\n",
        "      keras.layers.Dense(128, input_dim=X_train.shape[1], activation='relu', kernel_regularizer=keras.regularizers.l2(0.01)),\n",
        "      keras.layers.Dropout(0.5),\n",
        "      keras.layers.Dense(64, activation='relu', kernel_regularizer=keras.regularizers.l2(0.01)),\n",
        "      keras.layers.Dense(32, activation='relu', kernel_regularizer=keras.regularizers.l2(0.01)),\n",
        "      keras.layers.Dense(2, activation='sigmoid')\n",
        "    ])\n",
        "\n",
        "    # Compile the model with a lower learning rate; honor the caller-supplied\n",
        "    # `loss` instead of a hard-coded string (the caller passes the same\n",
        "    # 'binary_crossentropy', so behavior is unchanged).\n",
        "    optimizer = keras.optimizers.Adam(learning_rate=0.001)\n",
        "    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n",
        "\n",
        "    # Use early stopping to prevent overfitting. Fully qualified: the bare\n",
        "    # `EarlyStopping` name was never imported anywhere in this notebook.\n",
        "    early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n",
        "\n",
        "    # Train the model (class weights are neutral; the `weights` arg is ignored)\n",
        "    model.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.2, class_weight={0: 1, 1: 1}, callbacks=[early_stopping])\n",
        "\n",
        "    # Evaluate the model on the test set\n",
        "    _, accuracy = model.evaluate(X_test, y_test)\n",
        "    print(f\"Test Accuracy: {accuracy}\")\n",
        "\n",
        "    # Make predictions on the test set (argmax over the 2 output units)\n",
        "    y_pred = np.argmax(model.predict(X_test), axis=1)\n",
        "    y_test_original = np.argmax(y_test, axis=1)\n",
        "\n",
        "    # Print classification report and confusion matrix\n",
        "    print(\"Classification Report:\")\n",
        "    print(classification_report(y_test_original, y_pred))\n",
        "\n",
        "    print(\"Confusion Matrix:\")\n",
        "    print(confusion_matrix(y_test_original, y_pred))\n",
        "\n",
        "    return model, y_pred"
      ],
      "metadata": {
        "id": "QOEZl57SX28y"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from sklearn.model_selection import train_test_split\n",
        "import tensorflow as tf\n",
        "from tensorflow import keras\n",
        "from sklearn.metrics import confusion_matrix , classification_report"
      ],
      "metadata": {
        "id": "cKT-X_GBqYeH"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from sklearn.preprocessing import LabelEncoder, StandardScaler\n",
        "from imblearn.over_sampling import RandomOverSampler\n",
        "from tensorflow.keras.utils import to_categorical\n",
        "\n",
        "# Load the dataset\n",
        "df = pd.read_csv('Top_Features.csv')\n",
        "\n",
        "# Extract features and labels\n",
        "X = df.iloc[:, :-1].values\n",
        "y = df.iloc[:, -1].values\n",
        "\n",
        "# Encode labels to numerical values (labels are already 0/1 ints from the\n",
        "# extraction step, so this is effectively an identity mapping)\n",
        "label_encoder = LabelEncoder()\n",
        "y = label_encoder.fit_transform(y)\n",
        "\n",
        "# Resample the dataset to handle class imbalance.\n",
        "# NOTE(review): oversampling BEFORE the split leaks duplicated minority\n",
        "# samples into the test set, which inflates the reported accuracy --\n",
        "# consider splitting first and oversampling the training set only.\n",
        "oversampler = RandomOverSampler(sampling_strategy='minority')\n",
        "X_resampled, y_resampled = oversampler.fit_resample(X, y)\n",
        "\n",
        "# Split the dataset into training and testing sets\n",
        "X_train, X_test, y_train, y_test = train_test_split(X_resampled, y_resampled, test_size= 0.2, train_size=0.8, random_state=42)\n",
        "\n",
        "# Standardize features (fit on the training split only)\n",
        "scaler = StandardScaler()\n",
        "X_train = scaler.fit_transform(X_train)\n",
        "X_test = scaler.transform(X_test)\n",
        "\n",
        "# Convert labels to one-hot encoding and train/evaluate the network\n",
        "y_train = to_categorical(y_train)\n",
        "y_test = to_categorical(y_test)\n",
        "model, y_preds = ANN(X_train, y_train, X_test, y_test, 'binary_crossentropy', -1)"
      ],
      "metadata": {
        "id": "dFSHFRbyX5yi",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "62c5a3fc-ad97-4329-a844-8904ea937621"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Epoch 1/100\n",
            "94/94 [==============================] - 3s 12ms/step - loss: 1.5773 - accuracy: 0.9037 - val_loss: 0.9467 - val_accuracy: 0.9507\n",
            "Epoch 2/100\n",
            "94/94 [==============================] - 0s 5ms/step - loss: 0.7071 - accuracy: 0.9490 - val_loss: 0.5238 - val_accuracy: 0.9441\n",
            "Epoch 3/100\n",
            "94/94 [==============================] - 0s 3ms/step - loss: 0.4312 - accuracy: 0.9500 - val_loss: 0.3694 - val_accuracy: 0.9454\n",
            "Epoch 4/100\n",
            "94/94 [==============================] - 0s 3ms/step - loss: 0.3301 - accuracy: 0.9520 - val_loss: 0.3039 - val_accuracy: 0.9467\n",
            "Epoch 5/100\n",
            "94/94 [==============================] - 0s 3ms/step - loss: 0.2874 - accuracy: 0.9484 - val_loss: 0.2705 - val_accuracy: 0.9441\n",
            "Epoch 6/100\n",
            "94/94 [==============================] - 0s 3ms/step - loss: 0.2546 - accuracy: 0.9504 - val_loss: 0.2507 - val_accuracy: 0.9427\n",
            "Epoch 7/100\n",
            "94/94 [==============================] - 0s 3ms/step - loss: 0.2421 - accuracy: 0.9480 - val_loss: 0.2436 - val_accuracy: 0.9454\n",
            "Epoch 8/100\n",
            "94/94 [==============================] - 0s 3ms/step - loss: 0.2304 - accuracy: 0.9494 - val_loss: 0.2391 - val_accuracy: 0.9427\n",
            "Epoch 9/100\n",
            "94/94 [==============================] - 0s 3ms/step - loss: 0.2230 - accuracy: 0.9507 - val_loss: 0.2358 - val_accuracy: 0.9414\n",
            "Epoch 10/100\n",
            "94/94 [==============================] - 0s 3ms/step - loss: 0.2137 - accuracy: 0.9460 - val_loss: 0.2177 - val_accuracy: 0.9427\n",
            "Epoch 11/100\n",
            "94/94 [==============================] - 0s 3ms/step - loss: 0.2125 - accuracy: 0.9477 - val_loss: 0.2156 - val_accuracy: 0.9454\n",
            "Epoch 12/100\n",
            "94/94 [==============================] - 0s 3ms/step - loss: 0.2127 - accuracy: 0.9510 - val_loss: 0.2188 - val_accuracy: 0.9454\n",
            "Epoch 13/100\n",
            "94/94 [==============================] - 0s 5ms/step - loss: 0.2083 - accuracy: 0.9494 - val_loss: 0.2078 - val_accuracy: 0.9441\n",
            "Epoch 14/100\n",
            "94/94 [==============================] - 0s 5ms/step - loss: 0.2058 - accuracy: 0.9467 - val_loss: 0.2065 - val_accuracy: 0.9454\n",
            "Epoch 15/100\n",
            "94/94 [==============================] - 0s 5ms/step - loss: 0.2010 - accuracy: 0.9477 - val_loss: 0.2089 - val_accuracy: 0.9441\n",
            "Epoch 16/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.2011 - accuracy: 0.9467 - val_loss: 0.2092 - val_accuracy: 0.9494\n",
            "Epoch 17/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.2026 - accuracy: 0.9494 - val_loss: 0.2067 - val_accuracy: 0.9481\n",
            "Epoch 18/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1958 - accuracy: 0.9490 - val_loss: 0.2036 - val_accuracy: 0.9467\n",
            "Epoch 19/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1908 - accuracy: 0.9480 - val_loss: 0.2031 - val_accuracy: 0.9441\n",
            "Epoch 20/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1974 - accuracy: 0.9497 - val_loss: 0.2004 - val_accuracy: 0.9427\n",
            "Epoch 21/100\n",
            "94/94 [==============================] - 0s 4ms/step - loss: 0.1907 - accuracy: 0.9497 - val_loss: 0.2062 - val_accuracy: 0.9454\n",
            "Epoch 22/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1888 - accuracy: 0.9504 - val_loss: 0.2027 - val_accuracy: 0.9481\n",
            "Epoch 23/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1914 - accuracy: 0.9504 - val_loss: 0.2047 - val_accuracy: 0.9427\n",
            "Epoch 24/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1919 - accuracy: 0.9507 - val_loss: 0.2107 - val_accuracy: 0.9427\n",
            "Epoch 25/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1894 - accuracy: 0.9500 - val_loss: 0.1977 - val_accuracy: 0.9467\n",
            "Epoch 26/100\n",
            "94/94 [==============================] - 1s 11ms/step - loss: 0.1904 - accuracy: 0.9507 - val_loss: 0.2011 - val_accuracy: 0.9454\n",
            "Epoch 27/100\n",
            "94/94 [==============================] - 1s 12ms/step - loss: 0.1920 - accuracy: 0.9480 - val_loss: 0.1974 - val_accuracy: 0.9441\n",
            "Epoch 28/100\n",
            "94/94 [==============================] - 1s 7ms/step - loss: 0.1889 - accuracy: 0.9500 - val_loss: 0.1963 - val_accuracy: 0.9441\n",
            "Epoch 29/100\n",
            "94/94 [==============================] - 1s 7ms/step - loss: 0.1859 - accuracy: 0.9510 - val_loss: 0.1947 - val_accuracy: 0.9427\n",
            "Epoch 30/100\n",
            "94/94 [==============================] - 1s 8ms/step - loss: 0.1837 - accuracy: 0.9494 - val_loss: 0.1939 - val_accuracy: 0.9441\n",
            "Epoch 31/100\n",
            "94/94 [==============================] - 1s 8ms/step - loss: 0.1854 - accuracy: 0.9530 - val_loss: 0.1929 - val_accuracy: 0.9427\n",
            "Epoch 32/100\n",
            "94/94 [==============================] - 0s 4ms/step - loss: 0.1838 - accuracy: 0.9524 - val_loss: 0.1986 - val_accuracy: 0.9441\n",
            "Epoch 33/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1842 - accuracy: 0.9507 - val_loss: 0.1912 - val_accuracy: 0.9467\n",
            "Epoch 34/100\n",
            "94/94 [==============================] - 1s 7ms/step - loss: 0.1819 - accuracy: 0.9507 - val_loss: 0.1950 - val_accuracy: 0.9441\n",
            "Epoch 35/100\n",
            "94/94 [==============================] - 1s 10ms/step - loss: 0.1818 - accuracy: 0.9507 - val_loss: 0.1916 - val_accuracy: 0.9441\n",
            "Epoch 36/100\n",
            "94/94 [==============================] - 1s 10ms/step - loss: 0.1866 - accuracy: 0.9467 - val_loss: 0.1911 - val_accuracy: 0.9441\n",
            "Epoch 37/100\n",
            "94/94 [==============================] - 1s 7ms/step - loss: 0.1840 - accuracy: 0.9497 - val_loss: 0.1969 - val_accuracy: 0.9454\n",
            "Epoch 38/100\n",
            "94/94 [==============================] - 1s 7ms/step - loss: 0.1815 - accuracy: 0.9504 - val_loss: 0.1910 - val_accuracy: 0.9441\n",
            "Epoch 39/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1844 - accuracy: 0.9457 - val_loss: 0.1950 - val_accuracy: 0.9454\n",
            "Epoch 40/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1821 - accuracy: 0.9467 - val_loss: 0.1937 - val_accuracy: 0.9481\n",
            "Epoch 41/100\n",
            "94/94 [==============================] - 1s 9ms/step - loss: 0.1809 - accuracy: 0.9494 - val_loss: 0.1905 - val_accuracy: 0.9467\n",
            "Epoch 42/100\n",
            "94/94 [==============================] - 1s 7ms/step - loss: 0.1815 - accuracy: 0.9487 - val_loss: 0.1974 - val_accuracy: 0.9441\n",
            "Epoch 43/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1799 - accuracy: 0.9484 - val_loss: 0.1908 - val_accuracy: 0.9441\n",
            "Epoch 44/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1804 - accuracy: 0.9497 - val_loss: 0.1957 - val_accuracy: 0.9467\n",
            "Epoch 45/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1807 - accuracy: 0.9510 - val_loss: 0.1922 - val_accuracy: 0.9441\n",
            "Epoch 46/100\n",
            "94/94 [==============================] - 1s 6ms/step - loss: 0.1803 - accuracy: 0.9484 - val_loss: 0.1926 - val_accuracy: 0.9481\n",
            "30/30 [==============================] - 0s 4ms/step - loss: 0.1449 - accuracy: 0.9606\n",
            "Test Accuracy: 0.9605963826179504\n",
            "30/30 [==============================] - 1s 9ms/step\n",
            "Classification Report:\n",
            "              precision    recall  f1-score   support\n",
            "\n",
            "           0       0.95      0.97      0.96       459\n",
            "           1       0.97      0.95      0.96       480\n",
            "\n",
            "    accuracy                           0.96       939\n",
            "   macro avg       0.96      0.96      0.96       939\n",
            "weighted avg       0.96      0.96      0.96       939\n",
            "\n",
            "Confusion Matrix:\n",
            "[[446  13]\n",
            " [ 24 456]]\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Persist the trained network. NOTE: .h5 is Keras's legacy HDF5 format\n",
        "# (hence the UserWarning); kept because the load cell below expects it.\n",
        "model.save(\"Apnea.h5\")"
      ],
      "metadata": {
        "id": "je9RDNtxqbRw",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "5c9b0c4e-0f45-46fb-ec1f-30661ed7a0e2"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "/usr/local/lib/python3.10/dist-packages/keras/src/engine/training.py:3103: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.\n",
            "  saving_api.save_model(\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "import tensorflow as tf\n",
        "\n",
        "# Reload the model saved above so the TFLite conversion below can run\n",
        "# independently (as long as Apnea.h5 exists on disk).\n",
        "model = tf.keras.models.load_model(\"Apnea.h5\")"
      ],
      "metadata": {
        "id": "8_7q38fqqjOw"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from pathlib import Path\n",
        "\n",
        "# Convert the Keras model to a TensorFlow Lite flatbuffer for on-device use\n",
        "converter = tf.lite.TFLiteConverter.from_keras_model(model)\n",
        "tflite_model = converter.convert()\n",
        "# write_bytes closes the file handle (the bare open().write() leaked it)\n",
        "# and, as the cell's last expression, still displays the byte count.\n",
        "Path(\"Apnea.tflite\").write_bytes(tflite_model)"
      ],
      "metadata": {
        "id": "yO3wHftGqmF_",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "54d124bf-57b3-4e7c-a225-9e27dfe165dd"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "61480"
            ]
          },
          "metadata": {},
          "execution_count": 20
        }
      ]
    }
  ],
  "metadata": {
    "colab": {
      "provenance": [],
      "authorship_tag": "ABX9TyN7l07Xwx3AGnc/zlePOTGZ",
      "include_colab_link": true
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}