{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "3b668138-98ed-41dc-b17e-8f2d643d3e1a",
   "metadata": {},
   "source": [
    "# 1. Import library"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "293222c2-ea98-44d7-808d-06144b1ac015",
   "metadata": {},
   "outputs": [],
   "source": [
    "# !pip install wfdb  # uncomment on first run to install the WFDB reader\n",
    "\n",
    "# Filesystem paths and counting helpers\n",
    "from pathlib import Path\n",
    "import collections\n",
    "\n",
    "# Read and display ECG records from PhysioNet\n",
    "import wfdb\n",
    "import pprint\n",
    "from IPython.display import clear_output\n",
    "\n",
    "# Data manipulation and plotting\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "from scipy.signal import resample\n",
    "from scipy.signal import butter,filtfilt\n",
    "\n",
    "# Divide data into train and test set and save to HDF5\n",
    "import h5py\n",
    "import os\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn import preprocessing\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "50fae999-0227-4409-8865-f756af97f49e",
   "metadata": {},
   "source": [
    "# 2. Classification"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d94b661f-7b3f-4a28-b17b-e3bbef3f6a3e",
   "metadata": {},
   "source": [
    "## 2.1 Load data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a739cf63-0358-436c-9270-1aa076d68d62",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Deep learning model building blocks\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torchvision\n",
    "from scipy.stats import truncnorm\n",
    "from torch.nn.parameter import Parameter\n",
    "import torch.optim as optim\n",
    "# NOTE: Variable is deprecated since PyTorch 0.4 — tensors track gradients directly.\n",
    "from torch.autograd import Variable\n",
    "from torch.nn import Conv1d"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "df248d5a-136b-45ef-a0da-cc565800710b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Read the preprocessed ECG beats and labels from the HDF5 file.\n",
    "\n",
    "process_dir = Path(\"./\") / \"data\" / \"processed_data\"\n",
    "\n",
    "# Use this for inter-patient\n",
    "file_name = 'ECG_MIT-BIH_processed_data_interpatient_360Hz.h5'\n",
    "\n",
    "# Use this for intra-patient\n",
    "# file_name = 'ECG_MIT-BIH_processed_data_intrapatient_360Hz.h5'\n",
    "\n",
    "# Context manager guarantees the file handle is closed even if a read fails.\n",
    "with h5py.File(process_dir / file_name, 'r') as file:\n",
    "    train_set_d = file['train_data'][:]\n",
    "    train_set_l = file['train_labels'][:]\n",
    "    test_set_d = file['test_data'][:]\n",
    "    test_set_l = file['test_labels'][:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "13962656-673b-43e1-8af7-ef840003a9f6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity-check the training data: plot one beat on a time axis in seconds.\n",
    "train_ecg_sample = train_set_d[14]\n",
    "# 360 Hz sampling rate -> divide the sample index by 360 to get seconds.\n",
    "train_time_in_second = np.arange(len(train_ecg_sample)) / 360.\n",
    "\n",
    "plt.rcParams[\"figure.figsize\"]= 15, 5\n",
    "plt.plot(train_time_in_second, train_ecg_sample, c='b', label='360 Hz')\n",
    "plt.xlabel('time in second')\n",
    "plt.ylabel('ECG value in mV')\n",
    "plt.title('Train ECG data for one beat')\n",
    "plt.legend()\n",
    "plt.show()\n",
    "plt.clf()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4747d4a3-e5f9-403f-bd76-a575c2b4ecee",
   "metadata": {},
   "outputs": [],
   "source": [
    "## Plot a sample test-set signal for comparison with the training beat above.\n",
    "test_ecg_sample = test_set_d[0]\n",
    "# 360 Hz sampling rate -> divide the sample index by 360 to get seconds.\n",
    "test_time_in_second = np.arange(len(test_ecg_sample)) / 360.\n",
    "\n",
    "plt.rcParams[\"figure.figsize\"]= 15, 5\n",
    "plt.plot(test_time_in_second, test_ecg_sample, c='b', label='360 Hz')\n",
    "plt.xlabel('time in second')\n",
    "plt.ylabel('ECG value in mV')\n",
    "plt.title('Test ECG data for one beat')\n",
    "plt.legend()\n",
    "plt.show()\n",
    "plt.clf()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9624af65-1a31-46e9-9397-3d7aa0bcac9c",
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import Dataset, DataLoader, TensorDataset\n",
    "from sklearn.model_selection import StratifiedShuffleSplit\n",
    "# NOTE(review): to_categorical is only used by the commented-out lines below;\n",
    "# the TensorFlow dependency could be dropped once one-hot labels are not needed.\n",
    "from tensorflow.keras.utils import to_categorical\n",
    "\n",
    "import torch\n",
    "\n",
    "# Stratified 80/20 split of the training set into train/validation.\n",
    "# NOTE(review): n_splits=5 generates five splits but the loop overwrites the\n",
    "# variables on every iteration, so only the LAST split is kept — n_splits=1\n",
    "# (or breaking after the first split) would be clearer. TODO confirm intent.\n",
    "sss = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=0)\n",
    "for train_index, test_index in sss.split(train_set_d, train_set_l):\n",
    "    cnn_train_data, cnn_val_data = train_set_d[train_index], train_set_d[test_index]\n",
    "    cnn_train_labels, cnn_val_labels = train_set_l[train_index], train_set_l[test_index]\n",
    "    \n",
    "# cnn_train_labels = to_categorical(cnn_train_labels, num_classes=5)\n",
    "# cnn_val_labels = to_categorical(cnn_val_labels, num_classes=5)\n",
    "# cnn_test_labels = to_categorical(test_set_l, num_classes=5)\n",
    "cnn_test_labels = test_set_l\n",
    "\n",
    "batch_size=128\n",
    "\n",
    "# Reshape to (N, channels=1, length) as expected by Conv1d, then wrap in DataLoaders.\n",
    "train_reshape_data = np.reshape(cnn_train_data, (cnn_train_data.shape[0], 1, cnn_train_data.shape[1]))\n",
    "train_tensor = TensorDataset(torch.FloatTensor(train_reshape_data), torch.LongTensor(cnn_train_labels))\n",
    "train_dataloader = DataLoader(train_tensor, batch_size=batch_size, shuffle=True)\n",
    "\n",
    "val_reshape_data = np.reshape(cnn_val_data, (cnn_val_data.shape[0], 1, cnn_val_data.shape[1]))\n",
    "val_tensor = TensorDataset(torch.FloatTensor(val_reshape_data), torch.LongTensor(cnn_val_labels))\n",
    "val_dataloader = DataLoader(val_tensor, batch_size=batch_size, shuffle=True)\n",
    "    \n",
    "test_reshape_data = np.reshape(test_set_d, (test_set_d.shape[0], 1, test_set_d.shape[1]))\n",
    "test_tensor = TensorDataset(torch.FloatTensor(test_reshape_data), torch.LongTensor(cnn_test_labels))\n",
    "# NOTE(review): no batch_size given -> DataLoader default of one sample per\n",
    "# batch; evaluation would be much faster with a larger batch. TODO confirm.\n",
    "test_dataloader = DataLoader(test_tensor)\n",
    "\n",
    "print(train_reshape_data.shape, val_reshape_data.shape, test_reshape_data.shape)\n",
    "# print(Counter(cnn_train_labels), Counter(cnn_val_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "45e4ebf7-2450-4daf-8d2a-b575fee89af2",
   "metadata": {},
   "source": [
    "## 2.2 Setup the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "72018dfa-1c9d-46bc-a846-88d06058994e",
   "metadata": {},
   "outputs": [],
   "source": [
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "def DataBatch(data, label, batchsize, shuffle=True):\n",
    "    \"\"\"Yield (data, label) mini-batches of size `batchsize`.\n",
    "\n",
    "    If `shuffle` is True, batches are drawn in a random order.\n",
    "    \"\"\"\n",
    "    n = data.shape[0]\n",
    "    if shuffle:\n",
    "        index = np.random.permutation(n)\n",
    "    else:\n",
    "        index = np.arange(n)\n",
    "    for i in range(int(np.ceil(n/batchsize))):\n",
    "        inds = index[i*batchsize : min(n,(i+1)*batchsize)]\n",
    "        yield data[inds], label[inds]\n",
    "\n",
    "def test(test_data_loader, classifier):\n",
    "    \"\"\"Return classification accuracy (in percent) over the loader.\n",
    "\n",
    "    classifier(data) is expected to return a numpy array of class indices\n",
    "    (see CNN.__call__), so labels are kept on the CPU for the comparison.\n",
    "    \"\"\"\n",
    "    correct=0.\n",
    "    total_length = 0\n",
    "    with torch.no_grad():\n",
    "        for (data,label) in test_data_loader:\n",
    "            data = data.to(device)\n",
    "            prediction = classifier(data)\n",
    "            total_length += label.size(0)\n",
    "            correct += (prediction == label.numpy()).sum().item()\n",
    "    return correct/(total_length)*100\n",
    "\n",
    "def testLoss(test_data_loader, criterion, classifier):\n",
    "    \"\"\"Return the mean of criterion(classifier(data), labels) over the loader.\"\"\"\n",
    "    test_loss = []\n",
    "    with torch.no_grad():\n",
    "        for (data,labels) in test_data_loader:\n",
    "            data, labels = data.to(device), labels.to(device)\n",
    "            # forward() is used because __call__ is overridden to return argmax.\n",
    "            prediction = classifier.forward(data)\n",
    "            loss = criterion(prediction, labels)\n",
    "            test_loss.append(loss.item())\n",
    "    return np.mean(np.array(test_loss))\n",
    "\n",
    "\n",
    "def Confusion(test_data_loader, classifier, num_class=5):\n",
    "    \"\"\"Return (row-normalized confusion matrix, accuracy in percent).\"\"\"\n",
    "    M=np.zeros((num_class,num_class))\n",
    "    acc=0.0\n",
    "    correct=0.\n",
    "    with torch.no_grad():\n",
    "        for (data,label) in test_data_loader:\n",
    "            data = data.to(device)\n",
    "            prediction = classifier(data)\n",
    "            # Bug fix: .numpy() is only valid on CPU tensors; the original moved\n",
    "            # labels to `device` first, which raises when device is CUDA.\n",
    "            label = label.cpu().numpy()\n",
    "            correct += np.sum(prediction==label)\n",
    "            for i in range(len(label)):\n",
    "                M[label[i],prediction[i]] += 1\n",
    "        # Normalize each row so entries are fractions of the true class.\n",
    "        for i in range(num_class):\n",
    "            M[i,:] /= np.sum(M[i,:])\n",
    "        acc = correct/(test_data_loader.dataset.tensors[0].shape[0])*100\n",
    "    \n",
    "    return M, acc\n",
    "\n",
    "def VisualizeConfusion(M):\n",
    "    \"\"\"Display the confusion matrix M as an image.\"\"\"\n",
    "    plt.imshow(M)\n",
    "    plt.show()\n",
    "    \n",
    "def predict(test_data_loader, classifier):\n",
    "    \"\"\"Return predicted class indices for every sample in the loader.\"\"\"\n",
    "    preds = []\n",
    "    with torch.no_grad():\n",
    "        for (data,label) in test_data_loader:\n",
    "            data = data.to(device)\n",
    "            prediction = classifier(data)\n",
    "            preds += list(prediction)\n",
    "\n",
    "    return np.array(preds)\n",
    "\n",
    "def predict_proba(test_data_loader, classifier):\n",
    "    \"\"\"Return the stacked model outputs (class probabilities) as a numpy array.\"\"\"\n",
    "    proba = torch.tensor([]).to(device)\n",
    "    with torch.no_grad():\n",
    "        for (data,label) in test_data_loader:\n",
    "            data = data.to(device)\n",
    "            prediction = classifier.forward(data)\n",
    "            proba = torch.cat((proba, prediction), 0)\n",
    "\n",
    "    return proba.cpu().numpy()\n",
    "\n",
    "def ToOneVsAll(labels, one_label):\n",
    "    \"\"\"Map labels to one-vs-all: 1 where label == one_label, 0 elsewhere.\"\"\"\n",
    "    output_labels = np.copy(labels)\n",
    "    for i, y in enumerate(labels):\n",
    "        if y == one_label:\n",
    "            output_labels[i] = 1\n",
    "        else:\n",
    "            output_labels[i] = 0\n",
    "\n",
    "    return output_labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "20f1dd2f-0896-42b7-aaa3-b826dc0e8b96",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this import is unused below (nn.Softmax is referenced instead).\n",
    "from torch.nn.modules.activation import Softmax\n",
    "\n",
    "class CNN(nn.Module):\n",
    "    \"\"\"1-D CNN beat classifier: two conv blocks, global average pooling, MLP head.\"\"\"\n",
    "    \n",
    "    def __init__(self, in_dim=1024, classes=5, n_filters=256, filter_size=2, hidden=128, dropout_rate=0.5):\n",
    "        super(CNN,self).__init__()\n",
    "        # Two stacks of three same-padded Conv1d+ReLU layers, each followed by\n",
    "        # batch norm and dropout; the final AvgPool1d spans the full signal\n",
    "        # length, acting as global average pooling (one value per filter).\n",
    "        self.conv = nn.Sequential(\n",
    "            nn.Conv1d(1, n_filters, filter_size, padding='same'),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv1d(n_filters, n_filters, filter_size, padding='same'),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv1d(n_filters, n_filters, filter_size, padding='same'),\n",
    "            nn.ReLU(),\n",
    "            nn.BatchNorm1d(n_filters, affine=True),\n",
    "            nn.Dropout(dropout_rate),\n",
    "            nn.Conv1d(n_filters, n_filters, filter_size, padding='same'),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv1d(n_filters, n_filters, filter_size, padding='same'),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv1d(n_filters, n_filters, filter_size, padding='same'),\n",
    "            nn.ReLU(),\n",
    "            nn.BatchNorm1d(n_filters, affine=True),\n",
    "            nn.Dropout(dropout_rate),\n",
    "            nn.AvgPool1d(in_dim, stride=in_dim)\n",
    "        )\n",
    "        self.fc = nn.Sequential(\n",
    "            nn.Linear(n_filters, hidden),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(hidden, classes),\n",
    "            # NOTE(review): the focal-loss implementation used for training may\n",
    "            # expect raw logits; applying Softmax here could double-normalize —\n",
    "            # TODO confirm against the loss function's documentation.\n",
    "            nn.Softmax(dim=1)\n",
    "        )\n",
    "    \n",
    "    def forward(self, x):\n",
    "        # x: presumably (batch, 1, length) — see the dataloader reshape; TODO confirm.\n",
    "        x = self.conv(x) \n",
    "        x = torch.flatten(x, 1)\n",
    "        return self.fc(x)\n",
    "            \n",
    "    def __call__(self, x):\n",
    "        # NOTE(review): overriding nn.Module.__call__ bypasses forward hooks; it\n",
    "        # returns argmax class indices as a numpy array instead of a tensor, which\n",
    "        # the evaluation helpers (test/predict/Confusion) rely on.\n",
    "        #inputs = Variable(torch.FloatTensor(x))\n",
    "        prediction = self.forward(x)\n",
    "        return np.argmax(prediction.data.cpu().numpy(), 1)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b3ead05a-5e00-48d3-990e-9b78740d5a73",
   "metadata": {},
   "source": [
    "## 2.3 Train model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cac67f9d-94f2-4d79-809d-e43ada4dcdd9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Class-weighted focal loss from a community implementation, fetched via torch.hub.\n",
    "# NOTE(review): alpha holds five equal weights of 0.25 (sums to 1.25) — confirm the\n",
    "# intended per-class weighting for the imbalanced MIT-BIH classes.\n",
    "focal_loss = torch.hub.load(\n",
    "\t'adeelh/pytorch-multi-class-focal-loss',\n",
    "\tmodel='focal_loss',\n",
    "\talpha=torch.tensor([0.25,0.25,0.25, 0.25, 0.25]),\n",
    "\tgamma=2,\n",
    "\treduction='mean',\n",
    "\tdevice=device,\n",
    "\tdtype=torch.float32,\n",
    "\tforce_reload=False\n",
    ")\n",
    "\n",
    "cnn = CNN()\n",
    "cnn.to(device)\n",
    "epochs = 10\n",
    "\n",
    "optimizer = optim.Adam(cnn.parameters(), lr=1e-3, weight_decay=1e-6)\n",
    "\n",
    "train_loss = []\n",
    "val_accuracy = []\n",
    "total_val_loss = []\n",
    "total_test_loss = []\n",
    "\n",
    "for epoch in range(epochs):\n",
    "    cnn.train()  # training mode: enables dropout and batch-norm updates\n",
    "    epoch_train_loss = []\n",
    "\n",
    "    for (data,labels) in train_dataloader:\n",
    "        data = data.to(device)\n",
    "        labels = labels.to(torch.int64).to(device)\n",
    "        # forward() is called directly because CNN.__call__ is overridden to\n",
    "        # return argmax class indices rather than the probability tensor.\n",
    "        prediction = cnn.forward(data)\n",
    "        loss = focal_loss(prediction, labels)\n",
    "        epoch_train_loss.append(loss.item())\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "    epoch_mean_train_loss = np.mean(np.array(epoch_train_loss))\n",
    "\n",
    "    cnn.eval()  # evaluation mode for the validation metrics\n",
    "    val_acc = test(val_dataloader, cnn)\n",
    "    val_loss = testLoss(val_dataloader, focal_loss, cnn)\n",
    "    total_val_loss.append(val_loss)\n",
    "\n",
    "    print ('Epoch:%d Val Accuracy: %f, train loss: %f, val loss: %f'%(epoch+1, val_acc, epoch_mean_train_loss, val_loss))\n",
    "    \n",
    "# Make sure the target directory exists before saving the checkpoint.\n",
    "os.makedirs('model', exist_ok=True)\n",
    "torch.save(cnn.state_dict(), Path('./model/focal_loss_interpatient.pt'))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e6264f29-7d2f-4ca0-a823-a9b3f5ae3dcd",
   "metadata": {},
   "source": [
    "## 2.4 Test model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "be3b43d0-367a-4e63-a7ea-88b525663e4f",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import confusion_matrix\n",
    "from sklearn.metrics import classification_report\n",
    "# NOTE: sklearn.metrics.plot_confusion_matrix was removed in scikit-learn 1.2\n",
    "# (and was shadowed by the local plot_confusion_matrix defined below anyway),\n",
    "# so it is no longer imported here.\n",
    "from sklearn.metrics import ConfusionMatrixDisplay\n",
    "\n",
    "def test_model(model, test_data_loader, device):\n",
    "    \"\"\"\n",
    "    Run the model over the test loader and return\n",
    "        - classification report string (for display purpose)\n",
    "        - dictionary of classification report (for query purpose)\n",
    "        - confusion matrix\n",
    "    \"\"\"\n",
    "    predictions = []\n",
    "    labels = []\n",
    "    model.to(device)\n",
    "    with torch.no_grad():\n",
    "        for (data,label) in test_data_loader:\n",
    "            data = data.to(device)\n",
    "            # model(data) returns argmax class indices (CNN.__call__ override).\n",
    "            predictions += list(model(data))\n",
    "            labels += list(label.numpy())\n",
    "\n",
    "    predictions = np.array(predictions)\n",
    "    labels = np.array(labels)\n",
    "\n",
    "    target_names = ['N', 'S', 'V', 'F', 'Q']\n",
    "    report = classification_report(labels, predictions, target_names=target_names, digits=3)\n",
    "    report_dict = classification_report(labels, predictions, target_names=target_names, output_dict=True)\n",
    "    c_matrix = confusion_matrix(labels, predictions)\n",
    "    return report, report_dict, c_matrix\n",
    "\n",
    "cnn = CNN()\n",
    "# map_location makes the checkpoint loadable on CPU-only machines as well.\n",
    "cnn.load_state_dict(torch.load(Path('./model/focal_loss_interpatient.pt'), map_location=device))\n",
    "cnn.eval()\n",
    "report, report_dict, c_matrix = test_model(cnn, test_dataloader, device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c009fd43-1762-48d7-9adb-080465cb657e",
   "metadata": {},
   "outputs": [],
   "source": [
    "print('Training result:\\n', report)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5b3c2e17-4323-48cc-993d-3be8b7584b66",
   "metadata": {},
   "outputs": [],
   "source": [
    "import itertools\n",
    "\n",
    "## Display the confusion matrix with per-class labels.\n",
    "display_labels = ['N', 'S', 'V', 'F', 'Q']\n",
    "\n",
    "def plot_confusion_matrix(cm, classes,\n",
    "                          normalize=False,\n",
    "                          title='Confusion matrix',\n",
    "                          cmap=plt.cm.Blues):\n",
    "    \"\"\"\n",
    "    Print and plot the confusion matrix `cm` with tick labels `classes`.\n",
    "    Row-wise (per true class) normalization can be applied by setting\n",
    "    `normalize=True`.\n",
    "    \"\"\"\n",
    "    if normalize:\n",
    "        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
    "        print(\"Normalized confusion matrix\")\n",
    "    else:\n",
    "        print('Confusion matrix, without normalization')\n",
    "\n",
    "    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n",
    "    plt.title(title)\n",
    "    plt.colorbar()\n",
    "    tick_marks = np.arange(len(classes))\n",
    "    plt.xticks(tick_marks, classes, rotation=45)\n",
    "    plt.yticks(tick_marks, classes)\n",
    "\n",
    "    # Pick each cell's text color for contrast against the colormap.\n",
    "    fmt = '.2f' if normalize else 'd'\n",
    "    thresh = cm.max() / 2.\n",
    "    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n",
    "        plt.text(j, i, format(cm[i, j], fmt),\n",
    "                 horizontalalignment=\"center\",\n",
    "                 color=\"white\" if cm[i, j] > thresh else \"black\")\n",
    "\n",
    "    plt.tight_layout()\n",
    "    plt.ylabel('True label')\n",
    "    plt.xlabel('Predicted label')\n",
    "    plt.show()\n",
    "    plt.clf()\n",
    "    \n",
    "plot_confusion_matrix(c_matrix, display_labels ,\n",
    "                      title='Normalized Confusion Matrix', normalize=True, cmap='Greys')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "78ebcb58-9836-4bea-a346-1c20000e4c55",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
