{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "2e07f54b",
   "metadata": {},
   "source": [
    "# EDITH: ECG biometric aided by deep learning for reliable individual authentication"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "13076139",
   "metadata": {},
   "source": [
    "# PREPROCESSING"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "9b8076f6",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from scipy.signal import savgol_filter\n",
    "from pykalman import KalmanFilter\n",
    "\n",
    "# Load ECG data from CSV file\n",
    "ecg = np.genfromtxt('MIT_BIH.csv', delimiter=',')\n",
    "\n",
    "# Remove NaN values\n",
    "ecg = ecg[~np.isnan(ecg).any(axis=1)]\n",
    "\n",
    "# Apply Savitzky-Golay filter to remove noise\n",
    "ecg = savgol_filter(ecg, window_length=5, polyorder=2, axis=0)\n",
    "\n",
    "# Apply Kalman filter to remove baseline drift\n",
    "kf = KalmanFilter(n_dim_obs=ecg.shape[1], n_dim_state=ecg.shape[1])\n",
    "ecg = kf.em(ecg).smooth(ecg)[0]\n",
    "\n",
    "# Apply polynomial fitting algorithm to remove baseline wander\n",
    "for i in range(ecg.shape[1]):\n",
    "    p = np.polyfit(np.arange(ecg.shape[0]), ecg[:, i], deg=5)\n",
    "    ecg[:, i] = ecg[:, i] - np.polyval(p, np.arange(ecg.shape[0]))\n",
    "\n",
    "# Save preprocessed ECG data to CSV file\n",
    "np.savetxt('preprocessed_ecg_data1.csv', ecg, delimiter=',')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4517e602",
   "metadata": {},
   "source": [
    "# FEATURE EXTRACTION"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "b1d8b802",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\sakth\\OneDrive\\Desktop\\PYTHON IDE\\lib\\site-packages\\scipy\\signal\\_spectral_py.py:1999: UserWarning: nperseg = 256 is greater than input length  = 11, using nperseg = 11\n",
      "  warnings.warn('nperseg = {0:d} is greater than input length '\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from scipy.stats import kurtosis, skew\n",
    "from scipy.signal import welch\n",
    "\n",
    "# Load the preprocessed dataset\n",
     "preprocessed_data = pd.read_csv('preprocessed_ecg_data1.csv', header=None)  # np.savetxt wrote no header row\n",
    "\n",
    "# Extract ECG signals from the dataset\n",
    "ecg_signals = preprocessed_data.iloc[:, 1:].values  # Assuming ECG signals are in columns 1 to 4\n",
    "\n",
    "# Initialize an empty list to store the extracted features\n",
    "features = []\n",
    "\n",
    "# Iterate over each ECG signal in the dataset\n",
    "for ecg_signal in ecg_signals:\n",
    "    # Statistical features\n",
    "    mean = np.mean(ecg_signal)\n",
    "    std = np.std(ecg_signal)\n",
    "    kurt = kurtosis(ecg_signal)\n",
    "    skewness = skew(ecg_signal)\n",
    "    \n",
    "    # Frequency-domain features\n",
    "    f, psd = welch(ecg_signal)\n",
    "    peak_freq = f[np.argmax(psd)]\n",
    "    total_power = np.sum(psd)\n",
    "    \n",
    "    # Append the features to the list\n",
    "    features.append([mean, std, kurt, skewness, peak_freq, total_power])\n",
    "\n",
    "# Convert the list of features to a NumPy array\n",
    "features = np.array(features)\n",
    "\n",
    "# Save the extracted features to a new CSV file\n",
    "feature_df = pd.DataFrame(features, columns=['mean', 'std', 'kurtosis', 'skewness', 'peak_freq', 'total_power'])\n",
    "feature_df.to_csv('ecg_features9091.csv', index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f1e98972",
   "metadata": {},
   "source": [
    "# SEGMENTATION"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "562071fd",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "           mean        std  kurtosis  skewness  peak_freq  total_power\n",
      "0     16.399851  16.012640 -0.874172 -0.870161  -0.022438    22.705893\n",
      "1     17.729595  17.464631 -0.667751 -0.909989  -0.022438    24.621334\n",
      "2     17.541390  17.691539  0.054332 -1.021467  -0.022438    21.925744\n",
      "3     15.645330  16.228724  0.052383 -1.011028  -0.022438    19.673023\n",
      "4     15.315998  15.918923 -0.569332 -0.912059  -0.022438    21.793750\n",
      "...         ...        ...       ...       ...        ...          ...\n",
      "4054   0.365603   0.099693  0.497942 -1.092687  -0.022438    -0.053639\n",
      "4055   0.354579   0.091706  0.469110 -1.090322  -0.022438    -0.053721\n",
      "4056   0.351056   0.091502  0.325874 -1.076074  -0.022438    -0.053406\n",
      "4057   0.360545   0.099181  0.299558 -1.073489  -0.022438    -0.053190\n",
      "4058   0.377014   0.110440  0.341809 -1.077281  -0.022438    -0.053011\n",
      "\n",
      "[4059 rows x 6 columns]\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "\n",
    "# Load the dataset\n",
    "dataset = pd.read_csv('ecg_features9091.csv')\n",
    "\n",
    "# Select the attributes for segmentation\n",
    "attributes = ['mean', 'std', 'kurtosis', 'skewness', 'peak_freq', 'total_power']\n",
    "segmentation_data = dataset[attributes]\n",
    "\n",
    "# Perform Z-score normalization\n",
    "scaler = StandardScaler()\n",
    "normalized_data = scaler.fit_transform(segmentation_data)\n",
    "\n",
    "# Convert the normalized data back to a DataFrame\n",
    "normalized_df = pd.DataFrame(normalized_data, columns=attributes)\n",
    "\n",
    "# Print the normalized data\n",
    "print(normalized_df)\n",
    "normalized_df.to_csv('segment101.csv', index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "604c345a",
   "metadata": {},
   "source": [
     "# MODEL BUILDING"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "2cf15ef1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/20\n",
      "102/102 [==============================] - 2s 6ms/step - loss: 0.2571 - accuracy: 0.9794 - val_loss: 0.0405 - val_accuracy: 0.9963\n",
      "Epoch 2/20\n",
      "102/102 [==============================] - 0s 3ms/step - loss: 0.0156 - accuracy: 0.9988 - val_loss: 0.0110 - val_accuracy: 0.9988\n",
      "Epoch 3/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 0.0037 - accuracy: 1.0000 - val_loss: 0.0080 - val_accuracy: 0.9988\n",
      "Epoch 4/20\n",
      "102/102 [==============================] - 0s 3ms/step - loss: 0.0017 - accuracy: 1.0000 - val_loss: 0.0074 - val_accuracy: 0.9988\n",
      "Epoch 5/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 9.6537e-04 - accuracy: 1.0000 - val_loss: 0.0072 - val_accuracy: 0.9988\n",
      "Epoch 6/20\n",
      "102/102 [==============================] - 0s 3ms/step - loss: 6.2767e-04 - accuracy: 1.0000 - val_loss: 0.0072 - val_accuracy: 0.9988\n",
      "Epoch 7/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 4.3665e-04 - accuracy: 1.0000 - val_loss: 0.0072 - val_accuracy: 0.9988\n",
      "Epoch 8/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 3.1739e-04 - accuracy: 1.0000 - val_loss: 0.0072 - val_accuracy: 0.9988\n",
      "Epoch 9/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 2.3762e-04 - accuracy: 1.0000 - val_loss: 0.0073 - val_accuracy: 0.9988\n",
      "Epoch 10/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 1.8180e-04 - accuracy: 1.0000 - val_loss: 0.0074 - val_accuracy: 0.9988\n",
      "Epoch 11/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 1.4117e-04 - accuracy: 1.0000 - val_loss: 0.0075 - val_accuracy: 0.9988\n",
      "Epoch 12/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 1.1116e-04 - accuracy: 1.0000 - val_loss: 0.0077 - val_accuracy: 0.9988\n",
      "Epoch 13/20\n",
      "102/102 [==============================] - 0s 3ms/step - loss: 8.8546e-05 - accuracy: 1.0000 - val_loss: 0.0078 - val_accuracy: 0.9988\n",
      "Epoch 14/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 7.1373e-05 - accuracy: 1.0000 - val_loss: 0.0080 - val_accuracy: 0.9988\n",
      "Epoch 15/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 5.8110e-05 - accuracy: 1.0000 - val_loss: 0.0082 - val_accuracy: 0.9988\n",
      "Epoch 16/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 4.7934e-05 - accuracy: 1.0000 - val_loss: 0.0083 - val_accuracy: 0.9988\n",
      "Epoch 17/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 3.9873e-05 - accuracy: 1.0000 - val_loss: 0.0084 - val_accuracy: 0.9988\n",
      "Epoch 18/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 3.3449e-05 - accuracy: 1.0000 - val_loss: 0.0086 - val_accuracy: 0.9988\n",
      "Epoch 19/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 2.8418e-05 - accuracy: 1.0000 - val_loss: 0.0086 - val_accuracy: 0.9988\n",
      "Epoch 20/20\n",
      "102/102 [==============================] - 0s 4ms/step - loss: 2.4221e-05 - accuracy: 1.0000 - val_loss: 0.0088 - val_accuracy: 0.9988\n",
      "26/26 [==============================] - 0s 2ms/step\n",
      "Accuracy: 0.9938605644398069\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense\n",
    "from tensorflow.keras.optimizers import Adam\n",
     "from tensorflow import keras\n",
    "\n",
    "# Load the segmented dataset from CSV\n",
    "dataset = pd.read_csv('segment101.csv')\n",
    "\n",
    "# Extract features and labels\n",
    "X = dataset.iloc[:, :5].values  # Adjust the column range according to your dataset\n",
    "y = dataset.iloc[:, 5].values\n",
    "\n",
    "# Set the threshold for converting continuous labels to binary\n",
    "threshold = 0.5\n",
    "\n",
    "# Convert labels to binary based on the threshold\n",
    "y_binary = np.where(y >= threshold, 1, 0)\n",
    "\n",
    "# Split the dataset into training and testing sets\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y_binary, test_size=0.2, random_state=42)\n",
    "\n",
    "# Reshape the data for model input\n",
    "X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)\n",
    "X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)\n",
    "\n",
    "# Define the CNN model\n",
    "model = Sequential()\n",
    "model.add(Conv1D(filters=32, kernel_size=3, activation='relu', input_shape=(5, 1)))\n",
    "model.add(MaxPooling1D(pool_size=2))\n",
    "model.add(Flatten())\n",
    "model.add(Dense(64, activation='relu'))\n",
    "model.add(Dense(1, activation='sigmoid'))\n",
    "\n",
    "# Compile the model\n",
    "model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])\n",
    "\n",
    "# Train the model\n",
    "model.fit(X_train, y_train, epochs=20, batch_size=32, validation_data=(X_test, y_test))\n",
    "\n",
    "# Evaluate the model\n",
    "y_pred = model.predict(X_test)\n",
    "y_pred_binary = np.where(y_pred >= threshold, 1, 0)\n",
    "accuracy = (y_test == y_pred_binary).mean()\n",
    "print(\"Accuracy:\", accuracy)\n",
     "# Save the trained model for later authentication use\n",
    "model.save('cnn_model10100.h5')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6c144c23",
   "metadata": {},
   "source": [
    "# Evaluation metrics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "f6231554",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Precision: 1.0\n",
      "Recall: 0.6666666666666666\n",
      "F1 Score: 0.8\n",
      "Accuracy: 0.9987684729064039\n"
     ]
    },
    {
     "ename": "NameError",
     "evalue": "name 'fpr' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[16], line 29\u001b[0m\n\u001b[0;32m     27\u001b[0m \u001b[38;5;66;03m# Plot the ROC curve\u001b[39;00m\n\u001b[0;32m     28\u001b[0m plt\u001b[38;5;241m.\u001b[39mfigure(figsize\u001b[38;5;241m=\u001b[39m(\u001b[38;5;241m8\u001b[39m, \u001b[38;5;241m6\u001b[39m))\n\u001b[1;32m---> 29\u001b[0m plt\u001b[38;5;241m.\u001b[39mplot(\u001b[43mfpr\u001b[49m, tpr, label\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mROC curve (area = \u001b[39m\u001b[38;5;132;01m%0.2f\u001b[39;00m\u001b[38;5;124m)\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;241m%\u001b[39m auc_roc)\n\u001b[0;32m     30\u001b[0m plt\u001b[38;5;241m.\u001b[39mplot([\u001b[38;5;241m0\u001b[39m, \u001b[38;5;241m1\u001b[39m], [\u001b[38;5;241m0\u001b[39m, \u001b[38;5;241m1\u001b[39m], \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mk--\u001b[39m\u001b[38;5;124m'\u001b[39m)  \u001b[38;5;66;03m# Add the diagonal line\u001b[39;00m\n\u001b[0;32m     31\u001b[0m plt\u001b[38;5;241m.\u001b[39mxlabel(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mFalse Positive Rate\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
      "\u001b[1;31mNameError\u001b[0m: name 'fpr' is not defined"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<Figure size 800x600 with 0 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "import numpy as np\n",
    "from sklearn.metrics import classification_report, confusion_matrix\n",
    "\n",
    "# Assuming you have loaded the test data and made predictions using the trained model\n",
    "y_true = y_test  # True labels\n",
    "y_pred = y_pred_binary  # Predicted labels\n",
    "\n",
    "# Calculate confusion matrix\n",
    "cm = confusion_matrix(y_true, y_pred)\n",
    "tn, fp, fn, tp = cm.ravel()\n",
    "\n",
    "# Calculate precision, recall, and F1 score\n",
    "precision = tp / (tp + fp)\n",
    "recall = tp / (tp + fn)\n",
    "f1_score = 2 * (precision * recall) / (precision + recall)\n",
    "accuracy = (tp + tn) / (tp + tn + fp + fn)\n",
    "\n",
    "# Print the metrics\n",
    "print(\"Precision:\", precision)\n",
    "print(\"Recall:\", recall)\n",
    "print(\"F1 Score:\", f1_score)\n",
    "print(\"Accuracy:\", accuracy)\n",
    "\n",
     "# Plot ROC curve\n",
     "import matplotlib.pyplot as plt\n",
     "from sklearn.metrics import roc_curve, auc\n",
     "\n",
     "# Compute the ROC points and AUC from the true labels and predictions\n",
     "fpr, tpr, _ = roc_curve(y_true, y_pred)\n",
     "auc_roc = auc(fpr, tpr)\n",
     "\n",
     "# Plot the ROC curve\n",
     "plt.figure(figsize=(8, 6))\n",
     "plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % auc_roc)\n",
    "plt.plot([0, 1], [0, 1], 'k--')  # Add the diagonal line\n",
    "plt.xlabel('False Positive Rate')\n",
    "plt.ylabel('True Positive Rate')\n",
    "plt.title('Receiver Operating Characteristic (ROC) Curve')\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8eed706e",
   "metadata": {},
   "source": [
    "# AUTHENTICATION"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "e1feba9e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1/1 [==============================] - 0s 194ms/step\n",
      "Authentication successful\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from keras.models import load_model\n",
    "\n",
    "# Load the pre-trained model\n",
    "model = load_model(\"cnn_model10100.h5\")\n",
    "\n",
    "# Load the segmented dataset from CSV\n",
    "segmented_data = pd.read_csv(\"one_segment.csv\")\n",
    "\n",
    "# Adjust the shape of the segmented data\n",
    "segmented_data = segmented_data.values  # Convert to numpy array\n",
    "segmented_data = np.reshape(segmented_data[:, :5], (segmented_data.shape[0], 5, 1))  # Reshape to (n_samples, 5, 1)\n",
    "\n",
    "# Feed the reshaped data to the model\n",
    "prediction = model.predict(segmented_data)\n",
    "\n",
    "# Make the authentication decision\n",
    "threshold = 0.5  # Define your threshold\n",
     "is_authenticated = np.all(prediction > threshold)\n",
     "\n",
    "# Print the authentication decision\n",
    "if is_authenticated:\n",
    "    print(\"Authentication successful\")\n",
    "else:\n",
    "    print(\"Authentication failed\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0f69c147",
   "metadata": {},
   "source": [
    "# GUI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "fa1a4561",
   "metadata": {},
   "outputs": [
    {
     "ename": "SystemExit",
     "evalue": "0",
     "output_type": "error",
     "traceback": [
      "An exception has occurred, use %tb to see the full traceback.\n",
      "\u001b[1;31mSystemExit\u001b[0m\u001b[1;31m:\u001b[0m 0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\sakth\\OneDrive\\Desktop\\PYTHON IDE\\lib\\site-packages\\IPython\\core\\interactiveshell.py:3516: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.\n",
      "  warn(\"To exit: use 'exit', 'quit', or Ctrl-D.\", stacklevel=1)\n"
     ]
    }
   ],
   "source": [
    "import sys\n",
    "from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QPushButton, QFileDialog\n",
    "from PyQt5.QtCore import Qt\n",
    "import numpy as np\n",
    "from scipy.signal import savgol_filter\n",
    "from pykalman import KalmanFilter\n",
    "import pandas as pd\n",
    "from PyQt5.QtGui import QPixmap\n",
    "\n",
    "from scipy.stats import kurtosis, skew\n",
    "from scipy.signal import welch\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from keras.models import load_model\n",
    "from PyQt5.QtGui import QMovie\n",
    "from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QPushButton, QFileDialog\n",
    "from PyQt5.QtCore import Qt\n",
    "from PyQt5.QtGui import QFont\n",
    "from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QPushButton, QVBoxLayout, QWidget, QSizePolicy\n",
    "import sys\n",
    "from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel\n",
    "from PyQt5.QtCore import Qt\n",
    "from PyQt5.QtGui import QPixmap, QMovie\n",
    "from PyQt5.QtWidgets import QMessageBox\n",
    "class ECGPreprocessingApp(QMainWindow):\n",
    "    def __init__(self):\n",
    "        super(ECGPreprocessingApp, self).__init__()\n",
    "        # Set background GIF\n",
    "        \n",
    "        self.setWindowTitle(\"ECG Preprocessing App\")\n",
    "        self.setGeometry(110, 110, 2000, 1250)\n",
    "        self.setStyleSheet(\"QMainWindow::title {font-size: 50pt;}\")# Set the font size for the window's title bar\n",
    "\n",
    "        # Set background GIF\n",
    "        self.background_label = QLabel(self)\n",
    "        self.background_label.setGeometry(0, 0, self.width(), self.height())\n",
    "        self.background_label.setScaledContents(True)  # Scale the image to fit the label\n",
    "        self.movie = QMovie(\"70bm.gif\")\n",
    "        self.movie.setScaledSize(self.size())  # Set the size of the GIF to match the window size\n",
    "        self.background_label.setMovie(self.movie)\n",
    "        self.movie.start()\n",
    "            # Set background GIF\n",
    "        #self.setStyleSheet(\"background-image: url(ekg-heart-rate.gif); background-repeat: no-repeat; background-position: center;\")\n",
    "\n",
    "          # Set background color\n",
    "        self.setStyleSheet(\"background-color: white;\")\n",
    "        # Create labels\n",
    "        self.status_label = QLabel(\"ECG AUTHENTICATION\", self)\n",
    "        self.status_label.setGeometry(800, 10, 300, 30)\n",
    "        self.status_label.setAlignment(Qt.AlignCenter)\n",
    "        font = QFont()\n",
    "        font.setPointSize(15)  # Set the desired font size\n",
    "        self.status_label.setFont(font)\n",
    "        \n",
    "        self.status_label = QLabel(\"No file selected.\", self)\n",
    "        self.status_label.setGeometry(800, 50, 300, 30)\n",
    "        self.status_label.setAlignment(Qt.AlignCenter)\n",
    "        font = QFont()\n",
    "        font.setPointSize(10)  # Set the desired font size\n",
    "        self.status_label.setFont(font)\n",
    "\n",
    "        self.preprocessing_label = QLabel(\"Preprocessing: Not completed\", self)\n",
    "        self.preprocessing_label.setGeometry(800, 90, 300, 30)\n",
    "        self.preprocessing_label.setAlignment(Qt.AlignCenter)\n",
    "        font = QFont()\n",
    "        font.setPointSize(10)  # Set the desired font size\n",
    "        self.preprocessing_label.setFont(font)\n",
    "        \n",
    "        self.feature_extraction_label = QLabel(\"Feature Extraction: Not completed\", self)\n",
    "        self.feature_extraction_label.setGeometry(800, 130, 300, 30)\n",
    "        self.feature_extraction_label.setAlignment(Qt.AlignCenter)\n",
    "        font = QFont()\n",
    "        font.setPointSize(10)  # Set the desired font size\n",
    "        self.feature_extraction_label.setFont(font)\n",
    "        \n",
    "        self.segmentation_label = QLabel(\"Segmentation: Not completed\", self)\n",
    "        self.segmentation_label.setGeometry(800, 170, 300, 30)\n",
    "        self.segmentation_label.setAlignment(Qt.AlignCenter)\n",
    "        font = QFont()\n",
    "        font.setPointSize(10)  # Set the desired font size\n",
    "        self.segmentation_label.setFont(font)\n",
    "        \n",
    "        self.model_building_label = QLabel(\"Model Building: Not completed\", self)\n",
    "        self.model_building_label.setGeometry(800, 210, 300, 30)\n",
    "        self.model_building_label.setAlignment(Qt.AlignCenter)\n",
    "        font = QFont()\n",
    "        font.setPointSize(10)  # Set the desired font size\n",
    "        self.model_building_label.setFont(font)\n",
    "        \n",
    "        self.authentication_label = QLabel(\"Authentication: Not completed\", self)\n",
    "        self.authentication_label.setGeometry(800, 250, 300, 30)\n",
    "        self.authentication_label.setAlignment(Qt.AlignCenter)\n",
    "        font = QFont()\n",
    "        font.setPointSize(10)  # Set the desired font size\n",
    "        self.authentication_label.setFont(font)\n",
    "\n",
    "         # Create buttons\n",
    "        self.select_file_button = QPushButton(\"Select File\", self)\n",
    "        self.select_file_button.setGeometry(900, 340, 150, 50)\n",
    "        self.select_file_button.clicked.connect(self.select_file)\n",
    "        self.select_file_button.setFont(QFont(\"Arial\", 10))\n",
    "\n",
    "        self.preprocessing_button = QPushButton(\"Preprocessing\", self)\n",
    "        self.preprocessing_button.setGeometry(900, 400, 150, 50)\n",
    "        self.preprocessing_button.setEnabled(False)\n",
    "        self.preprocessing_button.clicked.connect(self.preprocessing)\n",
    "        self.preprocessing_button.setFont(QFont(\"Arial\", 10))\n",
    "\n",
    "        self.feature_extraction_button = QPushButton(\"Feature Extraction\", self)\n",
    "        self.feature_extraction_button.setGeometry(900, 460, 150, 50)\n",
    "        self.feature_extraction_button.setEnabled(False)\n",
    "        self.feature_extraction_button.clicked.connect(self.feature_extraction)\n",
    "        self.feature_extraction_button.setFont(QFont(\"Arial\", 10))\n",
    "\n",
    "        self.segmentation_button = QPushButton(\"Segmentation\", self)\n",
    "        self.segmentation_button.setGeometry(900, 520, 150, 50)\n",
    "        self.segmentation_button.setEnabled(False)\n",
    "        self.segmentation_button.clicked.connect(self.segmentation)\n",
    "        self.segmentation_button.setFont(QFont(\"Arial\", 10))\n",
    "\n",
    "        self.model_building_button = QPushButton(\"Model Building\", self)\n",
    "        self.model_building_button.setGeometry(900, 580, 150, 50)\n",
    "        self.model_building_button.setEnabled(False)\n",
    "        self.model_building_button.clicked.connect(self.model_building)\n",
    "        self.model_building_button.setFont(QFont(\"Arial\", 10))\n",
    "\n",
    "        self.authentication_button = QPushButton(\"Authentication\", self)\n",
    "        self.authentication_button.setGeometry(900, 640, 150, 50)\n",
    "        self.authentication_button.setEnabled(False)\n",
    "        self.authentication_button.clicked.connect(self.authenticate)\n",
    "        self.authentication_button.setFont(QFont(\"Arial\", 10))\n",
    "        \n",
    "        self.is_authenticated = False\n",
    "        \n",
    "\n",
    "    def select_file(self):\n",
    "        file_dialog = QFileDialog()\n",
    "        file_path, _ = file_dialog.getOpenFileName(self, \"Select ECG File\", \"\", \"CSV Files (*.csv)\")\n",
    "\n",
    "        if file_path:\n",
    "            self.status_label.setText(\"File selected: {}\".format(file_path))\n",
    "            self.file_path = file_path\n",
    "            self.preprocessing_button.setEnabled(True)\n",
    "\n",
    "    def preprocessing(self):\n",
    "        # Load ECG data from CSV file\n",
    "        ecg = np.genfromtxt(self.file_path, delimiter=',')\n",
    "\n",
    "        # Remove NaN values\n",
    "        ecg = ecg[~np.isnan(ecg).any(axis=1)]\n",
    "\n",
    "        # Apply Savitzky-Golay filter to remove noise\n",
    "        ecg = savgol_filter(ecg, window_length=5, polyorder=2, axis=0)\n",
    "\n",
    "        # Apply Kalman filter to remove baseline drift\n",
    "        kf = KalmanFilter(n_dim_obs=ecg.shape[1], n_dim_state=ecg.shape[1])\n",
    "        ecg = kf.em(ecg).smooth(ecg)[0]\n",
    "\n",
    "        # Apply polynomial fitting algorithm to remove baseline wander\n",
    "        for i in range(ecg.shape[1]):\n",
    "            p = np.polyfit(np.arange(ecg.shape[0]), ecg[:, i], deg=5)\n",
    "            ecg[:, i] = ecg[:, i] - np.polyval(p, np.arange(ecg.shape[0]))\n",
    "\n",
    "        # Save preprocessed ECG data to CSV file\n",
    "        np.savetxt('preprocessed_ecg_data.csv', ecg, delimiter=',')\n",
    "\n",
    "        self.preprocessing_label.setText(\"Preprocessing: Completed\")\n",
    "        self.feature_extraction_button.setEnabled(True)\n",
    "\n",
    "    def feature_extraction(self):\n",
    "        # Load the preprocessed dataset\n",
     "        preprocessed_data = pd.read_csv('preprocessed_ecg_data.csv', header=None)  # np.savetxt wrote no header row\n",
    "\n",
    "        # Extract ECG signals from the dataset\n",
    "        ecg_signals = preprocessed_data.iloc[:, 1:].values  # Assuming ECG signals are in columns 1 to 4\n",
    "\n",
    "        # Initialize an empty list to store the extracted features\n",
    "        features = []\n",
    "\n",
    "        # Iterate over each ECG signal in the dataset\n",
    "        for ecg_signal in ecg_signals:\n",
    "            # Statistical features\n",
    "            mean = np.mean(ecg_signal)\n",
    "            std = np.std(ecg_signal)\n",
    "            kurt = kurtosis(ecg_signal)\n",
    "            skewness = skew(ecg_signal)\n",
    "\n",
    "            # Frequency-domain features\n",
    "            f, psd = welch(ecg_signal)\n",
    "            peak_freq = f[np.argmax(psd)]\n",
    "            total_power = np.sum(psd)\n",
    "\n",
    "            # Append the features to the list\n",
    "            features.append([mean, std, kurt, skewness, peak_freq, total_power])\n",
    "\n",
    "        # Convert the list of features to a NumPy array\n",
    "        features = np.array(features)\n",
    "\n",
    "        # Save the extracted features to a new CSV file\n",
    "        feature_df = pd.DataFrame(features, columns=['mean', 'std', 'kurtosis', 'skewness', 'peak_freq', 'total_power'])\n",
    "        feature_df.to_csv('ecg_features.csv', index=False)\n",
    "\n",
    "        self.feature_extraction_label.setText(\"Feature Extraction: Completed\")\n",
    "        self.segmentation_button.setEnabled(True)\n",
    "\n",
    "    def segmentation(self):\n",
    "        # Load the dataset\n",
    "        dataset = pd.read_csv('ecg_features.csv')\n",
    "\n",
    "        # Select the attributes for segmentation\n",
    "        attributes = ['mean', 'std', 'kurtosis', 'skewness', 'peak_freq', 'total_power']\n",
    "        segmentation_data = dataset[attributes]\n",
    "\n",
    "        # Perform Z-score normalization\n",
    "        scaler = StandardScaler()\n",
    "        normalized_data = scaler.fit_transform(segmentation_data)\n",
    "\n",
    "        # Convert the normalized data back to a DataFrame\n",
    "        normalized_df = pd.DataFrame(normalized_data, columns=attributes)\n",
    "\n",
    "        # Save the normalized data to a new CSV file\n",
    "        normalized_df.to_csv('segmented_data.csv', index=False)\n",
    "\n",
    "        self.segmentation_label.setText(\"Segmentation: Completed\")\n",
    "        self.model_building_button.setEnabled(True)\n",
    "\n",
    "    def model_building(self):\n",
    "        # Load the segmented dataset from CSV\n",
    "        segmented_data = pd.read_csv(\"one_segment.csv\")\n",
    "\n",
    "        # Load the pre-trained model\n",
    "        model = load_model(\"cnn_model10100.h5\")\n",
    "\n",
    "        # Adjust the shape of the segmented data\n",
    "        segmented_data = segmented_data.values  # Convert to numpy array\n",
    "        segmented_data = np.reshape(segmented_data[:, :5], (segmented_data.shape[0], 5, 1))  # Reshape to (n_samples, 5, 1)\n",
    "\n",
    "        # Feed the reshaped data to the model\n",
    "        prediction = model.predict(segmented_data)\n",
    "\n",
    "        # Make the authentication decision\n",
    "        threshold = 0.5  # Define your threshold\n",
    "        self.is_authenticated = np.all(prediction > threshold)\n",
    "\n",
    "        self.model_building_label.setText(\"Model Building: Completed\")\n",
    "        self.authentication_button.setEnabled(True)\n",
    "\n",
    "    def authenticate(self):\n",
    "        if self.is_authenticated:\n",
    "            authentication_result = \"Authentication successful\"\n",
    "        else:\n",
    "            authentication_result = \"Authentication failed\"\n",
    "\n",
    "        msg_box = QMessageBox()\n",
    "        msg_box.setText(authentication_result)\n",
    "        msg_box.exec_()\n",
    "       # self.authenticate_button.clicked.connect(self.authenticate)\n",
    "if __name__ == '__main__':\n",
    "    \n",
    "    \n",
    "    \n",
    "    app = QApplication(sys.argv)\n",
    "    window = ECGPreprocessingApp()\n",
    "    window.show()\n",
    "    sys.exit(app.exec_())\n",
    "\n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1ca1bc4d",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
