{"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3","language":"python"},"language_info":{"name":"python","version":"3.11.13","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"colab":{"provenance":[]},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":12830950,"sourceType":"datasetVersion","datasetId":8114691}],"dockerImageVersionId":31090,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"## PPG SpO2 Estimation","metadata":{}},{"cell_type":"markdown","source":"\"\"\"\nPPG Feature Extraction and SpO2 Estimation Pipeline\n\n Cardiac Design Labs - Assignment Solution\nAuthor: Jacob Joshy\nDate: August 23 2025\nVersion: 1.0.0\n\nDESCRIPTION:\n    Complete pipeline for PPG (Photoplethysmography) signal processing and SpO2 estimation.\n    Implements advanced signal processing, feature extraction, and machine learning approaches\n    for accurate oxygen saturation estimation from PPG signals.\n\nFEATURES:\n    • Signal preprocessing with noise removal and artifact detection\n    • Physiologically-constrained beat detection and segmentation\n    • Comprehensive feature extraction (morphological, temporal, spectral)\n    • Classical SpO2 estimation using Beer-Lambert law\n    • Machine learning models with hyperparameter optimization\n    • Deep learning models optimized for small datasets\n    • Kaggle environment compatibility\n    • Real-time inference capabilities\n\nUSAGE:\n\n    python ppg_spo2_notebook_reorganized.py\n\nREQUIREMENTS:\n    numpy, pandas, scipy, scikit-learn, matplotlib, seaborn\n    Optional: tensorflow (for deep learning), wfdb (for PhysioNet data)\n\nLICENSE:\n    MIT License - See LICENSE file for details\n    \nDISCLAIMER:\n    This software is for research and educational purposes only.\n    
Not intended for clinical diagnosis or treatment.\n\"\"\"\n","metadata":{}},{"cell_type":"code","source":"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy import signal\nfrom scipy.signal import find_peaks, butter, filtfilt, savgol_filter\nfrom scipy.interpolate import interp1d\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error\nfrom sklearn.preprocessing import StandardScaler\nimport joblib\nimport json\nimport argparse\nimport os\nimport sys\nfrom pathlib import Path\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# Kaggle environment paths\nKAGGLE_INPUT_PATH = \"/kaggle/input/yuhuty7/ppg_dataset_full/csv\"\nKAGGLE_WORKING_PATH = \"/kaggle/working\"\n","metadata":{"id":"Z_Ugw242wZSc","trusted":true,"execution":{"iopub.status.busy":"2025-08-22T21:22:21.797679Z","iopub.execute_input":"2025-08-22T21:22:21.798172Z","iopub.status.idle":"2025-08-22T21:22:23.428047Z","shell.execute_reply.started":"2025-08-22T21:22:21.798149Z","shell.execute_reply":"2025-08-22T21:22:23.427431Z"}},"outputs":[],"execution_count":1},{"cell_type":"code","source":"\ntry:\n    import wfdb  # For PhysioNet WFDB format\n    WFDB_AVAILABLE = True\nexcept ImportError:\n    WFDB_AVAILABLE = False\n    print(\"WFDB not available - WFDB format loading disabled\")\n\ntry:\n    from scipy.io import loadmat  # For MATLAB files\n    MATLAB_AVAILABLE = True\nexcept ImportError:\n    MATLAB_AVAILABLE = False\n    print(\"MATLAB file support not available\")\n\ntry:\n    import tensorflow as tf\n    from tensorflow import keras\n    from tensorflow.keras import layers\n    TF_AVAILABLE = True\n    print(\"TensorFlow available for deep learning 
class PPGProcessor:
    """End-to-end PPG (photoplethysmography) processing pipeline.

    Handles data loading (simulated or CSV), preprocessing (filtering,
    artifact removal, normalization), physiologically-constrained beat
    detection, and per-beat / dual-channel feature extraction for SpO2
    estimation.
    """

    def __init__(self, sampling_rate=125):
        self.fs = sampling_rate        # sampling frequency in Hz
        self.features = {}             # reserved for aggregated features
        self.processed_signal = None   # set by preprocess_signal()
        self.peaks = None              # systolic peak indices, set by detect_beats()
        self.beat_segments = []

    def load_data(self, file_path=None, data_type='simulated'):
        """Load a PPG recording.

        Parameters
        ----------
        file_path : str, optional
            Path to a CSV file; required when ``data_type == 'csv'``.
        data_type : {'simulated', 'csv'}
            'simulated' synthesizes 60 s of PPG with a dicrotic notch,
            respiratory modulation, noise, and motion artifacts; 'csv'
            auto-detects the PPG/SpO2/time columns from common column names.

        Returns
        -------
        (signal, time) arrays, or (None, None) when CSV loading fails.
        """
        if data_type == 'simulated':
            # 60 seconds of samples at self.fs Hz
            t = np.linspace(0, 60, self.fs * 60)
            heart_rate = 75  # BPM

            # Main pulse wave at the cardiac frequency
            pulse_freq = heart_rate / 60
            ppg_signal = np.sin(2 * np.pi * pulse_freq * t)

            # Add dicrotic notch (secondary peak)
            dicrotic = 0.3 * np.sin(2 * np.pi * pulse_freq * t + np.pi/3)
            ppg_signal += dicrotic

            # Add respiratory amplitude modulation (15 breaths per minute)
            resp_freq = 0.25
            resp_modulation = 0.1 * np.sin(2 * np.pi * resp_freq * t)
            ppg_signal *= (1 + resp_modulation)

            # Additive Gaussian noise
            noise = 0.05 * np.random.randn(len(t))
            ppg_signal += noise

            # Simulate motion artifacts as sparse random spikes
            motion_artifacts = np.zeros_like(ppg_signal)
            artifact_indices = np.random.choice(len(ppg_signal), size=10, replace=False)
            motion_artifacts[artifact_indices] = np.random.uniform(-0.5, 0.5, 10)
            ppg_signal += motion_artifacts

            # Ground-truth SpO2 ~ N(97, 2), clipped to a plausible range
            spo2_true = np.random.normal(97, 2, len(ppg_signal))
            spo2_true = np.clip(spo2_true, 85, 100)

            self.raw_signal = ppg_signal
            self.time = t
            self.spo2_true = spo2_true

            print(f"Generated simulated PPG data: {len(ppg_signal)} samples, {self.fs} Hz")

        elif data_type == 'csv':
            if file_path is None:
                raise ValueError("file_path must be provided for CSV data")

            try:
                data = pd.read_csv(file_path)

                # Candidate column names, checked in order
                ppg_cols = ['ppg', 'PPG', 'pleth', 'PLETH', 'signal', 'Signal']
                spo2_cols = ['spo2', 'SpO2', 'SAO2', 'sao2', 'oxygen_saturation']
                time_cols = ['time', 'Time', 'timestamp', 'Timestamp', 't']

                ppg_col = next((c for c in ppg_cols if c in data.columns), None)

                if ppg_col is None:
                    # Fall back to the first numeric column
                    numeric_cols = data.select_dtypes(include=[np.number]).columns
                    if len(numeric_cols) > 0:
                        ppg_col = numeric_cols[0]
                        print(f"Using column '{ppg_col}' as PPG signal")
                    else:
                        raise ValueError("No numeric columns found for PPG signal")

                self.raw_signal = data[ppg_col].values

                spo2_col = next((c for c in spo2_cols if c in data.columns), None)
                if spo2_col is not None:
                    self.spo2_true = data[spo2_col].values
                else:
                    print("No SpO2 ground truth found in CSV")
                    self.spo2_true = None

                # Time vector: from the file if available, else derived from fs
                time_col = next((c for c in time_cols if c in data.columns), None)
                if time_col is not None:
                    self.time = data[time_col].values
                else:
                    self.time = np.linspace(0, len(self.raw_signal)/self.fs, len(self.raw_signal))

                print(f"Loaded CSV data: {len(self.raw_signal)} samples")

            except Exception as e:
                print(f"Error loading CSV file: {e}")
                return None, None

        return self.raw_signal, self.time

    def preprocess_signal(self, signal_data=None):
        """Filter, de-noise, and z-score-normalize the raw PPG signal.

        Steps: 0.5-8 Hz bandpass, highpass baseline-wander removal,
        window-wise motion-artifact replacement, Savitzky-Golay smoothing,
        and normalization. Defaults to ``self.raw_signal``.
        """
        if signal_data is None:
            signal_data = self.raw_signal

        print("\nPreprocessing PPG signal...")

        # 1. Bandpass filter (0.5-8 Hz) keeps cardiac fundamentals + harmonics
        nyquist = self.fs / 2
        low_cutoff = 0.5 / nyquist
        high_cutoff = 8.0 / nyquist

        b, a = butter(4, [low_cutoff, high_cutoff], btype='band')
        filtered_signal = filtfilt(b, a, signal_data)

        # 2. Baseline wander removal (2nd-order highpass at 0.5 Hz)
        b_hp, a_hp = butter(2, 0.5/nyquist, btype='high')
        detrended_signal = filtfilt(b_hp, a_hp, filtered_signal)

        # 3. Motion artifact detection on 5-second windows: low-quality
        #    windows are replaced via interpolation
        window_size = self.fs * 5
        sqi_threshold = 0.7

        clean_signal = detrended_signal.copy()
        for i in range(0, len(clean_signal) - window_size, window_size):
            window = clean_signal[i:i+window_size]
            sqi = self._calculate_sqi(window)

            if sqi < sqi_threshold:
                clean_signal[i:i+window_size] = self._interpolate_segment(
                    clean_signal, i, i+window_size)

        # 4. Light Savitzky-Golay smoothing
        clean_signal = savgol_filter(clean_signal, window_length=5, polyorder=2)

        # 5. Z-score normalization (guard against a perfectly flat signal)
        std = np.std(clean_signal)
        if std == 0:
            std = 1.0
        self.processed_signal = (clean_signal - np.mean(clean_signal)) / std

        print(f"Preprocessing completed. Signal length: {len(self.processed_signal)}")
        return self.processed_signal

    def _calculate_sqi(self, window):
        """Crude signal-quality index in [0, 1]: variance of the window over
        variance of its first difference (a noise proxy), scaled by 1/100."""
        signal_power = np.var(window)
        noise_estimate = np.var(np.diff(window))
        if noise_estimate == 0:
            return 1.0
        snr = signal_power / noise_estimate
        return min(snr / 100, 1.0)

    def _interpolate_segment(self, signal, start_idx, end_idx):
        """Return ``end_idx - start_idx`` replacement samples for a bad segment.

        Interior segments are linearly interpolated between their neighbors;
        segments at the signal boundaries are replaced by the adjacent
        segment. Boundary slices are padded when the signal is too short so
        the caller's slice assignment always receives a full-length array
        (the original could return a short slice and raise a broadcast error).
        """
        length = end_idx - start_idx
        if start_idx == 0:
            replacement = signal[end_idx:end_idx + length]
        elif end_idx >= len(signal):
            replacement = signal[start_idx - length:start_idx]
        else:
            x = np.array([start_idx - 1, end_idx])
            y = np.array([signal[start_idx - 1], signal[end_idx]])
            interp_func = interp1d(x, y, kind='linear')
            return interp_func(np.arange(start_idx, end_idx))

        # Near the edges the adjacent slice can be short; pad with the last
        # available value (or zeros) up to the required length.
        if len(replacement) < length:
            pad_value = replacement[-1] if len(replacement) > 0 else 0.0
            replacement = np.concatenate(
                [replacement, np.full(length - len(replacement), pad_value)])
        return replacement

    def detect_beats(self, signal_data=None):
        """Locate systolic peaks under physiological constraints.

        Returns the validated peak indices (also stored in ``self.peaks``).
        """
        if signal_data is None:
            signal_data = self.processed_signal

        print("\nDetecting heartbeats...")

        # Minimum 0.4 s between peaks (i.e. at most 150 BPM)
        min_distance = int(0.4 * self.fs)

        # Amplitude threshold adapted to the signal's spread
        signal_std = np.std(signal_data)
        threshold = 0.3 * signal_std

        peaks, properties = find_peaks(signal_data,
                                       height=threshold,
                                       distance=min_distance,
                                       prominence=threshold/2)

        # Drop peaks with non-physiological inter-beat intervals
        valid_peaks = self._validate_peaks(peaks, signal_data)

        self.peaks = valid_peaks

        if len(valid_peaks) > 1:
            beat_intervals = np.diff(valid_peaks) / self.fs
            heart_rates = 60 / beat_intervals

            print(f"Detected {len(valid_peaks)} beats")
            print(f"Average heart rate: {np.mean(heart_rates):.1f} ± {np.std(heart_rates):.1f} BPM")

        return valid_peaks

    def _validate_peaks(self, peaks, signal_data):
        """Keep only peaks whose interval from the previously kept peak
        implies a heart rate in the 40-180 BPM range."""
        if len(peaks) < 2:
            return peaks

        valid_peaks = [peaks[0]]
        for i in range(1, len(peaks)):
            interval = (peaks[i] - valid_peaks[-1]) / self.fs
            if 0.33 <= interval <= 1.5:
                valid_peaks.append(peaks[i])

        return np.array(valid_peaks)

    def extract_beat_features(self, signal_data=None, peaks=None):
        """Compute per-beat morphological/temporal/spectral features.

        Returns a DataFrame with one row per beat. Robustness fix: returns an
        empty DataFrame instead of crashing when fewer than two peaks (or no
        peaks at all) are available.
        """
        if signal_data is None:
            signal_data = self.processed_signal
        if peaks is None:
            peaks = self.peaks

        print("\nExtracting beat-level features...")

        if peaks is None or len(peaks) < 2:
            # The original indexed `peaks` unconditionally and raised here.
            print("Extracted features for 0 beats")
            self.beat_features = pd.DataFrame()
            return self.beat_features

        beat_features = []
        for i in range(len(peaks) - 1):
            start_idx = peaks[i]
            end_idx = peaks[i + 1]
            beat_segment = signal_data[start_idx:end_idx]

            if len(beat_segment) < 10:  # skip implausibly short beats
                continue

            beat_features.append(self._analyze_single_beat(beat_segment, start_idx))

        self.beat_features = pd.DataFrame(beat_features)
        print(f"Extracted features for {len(beat_features)} beats")

        return self.beat_features

    def _analyze_single_beat(self, beat_segment, start_idx):
        """Extract features from one beat (a peak-to-peak segment)."""
        features = {}

        # 1. Systolic peak (global maximum of the beat)
        systolic_peak_idx = np.argmax(beat_segment)
        systolic_amplitude = beat_segment[systolic_peak_idx]
        features['systolic_amplitude'] = systolic_amplitude
        features['systolic_time'] = systolic_peak_idx / self.fs

        # 2. Dicrotic notch: local minimum in the latter part of the downslope
        post_systolic = beat_segment[systolic_peak_idx:]
        if len(post_systolic) > 10:
            search_start = len(post_systolic) // 3
            dicrotic_idx = search_start + np.argmin(post_systolic[search_start:])
            features['dicrotic_amplitude'] = post_systolic[dicrotic_idx]
            features['dicrotic_time'] = (systolic_peak_idx + dicrotic_idx) / self.fs

            # 3. Diastolic peak after the dicrotic notch
            post_dicrotic = post_systolic[dicrotic_idx:]
            if len(post_dicrotic) > 5:
                diastolic_peak_idx = dicrotic_idx + np.argmax(post_dicrotic)
                features['diastolic_amplitude'] = post_systolic[diastolic_peak_idx]
                features['diastolic_time'] = (systolic_peak_idx + diastolic_peak_idx) / self.fs

        # 4. Timing features
        features['pulse_width'] = len(beat_segment) / self.fs

        # Rise time from 10% to 90% of the systolic amplitude
        peak_10 = 0.1 * systolic_amplitude
        peak_90 = 0.9 * systolic_amplitude
        rise_start = np.where(beat_segment[:systolic_peak_idx] >= peak_10)[0]
        rise_end = np.where(beat_segment[:systolic_peak_idx] >= peak_90)[0]

        if len(rise_start) > 0 and len(rise_end) > 0:
            features['rise_time'] = (rise_end[0] - rise_start[0]) / self.fs
        else:
            features['rise_time'] = 0

        # 5. Morphological features
        features['area_under_curve'] = np.trapz(beat_segment) / self.fs
        features['peak_to_peak_amplitude'] = np.max(beat_segment) - np.min(beat_segment)
        features['mean_amplitude'] = np.mean(beat_segment)
        features['std_amplitude'] = np.std(beat_segment)

        # 6. Spectral features (guard the centroid against an all-zero FFT)
        fft_beat = np.abs(np.fft.fft(beat_segment))
        fft_sum = np.sum(fft_beat)
        if fft_sum > 0:
            features['spectral_centroid'] = np.sum(np.arange(len(fft_beat)) * fft_beat) / fft_sum
        else:
            features['spectral_centroid'] = 0.0
        features['spectral_energy'] = np.sum(fft_beat**2)

        return features

    def extract_spo2_features(self, red_ppg=None, ir_ppg=None):
        """Compute dual-wavelength features used for SpO2 estimation.

        When only one channel is available, an IR channel is simulated from
        the processed signal so the rest of the pipeline stays exercisable.
        """
        print("\nExtracting SpO2-specific features...")

        if red_ppg is None or ir_ppg is None:
            # Simulate red and IR channels from the single processed PPG
            red_ppg = self.processed_signal
            ir_ppg = self.processed_signal * 1.2 + 0.1 * np.random.randn(len(self.processed_signal))

        spo2_features = {}

        # 1. AC (pulsatile) and DC (baseline) components per channel
        red_ac = np.std(red_ppg)
        red_dc = np.mean(np.abs(red_ppg))
        ir_ac = np.std(ir_ppg)
        ir_dc = np.mean(np.abs(ir_ppg))

        spo2_features['red_ac_dc_ratio'] = red_ac / red_dc if red_dc != 0 else 0
        spo2_features['ir_ac_dc_ratio'] = ir_ac / ir_dc if ir_dc != 0 else 0

        # 2. R-value (ratio of ratios) - fundamental for SpO2 calculation.
        #    Guard every divisor (the original could divide by a zero red_dc).
        if red_dc != 0 and ir_ac != 0 and ir_dc != 0:
            r_value = (red_ac / red_dc) / (ir_ac / ir_dc)
        else:
            r_value = 0
        spo2_features['r_value'] = r_value

        # 3. Perfusion index (%) per channel, reusing the guarded ratios
        spo2_features['perfusion_index_red'] = spo2_features['red_ac_dc_ratio'] * 100
        spo2_features['perfusion_index_ir'] = spo2_features['ir_ac_dc_ratio'] * 100

        # 4. Cross-channel signal quality
        spo2_features['signal_correlation'] = np.corrcoef(red_ppg, ir_ppg)[0, 1]

        # 5. Power in the cardiac frequency band (0.8-3.0 Hz = 48-180 BPM)
        red_fft = np.abs(np.fft.fft(red_ppg))
        ir_fft = np.abs(np.fft.fft(ir_ppg))

        freqs = np.fft.fftfreq(len(red_ppg), 1/self.fs)
        heart_freq_band = (freqs >= 0.8) & (freqs <= 3.0)

        spo2_features['red_heart_power'] = np.sum(red_fft[heart_freq_band])
        spo2_features['ir_heart_power'] = np.sum(ir_fft[heart_freq_band])

        self.spo2_features = spo2_features
        return spo2_features

    def estimate_spo2_classical(self):
        """Classical empirical SpO2 estimate: SpO2 = 110 - 25 * R, clipped to
        the [70, 100] range (linearized Beer-Lambert calibration)."""
        if not hasattr(self, 'spo2_features'):
            self.extract_spo2_features()

        r_value = self.spo2_features['r_value']
        spo2_classical = 110 - 25 * r_value
        spo2_classical = np.clip(spo2_classical, 70, 100)

        return spo2_classical
'gradient_boosting': GradientBoostingRegressor(\n                n_estimators=100, \n                random_state=42,\n                max_depth=6,\n                learning_rate=0.1\n            ),\n            'linear_regression': LinearRegression(),\n            'mlp_neural_network': MLPRegressor(\n                hidden_layer_sizes=(16, 8),  # Smaller network for small dataset\n                activation='relu',\n                solver='lbfgs',  # Better for small datasets\n                alpha=0.1,  # Higher regularization\n                max_iter=5000,  # Much more iterations to ensure convergence\n                random_state=42,\n                early_stopping=False,\n                tol=1e-6  # Lower tolerance for better convergence\n            ),\n            'support_vector_regression': SVR(\n                kernel='rbf',\n                C=100,\n                gamma='scale',\n                epsilon=0.1\n            )\n        }\n        if TF_AVAILABLE:\n            self.deep_model = OptimizedDeepSpO2Model()\n            print(f\"  Initialized {len(self.traditional_models)} traditional ML models + 1 optimized deep learning model\")\n        else:\n            print(f\"  Initialized {len(self.traditional_models)} traditional ML models\")\n    \n    def prepare_training_data(self, ppg_processors, spo2_values):\n        print(\"\\nPreparing training data...\")\n        \n        all_features = []\n        all_targets = []\n        \n        for processor, spo2 in zip(ppg_processors, spo2_values):\n            if hasattr(processor, 'beat_features') and not processor.beat_features.empty:\n                beat_features = processor.beat_features.mean()\n            else:\n                beat_features = pd.Series({\n                    'systolic_amplitude': 0.5,\n                    'pulse_width': 0.8,\n                    'rise_time': 0.1,\n                    'area_under_curve': 0.4,\n                    'peak_to_peak_amplitude': 1.0,\n                  
  'mean_amplitude': 0.0,\n                    'std_amplitude': 0.3\n                })\n            \n            if hasattr(processor, 'spo2_features'):\n                spo2_features = processor.spo2_features\n            else:\n                # Create dummy SpO2 features if none exist\n                spo2_features = {\n                    'red_ac_dc_ratio': 0.1,\n                    'ir_ac_dc_ratio': 0.12,\n                    'r_value': 0.8,\n                    'perfusion_index_red': 2.0,\n                    'perfusion_index_ir': 2.4,\n                    'signal_correlation': 0.9\n                }\n            combined_features = {**beat_features.to_dict(), **spo2_features}\n            \n            all_features.append(list(combined_features.values()))\n            all_targets.append(spo2)\n            \n        self.feature_names = list(combined_features.keys())\n        X = np.array(all_features)\n        y = np.array(all_targets)\n        \n        # Handle NaN values\n        X = np.nan_to_num(X)\n        \n        print(f\"  Training data prepared: {X.shape[0]} samples, {X.shape[1]} features\")\n        return X, y\n    \n    def train_all_models(self, X, y, test_size=0.2):\n        \"\"\"Train all ML models (traditional + deep learning) in one consolidated function\"\"\"\n        print(f\"\\n{'='*80}\")\n        print(\"CONSOLIDATED MACHINE LEARNING MODEL TRAINING\")\n        print(f\"{'='*80}\")\n        print(f\"Dataset: {X.shape[0]} samples, {X.shape[1]} features\")\n        \n        # Ensure feature_names is set\n        if self.feature_names is None:\n            self.feature_names = [f'feature_{i}' for i in range(X.shape[1])]\n        \n        X_train, X_test, y_train, y_test = train_test_split(\n            X, y, test_size=test_size, random_state=42)\n        X_train_scaled = self.scaler.fit_transform(X_train)\n        X_test_scaled = self.scaler.transform(X_test)\n        \n        all_results = {}\n        \n        # Auto-optimize MLP 
before training\n        print(f\"\\n{'-'*60}\")\n        print(\"AUTO-OPTIMIZING MLP FOR SMALL DATASET\")\n        print(f\"{'-'*60}\")\n        try:\n            optimized_results, best_mlp = self.optimize_mlp_hyperparameters(X, y)\n            print(f\"MLP optimization completed - RMSE improved to {optimized_results['test_rmse']:.3f}\")\n        except Exception as e:\n            print(f\"MLP optimization failed: {e}\")\n        \n        # Train traditional models\n        print(f\"\\n{'-'*60}\")\n        print(\"TRAINING TRADITIONAL ML MODELS\")\n        print(f\"{'-'*60}\")\n        \n        for name, model in self.traditional_models.items():\n            print(f\"\\nTraining {name.replace('_', ' ').title()}...\")\n            \n            try:\n                # Train model\n                model.fit(X_train_scaled, y_train)\n                \n                # Predictions\n                y_pred_train = model.predict(X_train_scaled)\n                y_pred_test = model.predict(X_test_scaled)\n                \n                # Metrics\n                all_results[name] = {\n                    'train_rmse': np.sqrt(mean_squared_error(y_train, y_pred_train)),\n                    'test_rmse': np.sqrt(mean_squared_error(y_test, y_pred_test)),\n                    'train_r2': r2_score(y_train, y_pred_train),\n                    'test_r2': r2_score(y_test, y_pred_test),\n                    'test_mae': mean_absolute_error(y_test, y_pred_test),\n                    'model_type': 'Traditional ML'\n                }\n                \n                self.trained_models[name] = model\n                \n                print(f\"    Test RMSE: {all_results[name]['test_rmse']:.3f}\")\n                print(f\"    Test R²: {all_results[name]['test_r2']:.3f}\")\n                print(f\"    Test MAE: {all_results[name]['test_mae']:.3f}\")\n                \n            except Exception as e:\n                print(f\"   Training failed: {e}\")\n                
all_results[name] = {\n                    'train_rmse': float('inf'),\n                    'test_rmse': float('inf'),\n                    'train_r2': -float('inf'),\n                    'test_r2': -float('inf'),\n                    'test_mae': float('inf'),\n                    'model_type': 'Traditional ML',\n                    'error': str(e)\n                }\n\n        if TF_AVAILABLE and self.deep_model:\n            print(\"TRAINING OPTIMIZED DEEP LEARNING MODEL\")\n            \n            try:\n                self.deep_model.feature_names = self.feature_names\n                deep_results = self.deep_model.train_model(X, y, test_size=test_size, epochs=200, batch_size=8)\n                deep_results['model_type'] = 'Deep Learning (Optimized)'\n                all_results['deep_neural_network'] = deep_results\n                \n                print(f\"    Optimized Deep Learning Training Completed\")\n                print(f\"    Test RMSE: {deep_results['test_rmse']:.3f}\")\n                print(f\"    Test R²: {deep_results['test_r2']:.3f}\")\n                print(f\"    Test MAE: {deep_results['test_mae']:.3f}\")\n                \n            except Exception as e:\n                print(f\"   Deep Learning Training Failed: {e}\")\n                all_results['deep_neural_network'] = {\n                    'train_rmse': float('inf'),\n                    'test_rmse': float('inf'),\n                    'train_r2': -float('inf'),\n                    'test_r2': -float('inf'),\n                    'test_mae': float('inf'),\n                    'model_type': 'Deep Learning (Optimized)',\n                    'error': str(e)\n                }\n        \n        self.training_results = all_results\n        \n        # Display comprehensive comparison\n        self._display_model_comparison(all_results)\n        \n        # Save all models\n        self.save_all_models()\n        \n        # Also save best model separately\n        best_model_name = 
self._get_best_model(all_results)\n        if best_model_name:\n            self.save_best_model(best_model_name)\n        \n        return all_results\n    \n    def _display_model_comparison(self, results):\n        \"\"\"Display comprehensive model comparison\"\"\"\n        print(\"COMPREHENSIVE MODEL PERFORMANCE COMPARISON\")\n        print(f\"{'Model':<25} {'Test RMSE':<12} {'Test R²':<10} {'Test MAE':<10} {'Type':<15} {'Status':<10}\")\n        \n        for model_name, metrics in results.items():\n            status = \"  Success\" if 'error' not in metrics else \" Failed\"\n            model_type = metrics.get('model_type', 'Unknown')\n            \n            if 'error' not in metrics:\n                print(f\"{model_name:<25} {metrics['test_rmse']:<12.3f} \"\n                      f\"{metrics['test_r2']:<10.3f} {metrics['test_mae']:<10.3f} \"\n                      f\"{model_type:<15} {status:<10}\")\n            else:\n                print(f\"{model_name:<25} {'N/A':<12} {'N/A':<10} {'N/A':<10} \"\n                      f\"{model_type:<15} {status:<10}\")\n        \n        # Model complexity analysis\n        \n        feature_count = len(self.feature_names) if self.feature_names else 0\n        print(f\"Linear Regression: ~{feature_count} parameters\")\n        print(f\"Random Forest: ~{100 * 10} decision nodes (approx)\")\n        print(f\"Gradient Boosting: ~{100 * 10} decision nodes (approx)\")\n        print(f\"MLP Neural Network: ~{100*feature_count + 50*100 + 25*50} parameters\")\n        if TF_AVAILABLE:\n            print(f\"Deep Neural Network: ~{256*feature_count + 128*256 + 64*128 + 32*64 + 32} parameters\")\n    \n    def _get_best_model(self, results):\n        \"\"\"Get the best performing model based on test RMSE\"\"\"\n        valid_results = {k: v for k, v in results.items() if 'error' not in v}\n        \n        if not valid_results:\n            print(\" No models trained successfully\")\n            return None\n        \n       
 best_model = min(valid_results.items(), key=lambda x: x[1]['test_rmse'])\n        print(f\"\\n Best Model: {best_model[0]} (RMSE: {best_model[1]['test_rmse']:.3f})\")\n        return best_model[0]\n    \n    def predict(self, features, model_name=None):\n        \"\"\"Make SpO2 prediction using trained model\"\"\"\n        if model_name is None:\n            # Use best model\n            model_name = self._get_best_model(self.training_results)\n        \n        if model_name not in self.trained_models:\n            raise ValueError(f\"Model {model_name} not trained yet\")\n            \n        features_scaled = self.scaler.transform([features])\n        prediction = self.trained_models[model_name].predict(features_scaled)[0]\n        \n        return np.clip(prediction, 70, 100)\n    \n    def save_best_model(self, model_name=None):\n        if model_name is None:\n            model_name = self._get_best_model(self.training_results)\n        \n        if model_name is None:\n            print(\" No model to save\")\n            return\n        \n        if model_name in self.trained_models:\n            save_path = os.path.join(KAGGLE_WORKING_PATH, f'best_spo2_model_{model_name}.pkl')\n            \n            model_data = {\n                'model': self.trained_models[model_name],\n                'scaler': self.scaler,\n                'feature_names': self.feature_names,\n                'model_name': model_name,\n                'training_results': self.training_results\n            }\n            \n            os.makedirs(os.path.dirname(save_path), exist_ok=True)\n            joblib.dump(model_data, save_path)\n            print(f\"  Best model ({model_name}) saved to {save_path}\")\n        \n        elif model_name == 'deep_neural_network' and self.deep_model:\n            self.deep_model.save_model()\n            print(f\" Best deep learning model saved\")\n    \n    def save_all_models(self):\n        \"\"\"Save all trained models 
individually\"\"\"\n        print(\"\\nSaving all trained models...\")\n        \n        for model_name in self.trained_models:\n            save_path = os.path.join(KAGGLE_WORKING_PATH, f'spo2_model_{model_name}.pkl')\n            \n            model_data = {\n                'model': self.trained_models[model_name],\n                'scaler': self.scaler,\n                'feature_names': self.feature_names,\n                'model_name': model_name,\n                'training_results': self.training_results\n            }\n            \n            os.makedirs(os.path.dirname(save_path), exist_ok=True)\n            joblib.dump(model_data, save_path)\n            print(f\"  {model_name} saved to {save_path}\")\n        \n        if TF_AVAILABLE and self.deep_model and hasattr(self.deep_model, 'model') and self.deep_model.model:\n            deep_save_path = os.path.join(KAGGLE_WORKING_PATH, 'spo2_model_deep_neural_network')\n            self.deep_model.save_model(deep_save_path)\n            print(f\"  deep_neural_network saved to {deep_save_path}.h5\")\n        \n        print(f\"  All models saved to {KAGGLE_WORKING_PATH}\")\n    \n    def load_specific_model(self, model_name):\n        \"\"\"Load a specific model by name\"\"\"\n        model_path = os.path.join(KAGGLE_WORKING_PATH, f'spo2_model_{model_name}.pkl')\n        \n        if not os.path.exists(model_path):\n            print(f\" Model file not found: {model_path}\")\n            return False\n        \n        try:\n            model_data = joblib.load(model_path)\n            self.trained_models[model_name] = model_data['model']\n            self.scaler = model_data['scaler']\n            self.feature_names = model_data['feature_names']\n            if 'training_results' in model_data:\n                self.training_results = model_data['training_results']\n            \n            print(f\"  {model_name} loaded successfully\")\n            return True\n            \n        except Exception as 
e:\n            print(f\" Error loading {model_name}: {e}\")\n            return False\n    def optimize_mlp_hyperparameters(self, X, y):\n        \"\"\"Optimize MLP hyperparameters using grid search\"\"\"\n        from sklearn.model_selection import GridSearchCV\n        import warnings\n        \n        print(\"\\n OPTIMIZING MLP HYPERPARAMETERS...\")\n        print(\"Note: Some convergence warnings are normal for small datasets during grid search\")\n        \n        # Temporarily suppress convergence warnings during grid search\n        with warnings.catch_warnings():\n            warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"sklearn\")\n            \n            # Define parameter grid for MLP optimization (optimized for small datasets)\n            param_grid = {\n                'hidden_layer_sizes': [\n                    (8,), (16,), (32,),\n                    (16, 8), (32, 16)  # Reduced combinations for faster search\n                ],\n                'activation': ['relu', 'tanh'],\n                'solver': ['lbfgs'],  # Focus on lbfgs for small datasets\n                'alpha': [0.01, 0.1, 0.5],\n                'learning_rate_init': [0.001, 0.01]  # Reduced learning rates\n            }\n            \n            # Create base MLP with higher max_iter\n            mlp_base = MLPRegressor(\n                max_iter=3000,  # Balanced for grid search\n                random_state=42,\n                early_stopping=True,\n                validation_fraction=0.1,\n                n_iter_no_change=30,\n                tol=1e-5\n            )\n            \n            # Scale data\n            X_scaled = self.scaler.fit_transform(X)\n            \n            # Grid search with cross-validation\n            grid_search = GridSearchCV(\n                mlp_base,\n                param_grid,\n                cv=3,\n                scoring='neg_mean_squared_error',\n                n_jobs=-1,\n                verbose=0  # Reduce 
verbosity\n            )\n            \n            print(\"Running optimized grid search for small datasets...\")\n            grid_search.fit(X_scaled, y)\n        \n        # Get best model\n        best_mlp = grid_search.best_estimator_\n        best_params = grid_search.best_params_\n        best_score = -grid_search.best_score_\n        \n        print(f\"\\n BEST MLP PARAMETERS FOUND:\")\n        for param, value in best_params.items():\n            print(f\"  {param}: {value}\")\n        print(f\"  Best CV RMSE: {np.sqrt(best_score):.3f}\")\n        \n        # Update the MLP model in traditional_models\n        self.traditional_models['mlp_neural_network'] = best_mlp\n        \n        # Train and evaluate the optimized model\n        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n        X_train_scaled = self.scaler.fit_transform(X_train)\n        X_test_scaled = self.scaler.transform(X_test)\n        \n        best_mlp.fit(X_train_scaled, y_train)\n        y_pred_test = best_mlp.predict(X_test_scaled)\n        \n        optimized_results = {\n            'test_rmse': np.sqrt(mean_squared_error(y_test, y_pred_test)),\n            'test_r2': r2_score(y_test, y_pred_test),\n            'test_mae': mean_absolute_error(y_test, y_pred_test),\n            'best_params': best_params\n        }\n        \n        print(f\"\\n OPTIMIZED MLP PERFORMANCE:\")\n        print(f\"  Test RMSE: {optimized_results['test_rmse']:.3f}\")\n        print(f\"  Test R²: {optimized_results['test_r2']:.3f}\")\n        print(f\"  Test MAE: {optimized_results['test_mae']:.3f}\")\n        \n        # Save optimized model\n        self.trained_models['mlp_neural_network_optimized'] = best_mlp\n        \n        return optimized_results, best_mlp\n\n    def train_optimized_mlp(self, X, y):\n        \"\"\"Train only the optimized MLP model\"\"\"      \n        optimized_results, best_mlp = self.optimize_mlp_hyperparameters(X, y)\n        \n     
   # Save the optimized model\n    def optimize_deep_learning_model(self, X, y):\n        \"\"\"Optimize deep learning model for small datasets\"\"\"\n        if not TF_AVAILABLE:\n            print(\" TensorFlow not available for deep learning optimization\")\n            return None\n            \n        print(f\"\\n OPTIMIZING DEEP LEARNING MODEL FOR SMALL DATASET...\")\n        print(f\"Dataset size: {X.shape[0]} samples, {X.shape[1]} features\")\n        \n        # Create optimized deep model for small datasets\n        optimized_deep_model = OptimizedDeepSpO2Model()\n        \n        results = optimized_deep_model.train_model(X, y, test_size=0.2, epochs=200)\n        \n        # Save optimized deep model\n        save_path = os.path.join(KAGGLE_WORKING_PATH, 'optimized_deep_model')\n        optimized_deep_model.save_model(save_path)\n        \n        print(f\"\\n OPTIMIZED DEEP LEARNING PERFORMANCE:\")\n        print(f\"  Test RMSE: {results['test_rmse']:.3f}\")\n        print(f\"  Test R²: {results['test_r2']:.3f}\")\n        print(f\"  Test MAE: {results['test_mae']:.3f}\")\n        print(f\" Optimized deep model saved to {save_path}.h5\")\n        \n        return results\n    \n    def load_model(self, load_path=None):\n        \"\"\"Load trained model from disk\"\"\"\n        if load_path is None:\n            load_path = os.path.join(KAGGLE_WORKING_PATH, 'best_spo2_model.pkl')\n        elif not os.path.isabs(load_path):\n            if os.path.exists(os.path.join(KAGGLE_WORKING_PATH, load_path)):\n                load_path = os.path.join(KAGGLE_WORKING_PATH, load_path)\n            \n        try:\n            model_data = joblib.load(load_path)\n            \n            self.trained_models[model_data['model_name']] = model_data['model']\n            self.scaler = model_data['scaler']\n            self.feature_names = model_data['feature_names']\n            if 'training_results' in model_data:\n                self.training_results = 
class DeepSpO2Model:
    """Deep Learning model for SpO2 estimation using TensorFlow/Keras.

    Wraps a small Keras regression network together with a StandardScaler so
    features are scaled consistently at train and predict time. Requires the
    module-level TF_AVAILABLE flag / keras imports to be set up elsewhere in
    the notebook.
    """
    
    def __init__(self):
        self.model = None                  # Keras model, created lazily in create_model()
        self.scaler = StandardScaler()     # fitted on training features in train_model()
        self.feature_names = None          # populated externally; saved with the model
        self.history = None                # Keras History from the last fit()
        
    def create_model(self, input_dim):
        """Create deep neural network architecture.

        Two dense blocks (32 -> 16) with batch norm and dropout, linear output.
        Raises ImportError when TensorFlow is unavailable.
        """
        if not TF_AVAILABLE:
            raise ImportError("TensorFlow not available. Install with: pip install tensorflow")
            
        # Optimized architecture for small datasets
        model = keras.Sequential([
            layers.Dense(32, activation='relu', input_shape=(input_dim,)),
            layers.BatchNormalization(),
            layers.Dropout(0.5),  # Higher dropout for regularization
            layers.Dense(16, activation='relu'),
            layers.BatchNormalization(),
            layers.Dropout(0.3),
            layers.Dense(1, activation='linear')
        ])
        model.compile(
            optimizer=keras.optimizers.Adam(learning_rate=0.0001),  # Lower learning rate
            loss='mse',
            metrics=['mae', 'mse']
        )
        
        self.model = model
        return model
    
    def train_model(self, X, y, test_size=0.2, epochs=100, batch_size=32):
        """Train the deep learning model.

        FIXME(review): the `epochs` and `batch_size` parameters are ignored —
        fit() below hard-codes epochs=200 and batch_size=min(8, len(X_train)).
        Callers passing different values get no effect; confirm intent before
        honoring the parameters (doing so would change training behavior).

        Returns a dict with train/test RMSE, R² and test MAE.
        """
        if not TF_AVAILABLE:
            raise ImportError("TensorFlow not available")
            
        print(f"Training Deep Learning model with {X.shape[0]} samples and {X.shape[1]} features...")
        
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=42)
        # Scaler is fit on the training split only, then applied to the test split.
        X_train_scaled = self.scaler.fit_transform(X_train)
        X_test_scaled = self.scaler.transform(X_test)
        
        if self.model is None:
            self.create_model(X_train_scaled.shape[1])
        
        callbacks = [
            keras.callbacks.EarlyStopping(
                monitor='val_loss',
                patience=15,
                restore_best_weights=True
            ),
            keras.callbacks.ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.5,
                patience=10,
                min_lr=1e-6
            )
        ]
        
        # NOTE: the held-out test split doubles as the validation set here, so
        # early stopping selects weights using the same data later reported as
        # "test" metrics — the test numbers are optimistically biased.
        self.history = self.model.fit(
            X_train_scaled, y_train,
            validation_data=(X_test_scaled, y_test),
            epochs=200,  # More epochs for small dataset
            batch_size=min(8, len(X_train)),  # Smaller batch size
            callbacks=callbacks,
            verbose=0  # Reduce verbosity
        )
        
        # evaluate() returns [loss(mse), mae, mse] per the compiled metrics list.
        train_loss = self.model.evaluate(X_train_scaled, y_train, verbose=0)
        test_loss = self.model.evaluate(X_test_scaled, y_test, verbose=0)
        y_pred_train = self.model.predict(X_train_scaled, verbose=0).flatten()
        y_pred_test = self.model.predict(X_test_scaled, verbose=0).flatten()
        
        results = {
            'train_rmse': np.sqrt(train_loss[0]),
            'test_rmse': np.sqrt(test_loss[0]),
            'train_r2': r2_score(y_train, y_pred_train),
            'test_r2': r2_score(y_test, y_pred_test),
            'test_mae': test_loss[1]  # index 1 is MAE per metrics=['mae', 'mse']
        }
        
        return results
    
    def predict(self, features):
        """Make prediction using trained model; output clipped to 70-100%."""
        if self.model is None:
            raise ValueError("Model not trained yet")
            
        features_scaled = self.scaler.transform([features])
        prediction = self.model.predict(features_scaled, verbose=0)[0][0]
        
        return np.clip(prediction, 70, 100)
    
    def save_model(self, save_path=None):
        """Save deep learning model (.h5) plus a joblib metadata sidecar (scaler, feature names)."""
        if self.model is None:
            raise ValueError("Model not trained yet")
        
        if save_path is None:
            save_path = os.path.join(KAGGLE_WORKING_PATH, 'spo2_deep_model')
        elif not os.path.isabs(save_path):
            save_path = os.path.join(KAGGLE_WORKING_PATH, save_path)
            
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        
        # Save TensorFlow model
        self.model.save(f"{save_path}.h5")
        
        # Save scaler and metadata
        model_data = {
            'scaler': self.scaler,
            'feature_names': self.feature_names
        }
        joblib.dump(model_data, f"{save_path}_metadata.pkl")
        
        print(f"Deep model saved to {save_path}.h5")
class OptimizedDeepSpO2Model:
    """Optimized Deep Learning model for small datasets.

    Deliberately tiny network (single 8-unit hidden layer, L2 + heavy dropout)
    with a short training budget, intended for datasets on the order of tens
    of samples.
    """
    
    def __init__(self):
        self.model = None                  # Keras model, created lazily
        self.scaler = StandardScaler()     # fitted in train_model()
        self.feature_names = None          # populated externally; saved alongside the model
        self.history = None                # Keras History from the last fit()
        
    def create_optimized_model(self, input_dim):
        """Create optimized deep neural network for small datasets."""
        if not TF_AVAILABLE:
            raise ImportError("TensorFlow not available")
            
        # Ultra-simple architecture for very small datasets (20 samples)
        model = keras.Sequential([
            # Single hidden layer with heavy regularization
            layers.Dense(8, activation='relu', input_shape=(input_dim,),
                        kernel_regularizer=keras.regularizers.l2(0.01)),
            layers.Dropout(0.7),  # Very high dropout
            
            # Output layer
            layers.Dense(1, activation='linear')
        ])
        
        # Very conservative training settings
        model.compile(
            optimizer=keras.optimizers.Adam(learning_rate=0.001),  # Higher learning rate for faster convergence
            loss='mse',
            metrics=['mae', 'mse']
        )
        
        self.model = model
        return model
    
    def train_model(self, X, y, test_size=0.2, epochs=200, batch_size=8):
        """Train optimized model for small datasets.

        FIXME(review): the `epochs` and `batch_size` parameters are ignored —
        fit() below hard-codes epochs=30 and batch_size=min(2, len(X_train)),
        so the caller's epochs=200 has no effect. Confirm intent before
        honoring the parameters (doing so would change training behavior).

        Returns a dict with train/test RMSE, R², test MAE and epochs_trained.
        """
        if not TF_AVAILABLE:
            raise ImportError("TensorFlow not available")
            
        print(f"Training OPTIMIZED Deep Learning model with {X.shape[0]} samples and {X.shape[1]} features...")
        
        # Split data
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=42)
        
        # Scale features
        X_train_scaled = self.scaler.fit_transform(X_train)
        X_test_scaled = self.scaler.transform(X_test)
        
        # Create optimized model
        if self.model is None:
            self.create_optimized_model(X_train_scaled.shape[1])
        
        # Very conservative callbacks for tiny datasets
        callbacks = [
            keras.callbacks.EarlyStopping(
                monitor='val_loss',
                patience=10,  # Stop early to prevent overfitting
                restore_best_weights=True,
                min_delta=0.01  # Larger threshold
            )
        ]
        
        # Train with very conservative settings
        # NOTE: the test split is used as the validation set, so early stopping
        # peeks at the data later reported as "test" metrics.
        self.history = self.model.fit(
            X_train_scaled, y_train,
            validation_data=(X_test_scaled, y_test),
            epochs=30,  # Much fewer epochs
            batch_size=min(2, len(X_train)),  # Tiny batch size
            callbacks=callbacks,
            verbose=0  # Reduce verbosity
        )
        
        # Evaluate — evaluate() returns [loss(mse), mae, mse] per the compiled metrics.
        train_loss = self.model.evaluate(X_train_scaled, y_train, verbose=0)
        test_loss = self.model.evaluate(X_test_scaled, y_test, verbose=0)
        
        # Predictions for metrics
        y_pred_train = self.model.predict(X_train_scaled, verbose=0).flatten()
        y_pred_test = self.model.predict(X_test_scaled, verbose=0).flatten()
        
        results = {
            'train_rmse': np.sqrt(train_loss[0]),
            'test_rmse': np.sqrt(test_loss[0]),
            'train_r2': r2_score(y_train, y_pred_train),
            'test_r2': r2_score(y_test, y_pred_test),
            'test_mae': test_loss[1],  # index 1 is MAE
            'epochs_trained': len(self.history.history['loss'])
        }
        
        return results
    
    def predict(self, features):
        """Make prediction using optimized model; output clipped to 70-100%."""
        if self.model is None:
            raise ValueError("Model not trained yet")
            
        features_scaled = self.scaler.transform([features])
        prediction = self.model.predict(features_scaled, verbose=0)[0][0]
        
        return np.clip(prediction, 70, 100)
    
    def save_model(self, save_path=None):
        """Save optimized deep learning model (.h5) plus metadata sidecar (scaler, names, history)."""
        if self.model is None:
            raise ValueError("Model not trained yet")
        
        if save_path is None:
            save_path = os.path.join(KAGGLE_WORKING_PATH, 'optimized_deep_spo2_model')
        elif not os.path.isabs(save_path):
            save_path = os.path.join(KAGGLE_WORKING_PATH, save_path)
            
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        
        # Save TensorFlow model
        self.model.save(f"{save_path}.h5")
        
        # Save scaler and metadata
        model_data = {
            'scaler': self.scaler,
            'feature_names': self.feature_names,
            'training_history': self.history.history if self.history else None
        }
        joblib.dump(model_data, f"{save_path}_metadata.pkl")
        
        print(f"Optimized deep model saved to {save_path}.h5")
os.path.exists(\"/kaggle/input\"):\n            for item in os.listdir(\"/kaggle/input\"):\n                print(f\"  /kaggle/input/{item}\")\n        return None\n    \n    csv_files = []\n    for file in os.listdir(KAGGLE_INPUT_PATH):\n        if file.endswith('.csv'):\n            csv_files.append(os.path.join(KAGGLE_INPUT_PATH, file))\n    \n    if not csv_files:\n        print(\"No CSV files found in dataset\")\n        return None\n    \n    print(f\"Found {len(csv_files)} CSV files:\")\n    for file in csv_files:\n        print(f\"  {os.path.basename(file)}\")\n    \n    results = []\n    for file_path in csv_files:\n        print(f\"\\nProcessing: {os.path.basename(file_path)}\")\n        result = process_ppg_file(file_path)\n        if result:\n            results.append(result)\n    \n    if results:\n        output_file = os.path.join(KAGGLE_WORKING_PATH, 'ppg_analysis_results.json')\n        with open(output_file, 'w') as f:\n            json.dump(results, f, indent=2)\n        print(f\"\\nCombined results saved to: {output_file}\")\n    \n    return results\n\n\ndef process_ppg_file(file_path, model_path=None, output_path=None):\n    \"\"\"Process a single PPG file and estimate SpO2\"\"\"\n    print(f\"Processing PPG file: {file_path}\")\n    \n    file_ext = Path(file_path).suffix.lower()\n    if file_ext == '.csv':\n        data_type = 'csv'\n    elif file_ext == '.mat' and MATLAB_AVAILABLE:\n        data_type = 'mat'\n    elif file_ext in ['.dat', '.hea'] and WFDB_AVAILABLE:\n        data_type = 'wfdb'\n    else:\n        print(f\"Unsupported file type or missing dependencies: {file_ext}\")\n        return None\n    \n    processor = PPGProcessor(sampling_rate=125)\n    \n    try:\n        # Load data\n        ppg_signal, time_vector = processor.load_data(file_path, data_type=data_type)\n        \n        if ppg_signal is None:\n            print(\"Failed to load PPG data\")\n            return None\n        \n        processed_signal = 
def process_ppg_file(file_path, model_path=None, output_path=None):
    """Process a single PPG file and estimate SpO2.

    Loads the signal (CSV / MAT / WFDB depending on extension and available
    libraries), runs preprocessing + beat detection + feature extraction,
    computes a classical SpO2 estimate, optionally an ML estimate when a
    trained model bundle is provided, writes a JSON results file, and returns
    the results dict. Returns None on any failure (unsupported type, load
    failure, fewer than 3 beats, or an exception during processing).
    """
    print(f"Processing PPG file: {file_path}")
    
    # Pick a loader based on the file extension and optional dependencies.
    file_ext = Path(file_path).suffix.lower()
    if file_ext == '.csv':
        data_type = 'csv'
    elif file_ext == '.mat' and MATLAB_AVAILABLE:
        data_type = 'mat'
    elif file_ext in ['.dat', '.hea'] and WFDB_AVAILABLE:
        data_type = 'wfdb'
    else:
        print(f"Unsupported file type or missing dependencies: {file_ext}")
        return None
    
    processor = PPGProcessor(sampling_rate=125)
    
    try:
        # Load data
        ppg_signal, time_vector = processor.load_data(file_path, data_type=data_type)
        
        if ppg_signal is None:
            print("Failed to load PPG data")
            return None
        
        processed_signal = processor.preprocess_signal()
        peaks = processor.detect_beats()
        
        # Too few beats makes amplitude/interval statistics meaningless.
        if len(peaks) < 3:
            print("Insufficient beats detected for reliable analysis")
            return None
        
        beat_features = processor.extract_beat_features()
        spo2_features = processor.extract_spo2_features()
        
        spo2_classical = processor.estimate_spo2_classical()
        
        # Optional ML estimate: only when a saved model bundle is supplied.
        spo2_ml = None
        if model_path and os.path.exists(model_path):
            ml_manager = MLModelManager()
            if ml_manager.load_model(model_path):
                # Prepare features for ML model — align to the training-time
                # feature order, defaulting missing features to 0.
                combined_features = {**beat_features.mean().to_dict(), **spo2_features}
                feature_vector = [combined_features.get(name, 0) for name in ml_manager.feature_names]
                spo2_ml = ml_manager.predict(feature_vector)
        
        # Heart rate from mean of per-interval instantaneous rates.
        if len(peaks) > 1:
            rr_intervals = np.diff(peaks) / processor.fs
            heart_rate = np.mean(60 / rr_intervals)
        else:
            heart_rate = None
        
        results = {
            'file_path': str(file_path),
            'processing_timestamp': pd.Timestamp.now().isoformat(),
            'signal_length_seconds': len(ppg_signal) / processor.fs,
            'sampling_rate': processor.fs,
            'beats_detected': len(peaks),
            'heart_rate_bpm': heart_rate,
            'spo2_classical': spo2_classical,
            'spo2_ml': spo2_ml,
            'signal_quality_metrics': {
                # Rough SNR proxy: signal variance over first-difference variance.
                'mean_amplitude': float(np.mean(processed_signal)),
                'std_amplitude': float(np.std(processed_signal)),
                'snr_estimate': float(np.var(processed_signal) / np.var(np.diff(processed_signal)))
            },
            'beat_features_summary': {
                col: float(beat_features[col].mean()) 
                for col in beat_features.select_dtypes(include=[np.number]).columns
            } if not beat_features.empty else {},
            'spo2_features': {k: float(v) for k, v in spo2_features.items()}
        }
        
        if output_path:
            # Relative output paths resolve under the Kaggle working directory.
            if not os.path.isabs(output_path):
                output_path = os.path.join(KAGGLE_WORKING_PATH, output_path)
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
            
            with open(output_path, 'w') as f:
                json.dump(results, f, indent=2)
            print(f"Results saved to {output_path}")
        else:
            # Auto-save to working directory
            # NOTE(review): the .csv->.json replace only renames CSV inputs;
            # .mat/.dat inputs keep their extension in the output name.
            filename = f"ppg_results_{os.path.basename(file_path).replace('.csv', '.json')}"
            output_path = os.path.join(KAGGLE_WORKING_PATH, filename)
            with open(output_path, 'w') as f:
                json.dump(results, f, indent=2)
            print(f"Results auto-saved to {output_path}")
        
        # Print summary
        print("PROCESSING RESULTS")
        print(f"Signal Duration: {results['signal_length_seconds']:.1f} seconds")
        print(f"Beats Detected: {results['beats_detected']}")
        if heart_rate:
            print(f"Heart Rate: {heart_rate:.1f} BPM")
        print(f"SpO2 (Classical): {spo2_classical:.1f}%")
        if spo2_ml:
            print(f"SpO2 (ML): {spo2_ml:.1f}%")
        print(f"Signal Quality (SNR): {results['signal_quality_metrics']['snr_estimate']:.1f}")
        
        return results
        
    except Exception as e:
        # Broad catch by design: a single bad file should not abort batch runs.
        print(f"Error processing file: {e}")
        return None
def run_kaggle_pipeline():
    """Main pipeline for Kaggle environment.

    Step 1 processes every CSV in the Kaggle dataset; Step 2 (only with >= 3
    results) trains ML models on synthetic signals regenerated from each
    file's summary statistics; Step 3 writes a summary report. Falls back to
    the simulated-data demo when no files could be processed.
    """
    print("PPG FEATURE EXTRACTION AND SPO2 ESTIMATION")
    print("Environment Check:")
    print(f"  Dataset path exists: {os.path.exists(KAGGLE_INPUT_PATH)}")
    print(f"  Working directory: {KAGGLE_WORKING_PATH}")
    print(f"  WFDB available: {WFDB_AVAILABLE}")
    print(f"  MATLAB support: {MATLAB_AVAILABLE}")
    print(f"  TensorFlow available: {TF_AVAILABLE}")
    
    print("\nStep 1: Processing Kaggle PPG Dataset")
    results = process_kaggle_dataset()
    
    if not results:
        print("No data processed. Falling back to demo mode.")
        demonstrate_pipeline()
        return
    
    if len(results) >= 3:
        print(f"\nStep 2: Training ML models with {len(results)} samples")
        try:
            # Create training data from processed results
            # NOTE(review): training signals are re-synthesized (sine + dicrotic
            # component + noise) from each file's heart rate and duration — the
            # original PPG waveforms are NOT reused here. Confirm this is intended.
            processors = []
            spo2_values = []
            
            for result in results[:min(20, len(results))]:  # Use up to 20 files for training
                # Create a processor with features from the result
                processor = PPGProcessor(sampling_rate=125)
                
                duration = result.get('signal_length_seconds', 30)
                t = np.linspace(0, duration, int(125 * duration))
                # NOTE(review): .get defaults only cover a MISSING key; if
                # 'heart_rate_bpm' is present with value None, hr / 60 below
                # would raise — verify upstream always stores a number here.
                hr = result.get('heart_rate_bpm', 75)
                
                pulse_freq = hr / 60
                ppg = np.sin(2 * np.pi * pulse_freq * t)
                ppg += 0.3 * np.sin(2 * np.pi * pulse_freq * t + np.pi/3)
                ppg += 0.05 * np.random.randn(len(t))
                
                processor.raw_signal = ppg
                processor.time = t
                processor.preprocess_signal()
                processor.detect_beats()
                processor.extract_beat_features()
                processor.extract_spo2_features()
                
                processors.append(processor)
                spo2_values.append(result.get('spo2_classical', 95))
            
            ml_manager = MLModelManager()
            X, y = ml_manager.prepare_training_data(processors, spo2_values)
            training_results = ml_manager.train_all_models(X, y)
            
            print(f"\n  Model training completed successfully")
            
        except Exception as e:
            print(f"ERROR: Model training failed: {e}")
            print("Continuing with classical SpO2 estimation only")
    
    print("\nStep 3: Generating Summary Report")
    generate_summary_report(results)
    print(f"  Processed {len(results)} PPG files")
    print(f"  Results saved to {KAGGLE_WORKING_PATH}")
    print("  Summary report generated")
    # NOTE(review): printed unconditionally — also appears when len(results) < 3
    # and no training ran, or when training failed above.
    print("  Models trained and saved")
{}).get('snr_estimate', 0) for r in results]),\n            'mean_amplitude': np.mean([r.get('signal_quality_metrics', {}).get('mean_amplitude', 0) for r in results])\n        },\n        'file_details': [\n            {\n                'filename': os.path.basename(r.get('file_path', '')),\n                'duration_seconds': r.get('signal_length_seconds', 0),\n                'beats_detected': r.get('beats_detected', 0),\n                'heart_rate_bpm': r.get('heart_rate_bpm', 0),\n                'spo2_classical': r.get('spo2_classical', 0)\n            }\n            for r in results\n        ]\n    }\n    \n    summary_path = os.path.join(KAGGLE_WORKING_PATH, 'ppg_analysis_summary.json')\n    with open(summary_path, 'w') as f:\n        json.dump(summary, f, indent=2)\n    \n    print(f\"Summary report saved to: {summary_path}\")\n    \n    print(\"\\nKEY STATISTICS:\")\n    print(f\"  Files processed: {summary['total_files_processed']}\")\n    print(f\"  Successful analyses: {summary['successful_analyses']}\")\n    if summary['average_heart_rate'] > 0:\n        print(f\"  Average heart rate: {summary['average_heart_rate']:.1f} BPM\")\n    if summary['average_spo2_classical'] > 0:\n        print(f\"  Average SpO2: {summary['average_spo2_classical']:.1f}%\")\n    print(f\"  Average signal quality (SNR): {summary['signal_quality_stats']['mean_snr']:.1f}\")\n\n\ndef demonstrate_pipeline():\n    \n    processor = PPGProcessor(sampling_rate=125)\n    \n    ppg_signal, time_vector = processor.load_data(data_type='simulated')\n    \n    processed_signal = processor.preprocess_signal()\n    \n    peaks = processor.detect_beats()\n    \n    beat_features = processor.extract_beat_features()\n    spo2_features = processor.extract_spo2_features()\n    \n    spo2_classical = processor.estimate_spo2_classical()\n    \n    print(f\"\\nClassical SpO2 Estimation: {spo2_classical:.1f}%\")\n    \n    print(f\"\\nExtracted Features Summary:\")\n    print(f\"Beat-level features: 
{len(beat_features)} beats analyzed\")\n    print(f\"SpO2-specific features: {len(spo2_features)} features\")\n    \n    if not beat_features.empty:\n        print(f\"\\nKey Beat Features (mean ± std):\")\n        numeric_features = beat_features.select_dtypes(include=[np.number])\n        for feature in ['systolic_amplitude', 'pulse_width', 'rise_time']:\n            if feature in numeric_features.columns:\n                mean_val = numeric_features[feature].mean()\n                std_val = numeric_features[feature].std()\n                print(f\"  {feature}: {mean_val:.3f} ± {std_val:.3f}\")\n    \n    print(f\"\\nSpO2 Features:\")\n    for key, value in spo2_features.items():\n        print(f\"  {key}: {value:.3f}\")\n    \n    return processor\n\n\ndef train_comprehensive_ml_demo():\n    \"\"\"Demonstrate comprehensive ML model training\"\"\"\n    \n    processors = []\n    spo2_ground_truth = []\n    \n    print(\"Generating diverse training datasets...\")\n    \n    for i in range(100):\n        processor = PPGProcessor(sampling_rate=125)\n        \n        t = np.linspace(0, 30, 125 * 30)\n        heart_rate = np.random.uniform(60, 100)\n        pulse_freq = heart_rate / 60\n        ppg = np.sin(2 * np.pi * pulse_freq * t)\n        \n        dicrotic_strength = np.random.uniform(0.2, 0.4)\n        ppg += dicrotic_strength * np.sin(2 * np.pi * pulse_freq * t + np.pi/3)\n        \n        noise_level = np.random.uniform(0.03, 0.08)\n        ppg += noise_level * np.random.randn(len(t))\n        \n        signal_quality = 1 - noise_level\n        perfusion_quality = np.random.uniform(0.7, 1.0)\n        base_spo2 = np.random.normal(97, 2)\n        \n        spo2_true = base_spo2 + 2 * signal_quality + 1 * perfusion_quality - abs(heart_rate - 75) * 0.02\n        spo2_true = np.clip(spo2_true, 85, 100)\n        \n        processor.raw_signal = ppg\n        processor.time = t\n        processor.spo2_true = spo2_true\n        \n        
processor.preprocess_signal()\n        processor.detect_beats()\n        processor.extract_beat_features()\n        processor.extract_spo2_features()\n        \n        processors.append(processor)\n        spo2_ground_truth.append(spo2_true)\n    \n    ml_manager = MLModelManager()\n    X, y = ml_manager.prepare_training_data(processors, spo2_ground_truth)\n    \n    print(f\"\\nTraining data prepared: {X.shape[0]} samples, {X.shape[1]} features\")\n    \n    results = ml_manager.train_all_models(X, y)\n    \n    return ml_manager, results\n","metadata":{"id":"DC3FcVeszahJ","trusted":true,"execution":{"iopub.status.busy":"2025-08-22T21:22:36.399774Z","iopub.execute_input":"2025-08-22T21:22:36.400067Z","iopub.status.idle":"2025-08-22T21:22:36.421382Z","shell.execute_reply.started":"2025-08-22T21:22:36.400031Z","shell.execute_reply":"2025-08-22T21:22:36.420663Z"}},"outputs":[],"execution_count":8},{"cell_type":"markdown","source":"# MAIN EXECUTION","metadata":{}},{"cell_type":"code","source":"def main():\n    \"\"\"Main function for command line interface\"\"\"\n    parser = argparse.ArgumentParser(description='PPG Feature Extraction and SpO2 Estimation - Reorganized')\n    parser.add_argument('--input', '-i', type=str, help='Input PPG file path')\n    parser.add_argument('--output', '-o', type=str, help='Output results file path')\n    parser.add_argument('--model', '-m', type=str, help='Path to trained ML model')\n    # FIX: the branches below read args.kaggle / args.demo / args.train, but these\n    # flags were never defined, so every CLI invocation raised AttributeError.\n    parser.add_argument('--kaggle', action='store_true', help='Process the Kaggle PPG dataset')\n    parser.add_argument('--demo', action='store_true', help='Run demonstration with simulated data')\n    parser.add_argument('--train', action='store_true', help='Train ML models')\n\n    \n    args = parser.parse_args()\n    \n    if args.kaggle:\n        print(\"Processing Kaggle PPG dataset...\")\n        run_kaggle_pipeline()\n        \n    elif args.demo:\n        print(\"Running demonstration with simulated data...\")\n        demonstrate_pipeline()\n        \n    elif args.train:\n        print(\"Training ML models...\")\n        train_comprehensive_ml_demo()\n        \n    elif args.input:\n        if not os.path.exists(args.input):\n            print(f\"Input file not found: {args.input}\")\n            return\n            
\n        results = process_ppg_file(args.input, args.model, args.output)\n        \n        if results is None:\n            print(\"Processing failed\")\n            return\n            \n    else:\n        print(\"No action specified. Use --help for usage information\")\n","metadata":{"id":"Qr9fgxDJzVQu","trusted":true,"execution":{"iopub.status.busy":"2025-08-22T21:22:36.422287Z","iopub.execute_input":"2025-08-22T21:22:36.422594Z","iopub.status.idle":"2025-08-22T21:22:36.439921Z","shell.execute_reply.started":"2025-08-22T21:22:36.422566Z","shell.execute_reply":"2025-08-22T21:22:36.439088Z"}},"outputs":[],"execution_count":9},{"cell_type":"code","source":"def run_notebook_pipeline():\n    \"\"\"Notebook entry point: run the Kaggle pipeline when /kaggle exists, otherwise a local demo plus ML-training demo; any error falls back to the basic demo.\"\"\"\n    try:\n        # Check if we're in Kaggle environment\n        if os.path.exists(\"/kaggle\"):\n            print(\"Detected Kaggle environment - running Kaggle pipeline...\")\n            run_kaggle_pipeline()\n        else:\n            print(\"Starting PPG Feature Extraction and SpO2 Estimation Pipeline...\")\n            \n            print(\"STEP 1: BASIC PIPELINE DEMONSTRATION\")\n            processor = demonstrate_pipeline()\n            \n            print(\"STEP 2: COMPREHENSIVE ML MODEL TRAINING\")\n            ml_manager, training_results = train_comprehensive_ml_demo()\n            \n\n            print(\"PIPELINE SUMMARY\")\n            print(\"  PPG signal preprocessing and noise removal\")\n            print(\"  Beat detection and segmentation\")\n            print(\"  Feature extraction (morphological, temporal, spectral)\")\n            print(\"  Classical SpO2 estimation using R-value\")\n            print(\"  CONSOLIDATED machine learning models\")\n            print(\"  Deep learning models (if TensorFlow available)\")\n            print(\"  Model comparison and selection\")\n            print(\"  Model persistence (save/load functionality)\")\n            print(\"  Kaggle environment optimization\")\n            \n            print(f\"\\nFiles 
Generated:\")\n            print(f\"  Best trained ML model saved to {KAGGLE_WORKING_PATH}\")\n            print(f\"  Comprehensive performance metrics\")\n            print(f\"  Model comparison results\")\n            \n    except Exception as e:\n        print(f\"Error in pipeline execution: {e}\")\n        print(\"Falling back to basic demo...\")\n        demonstrate_pipeline()\n\nif __name__ == \"__main__\":\n    try:\n        # Heuristic: Jupyter kernels pass '-f <connection_file>' in argv, so '-f' implies a notebook environment\n        if 'ipykernel' in sys.modules or 'google.colab' in sys.modules or any('-f' in str(arg) for arg in sys.argv):\n            print(\"Detected notebook environment - running pipeline...\")\n            run_notebook_pipeline()\n        elif len(sys.argv) > 1 and not any('-f' in str(arg) for arg in sys.argv):\n            main()\n        else:\n            # Default execution\n            run_notebook_pipeline()\n    except Exception as e:\n        print(f\"Error in main execution: {e}\")\n        print(\"Running basic pipeline...\")\n        run_notebook_pipeline()\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-08-22T21:22:36.441862Z","iopub.execute_input":"2025-08-22T21:22:36.442405Z","iopub.status.idle":"2025-08-22T21:24:46.617045Z","shell.execute_reply.started":"2025-08-22T21:22:36.442360Z","shell.execute_reply":"2025-08-22T21:24:46.616381Z"}},"outputs":[{"name":"stdout","text":"Detected notebook environment - running pipeline...\nDetected Kaggle environment - running Kaggle pipeline...\nPPG FEATURE EXTRACTION AND SPO2 ESTIMATION\nEnvironment Check:\n  Dataset path exists: True\n  Working directory: /kaggle/working\n  WFDB available: False\n  MATLAB support: True\n  TensorFlow available: True\n\nStep 1: Processing Kaggle PPG Dataset\nProcessing Kaggle PPG Dataset...\nDataset path: /kaggle/input/yuhuty7/ppg_dataset_full/csv\nFound 67 CSV files:\n  s21_sit.csv\n  s12_run.csv\n  s20_sit.csv\n  s8_walk.csv\n  s6_sit.csv\n  s5_sit.csv\n  s15_run.csv\n  s7_sit.csv\n  s14_run.csv\n  s9_sit.csv\n  s13_walk.csv\n  
s17_walk.csv\n  s22_sit.csv\n  s4_walk.csv\n  s17_sit.csv\n  s14_sit.csv\n  s19_walk.csv\n  s21_run.csv\n  s9_walk.csv\n  s18_run.csv\n  s10_run.csv\n  s22_walk.csv\n  s9_run.csv\n  s5_run.csv\n  s15_walk.csv\n  s7_walk.csv\n  s3_run.csv\n  s2_walk.csv\n  s18_sit.csv\n  s16_run.csv\n  s3_sit.csv\n  s10_sit.csv\n  s2_sit.csv\n  s2_run.csv\n  s8_sit.csv\n  s7_run.csv\n  s11_sit.csv\n  s19_sit.csv\n  s3_walk.csv\n  s18_walk.csv\n  s10_walk.csv\n  s16_sit.csv\n  s11_run.csv\n  s21_walk.csv\n  s15_sit.csv\n  s22_run.csv\n  s13_sit.csv\n  s1_run.csv\n  subjects_info.csv\n  s11_walk.csv\n  s1_walk.csv\n  s4_run.csv\n  s4_sit.csv\n  s8_run.csv\n  s6_walk.csv\n  s5_walk.csv\n  s14_walk.csv\n  s17_run.csv\n  s16_walk.csv\n  s20_walk.csv\n  s12_sit.csv\n  s1_sit.csv\n  s20_run.csv\n  s13_run.csv\n  s6_run.csv\n  s19_run.csv\n  s12_walk.csv\n\nProcessing: s21_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s21_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 244938 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 244938\n\nDetecting heartbeats...\nDetected 3 beats\nAverage heart rate: 100.2 ± 36.1 BPM\n\nExtracting beat-level features...\nExtracted features for 2 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s21_sit.json\nPROCESSING RESULTS\nSignal Duration: 1959.5 seconds\nBeats Detected: 3\nHeart Rate: 100.2 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 3256.5\n\nProcessing: s12_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s12_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242336 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 242336\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 68.5 ± 21.8 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s12_run.json\nPROCESSING RESULTS\nSignal Duration: 1938.7 seconds\nBeats Detected: 6\nHeart Rate: 68.5 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 532.2\n\nProcessing: s20_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s20_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 247724 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 247724\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s8_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s8_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243647 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243647\n\nDetecting heartbeats...\nDetected 5 beats\nAverage heart rate: 85.2 ± 34.7 BPM\n\nExtracting beat-level features...\nExtracted features for 4 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s8_walk.json\nPROCESSING RESULTS\nSignal Duration: 1949.2 seconds\nBeats Detected: 5\nHeart Rate: 85.2 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 1173.1\n\nProcessing: s6_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s6_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 249502 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 249502\n\nDetecting heartbeats...\nDetected 3 beats\nAverage heart rate: 93.5 ± 31.5 BPM\n\nExtracting beat-level features...\nExtracted features for 2 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s6_sit.json\nPROCESSING RESULTS\nSignal Duration: 1996.0 seconds\nBeats Detected: 3\nHeart Rate: 93.5 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 6473.6\n\nProcessing: s5_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s5_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 247196 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 247196\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s15_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s15_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 247497 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 247497\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 89.7 ± 38.1 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s15_run.json\nPROCESSING RESULTS\nSignal Duration: 1980.0 seconds\nBeats Detected: 6\nHeart Rate: 89.7 BPM\nSpO2 (Classical): 84.8%\nSignal Quality (SNR): 6429.4\n\nProcessing: s7_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s7_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 253006 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 253006\n\nDetecting heartbeats...\nDetected 2 beats\nAverage heart rate: 67.6 ± 0.0 BPM\nInsufficient beats detected for reliable analysis\n\nProcessing: s14_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s14_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243811 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243811\n\nDetecting heartbeats...\nDetected 7 beats\nAverage heart rate: 87.2 ± 29.5 BPM\n\nExtracting beat-level features...\nExtracted features for 6 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s14_run.json\nPROCESSING RESULTS\nSignal Duration: 1950.5 seconds\nBeats Detected: 7\nHeart Rate: 87.2 BPM\nSpO2 (Classical): 84.8%\nSignal Quality (SNR): 10946.0\n\nProcessing: s9_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s9_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243417 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243417\n\nDetecting heartbeats...\nDetected 5 beats\nAverage heart rate: 58.6 ± 4.9 BPM\n\nExtracting beat-level features...\nExtracted features for 4 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s9_sit.json\nPROCESSING RESULTS\nSignal Duration: 1947.3 seconds\nBeats Detected: 5\nHeart Rate: 58.6 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 5206.1\n\nProcessing: s13_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s13_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243439 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 243439\n\nDetecting heartbeats...\nDetected 5 beats\nAverage heart rate: 77.8 ± 33.2 BPM\n\nExtracting beat-level features...\nExtracted features for 4 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s13_walk.json\nPROCESSING RESULTS\nSignal Duration: 1947.5 seconds\nBeats Detected: 5\nHeart Rate: 77.8 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 6514.0\n\nProcessing: s17_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s17_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242541 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242541\n\nDetecting heartbeats...\nDetected 4 beats\nAverage heart rate: 75.8 ± 30.8 BPM\n\nExtracting beat-level features...\nExtracted features for 3 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s17_walk.json\nPROCESSING RESULTS\nSignal Duration: 1940.3 seconds\nBeats Detected: 4\nHeart Rate: 75.8 BPM\nSpO2 (Classical): 84.7%\nSignal Quality (SNR): 7426.1\n\nProcessing: s22_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s22_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242634 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 242634\n\nDetecting heartbeats...\nDetected 5 beats\nAverage heart rate: 76.6 ± 31.0 BPM\n\nExtracting beat-level features...\nExtracted features for 4 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s22_sit.json\nPROCESSING RESULTS\nSignal Duration: 1941.1 seconds\nBeats Detected: 5\nHeart Rate: 76.6 BPM\nSpO2 (Classical): 84.8%\nSignal Quality (SNR): 7343.8\n\nProcessing: s4_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s4_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 253201 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 253201\n\nDetecting heartbeats...\nDetected 3 beats\nAverage heart rate: 89.5 ± 25.9 BPM\n\nExtracting beat-level features...\nExtracted features for 2 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s4_walk.json\nPROCESSING RESULTS\nSignal Duration: 2025.6 seconds\nBeats Detected: 3\nHeart Rate: 89.5 BPM\nSpO2 (Classical): 84.8%\nSignal Quality (SNR): 11254.3\n\nProcessing: s17_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s17_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242937 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242937\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s14_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s14_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 248073 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 248073\n\nDetecting heartbeats...\nDetected 3 beats\nAverage heart rate: 71.5 ± 14.7 BPM\n\nExtracting beat-level features...\nExtracted features for 2 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s14_sit.json\nPROCESSING RESULTS\nSignal Duration: 1984.6 seconds\nBeats Detected: 3\nHeart Rate: 71.5 BPM\nSpO2 (Classical): 84.5%\nSignal Quality (SNR): 6587.2\n\nProcessing: s19_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s19_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 246263 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 246263\n\nDetecting heartbeats...\nDetected 3 beats\nAverage heart rate: 95.4 ± 29.6 BPM\n\nExtracting beat-level features...\nExtracted features for 2 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s19_walk.json\nPROCESSING RESULTS\nSignal Duration: 1970.1 seconds\nBeats Detected: 3\nHeart Rate: 95.4 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 5659.3\n\nProcessing: s21_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s21_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242532 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 242532\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 84.8 ± 31.4 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s21_run.json\nPROCESSING RESULTS\nSignal Duration: 1940.3 seconds\nBeats Detected: 6\nHeart Rate: 84.8 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 7290.5\n\nProcessing: s9_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s9_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243734 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243734\n\nDetecting heartbeats...\nDetected 5 beats\nAverage heart rate: 79.2 ± 35.2 BPM\n\nExtracting beat-level features...\nExtracted features for 4 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s9_walk.json\nPROCESSING RESULTS\nSignal Duration: 1949.9 seconds\nBeats Detected: 5\nHeart Rate: 79.2 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 841.7\n\nProcessing: s18_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s18_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242126 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 242126\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 75.8 ± 16.8 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s18_run.json\nPROCESSING RESULTS\nSignal Duration: 1937.0 seconds\nBeats Detected: 6\nHeart Rate: 75.8 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 3060.4\n\nProcessing: s10_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s10_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242815 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242815\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 76.9 ± 26.9 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s10_run.json\nPROCESSING RESULTS\nSignal Duration: 1942.5 seconds\nBeats Detected: 6\nHeart Rate: 76.9 BPM\nSpO2 (Classical): 84.8%\nSignal Quality (SNR): 5774.7\n\nProcessing: s22_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s22_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242658 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 242658\n\nDetecting heartbeats...\nDetected 7 beats\nAverage heart rate: 89.2 ± 35.1 BPM\n\nExtracting beat-level features...\nExtracted features for 6 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s22_walk.json\nPROCESSING RESULTS\nSignal Duration: 1941.3 seconds\nBeats Detected: 7\nHeart Rate: 89.2 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 5192.3\n\nProcessing: s9_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s9_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 244245 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 244245\n\nDetecting heartbeats...\nDetected 7 beats\nAverage heart rate: 93.0 ± 30.8 BPM\n\nExtracting beat-level features...\nExtracted features for 6 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s9_run.json\nPROCESSING RESULTS\nSignal Duration: 1954.0 seconds\nBeats Detected: 7\nHeart Rate: 93.0 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 3023.8\n\nProcessing: s5_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s5_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 275656 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 275656\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s15_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s15_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242284 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 242284\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s7_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s7_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 245976 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 245976\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 76.5 ± 23.4 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s7_walk.json\nPROCESSING RESULTS\nSignal Duration: 1967.8 seconds\nBeats Detected: 6\nHeart Rate: 76.5 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 1606.0\n\nProcessing: s3_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s3_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243727 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243727\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 90.7 ± 39.6 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s3_run.json\nPROCESSING RESULTS\nSignal Duration: 1949.8 seconds\nBeats Detected: 6\nHeart Rate: 90.7 BPM\nSpO2 (Classical): 85.0%\nSignal Quality (SNR): 3489.7\n\nProcessing: s2_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s2_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 246864 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 246864\n\nDetecting heartbeats...\nDetected 8 beats\nAverage heart rate: 100.5 ± 15.7 BPM\n\nExtracting beat-level features...\nExtracted features for 7 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s2_walk.json\nPROCESSING RESULTS\nSignal Duration: 1974.9 seconds\nBeats Detected: 8\nHeart Rate: 100.5 BPM\nSpO2 (Classical): 85.0%\nSignal Quality (SNR): 2412.9\n\nProcessing: s18_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s18_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242269 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242269\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s16_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s16_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 246999 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 246999\n\nDetecting heartbeats...\nDetected 3 beats\nAverage heart rate: 61.8 ± 1.3 BPM\n\nExtracting beat-level features...\nExtracted features for 2 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s16_run.json\nPROCESSING RESULTS\nSignal Duration: 1976.0 seconds\nBeats Detected: 3\nHeart Rate: 61.8 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 8313.2\n\nProcessing: s3_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s3_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 246149 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 246149\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s10_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s10_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242843 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242843\n\nDetecting heartbeats...\nDetected 2 beats\nAverage heart rate: 64.7 ± 0.0 BPM\nInsufficient beats detected for reliable analysis\n\nProcessing: s2_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s2_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 246206 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 246206\n\nDetecting heartbeats...\nDetected 8 beats\nAverage heart rate: 97.6 ± 10.5 BPM\n\nExtracting beat-level features...\nExtracted features for 7 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s2_sit.json\nPROCESSING RESULTS\nSignal Duration: 1969.6 seconds\nBeats Detected: 8\nHeart Rate: 97.6 BPM\nSpO2 (Classical): 85.0%\nSignal Quality (SNR): 2434.3\n\nProcessing: s2_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s2_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 246578 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 246578\n\nDetecting heartbeats...\nDetected 7 beats\nAverage heart rate: 97.2 ± 24.9 BPM\n\nExtracting beat-level features...\nExtracted features for 6 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s2_run.json\nPROCESSING RESULTS\nSignal Duration: 1972.6 seconds\nBeats Detected: 7\nHeart Rate: 97.2 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 1038.1\n\nProcessing: s8_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s8_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 244770 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 244770\n\nDetecting heartbeats...\nDetected 5 beats\nAverage heart rate: 71.2 ± 24.6 BPM\n\nExtracting beat-level features...\nExtracted features for 4 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s8_sit.json\nPROCESSING RESULTS\nSignal Duration: 1958.2 seconds\nBeats Detected: 5\nHeart Rate: 71.2 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 9263.8\n\nProcessing: s7_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s7_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 248626 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 248626\n\nDetecting heartbeats...\nDetected 8 beats\nAverage heart rate: 100.2 ± 26.8 BPM\n\nExtracting beat-level features...\nExtracted features for 7 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s7_run.json\nPROCESSING RESULTS\nSignal Duration: 1989.0 seconds\nBeats Detected: 8\nHeart Rate: 100.2 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 774.4\n\nProcessing: s11_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s11_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243587 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243587\n\nDetecting heartbeats...\nDetected 2 beats\nAverage heart rate: 59.5 ± 0.0 BPM\nInsufficient beats detected for reliable analysis\n\nProcessing: s19_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s19_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 249594 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 249594\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 85.0 ± 30.6 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s19_sit.json\nPROCESSING RESULTS\nSignal Duration: 1996.8 seconds\nBeats Detected: 6\nHeart Rate: 85.0 BPM\nSpO2 (Classical): 84.8%\nSignal Quality (SNR): 3961.5\n\nProcessing: s3_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s3_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 246587 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 246587\n\nDetecting heartbeats...\nDetected 2 beats\nAverage heart rate: 77.3 ± 0.0 BPM\nInsufficient beats detected for reliable analysis\n\nProcessing: s18_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s18_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242627 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242627\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s10_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s10_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242859 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242859\n\nDetecting heartbeats...\nDetected 2 beats\nAverage heart rate: 70.1 ± 0.0 BPM\nInsufficient beats detected for reliable analysis\n\nProcessing: s16_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s16_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243993 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243993\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s11_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s11_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 246891 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 246891\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s21_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s21_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243193 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 243193\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 97.1 ± 32.9 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s21_walk.json\nPROCESSING RESULTS\nSignal Duration: 1945.5 seconds\nBeats Detected: 6\nHeart Rate: 97.1 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 5739.7\n\nProcessing: s15_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s15_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242188 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242188\n\nDetecting heartbeats...\nDetected 5 beats\nAverage heart rate: 95.8 ± 41.0 BPM\n\nExtracting beat-level features...\nExtracted features for 4 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s15_sit.json\nPROCESSING RESULTS\nSignal Duration: 1937.5 seconds\nBeats Detected: 5\nHeart Rate: 95.8 BPM\nSpO2 (Classical): 84.8%\nSignal Quality (SNR): 5190.5\n\nProcessing: s22_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s22_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 244676 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 244676\n\nDetecting heartbeats...\nDetected 5 beats\nAverage heart rate: 76.0 ± 32.3 BPM\n\nExtracting beat-level features...\nExtracted features for 4 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s22_run.json\nPROCESSING RESULTS\nSignal Duration: 1957.4 seconds\nBeats Detected: 5\nHeart Rate: 76.0 BPM\nSpO2 (Classical): 84.8%\nSignal Quality (SNR): 6182.8\n\nProcessing: s13_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s13_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243546 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243546\n\nDetecting heartbeats...\nDetected 7 beats\nAverage heart rate: 85.5 ± 30.8 BPM\n\nExtracting beat-level features...\nExtracted features for 6 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s13_sit.json\nPROCESSING RESULTS\nSignal Duration: 1948.4 seconds\nBeats Detected: 7\nHeart Rate: 85.5 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 1535.0\n\nProcessing: s1_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s1_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 245274 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 245274\n\nDetecting heartbeats...\nDetected 7 beats\nAverage heart rate: 98.1 ± 27.8 BPM\n\nExtracting beat-level features...\nExtracted features for 6 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s1_run.json\nPROCESSING RESULTS\nSignal Duration: 1962.2 seconds\nBeats Detected: 7\nHeart Rate: 98.1 BPM\nSpO2 (Classical): 85.0%\nSignal Quality (SNR): 1475.8\n\nProcessing: subjects_info.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/subjects_info.csv\nUsing column 'height' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 66 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 66\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s11_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s11_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243266 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243266\n\nDetecting heartbeats...\nDetected 5 beats\nAverage heart rate: 60.9 ± 25.9 BPM\n\nExtracting beat-level features...\nExtracted features for 4 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s11_walk.json\nPROCESSING RESULTS\nSignal Duration: 1946.1 seconds\nBeats Detected: 5\nHeart Rate: 60.9 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 8797.5\n\nProcessing: s1_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s1_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 245902 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 245902\n\nDetecting heartbeats...\nDetected 7 beats\nAverage heart rate: 92.1 ± 39.0 BPM\n\nExtracting beat-level features...\nExtracted features for 6 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s1_walk.json\nPROCESSING RESULTS\nSignal Duration: 1967.2 seconds\nBeats Detected: 7\nHeart Rate: 92.1 BPM\nSpO2 (Classical): 85.0%\nSignal Quality (SNR): 1740.8\n\nProcessing: s4_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s4_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 245748 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 245748\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 79.6 ± 25.1 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s4_run.json\nPROCESSING RESULTS\nSignal Duration: 1966.0 seconds\nBeats Detected: 6\nHeart Rate: 79.6 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 2792.0\n\nProcessing: s4_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s4_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 246956 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 246956\n\nDetecting heartbeats...\nDetected 2 beats\nAverage heart rate: 62.5 ± 0.0 BPM\nInsufficient beats detected for reliable analysis\n\nProcessing: s8_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s8_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242543 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 242543\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s6_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s6_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 257678 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 257678\n\nDetecting heartbeats...\nDetected 5 beats\nAverage heart rate: 88.1 ± 37.7 BPM\n\nExtracting beat-level features...\nExtracted features for 4 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s6_walk.json\nPROCESSING RESULTS\nSignal Duration: 2061.4 seconds\nBeats Detected: 5\nHeart Rate: 88.1 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 7817.1\n\nProcessing: s5_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s5_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 245842 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 245842\n\nDetecting heartbeats...\nDetected 4 beats\nAverage heart rate: 67.3 ± 7.2 BPM\n\nExtracting beat-level features...\nExtracted features for 3 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s5_walk.json\nPROCESSING RESULTS\nSignal Duration: 1966.7 seconds\nBeats Detected: 4\nHeart Rate: 67.3 BPM\nSpO2 (Classical): 84.8%\nSignal Quality (SNR): 7973.0\n\nProcessing: s14_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s14_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243841 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 243841\n\nDetecting heartbeats...\nDetected 4 beats\nAverage heart rate: 76.9 ± 16.4 BPM\n\nExtracting beat-level features...\nExtracted features for 3 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s14_walk.json\nPROCESSING RESULTS\nSignal Duration: 1950.7 seconds\nBeats Detected: 4\nHeart Rate: 76.9 BPM\nSpO2 (Classical): 84.7%\nSignal Quality (SNR): 10698.4\n\nProcessing: s17_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s17_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242459 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242459\n\nDetecting heartbeats...\nDetected 4 beats\nAverage heart rate: 44.2 ± 3.0 BPM\n\nExtracting beat-level features...\nExtracted features for 3 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s17_run.json\nPROCESSING RESULTS\nSignal Duration: 1939.7 seconds\nBeats Detected: 4\nHeart Rate: 44.2 BPM\nSpO2 (Classical): 84.7%\nSignal Quality (SNR): 2998.6\n\nProcessing: s16_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s16_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243635 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243635\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s20_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s20_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242260 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 242260\n\nDetecting heartbeats...\nDetected 7 beats\nAverage heart rate: 87.9 ± 16.9 BPM\n\nExtracting beat-level features...\nExtracted features for 6 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s20_walk.json\nPROCESSING RESULTS\nSignal Duration: 1938.1 seconds\nBeats Detected: 7\nHeart Rate: 87.9 BPM\nSpO2 (Classical): 84.8%\nSignal Quality (SNR): 3980.6\n\nProcessing: s12_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s12_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242960 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242960\n\nDetecting heartbeats...\nDetected 2 beats\nAverage heart rate: 59.1 ± 0.0 BPM\nInsufficient beats detected for reliable analysis\n\nProcessing: s1_sit.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s1_sit.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 254026 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 254026\n\nDetecting heartbeats...\nInsufficient beats detected for reliable analysis\n\nProcessing: s20_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s20_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 248682 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 248682\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 71.1 ± 12.2 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s20_run.json\nPROCESSING RESULTS\nSignal Duration: 1989.5 seconds\nBeats Detected: 6\nHeart Rate: 71.1 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 3778.8\n\nProcessing: s13_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s13_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 243115 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243115\n\nDetecting heartbeats...\nDetected 5 beats\nAverage heart rate: 96.0 ± 42.5 BPM\n\nExtracting beat-level features...\nExtracted features for 4 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s13_run.json\nPROCESSING RESULTS\nSignal Duration: 1944.9 seconds\nBeats Detected: 5\nHeart Rate: 96.0 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 3847.1\n\nProcessing: s6_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s6_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 248718 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 248718\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 81.0 ± 29.6 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s6_run.json\nPROCESSING RESULTS\nSignal Duration: 1989.7 seconds\nBeats Detected: 6\nHeart Rate: 81.0 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 4663.1\n\nProcessing: s19_run.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s19_run.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242379 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242379\n\nDetecting heartbeats...\nDetected 6 beats\nAverage heart rate: 81.7 ± 32.0 BPM\n\nExtracting beat-level features...\nExtracted features for 5 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s19_run.json\nPROCESSING RESULTS\nSignal Duration: 1939.0 seconds\nBeats Detected: 6\nHeart Rate: 81.7 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 5678.9\n\nProcessing: s12_walk.csv\nProcessing PPG file: /kaggle/input/yuhuty7/ppg_dataset_full/csv/s12_walk.csv\nUsing column 'ecg' as PPG signal\nNo SpO2 ground truth found in CSV\nLoaded CSV data: 242888 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242888\n\nDetecting heartbeats...\nDetected 8 beats\nAverage heart rate: 94.7 ± 26.4 BPM\n\nExtracting beat-level features...\nExtracted features for 7 beats\n\nExtracting SpO2-specific features...\nResults auto-saved to /kaggle/working/ppg_results_s12_walk.json\nPROCESSING RESULTS\nSignal Duration: 1943.1 seconds\nBeats Detected: 8\nHeart Rate: 94.7 BPM\nSpO2 (Classical): 84.9%\nSignal Quality (SNR): 843.0\n\nCombined results saved to: /kaggle/working/ppg_analysis_results.json\n\nStep 2: Training ML models with 46 samples\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 244938\n\nDetecting heartbeats...\nDetected 3274 beats\nAverage heart rate: 100.2 ± 1.0 BPM\n\nExtracting beat-level features...\nExtracted features for 3273 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242336\n\nDetecting heartbeats...\nDetected 2214 beats\nAverage heart rate: 68.5 ± 0.9 BPM\n\nExtracting beat-level features...\nExtracted features for 2213 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243647\n\nDetecting heartbeats...\nDetected 2767 beats\nAverage heart rate: 85.2 ± 0.9 BPM\n\nExtracting beat-level features...\nExtracted features for 2766 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 249502\n\nDetecting heartbeats...\nDetected 3111 beats\nAverage heart rate: 93.5 ± 1.0 BPM\n\nExtracting beat-level features...\nExtracted features for 3110 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 247497\n\nDetecting heartbeats...\nDetected 2961 beats\nAverage heart rate: 89.7 ± 1.0 BPM\n\nExtracting beat-level features...\nExtracted features for 2960 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243811\n\nDetecting heartbeats...\nDetected 2834 beats\nAverage heart rate: 87.2 ± 0.9 BPM\n\nExtracting beat-level features...\nExtracted features for 2833 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 243417\n\nDetecting heartbeats...\nDetected 1901 beats\nAverage heart rate: 58.6 ± 0.9 BPM\n\nExtracting beat-level features...\nExtracted features for 1900 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 243439\n\nDetecting heartbeats...\nDetected 2524 beats\nAverage heart rate: 77.8 ± 0.9 BPM\n\nExtracting beat-level features...\nExtracted features for 2523 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242541\n\nDetecting heartbeats...\nDetected 2452 beats\nAverage heart rate: 75.8 ± 1.0 BPM\n\nExtracting beat-level features...\nExtracted features for 2451 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242634\n\nDetecting heartbeats...\nDetected 2478 beats\nAverage heart rate: 76.6 ± 0.9 BPM\n\nExtracting beat-level features...\nExtracted features for 2477 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 253201\n\nDetecting heartbeats...\nDetected 3021 beats\nAverage heart rate: 89.5 ± 1.0 BPM\n\nExtracting beat-level features...\nExtracted features for 3020 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 248073\n\nDetecting heartbeats...\nDetected 2366 beats\nAverage heart rate: 71.5 ± 0.9 BPM\n\nExtracting beat-level features...\nExtracted features for 2365 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 246263\n\nDetecting heartbeats...\nDetected 3133 beats\nAverage heart rate: 95.4 ± 1.0 BPM\n\nExtracting beat-level features...\nExtracted features for 3132 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242532\n\nDetecting heartbeats...\nDetected 2741 beats\nAverage heart rate: 84.8 ± 0.9 BPM\n\nExtracting beat-level features...\nExtracted features for 2740 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 243734\n\nDetecting heartbeats...\nDetected 2573 beats\nAverage heart rate: 79.2 ± 0.9 BPM\n\nExtracting beat-level features...\nExtracted features for 2572 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242126\n\nDetecting heartbeats...\nDetected 2446 beats\nAverage heart rate: 75.8 ± 0.9 BPM\n\nExtracting beat-level features...\nExtracted features for 2445 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242815\n\nDetecting heartbeats...\nDetected 2490 beats\nAverage heart rate: 76.9 ± 1.0 BPM\n\nExtracting beat-level features...\nExtracted features for 2489 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 242658\n\nDetecting heartbeats...\nDetected 2885 beats\nAverage heart rate: 89.2 ± 1.0 BPM\n\nExtracting beat-level features...\nExtracted features for 2884 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. Signal length: 244245\n\nDetecting heartbeats...\nDetected 3029 beats\nAverage heart rate: 93.0 ± 1.0 BPM\n\nExtracting beat-level features...\nExtracted features for 3028 beats\n\nExtracting SpO2-specific features...\n\nPreprocessing PPG signal...\nPreprocessing completed. 
Signal length: 245976\n\nDetecting heartbeats...\nDetected 2508 beats\nAverage heart rate: 76.5 ± 0.9 BPM\n\nExtracting beat-level features...\nExtracted features for 2507 beats\n\nExtracting SpO2-specific features...\nInitializing all ML models...\n  Initialized 5 traditional ML models + 1 optimized deep learning model\n\nPreparing training data...\n  Training data prepared: 20 samples, 22 features\n\n================================================================================\nCONSOLIDATED MACHINE LEARNING MODEL TRAINING\n================================================================================\nDataset: 20 samples, 22 features\n\n------------------------------------------------------------\nAUTO-OPTIMIZING MLP FOR SMALL DATASET\n------------------------------------------------------------\n\n OPTIMIZING MLP HYPERPARAMETERS...\nNote: Some convergence warnings are normal for small datasets during grid search\nRunning optimized grid search for small datasets...\n","output_type":"stream"},{"name":"stderr","text":"/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n/usr/local/lib/python3.11/dist-packages/sklearn/neural_network/_multilayer_perceptron.py:541: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. OF ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n    https://scikit-learn.org/stable/modules/preprocessing.html\n  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n","output_type":"stream"},{"name":"stdout","text":"\n BEST MLP PARAMETERS FOUND:\n  activation: tanh\n  alpha: 0.5\n  hidden_layer_sizes: (32,)\n  learning_rate_init: 0.001\n  solver: lbfgs\n  Best CV RMSE: 0.109\n\n OPTIMIZED MLP PERFORMANCE:\n  Test RMSE: 0.064\n  Test R²: -4.277\n  Test MAE: 0.058\nMLP optimization completed - RMSE improved to 0.064\n\n------------------------------------------------------------\nTRAINING TRADITIONAL ML MODELS\n------------------------------------------------------------\n\nTraining Random Forest...\n    Test RMSE: 0.064\n    Test R²: -4.289\n    Test MAE: 0.041\n\nTraining Gradient Boosting...\n    Test RMSE: 0.038\n    Test R²: -0.879\n    Test MAE: 0.035\n\nTraining Linear Regression...\n    Test RMSE: 0.172\n    Test R²: -36.559\n    Test MAE: 0.158\n\nTraining Mlp Neural Network...\n    Test RMSE: 0.064\n    Test R²: -4.277\n    Test MAE: 0.058\n\nTraining 
Support Vector Regression...\n    Test RMSE: 0.114\n    Test R²: -15.549\n    Test MAE: 0.087\nTRAINING OPTIMIZED DEEP LEARNING MODEL\nTraining OPTIMIZED Deep Learning model with 20 samples and 22 features...\n","output_type":"stream"},{"name":"stderr","text":"I0000 00:00:1755897878.899528      36 gpu_device.cc:2022] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 13942 MB memory:  -> device: 0, name: Tesla T4, pci bus id: 0000:00:04.0, compute capability: 7.5\nI0000 00:00:1755897878.900171      36 gpu_device.cc:2022] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 13942 MB memory:  -> device: 1, name: Tesla T4, pci bus id: 0000:00:05.0, compute capability: 7.5\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nI0000 00:00:1755897881.389966     110 service.cc:148] XLA service 0x7a588c00aae0 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:\nI0000 00:00:1755897881.390548     110 service.cc:156]   StreamExecutor device (0): Tesla T4, Compute Capability 7.5\nI0000 00:00:1755897881.390568     110 service.cc:156]   StreamExecutor device (1): Tesla T4, Compute Capability 7.5\nI0000 00:00:1755897881.578472     110 cuda_dnn.cc:529] Loaded cuDNN version 90300\nI0000 00:00:1755897882.470238     110 device_compiler.h:188] Compiled cluster using XLA!  
This line is logged at most once for the lifetime of the process.\n","output_type":"stream"},{"name":"stdout","text":"    Optimized Deep Learning Training Completed\n    Test RMSE: 79.032\n    Test R²: -7949701.609\n    Test MAE: 79.026\nCOMPREHENSIVE MODEL PERFORMANCE COMPARISON\nModel                     Test RMSE    Test R²    Test MAE   Type            Status    \nrandom_forest             0.064        -4.289     0.041      Traditional ML    Success \ngradient_boosting         0.038        -0.879     0.035      Traditional ML    Success \nlinear_regression         0.172        -36.559    0.158      Traditional ML    Success \nmlp_neural_network        0.064        -4.277     0.058      Traditional ML    Success \nsupport_vector_regression 0.114        -15.549    0.087      Traditional ML    Success \ndeep_neural_network       79.032       -7949701.609 79.026     Deep Learning (Optimized)   Success \nLinear Regression: ~22 parameters\nRandom Forest: ~1000 decision nodes (approx)\nGradient Boosting: ~1000 decision nodes (approx)\nMLP Neural Network: ~8450 parameters\nDeep Neural Network: ~48672 parameters\n\nSaving all trained models...\n  mlp_neural_network_optimized saved to /kaggle/working/spo2_model_mlp_neural_network_optimized.pkl\n  random_forest saved to /kaggle/working/spo2_model_random_forest.pkl\n  gradient_boosting saved to /kaggle/working/spo2_model_gradient_boosting.pkl\n  linear_regression saved to /kaggle/working/spo2_model_linear_regression.pkl\n  mlp_neural_network saved to /kaggle/working/spo2_model_mlp_neural_network.pkl\n  support_vector_regression saved to /kaggle/working/spo2_model_support_vector_regression.pkl\nOptimized deep model saved to /kaggle/working/spo2_model_deep_neural_network.h5\n  deep_neural_network saved to /kaggle/working/spo2_model_deep_neural_network.h5\n  All models saved to /kaggle/working\n\n Best Model: gradient_boosting (RMSE: 0.038)\n  Best model (gradient_boosting) saved to 
/kaggle/working/best_spo2_model_gradient_boosting.pkl\n\n  Model training completed successfully\n\nStep 3: Generating Summary Report\nSummary report saved to: /kaggle/working/ppg_analysis_summary.json\n\nKEY STATISTICS:\n  Files processed: 46\n  Successful analyses: 46\n  Average heart rate: 83.1 BPM\n  Average SpO2: 84.9%\n  Average signal quality (SNR): 4848.1\n  Processed 46 PPG files\n  Results saved to /kaggle/working\n  Summary report generated\n  Models trained and saved\n","output_type":"stream"}],"execution_count":10}]}