from cgi import test
import wfdb
from pathlib import Path
import tsfel
import pywt
import cv2
from concurrent.futures import ProcessPoolExecutor, as_completed
from collections import defaultdict
import json

from typing import Dict, Tuple, List, Union, Optional, Iterable
from numpy.typing import NDArray

import numpy as np

from sklearn.model_selection import StratifiedGroupKFold, KFold, StratifiedKFold

from scipy import signal
from scipy.stats import kendalltau


def read_record(data_root: Path, patient_id: int, lead: str, label_mapping: Dict[str, str]) -> Tuple[NDArray, NDArray, NDArray]:
    """Load one WFDB record and keep only beats whose symbol is in ``label_mapping``.

    Returns the signal of the requested lead (float32), the kept beat
    symbols, and their sample positions (int32).
    """
    record_path = str(data_root / str(patient_id))
    signals, fields = wfdb.rdsamp(record_path)
    annotations = wfdb.rdann(record_path, extension='atr')

    symbols = np.asarray(annotations.symbol)
    positions = np.asarray(annotations.sample)

    # Raises ValueError when the requested lead is absent from this record.
    channel = fields['sig_name'].index(lead)
    X = signals[:, channel].astype(np.float32)

    keep = np.isin(symbols, list(label_mapping.keys()))
    return X, symbols[keep], positions[keep].astype(np.int32)


def resample_record(X: NDArray, y: NDArray, loc: NDArray,
                    original_sampling_rate: int, new_sampling_rate: int) \
                    -> Tuple[NDArray, NDArray, NDArray]:
    """Resample the signal to ``new_sampling_rate`` and rescale beat positions to match.

    Labels pass through untouched; positions are rounded to the nearest
    sample index in the resampled signal.
    """
    n_samples = int(len(X) * new_sampling_rate / original_sampling_rate)
    resampled_signal = signal.resample(X, n_samples)

    scaled_positions = loc.astype(np.float32) * new_sampling_rate / original_sampling_rate
    scaled_positions = np.round(scaled_positions).astype(np.int32)

    return resampled_signal, y, scaled_positions


def remove_baseline_wander(X: NDArray, sampling_rate: int) -> NDArray:
    """Remove baseline wander via a cascaded two-stage median filter.

    The baseline is estimated with median filters of roughly 200 ms and
    600 ms and subtracted from the signal.

    Fixes over the original:
      * no longer mutates the caller's array in place (`X -= baseline`);
      * kernel sizes are clamped to at least 1 so very low sampling rates
        cannot produce a zero/negative kernel (medfilt requires an odd,
        positive kernel size).

    Args:
        X: 1-D signal.
        sampling_rate: sampling frequency in Hz.

    Returns:
        A new baseline-corrected array; the input is left unchanged.
    """
    def _odd_kernel(seconds: float) -> int:
        # medfilt requires an odd, strictly positive kernel size.
        k = int(seconds * sampling_rate)
        if k % 2 == 0:
            k -= 1
        return max(k, 1)

    baseline = signal.medfilt(signal.medfilt(X, _odd_kernel(0.2)), _odd_kernel(0.6))
    return X - baseline


def filter_heartbeats(y: NDArray, loc: NDArray, label_mapping: Dict) \
    -> Tuple[NDArray, NDArray]:
    """Drop beats with unknown symbols and map the remaining symbols to int labels."""
    keep = np.isin(y, list(label_mapping))
    kept_symbols = y[keep]
    kept_positions = loc[keep]
    labels = np.asarray([label_mapping[symbol] for symbol in kept_symbols], dtype=np.int32)

    return labels, kept_positions


def calculate_cwt(X: NDArray, sampling_rate: int, cwt_freqs=100) -> NDArray:
    """Continuous wavelet transform (Mexican hat) evaluated at ``cwt_freqs`` frequencies.

    Scales are derived from the wavelet's central frequency so that scale k
    corresponds to frequency k+1 Hz; the coefficient matrix is transposed
    to shape (len(X), cwt_freqs).
    """
    target_frequencies = np.arange(1, cwt_freqs + 1)
    scales = pywt.central_frequency('mexh') * sampling_rate / target_frequencies
    coefficients, _ = pywt.cwt(X, scales, 'mexh', sampling_period=1 / sampling_rate)

    return coefficients.T


def calculate_fft(X: NDArray) -> NDArray:
    """Magnitude spectrum of ``X`` (absolute value of the DFT)."""
    spectrum = np.fft.fft(X)
    return np.absolute(spectrum)


def calculate_features(X: NDArray, y: NDArray,
                       sorted_features: Optional[List[int]],
                       sampling_rate=360) -> Tuple[NDArray, List[int]]:
    """Extract TSFEL features per window and reorder columns by Kendall-tau relevance.

    When ``sorted_features`` is None, a column ordering is computed here
    (ascending p-value, ties broken by descending |tau| against ``y``) and
    returned so subsequent calls can reuse the exact same ordering.
    """
    config = tsfel.get_features_by_domain()
    X_feat = tsfel.time_series_features_extractor(
        config, np.concatenate(X),
        fs=sampling_rate, window_size=X.shape[-1],
        verbose=0,
    ).values

    if sorted_features is None:
        ranking = []
        for col in range(X_feat.shape[-1]):
            tau = kendalltau(X_feat[:, col], y)
            # Sort key: most significant first, strongest correlation on ties.
            ranking.append((col, (tau.pvalue, -abs(tau.statistic))))
        ranking.sort(key=lambda item: item[1])
        sorted_features = [col for col, _ in ranking]

    return X_feat[:, sorted_features], sorted_features


def calculate_rr_interval_stats(loc: NDArray, time_encoded=False, sampling_rate=360) -> NDArray:
    loc = loc.astype(np.float32)
    
    if time_encoded:
        loc *= 1 / sampling_rate

    avg_rr = np.mean(np.diff(loc))
    prev_rr = []
    next_rr = []
    ratio_rr = []
    local_rr = []
    for i in range(1, len(loc) - 1):
            prev_rr.append(loc[i] - loc[i - 1] - avg_rr)
            next_rr.append(loc[i + 1] - loc[i] - avg_rr)
            ratio_rr.append((loc[i] - loc[i - 1]) / (loc[i + 1] - loc[i]))
            local_rr.append(np.mean(np.diff(loc[np.maximum(i - 10, 0):i + 1])) - avg_rr)
    prev_rr = np.asarray(prev_rr)
    next_rr = np.asarray(next_rr)
    local_rr = np.asarray(local_rr)
    ratio_rr = np.asarray(ratio_rr)
    return np.stack((prev_rr, next_rr, local_rr, ratio_rr)).T


def get_beat_to_beat_windows(X: NDArray, X_cwt: NDArray, y: NDArray, loc: NDArray, rr: NDArray,
                             window_size: int, time_encoded=True, sampling_rate=360) \
                             -> Tuple[NDArray, NDArray, NDArray, NDArray]:
    """Cut variable-length beat-to-beat windows (previous R peak to next R peak).

    Each window is centred on one beat, spans from the preceding to the
    following R peak, and is padded with -1 up to ``window_size`` samples;
    windows longer than ``window_size`` are dropped. With ``time_encoded``
    a timestamp channel (seconds since window start) is appended before
    padding, so padding stays distinguishable from real samples.

    Bug fix: when ``time_encoded`` is False the raw window stays 1-D, but
    the original always used a 2-D pad spec, making np.pad raise
    ValueError. The pad spec now follows the array's rank.

    Args:
        X: 1-D signal.
        X_cwt: CWT coefficients, shape (len(X), n_freqs).
        y: per-beat labels aligned with ``loc``.
        loc: R-peak sample positions.
        rr: per-beat RR features aligned with ``loc``.
        window_size: fixed output length after padding.
        time_encoded: append the timestamp channel when True.
        sampling_rate: sampling frequency in Hz (timestamp step).

    Returns:
        Arrays of signal windows, CWT windows, RR features and labels.
    """
    X_windows: List[NDArray] = []
    X_cwt_windows: List[NDArray] = []
    rr_windows: List[NDArray] = []
    y_windows: List[NDArray] = []

    step_size = 1 / sampling_rate
    centre_labels = y[1:-1]
    for i, (prev_rpeak, next_rpeak) in enumerate(zip(loc[:-2], loc[2:])):
        window = X[prev_rpeak:next_rpeak + 1]
        cwt_window = X_cwt[prev_rpeak:next_rpeak + 1]
        if len(window) > window_size:
            continue

        if time_encoded:
            timestamps = np.arange(window.shape[0], dtype=np.float32)[:, np.newaxis] * step_size
            window = np.concatenate([window[:, np.newaxis], timestamps], axis=-1)
            cwt_window = np.concatenate([cwt_window, timestamps], axis=-1)

        padding_size = window_size - len(window)
        # Without time encoding the raw window is still 1-D; pick the pad
        # spec accordingly (the CWT window is always 2-D).
        pad_window = ((0, padding_size), (0, 0)) if window.ndim == 2 else ((0, padding_size),)
        window = np.pad(window, pad_window, constant_values=-1)
        cwt_window = np.pad(cwt_window, ((0, padding_size), (0, 0)), constant_values=-1)

        X_windows.append(window)
        X_cwt_windows.append(cwt_window)
        # NOTE(review): rr[i] pairs the window centred on loc[i + 1] with
        # the RR features of beat i — confirm this off-by-one vs. callers.
        rr_windows.append(rr[i])
        y_windows.append(centre_labels[i])
    return np.asarray(X_windows), np.asarray(X_cwt_windows), np.asarray(rr_windows), np.asarray(y_windows)


def get_windows(X: NDArray, X_cwt: NDArray, y: NDArray, loc: NDArray, rr: NDArray,
                n_before: int, n_after: int, time_encoded=False,
                sampling_rate=360) \
                -> Tuple[NDArray, NDArray, NDArray, NDArray]:
    """Cut fixed-length windows of ``n_before + n_after`` samples around each R peak.

    Beats whose window would not yield the full length (e.g. too close to
    either end of the signal) are skipped. With ``time_encoded`` a
    timestamp channel (seconds since window start) is appended to both the
    raw and the CWT window.
    """
    window_length = n_before + n_after
    seconds_per_sample = 1 / sampling_rate

    kept_windows: List[NDArray] = []
    kept_cwt: List[NDArray] = []
    kept_rr: List[NDArray] = []
    kept_labels: List[NDArray] = []

    for beat_idx, (rpeak, label) in enumerate(zip(loc, y)):
        start, stop = rpeak - n_before, rpeak + n_after
        segment = X[start:stop]
        cwt_segment = X_cwt[start:stop]
        if len(segment) != window_length:
            continue

        if time_encoded:
            timestamps = np.arange(window_length, dtype=np.float32)[:, np.newaxis] * seconds_per_sample
            segment = np.concatenate([segment[:, np.newaxis], timestamps], axis=-1)
            cwt_segment = np.concatenate([cwt_segment, timestamps], axis=-1)

        kept_windows.append(segment)
        kept_cwt.append(cwt_segment)
        kept_rr.append(rr[beat_idx])
        kept_labels.append(label)

    return np.asarray(kept_windows), np.asarray(kept_cwt), np.asarray(kept_rr), np.asarray(kept_labels)


def process_record(data_root: Path, record: int,
                   label_mapping: Dict[str, str],
                   n_before: Optional[int]=None, n_after: Optional[int]=None,
                   window_size: Optional[int]=None,
                   lead='MLII', sampling_rate=360,
                   resampling: Optional[int]=None, time_encoded=False) \
    -> Optional[Tuple[NDArray, NDArray, NDArray, NDArray]]:
    """Full preprocessing pipeline for one record.

    Reads the record, optionally resamples it, removes baseline wander,
    keeps only beats in ``label_mapping``, min-max normalizes the signal,
    its CWT and the RR features, then cuts either fixed windows
    (``n_before``/``n_after``) or beat-to-beat windows (``window_size``).

    Fixes over the original:
      * the precondition now also requires ``n_after`` when ``n_before``
        is given (get_windows would otherwise slice with None);
      * windowing-mode selection uses ``is not None`` so ``n_before=0``
        still selects fixed-size windowing;
      * the return annotation previously claimed five arrays — four are
        returned.

    Returns:
        (X windows, CWT windows, RR features, labels), or None when the
        record cannot be read (e.g. the requested lead is missing).
    """
    assert (n_before is not None and n_after is not None) or window_size is not None, \
        'either both n_before and n_after need to be set or window_size needs to be set'
    try:
        X, y, loc = read_record(data_root, record, lead, label_mapping)
    except ValueError:
        # Typically raised when `lead` is not present in this record.
        return None

    if resampling is not None:
        X, y, loc = resample_record(X, y, loc, sampling_rate, resampling)
        sampling_rate = resampling

    X = remove_baseline_wander(X, sampling_rate)
    y, loc = filter_heartbeats(y, loc, label_mapping)
    # Resampling rounding can push the last annotation past the signal end.
    if loc[-1] >= len(X):
        y, loc = y[:-1], loc[:-1]

    # Normalize wrt average R peak amplitude
    X = X / np.mean(X[loc])

    X_cwt = calculate_cwt(X, sampling_rate)
    rr_stats = calculate_rr_interval_stats(loc, time_encoded, sampling_rate)

    # Trim data for which we dont have RR information (first and last beat)
    y = y[1:-1]
    loc = loc[1:-1]

    # Min-max normalize signal, CWT and RR features to [0, 1].
    X_min, X_max = X.min(), X.max()
    X -= X_min
    X /= (X_max - X_min)

    X_cwt_min, X_cwt_max = X_cwt.min(), X_cwt.max()
    X_cwt -= X_cwt_min
    X_cwt /= (X_cwt_max - X_cwt_min)

    X_rr_min, X_rr_max = rr_stats.min(axis=0), rr_stats.max(axis=0)
    rr_stats -= X_rr_min
    rr_stats /= (X_rr_max - X_rr_min)

    if n_before is not None:
        X, X_cwt, rr, y = get_windows(X, X_cwt, y, loc, rr_stats,
                                      n_before, n_after, time_encoded, sampling_rate)
    else:
        X, X_cwt, rr, y = get_beat_to_beat_windows(X, X_cwt, y, loc, rr_stats,
                                                   window_size, time_encoded, sampling_rate)

    return X, X_cwt, rr, y


def process_dataset(data_root: Path, patient_ids: List[int],
                    label_mapping: Dict[str, str],
                    lead='MLII', sampling_rate=360, resampling: Optional[int]=None,
                    time_encoded=False,
                    n_before: Optional[int]=None, n_after: Optional[int]=None,
                    window_size: Optional[int]=None) \
    -> Tuple[NDArray, NDArray, NDArray, NDArray]:
    """Preprocess all records in parallel and concatenate the results.

    Fixes over the original:
      * arguments were submitted positionally, routing ``lead`` into
        process_record's ``n_before``, ``sampling_rate`` into ``n_after``,
        ``resampling`` into ``window_size`` and ``time_encoded`` into
        ``lead`` — keyword arguments are used now;
      * the windowing parameters (``n_before``/``n_after``/``window_size``)
        are exposed (appended with defaults, so existing callers keep
        working) so they can actually reach process_record;
      * records that fail to load (process_record returns None) are
        skipped instead of breaking the final ``zip``.
    """
    with ProcessPoolExecutor() as executor:
        futures = [
            executor.submit(process_record, data_root, record, label_mapping,
                            n_before=n_before, n_after=n_after,
                            window_size=window_size, lead=lead,
                            sampling_rate=sampling_rate, resampling=resampling,
                            time_encoded=time_encoded)
            for record in patient_ids
        ]

        results = []
        for future in as_completed(futures):
            result = future.result()
            if result is not None:
                results.append(result)

    X, X_cwt, X_rr, y = zip(*results)
    return np.concatenate(X), np.concatenate(X_cwt), \
           np.concatenate(X_rr), np.concatenate(y)


def save_record(folder: Path, X: NDArray, X_cwt: NDArray, X_rr: NDArray, y: NDArray):
    """Persist the four preprocessed arrays as .npy files under ``folder``."""
    folder.mkdir(parents=True, exist_ok=True)
    for stem, array in (('X', X), ('X_cwt', X_cwt), ('X_rr', X_rr), ('y', y)):
        np.save(folder / stem, array)


def load_record(folder: Path) -> Tuple[NDArray, NDArray, NDArray, NDArray]:
    """Load the four arrays written by ``save_record``.

    Fix: the return annotation previously claimed five arrays; exactly
    four (X, X_cwt, X_rr, y) are saved and loaded.
    """
    X = np.load(folder / 'X.npy')
    X_cwt = np.load(folder / 'X_cwt.npy')
    X_rr = np.load(folder / 'X_rr.npy')
    y = np.load(folder / 'y.npy')

    return X, X_cwt, X_rr, y


def load_patient_data(data_root: Path, patient_id: int,
                      padding: Optional[int]=None,
                      allowed_labels: Optional[List[int]]=None) -> Tuple[NDArray, NDArray, NDArray, NDArray]:
    """Load and concatenate all saved records of one patient.

    Fixes over the original:
      * ``allowed_labels`` was written as ``allowed_labels=Optional[List[int]]``,
        i.e. the *typing object* was the default value, so the filter branch
        always ran and passed a non-list to np.isin; it now defaults to None;
      * return annotation previously claimed five arrays — four are returned.

    Args:
        data_root: folder containing PATIENT_<id>/RECORD* subfolders.
        patient_id: patient to load.
        padding: if set, pad the window axis with -1 up to this length.
        allowed_labels: if set, keep only beats with these labels.
    """
    X_total, X_cwt_total, X_rr_total, y_total = [], [], [], []
    patient_folder = data_root / f"PATIENT_{patient_id}"
    for p in patient_folder.glob('RECORD*'):
        X, X_cwt, X_rr, y = load_record(p)
        X_total.append(X)
        X_cwt_total.append(X_cwt)
        X_rr_total.append(X_rr)
        y_total.append(y)
    X_total = np.concatenate(X_total)
    X_cwt_total = np.concatenate(X_cwt_total)
    X_rr_total = np.concatenate(X_rr_total)
    y_total = np.concatenate(y_total)

    if allowed_labels is not None:
        selected_indices = np.isin(y_total, allowed_labels)
        X_total = X_total[selected_indices]
        X_cwt_total = X_cwt_total[selected_indices]
        X_rr_total = X_rr_total[selected_indices]
        y_total = y_total[selected_indices]

    if padding is not None and X_total.shape[1] != padding:
        # NOTE(review): assumes X_total.shape[1] <= padding — np.pad raises
        # on a negative pad width; confirm callers guarantee this.
        padding_size = padding - X_total.shape[1]
        padding_shape = ((0, 0), (0, padding_size), (0, 0))
        X_total = np.pad(X_total, padding_shape, constant_values=-1)
        X_cwt_total = np.pad(X_cwt_total, padding_shape, constant_values=-1)

    return X_total, X_cwt_total, X_rr_total, y_total


def get_distribution(root_folder: Path, patient_ids: List[int]):
    """Count label occurrences over all saved records of the given patients."""
    counts = defaultdict(int)
    for patient_id in patient_ids:
        record_folders = (root_folder / f"PATIENT_{patient_id}").glob('RECORD*')
        for record_folder in record_folders:
            labels = np.load(record_folder / 'y.npy')
            unique_labels, label_counts = np.unique(labels, return_counts=True)
            for label, count in zip(unique_labels, label_counts):
                counts[label.item()] += count.item()
    return counts


def generate_stratified_split(dataset_root: Path, patient_ids: Iterable[int], testing_fraction=.5) -> Tuple[NDArray, NDArray]:
    """Split patients into train/test sets, stratified by beat-label distribution.

    All saved labels are pooled per patient, then a StratifiedGroupKFold
    (groups = patient ids, round(1 / testing_fraction) folds, no shuffle)
    is built and its first fold is taken as the split.

    Returns:
        Two arrays of patient ids: training patients, testing patients.
    """
    all_labels = []
    all_groups = []
    for pid in patient_ids:
        folder = dataset_root / f"PATIENT_{pid}"
        pooled = np.concatenate([np.load(rec / 'y.npy') for rec in folder.glob('RECORD*')])
        all_labels.append(pooled)
        # One group entry per beat, carrying the patient id.
        all_groups.append(np.ones_like(pooled) * pid)
    labels = np.concatenate(all_labels)
    groups = np.concatenate(all_groups)

    n_splits = round(1 / testing_fraction)
    folds = StratifiedGroupKFold(n_splits=n_splits, shuffle=False)
    train_idx, test_idx = next(iter(folds.split(np.zeros_like(labels), labels, groups)))

    return np.unique(groups[train_idx]), np.unique(groups[test_idx])