import hydra
import wandb
import os

import numpy as np
import torch
import xmltodict as xtd

from tqdm import tqdm
from io import BytesIO
from PIL import Image

import warnings
warnings.filterwarnings('ignore')

import pywt
import numpy as np
import neurokit2 as nk
from sklearn.cluster import DBSCAN
from scipy.stats import mode

from ecgcmr.signal.sig_utils.plot_ecg import plot_long_ecg


def log_ecg(ecg, patient, rpeaks=None, tpeaks=None):
    """Render all 12 ECG leads to an in-memory PNG and log it to wandb.

    Optional rpeaks/tpeaks indices are overlaid on the plot by plot_long_ecg.
    """
    buffer = BytesIO()
    plot_long_ecg(ecg.squeeze(), rpeaks=rpeaks, tpeaks=tpeaks,
                  leads_to_plot=list(range(12)), display=False, save_to=buffer)
    buffer.seek(0)
    image = Image.open(buffer)
    wandb.log({f"Patient {patient} ECG": wandb.Image(image)})

def wavelet_filter(signal, wavelet='db6', level=8):
    """Denoise one ECG lead by zeroing selected DWT coefficients.

    Returns a tuple:
        (filtered_signal_for_storage, filtered_signal_for_rpeaks_detection)
    Both drop the approximation band (baseline wander) and D1/D2 (high-freq
    noise); the R-peak version additionally drops D3 so QRS complexes dominate.

    NOTE(review): pywt.waverec can return one sample more than the input when
    the input length is odd — callers appear to assume matching length; verify.
    """
    # Decompose into [A{level}, D{level}, ..., D2, D1]
    coeffs = pywt.wavedec(signal, wavelet, level=level)

    # Shallow copies are sufficient here: list elements are replaced wholesale
    # below (never mutated in place), so the shared arrays stay untouched.
    coeffs_for_rpeaks = list(coeffs)  # copy for R-peaks detection
    coeffs_for_storage = list(coeffs)  # copy for storage

    # For R-peak detection: zero the approximation plus D1, D2, D3.
    # (With level=8 the approximation is A8, not A9 as older comments claimed.)
    coeffs_for_rpeaks[0] = np.zeros_like(coeffs_for_rpeaks[0])  # approximation (baseline)
    coeffs_for_rpeaks[-3] = np.zeros_like(coeffs_for_rpeaks[-3])  # D3
    coeffs_for_rpeaks[-2] = np.zeros_like(coeffs_for_rpeaks[-2])  # D2
    coeffs_for_rpeaks[-1] = np.zeros_like(coeffs_for_rpeaks[-1])  # D1

    # For storage: zero the approximation plus D1, D2 (keep D3 morphology).
    coeffs_for_storage[0] = np.zeros_like(coeffs_for_storage[0])  # approximation (baseline)
    coeffs_for_storage[-2] = np.zeros_like(coeffs_for_storage[-2])  # D2
    coeffs_for_storage[-1] = np.zeros_like(coeffs_for_storage[-1])  # D1

    # Reconstruct signals from modified coefficients
    filtered_signal_for_rpeaks_detection = pywt.waverec(coeffs_for_rpeaks, wavelet).astype(np.float32)
    filtered_signal_for_storage = pywt.waverec(coeffs_for_storage, wavelet).astype(np.float32)

    return filtered_signal_for_storage, filtered_signal_for_rpeaks_detection
    
def z_score_normalization(signal, epsilon=1e-8):
    """Z-score the signal in float32; epsilon guards against a zero std."""
    sig = np.asarray(signal, dtype=np.float32)
    centered = sig - np.mean(sig)
    scaled = centered / (np.std(sig) + epsilon)
    return scaled.astype(np.float32)

def get_rpeaks(ecg, sampling_rate=500):
    """Detect R-peak sample indices in a single ECG lead.

    Tries neurokit's default detector first; if it errors or finds fewer
    than 3 peaks, falls back to the "koka2022" method. Returns an empty
    array when both detectors fail.
    """
    def _detect(method):
        # Index [1] of ecg_peaks is the info dict carrying the peak indices.
        info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate,
                            correct_artifacts=True, method=method)[1]
        return info['ECG_R_Peaks']

    try:
        primary = _detect("neurokit")
    except Exception:
        primary = np.array([])

    if len(primary) >= 3:
        return primary

    try:
        return _detect("koka2022")
    except Exception:
        return np.array([])

def find_representative_peaks(peaks_all_channels, delta, min_samples):
    """Merge per-lead R-peak indices into one representative peak per cluster.

    All peaks are pooled and clustered with DBSCAN (eps=delta). For each
    cluster, the modal index is used when it occurs at least min_samples
    times; otherwise the cluster median. Noise points (label -1) are dropped.
    """
    pooled = np.sort(np.concatenate(peaks_all_channels))
    if pooled.size == 0:
        return np.array([])

    clustering = DBSCAN(eps=delta, min_samples=min_samples).fit(pooled.reshape(-1, 1))
    labels = clustering.labels_

    representatives = []
    for cluster_id in np.unique(labels[labels >= 0]):
        members = pooled[labels == cluster_id]
        result = mode(members)
        if result.count.size > 0 and result.count >= min_samples:
            representative = result.mode
        else:
            representative = np.median(members)
        representatives.append(int(representative))

    return np.array(representatives)

def read_ecg(fname):
    """Load a Cardiosoft XML ECG file.

    Returns (full ECG array, median-beat array, metadata dict) with leads
    ordered in the standard 12-lead sequence.
    """
    standard_12_leads = ["I", "II", "III", "AVR", "AVL", "AVF",
                         "V1", "V2", "V3", "V4", "V5", "V6"]
    return import_ecg(fname, standard_12_leads)

def import_ecg(fname, lead_order):
    """Read a Cardiosoft XML file and return (ecg, median_ecg, metadata).

    Arrays are float32 numpy arrays; metadata includes the source filename.
    """
    # Fix: use a context manager so the file handle is closed even if
    # read() or downstream parsing raises (original leaked on error).
    with open(fname, "rt") as f:
        raw_input = f.read()

    # extract ECG and metadata
    ecg, median_ecg, md = parse_xml(raw_input, lead_order)
    md['filename'] = fname

    return ecg.float().numpy(), median_ecg.float().numpy(), md


def parse_xml(input_data, lead_order):
    """Parse raw Cardiosoft XML into 12-lead waveforms plus metadata.

    Returns (full_leads, median_leads, metadata); waveforms are float32
    torch tensors. Metadata extraction is best-effort: on failure an
    (almost) empty dict is returned instead of raising.
    """
    data = xtd.parse(input_data)['CardiologyXML']

    # Primary and fallback XML paths for each waveform kind.
    full_lead_paths = [['StripData', 'WaveformData'],
                       ['Strip', 'StripData', 'WaveformData']]
    median_lead_paths = [['RestingECGMeasurements', 'MedianSamples', 'WaveformData'], []]

    full_leads = get_lead_data(data, full_lead_paths, lead_order)
    median_leads = get_lead_data(data, median_lead_paths, lead_order)

    try:
        md = get_metadata(data, {})
    except Exception:
        md = {}

    md['lead order'] = lead_order

    full_tensor = torch.tensor(full_leads, dtype=torch.float32)
    median_tensor = torch.tensor(median_leads, dtype=torch.float32)
    return full_tensor, median_tensor, md


def get_lead_data(data, nodes, lead_order):
    """Extract 12 lead waveforms from parsed XML, ordered per lead_order.

    Each waveform is a list of ints parsed from the comma-separated '#text'
    payload. If the XML does not contain exactly 12 leads, a warning is
    printed and 12 empty lists are returned.
    """
    leads = [[] for _ in range(12)]
    raw_lead_data = get_xml_node(data, nodes)

    if len(raw_lead_data) != 12:
        # Unexpected lead count: warn and return the empty placeholders.
        print('Warning: only {} leads found'.format(len(raw_lead_data)))
        return leads

    # Place each lead at its canonical position and parse the sample values.
    for entry in raw_lead_data:
        position = lead_order.index(entry['@lead'].upper())
        leads[position] = [int(value) for value in entry['#text'].split(",")]

    return leads


def get_xml_node(data, node_list):
    """Walk nested dicts down the primary key path node_list[0]; on a missing
    key, retry from the root with the fallback path node_list[1].

    Raises ValueError when neither path resolves.
    """
    output = data
    try:
        for key in node_list[0]:
            output = output[key]
    except KeyError:
        # BUG FIX: the original restarted from `data[:]`, but slicing a dict
        # (xmltodict output) raises TypeError, so the fallback path could
        # never run. Restart from the mapping itself instead.
        output = data
        try:
            for key in node_list[1]:
                output = output[key]
        except (KeyError, TypeError, IndexError):
            # Narrowed from a bare `except:` to traversal failures only.
            raise ValueError('No lead data found!')
    return output


def get_metadata(data, md):
    """Populate md with acquisition settings and resting-ECG measurements.

    Reads sample rate, resolution, filter settings, and the standard interval/
    axis measurements (as '<value> <units>' strings). Mutates and returns md;
    raises KeyError if a required XML node is absent.
    """
    strip = data['StripData']
    md['sample rate'] = float(strip['SampleRate']['#text'])
    md['t scale'] = 1. / md['sample rate']
    md['v scale'] = float(strip['Resolution']['#text'])

    filters = data['FilterSetting']
    md['filter 50Hz'] = filters['Filter50Hz']
    md['filter 60Hz'] = filters['Filter60Hz']
    md['low pass'] = float(filters['LowPass']['#text'])
    md['high pass'] = float(filters['HighPass']['#text'])

    # Measurement fields share one '<#text> <@units>' formatting rule.
    measurement_map = [
        ('Heart rate', 'VentricularRate'),
        ('P duration', 'PDuration'),
        ('PR interval', 'PQInterval'),
        ('QRS duration', 'QRSDuration'),
        ('QT interval', 'QTInterval'),
        ('QTc interval', 'QTCInterval'),
        ('P axis', 'PAxis'),
        ('R axis', 'RAxis'),
        ('T axis', 'TAxis'),
    ]
    meas = data['RestingECGMeasurements']
    for out_key, xml_key in measurement_map:
        node = meas[xml_key]
        md[out_key] = node['#text'] + ' ' + node['@units']
    return md


def main():
    """Filter, normalize, and R-peak-annotate UKBB ECGs for each data split.

    For each patient: wavelet-filter and z-score every lead, detect per-lead
    R-peaks, cluster them into consensus peaks, and keep the patient only if
    at least one peak leaves room for a 2500-sample window within the
    5000-sample recording. Valid ids/peaks/signals are saved per split;
    failures are logged to wandb with a plot of the raw ECG.
    """
    wandb.init(project='Logging')

    save_folder = 'saved_tensors/multimodal/ecg_ED_ES'
    root_path = '/vol/aimspace/projects/ukbb/data/cardiac/ecg'
    delta_r = 150      # DBSCAN eps (in samples) for clustering per-lead peaks
    min_samples = 3    # minimum leads that must agree on a peak

    # Fix: np.save does not create directories; ensure the output dir exists.
    os.makedirs(save_folder, exist_ok=True)

    # Fix: loop variable renamed from `mode`, which shadowed scipy.stats.mode.
    for split in ["train", "val", "test"]:
        patient_ids_path = os.path.join("saved_tensors/ids", f'{split}_patient_ids.npy')
        patient_ids = np.load(patient_ids_path, mmap_mode='r')

        valid_patient_ids = []
        valid_rpeaks = []
        new_ecg_data = []

        for patient in tqdm(patient_ids):
            raw_ecg_file = os.path.join(root_path, f'{patient}_20205_2_0.xml')
            ecg, _, _ = read_ecg(raw_ecg_file)

            try:
                rpeaks_all_channels = []
                cleared_ecg_all_leads = []

                for lead in ecg:
                    filtered_signal, filtered_for_rpeaks = wavelet_filter(lead)
                    cleared_ecg_all_leads.append(z_score_normalization(filtered_signal))

                    rpeaks = get_rpeaks(z_score_normalization(filtered_for_rpeaks))
                    rpeaks = rpeaks[~np.isnan(rpeaks)]
                    if rpeaks.size > 0:
                        rpeaks_all_channels.append(rpeaks)

                aggregated_rpeaks = find_representative_peaks(
                    rpeaks_all_channels, delta=delta_r, min_samples=min_samples)

                # A peak is usable only if its 2500-sample window fits inside
                # the 5000-sample recording.
                possible_rpeaks = [r for r in aggregated_rpeaks if r + 2500 <= 5000]

                cleared_ecg_array = np.array(cleared_ecg_all_leads, dtype=np.float32)

                if possible_rpeaks:
                    valid_patient_ids.append(patient)
                    valid_rpeaks.append(aggregated_rpeaks)
                    new_ecg_data.append(cleared_ecg_array)
                else:
                    print(f'Patient {patient} not enough r peaks')
                    log_ecg(ecg=ecg, patient=patient, rpeaks=aggregated_rpeaks)

            except Exception as e:
                print(f'Patient {patient}, {e}')
                log_ecg(ecg=ecg, patient=patient)

        np.save(os.path.join(save_folder, f"{split}_new_patient_ids.npy"), np.array(valid_patient_ids))
        np.save(os.path.join(save_folder, f"{split}_new_rpeaks.npy"), np.array(valid_rpeaks, dtype=object))
        np.save(os.path.join(save_folder, f"{split}_new_ecg_data.npy"), np.array(new_ecg_data, dtype=np.float32))

if __name__ == "__main__":
    main()
