import numpy as np
from pathlib import Path
from data.utils import load_patient_data
import argparse
import h5py
import json

from typing import List, Tuple
from numpy.typing import NDArray


# Beat labels kept when loading patient data; everything else is dropped.
ALLOWED_LABELS = (0, 1, 2)
# Fixed length (in samples) each beat segment is padded to.
PADDED_LENGTH = 1000

# Root of the preprocessed per-patient beat data (INCART dataset).
DATA_ROOT = '/data/IDLab/DigiHealth/processed_data/beat-to-beat/incart-time'
FOLDS_ROOT = Path('/home/timodw/IDLab/Digihealth-Asia/cardiovascular_monitoring/ugent/heartbeat_classification/processed_data/configs')
# JSON list of fold configs, each with 'training'/'evaluation' patient id lists.
FOLDS_CONFIG = FOLDS_ROOT / 'incart_stratified_standard.json'

OUTPUT_ROOT = Path('/home/timodw/IDLab/Digihealth-Asia/cardiovascular_monitoring/ugent/heartbeat_classification/processed_data/training_snapshots')
# OUTPUT_ROOT = '/data/IDLab/DigiHealth/training_snapshots'
# OUTPUT_FOLDER = Path(OUTPUT_ROOT) / FOLDS_CONFIG.stem
# Folder name encodes the sweep settings used below (steps of 25, 5 rates per
# patient) — presumably sampling rates in Hz; TODO confirm the unit.
OUTPUT_FOLDER = Path(OUTPUT_ROOT) / (FOLDS_CONFIG.stem + "_25hz_5sr")

def add_data_to_hdf5(f: h5py.Group, X: NDArray, X_cwt: NDArray,
                     rr: NDArray, y: NDArray) -> None:
    """Append one batch of samples to the resizable datasets in *f*.

    Parameters
    ----------
    f : h5py.Group
        Group holding resizable datasets 'X', 'X_cwt', 'rr' and 'y'
        (as created by save_data_to_hdf5).
    X, X_cwt, rr, y : NDArray
        Per-sample arrays sharing the same first-axis length.
    """
    n_samples = X.shape[0]
    if n_samples == 0:
        # Nothing to append.  Without this guard the `[-0:]` slices below
        # would address the WHOLE dataset and raise a shape-mismatch error.
        return

    ds_size = f['X'].shape[0]
    if ds_size == 1:
        # Datasets are created with a single all-zero placeholder row; treat
        # a size-1 dataset as empty so the placeholder gets overwritten.
        # NOTE(review): this also clobbers a legitimate first batch of exactly
        # one sample — presumably batches are whole-patient recordings and
        # never of size 1, but TODO confirm.
        ds_size = 0

    new_size = ds_size + n_samples
    f['X'].resize(new_size, axis=0)
    f['X_cwt'].resize(new_size, axis=0)
    f['rr'].resize(new_size, axis=0)
    f['y'].resize(new_size, axis=0)

    # Negative slices write into the freshly grown tail rows.
    f['X'][-n_samples:] = X
    f['X_cwt'][-n_samples:] = X_cwt
    f['rr'][-n_samples:] = rr
    f['y'][-n_samples:] = y


def save_data_to_hdf5(data_path: Path, patient_ids: List[int],
                      hdf5_path: Path, suffixes: List[str],
                      sr_per_patient: int) -> int:
    """Collect beat data for *patient_ids* into a single HDF5 file.

    For each patient, up to ``sr_per_patient`` suffixes are drawn without
    replacement from ``suffixes``; each suffix selects one variant of the
    data folder (``str(data_path) + suffix``).  Loads that raise
    ValueError are skipped (best-effort).

    Parameters
    ----------
    data_path : Path
        Base data folder; a suffix is appended per selected variant.
    patient_ids : List[int]
        Patients whose data is collected.
    hdf5_path : Path
        Destination HDF5 file (overwritten).
    suffixes : List[str]
        Candidate folder-name suffixes ('' means "no variant").
    sr_per_patient : int
        Variants to sample per patient; if 0 or larger than
        ``len(suffixes)``, all suffixes are used.

    Returns
    -------
    int
        Total number of samples written to the file.
    """
    with h5py.File(hdf5_path, 'w') as f:
        # Datasets start with a single zero row (placeholder);
        # add_data_to_hdf5 treats a size-1 dataset as empty and overwrites it.
        f.create_dataset('X', shape=(1, PADDED_LENGTH, 2),
                         maxshape=(None, PADDED_LENGTH, 2),
                         chunks=True, dtype='f')
        f.create_dataset('X_cwt', shape=(1, PADDED_LENGTH, 101),
                         maxshape=(None, PADDED_LENGTH, 101),
                         chunks=True, dtype='f')
        f.create_dataset('rr', shape=(1, 4),
                         maxshape=(None, 4),
                         chunks=True, dtype='f')
        f.create_dataset('y', shape=(1,),
                         maxshape=(None,),
                         chunks=True, dtype='i')
        for patient_id in patient_ids:
            # Randomly sub-sample the suffixes when possible; otherwise
            # fall back to using all of them.
            if 0 < sr_per_patient <= len(suffixes):
                selected_suffixes = np.random.choice(
                    suffixes, sr_per_patient, replace=False)
            else:
                selected_suffixes = suffixes
            for suffix in selected_suffixes:
                data_path_for_patient = str(data_path) + str(suffix)
                print(f"Patient {patient_id}: {data_path_for_patient}")
                try:
                    X, X_cwt, rr, y = load_patient_data(
                        Path(data_path_for_patient), patient_id,
                        padding=PADDED_LENGTH, allowed_labels=ALLOWED_LABELS)
                    add_data_to_hdf5(f, X, X_cwt, rr, y)
                except ValueError as err:
                    # Best-effort: skip variants that fail to load, but
                    # surface the reason instead of a bare marker.
                    print(f'ValueError; SKIPPING ({err})')
        # Fix: annotated return type was `str`, but this is an int count.
        return f['X'].shape[0]


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Step size of the sampling-rate sweep (0 disables the sweep).  Defaults
    # preserve the previously hard-coded behaviour (steps of 25, 5 rates per
    # patient) — the old code overwrote the parsed args unconditionally and
    # also misspelled the second flag as `--sr_per_pdatient`.
    parser.add_argument('--sweep_steps', default=25, type=int)
    # Number of sampling-rate variants drawn per patient; 0 uses all of them.
    parser.add_argument('--sr_per_patient', default=5, type=int)
    args = parser.parse_args()

    if args.sweep_steps > 0:
        # One suffix per swept rate, e.g. "-100", "-125", ..., "-500".
        suffixes = [f"-{sr}" for sr in range(100, 501, args.sweep_steps)]
    else:
        suffixes = ['']

    OUTPUT_FOLDER.mkdir(exist_ok=True, parents=True)
    with open(FOLDS_CONFIG, 'r') as config_file:
        fold_configs = json.load(config_file)
    for i, config_dict in enumerate(fold_configs):
        print(f"##### Processing fold {i}... #####")
        fold_folder = OUTPUT_FOLDER / f"fold_{i}"
        fold_folder.mkdir(exist_ok=True)
        training_ids = config_dict['training']['patient_ids']
        evaluation_ids = config_dict['evaluation']['patient_ids']

        # Train and validation use disjoint (interleaved) suffix subsets so
        # the two splits never share a sampling-rate variant.
        total_training_samples = save_data_to_hdf5(
            DATA_ROOT, training_ids, fold_folder / "train.hdf5",
            suffixes=suffixes[::2], sr_per_patient=args.sr_per_patient
        )
        total_evaluation_samples = save_data_to_hdf5(
            DATA_ROOT, evaluation_ids, fold_folder / "val.hdf5",
            suffixes=suffixes[1::2], sr_per_patient=args.sr_per_patient
        )
        # total_testing_samples = save_data_to_hdf5(
        #     DATA_ROOT, testing_ids, fold_folder / "test.hdf5",
        #     suffixes=suffixes[3::4], sr_per_patient=args.sr_per_patient
        # )
        print(f"Fold {i}: Loaded {total_training_samples:,} training samples, {total_evaluation_samples:,} evaluation samples.")