from pathlib import Path
import torch
import json
import numpy as np
from data.hdf5_dataset import HDF5Dataset
from models.transformer import ECGTransformer
from sklearn.metrics import balanced_accuracy_score


# Root directory containing one sub-directory per training experiment;
# each experiment is expected to hold a 'fold_0' directory with the
# trained model, config.json, and (after this script runs) y_true/y_pred.
LOGS_PATH = Path('/home/timodw/IDLab/Digihealth-Asia/cardiovascular_monitoring/ugent/heartbeat_classification/logs')
# Pre-processed HDF5 snapshots for fold 0 of each sampling-rate variant.
# Naming: '<train-rate>hz_<...>' / 'train_Xhz_val_Yhz' mirrors the
# experiment-name substrings matched in the evaluation loop below.
DATA_PATH = Path('/home/timodw/IDLab/Digihealth-Asia/cardiovascular_monitoring/ugent/heartbeat_classification/processed_data/training_snapshots/incart_stratified_standard/fold_0')
DATA_25HZ_5SR_PATH = Path('/home/timodw/IDLab/Digihealth-Asia/cardiovascular_monitoring/ugent/heartbeat_classification/processed_data/training_snapshots/incart_stratified_standard_25hz_5sr/fold_0')
DATA_100HZ_5SR_PATH = Path('/home/timodw/IDLab/Digihealth-Asia/cardiovascular_monitoring/ugent/heartbeat_classification/processed_data/training_snapshots/incart_stratified_standard_100hz_5sr/fold_0')
DATA_100HZ_500HZ_PATH = Path('/home/timodw/IDLab/Digihealth-Asia/cardiovascular_monitoring/ugent/heartbeat_classification/processed_data/training_snapshots/incart_stratified_standard_train_100hz_val_500hz/fold_0')
DATA_500HZ_100HZ_PATH = Path('/home/timodw/IDLab/Digihealth-Asia/cardiovascular_monitoring/ugent/heartbeat_classification/processed_data/training_snapshots/incart_stratified_standard_train_500hz_val_100hz/fold_0')
DATA_100_125HZ_500HZ_PATH = Path('/home/timodw/IDLab/Digihealth-Asia/cardiovascular_monitoring/ugent/heartbeat_classification/processed_data/training_snapshots/incart_stratified_standard_train_100-125hz_val_500hz/fold_0')
# Number of samples loaded per inference batch (keeps memory bounded).
CHUNK_SIZE = 1_000
# Run on GPU when available, otherwise fall back to CPU.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'


# Validation datasets, one per sampling-rate experiment family.
# NOTE(review): the 25hz_5sr variant loads 'test.hdf5' while the others
# load 'val.hdf5' — confirm this asymmetry is intentional.
validation_25hz_5sr_dataset = HDF5Dataset(DATA_25HZ_5SR_PATH / 'test.hdf5')
validation_100hz_500hz_dataset = HDF5Dataset(DATA_100HZ_500HZ_PATH / 'val.hdf5')
validation_500hz_100hz_dataset = HDF5Dataset(DATA_500HZ_100HZ_PATH / 'val.hdf5')


def _dataset_for(name):
    """Return the validation dataset for an experiment name, or None.

    Patterns are checked in order; the first substring hit wins, mirroring
    the original if/elif chain. Returning None lets the caller skip an
    unmatched experiment explicitly instead of silently reusing the
    dataset (and leftover chunk tensors) from a previous loop iteration.
    """
    # NOTE(review): the '100hz_5sr' and default datasets from the original
    # (commented-out) chain are not instantiated in this file; re-add
    # entries here once those datasets are loaded.
    patterns = [
        ('25hz_5sr', validation_25hz_5sr_dataset),
        ('train_100hz_val_500hz', validation_100hz_500hz_dataset),
        ('train_500hz_val_100hz', validation_500hz_100hz_dataset),
        ('train_100-125hz_val_500hz', validation_100hz_500hz_dataset),
    ]
    for substring, dataset in patterns:
        if substring in name:
            return dataset
    return None


experiments = [f for f in LOGS_PATH.iterdir() if f.is_dir()]
for experiment in experiments:
    print(f"Running experiment {experiment}...")
    # Skip experiments that were already evaluated (results cached on disk).
    if (experiment / 'fold_0/y_true.npy').exists():
        print(f"Experiment {experiment} already evaluated.")
        continue
    dataset = _dataset_for(experiment.name)
    if dataset is None:
        # Bug fix: previously an unmatched experiment fell through with
        # `dataset_length` / chunk tensors left over from the previous
        # iteration, silently evaluating the wrong data.
        print(f"No validation dataset configured for {experiment}; skipping.")
        continue
    try:
        dataset_length = len(dataset)
        # Use a context manager so the config file handle is closed
        # (json.load(open(...)) leaked it).
        with open(experiment / 'fold_0/config.json') as config_file:
            config = json.load(config_file)
        model_file = 'cnn.pt' if config['model'] == 'cnn' else 'transformer.pt'
        # NOTE(review): weights_only=False unpickles arbitrary objects —
        # only safe because these checkpoints are produced locally.
        model = torch.load(experiment / f'fold_0/{model_file}', weights_only=False).to(DEVICE)
        model.positional_encoding_scheme = config['positional_encoding_scheme']
        model.eval()

        y_pred = []
        y_true = []
        for chunk_start in range(0, dataset_length, CHUNK_SIZE):
            print(f"Loading chunk [{chunk_start}:{min(chunk_start + CHUNK_SIZE, dataset_length)}]...")
            X_ecg, X_cwt, rr, y = dataset[chunk_start:chunk_start + CHUNK_SIZE]
            # Per-sample min-max normalisation of the valid prefix of each
            # sample. The last feature column appears to act as a padding
            # marker: a negative value flags the first padded timestep —
            # TODO confirm against the dataset builder.
            for i in range(len(y)):
                ecg_sample = X_ecg[i]
                cwt_sample = X_cwt[i]
                padded = (ecg_sample[:, -1] < 0.).nonzero()
                # First padded timestep, or full length when nothing is padded.
                index = int(padded[0]) if len(padded) > 0 else len(ecg_sample)
                ecg_min, ecg_max = ecg_sample[:index, :-1].min(), ecg_sample[:index, :-1].max()
                # NOTE(review): a constant sample (min == max) divides by
                # zero and yields NaN/inf — unchanged from the original.
                X_ecg[i, :index, :-1] -= ecg_min
                X_ecg[i, :index, :-1] /= ecg_max - ecg_min
                cwt_min, cwt_max = cwt_sample[:index, :-1].min(), cwt_sample[:index, :-1].max()
                X_cwt[i, :index, :-1] -= cwt_min
                X_cwt[i, :index, :-1] /= cwt_max - cwt_min
            y_true.append(y.numpy())

            with torch.no_grad():
                # Move only the modalities this model consumes to the
                # device; unused modalities are passed as None.
                X_cwt = X_cwt.to(DEVICE) if 'cwt' in config['input_type'] else None
                X_ecg = X_ecg.to(DEVICE) if 'ecg' in config['input_type'] else None
                rr = rr.to(DEVICE) if 'rr' in config['input_type'] else None
                y_pred_chunk = torch.softmax(model(X_ecg, X_cwt, rr), dim=-1).argmax(dim=-1).detach().cpu().numpy()
                y_pred.append(y_pred_chunk)
        y_true = np.concatenate(y_true, axis=0)
        y_pred = np.concatenate(y_pred, axis=0)
        np.save(experiment / 'fold_0/y_true.npy', y_true)
        np.save(experiment / 'fold_0/y_pred.npy', y_pred)

        print(f"Balanced accuracy: {balanced_accuracy_score(y_true, y_pred):.4f}")
    except Exception as e:
        # Best-effort batch evaluation: log the failure and move on so one
        # broken experiment does not abort the whole sweep.
        print(f"Error in experiment {experiment}: {e}")
        continue