import os
import hydra
import json
import torch
import wandb

from omegaconf import DictConfig, OmegaConf
from importlib import import_module
from ecgcmr.utils.misc import fix_seed

from lightning.pytorch.loggers import WandbLogger


@hydra.main(config_path="ecgcmr/conf", config_name="base", version_base=None)
def main(cfg: DictConfig) -> None:
    """Hydra entry point: pick the training routine for the configured task,
    set up W&B logging, and launch training.

    Two logging modes exist:
      * ``cfg.training_mode.with_eval`` true — a raw ``wandb.Run`` from
        ``wandb.init`` is passed to the train function.
      * otherwise — a Lightning ``WandbLogger`` is created and passed instead.
    """
    print(f'Task: {cfg.training_mode.task}')
    
    fix_seed(seed=cfg.seed)

    save_dir = setup_save_directory(cfg)
    devices = cfg.n_gpus

    # Resolve (and lazily import) the task's train function plus any
    # task-specific extra keyword arguments (e.g. checkpoint paths).
    training_details = determine_train_function(cfg)
    train_function = training_details['function']
    project_name = training_details['project_name']
    extra_params = training_details.get('extra_params', {})

    group_name, experiment_name = generate_experiment_name(cfg=cfg)

    wandb_logger = None
    run = None

    if cfg.training_mode.with_eval:
        # Resolve interpolations now so wandb records concrete values.
        wandb_config = OmegaConf.to_container(cfg, resolve=True, throw_on_missing=True)
        
        # Give the wandb service extra time to start on slow/shared nodes.
        os.environ["WANDB__SERVICE_WAIT"] = "300"
        
        run = wandb.init(
            project=project_name,
            dir=save_dir,
            config=wandb_config,
            name=experiment_name,
            group=group_name
        )

    else:
        wandb_logger = setup_wandb_logger(project_name, save_dir, cfg, group_name, experiment_name)

    torch.set_float32_matmul_precision('medium')

    try:
        # Exactly one of `run` / `wandb_logger` is non-None at this point.
        train_function(cfg=cfg,
                       wandb_logger=run if cfg.training_mode.with_eval else wandb_logger,
                       save_dir=save_dir,
                       devices=devices,
                       **extra_params)
    finally:
        # Always close the wandb run, even if training raised.
        if cfg.training_mode.with_eval and run is not None:
            run.finish()
        elif wandb_logger is not None and wandb_logger.experiment is not None:
            wandb_logger.experiment.finish()

def generate_experiment_name(cfg: DictConfig) -> "tuple[str, str]":
    """Build the wandb ``(group_name, experiment_name)`` pair.

    The launch timestamp is appended to the experiment name so repeated
    runs of the same configuration remain distinguishable.

    Args:
        cfg: Resolved hydra configuration; must provide ``launch_time``.

    Returns:
        Tuple of ``(group_name, experiment_name)``.
        (Bug fix: the annotation previously claimed ``-> str`` even though
        two values are returned.)
    """
    launch_time = cfg.launch_time.replace("/", "_")
    group_name, experiment_name = build_group_name_and_experiment_name(cfg)
    full_experiment_name = f'{experiment_name}/{launch_time}'
    return group_name, full_experiment_name

def setup_save_directory(cfg):
    """Resolve (and, for finetuning, create) the run's output directory."""
    task = cfg.training_mode.task
    if 'finetune' not in task:
        # Pretraining: reuse the directory hydra already created for this run.
        return hydra.core.hydra_config.HydraConfig.get().runtime.output_dir
    # Finetuning: write evaluation artifacts next to the source checkpoint.
    eval_dir = os.path.join(cfg.training_mode.checkpoint_path, "eval")
    os.makedirs(eval_dir, exist_ok=True)
    return eval_dir

def setup_wandb_logger(project_name, save_dir, cfg, group_name, experiment_name):
    """Create a Lightning ``WandbLogger`` for non-eval training runs.

    Runs offline (and skips model upload) when ``cfg.debug`` is set.
    """
    resolved_cfg = OmegaConf.to_container(cfg, resolve=True, throw_on_missing=True)

    wandb_logger = WandbLogger(
        project=project_name,
        save_dir=save_dir,
        offline=cfg.debug,
        config=resolved_cfg,
        log_model=not cfg.debug,
        group=group_name,
        name=f'{experiment_name}',
    )
    # Touch .experiment so the underlying wandb run is initialised eagerly.
    _ = wandb_logger.experiment
    return wandb_logger

def build_group_name_and_experiment_name(cfg):
    """Derive the wandb group and experiment names from the configuration.

    Args:
        cfg: Hydra config exposing ``models.type``, ``models.backbone`` and
            ``training_mode.task``.

    Returns:
        Tuple of ``(group_name, experiment_name)``.

    Raises:
        ValueError: for an unrecognised model type / ECG task combination.
            (Bug fix: previously these cases fell through and crashed with
            ``UnboundLocalError`` on ``experiment_name``.)
    """
    if cfg.models.type == 'multimodal':
        group_name = cfg.models.type
        experiment_name = append_multimodal_params(f'{cfg.models.type}', cfg)
    else:
        group_name = f'{cfg.training_mode.task}_{cfg.models.backbone}'

        if cfg.models.type == 'image_masked':
            experiment_name = append_image_masked_params(group_name, cfg)
        elif cfg.models.type == 'image_supervised':
            experiment_name = append_image_supervised_params(group_name, cfg)
        elif cfg.models.type == 'image_contrastive':
            experiment_name = append_image_contrastive_params(group_name, cfg)
        elif cfg.models.type == 'ecg':
            if cfg.training_mode.task == 'ecg_finetune_masked':
                experiment_name = append_ecg_params_finetune(group_name, cfg)
            elif cfg.training_mode.task == 'ecg_pretrain_masked':
                experiment_name = append_ecg_params(group_name, cfg)
            elif cfg.training_mode.task == 'ecg_pretrain_supervised':
                experiment_name = append_ecg_params_supervised(group_name, cfg)
            elif cfg.training_mode.task == 'ecg_pretrain_contrastive':
                experiment_name = f'{group_name}_contrastive'
            else:
                raise ValueError(f"Unsupported ECG task: {cfg.training_mode.task}")
        else:
            raise ValueError(f"Unsupported model type: {cfg.models.type}")

    return group_name, experiment_name

def load_config(checkpoint_path):
    """Load the ``config.json`` stored alongside a checkpoint directory."""
    cfg_file = os.path.join(checkpoint_path, "config.json")
    with open(cfg_file) as fh:
        return json.load(fh)

def append_image_contrastive_params(group_name, cfg):
    """Return the experiment name for contrastive image pretraining.

    Args:
        group_name: Prefix (task + backbone) the suffix is appended to.
        cfg: Hydra config; the backbone-specific section is read.

    Raises:
        ValueError: for an unknown backbone. (Bug fix: previously an
            unknown backbone crashed with ``UnboundLocalError`` on
            ``model_conf``.)
    """
    backbone = cfg.models.backbone
    if backbone == 'resnet':
        model_conf = f'_{cfg.models.resnet.model_depth}'
    elif backbone == 'vit':
        model_conf = f'_patch_{cfg.models.vit.tubelet_size}x{cfg.models.vit.patch_size}x{cfg.models.vit.patch_size}_hidden_{cfg.models.vit.hidden_size}_layers_{cfg.models.vit.num_hidden_layers}_heads_{cfg.models.vit.num_attention_heads}'
    elif backbone == 'byol_vit':
        # NOTE(review): this variant lacks the leading '_' separator; kept
        # unchanged so existing run names stay comparable.
        model_conf = f'BYOL_patch_{cfg.models.byol_vit.tubelet_size}x{cfg.models.byol_vit.patch_size}x{cfg.models.byol_vit.patch_size}_hidden_{cfg.models.byol_vit.hidden_size}_layers_{cfg.models.byol_vit.num_hidden_layers}_heads_{cfg.models.byol_vit.num_attention_heads}'
    else:
        raise ValueError(f"Unknown backbone for contrastive imaging: {backbone}")
    return f'{group_name}{model_conf}'

def append_multimodal_params(group_name, cfg):
    """Compose the multimodal experiment name from the key hyperparameters.

    Encodes learning rates, loss configuration, schedule, and the
    downstream linear-probing setup; an optional free-form suffix from
    ``cfg.experiment_name_addition`` is appended last.
    """
    params = cfg.models.params
    loss_cfg = cfg.training_mode.loss

    parts = [
        f'{group_name}_lr_{params.lr}_image_{params.lr_image_encoder}_ecg_{params.lr_ecg_encoder}',
        f'_loss_{loss_cfg.type}',
        f'_glob_temp_{loss_cfg.global_loss.temperature}',
    ]

    # Local-loss hyperparameters only matter when both losses are active.
    if loss_cfg.type == 'both':
        parts.append(f'_local_w_{loss_cfg.weight_local}')
        parts.append(f'_temp_{loss_cfg.local_loss.temperature}')
        parts.append(f'_std_{loss_cfg.local_loss.std}')

    warmup = params.scheduler.warmup_cosine.warmup_steps
    parts.append(f'_total_epochs_{cfg.max_epochs}_warmup_{warmup}')
    parts.append(f'_linear_probing_lr_{cfg.downstream_task.params.lr}_use_mlp_{cfg.downstream_task.use_mlp}')

    if cfg.experiment_name_addition:
        parts.append(f'_{cfg.experiment_name_addition}')

    return ''.join(parts)

def append_image_masked_params(group_name, cfg):
    """Return the experiment name for masked image pretraining runs."""
    model_cfg = cfg.models[cfg.models.model_size]
    time_cfg = cfg.augmentations.imaging.time_sample
    # When temporal subsampling is enabled the effective frame count comes
    # from the augmentation; otherwise it comes from the dataset itself.
    timesteps = time_cfg.result_n_frames if time_cfg.enable else cfg.dataset.time_steps
    return (
        f'{group_name}_{model_cfg.tubelet_size}x{model_cfg.patch_size}x{model_cfg.patch_size}'
        f'_mask_{model_cfg.mask_ratio}_time_{timesteps}_hidden_{model_cfg.hidden_size}'
        f'_layers_{model_cfg.num_hidden_layers}_heads_{model_cfg.num_attention_heads}'
        f'_augmentations_{cfg.augmentations.name}'
    )

def append_image_supervised_params(group_name, cfg):
    """Return the experiment name for supervised image training.

    Args:
        group_name: Prefix (task + backbone) the suffix is appended to.
        cfg: Hydra config; the backbone-specific section is read.

    Raises:
        ValueError: for an unknown backbone. (Bug fix: previously an
            unknown backbone crashed with ``UnboundLocalError`` on
            ``name``.)
    """
    backbone = cfg.models.backbone
    if backbone == 'resnet':
        name = f'{group_name}_{cfg.models.resnet.model_depth}'
    elif backbone == 'vit':
        model_cfg = cfg.models[cfg.models.model_size]
        name = f'{group_name}_{model_cfg.tubelet_size}x{model_cfg.patch_size}x{model_cfg.patch_size}'
    else:
        raise ValueError(f"Unknown backbone for supervised imaging: {backbone}")
    return name

def append_ecg_params_finetune(group_name, cfg):
    """Return the experiment name for finetuning a masked-pretrained ECG encoder.

    Architecture details are read from the checkpoint's stored config
    (via ``load_config``) rather than the live hydra config, so the name
    reflects the weights actually being finetuned.
    """
    pretrained = load_config(cfg.training_mode.checkpoint_path)
    p0, p1 = pretrained['patch_size'][0], pretrained['patch_size'][1]
    return (
        f"{group_name}_{p0}x{p1}"
        f"_mask_{pretrained['mask_ratio']}"
        f"_hidden_{pretrained['hidden_size']}"
        f"_layers_{pretrained['num_hidden_layers']}"
        f"_heads_{pretrained['num_attention_heads']}_finetune"
    )

def append_ecg_params(group_name, cfg):
    """Return the experiment name for masked ECG pretraining runs."""
    model_cfg = cfg.models[cfg.models.model_size]
    crop_steps = cfg.augmentations.ecg.random_crop.ecg_time_steps
    return (
        f'{group_name}_{model_cfg.patch_size[0]}x{model_cfg.patch_size[1]}'
        f'_mask_{model_cfg.mask_ratio}_time_{crop_steps}'
        f'_hidden_{model_cfg.hidden_size}_layers_{model_cfg.num_hidden_layers}'
        f'_heads_{model_cfg.num_attention_heads}'
        f'_augmentations_{cfg.augmentations.name}_masked_pretrain'
    )

def append_ecg_params_supervised(group_name, cfg):
    """Return the experiment name for supervised ECG pretraining runs."""
    model_cfg = cfg.models[cfg.models.model_size]
    crop_steps = cfg.augmentations.ecg.random_crop.ecg_time_steps
    return (
        f'{group_name}_{model_cfg.patch_size[0]}x{model_cfg.patch_size[1]}'
        f'_time_{crop_steps}'
        f'_hidden_{model_cfg.hidden_size}_layers_{model_cfg.num_hidden_layers}'
        f'_heads_{model_cfg.num_attention_heads}'
        f'_augmentations_{cfg.augmentations.name}_supervised_pretrain'
    )

def determine_train_function(cfg):
    """Resolve the training entry point for the configured task.

    Looks up the (module, function, project) triple registered for
    ``cfg.training_mode.task``, imports the module lazily, and collects
    any task-specific keyword arguments (checkpoint paths for finetuning
    and multimodal training).

    Returns:
        Dict with keys ``function``, ``project_name`` and ``extra_params``.

    Raises:
        ValueError: if the task is not recognised.
    """
    task = cfg.training_mode.task
    with_eval = cfg.training_mode.with_eval

    # task -> (module path, function name, wandb project name); the
    # "_pytorch"/"_with_eval" variants are selected when with_eval is set.
    registry = {
        'img_pretrain_contrastive': (
            'ecgcmr.imaging.train_imaging_contrastive_pytorch' if with_eval else 'ecgcmr.imaging.train_imaging_contrastive',
            'train_imaging_contrastive_with_eval' if with_eval else 'train_imaging_contrastive',
            'Imaging Pre-train',
        ),
        'img_pretrain_masked': (
            'ecgcmr.imaging.train_imaging_masked_pytorch' if with_eval else 'ecgcmr.imaging.train_imaging_masked',
            'train_imaging_masked_with_eval' if with_eval else 'train_imaging_masked',
            'Imaging Pre-train',
        ),
        'img_finetune_masked': (
            'ecgcmr.imaging.train_imaging_masked',
            'fine_tune_masked',
            'Imaging Fine-Tune',
        ),
        'img_pretrain_supervised': (
            'ecgcmr.imaging.train_imaging_supervised',
            'train_imaging_supervised',
            'Image Supervised Training',
        ),
        'ecg_pretrain_masked': (
            'ecgcmr.signal.train_ecg_masked_pytorch' if with_eval else 'ecgcmr.signal.train_ecg_masked',
            'train_ecg_masked_with_eval' if with_eval else 'train_ecg_masked',
            'ECG Pre-train',
        ),
        'ecg_pretrain_contrastive': (
            'ecgcmr.signal.train_ecg_contrastive',
            'train_ecg_contrastive',
            'ECG Pre-train',
        ),
        'ecg_pretrain_supervised': (
            'ecgcmr.signal.train_ecg_supervised',
            'train_ecg_supervised',
            'ECG Supervised Training',
        ),
        'ecg_finetune_masked': (
            'ecgcmr.signal.train_ecg_masked',
            'finetune_ecg_masked',
            'ECG Fine-Tune',
        ),
        'multimodal_pretrain_contrastive': (
            'ecgcmr.multimodal.train_multimodal_pytorch' if with_eval else 'ecgcmr.multimodal.train_multimodal',
            'train_multimodal_contrastive_with_eval' if with_eval else 'train_multimodal_contrastive',
            'ML FINETUNING',
        ),
    }

    if task not in registry:
        raise ValueError(f"Unknown training task specified: {task}")

    module_path, function_name, project_name = registry[task]
    train_function = getattr(import_module(module_path), function_name)

    extra_params = {}
    if task == 'multimodal_pretrain_contrastive':
        extra_params['image_checkpoint_path'] = cfg.training_mode.encoders.image.checkpoint_path
        extra_params['ecg_checkpoint_path'] = cfg.training_mode.encoders.ecg.checkpoint_path
    if 'finetune' in task:
        extra_params['checkpoint_path'] = cfg.training_mode.checkpoint_path

    return {
        'function': train_function,
        'project_name': project_name,
        'extra_params': extra_params,
    }

if __name__ == "__main__":
    # Hydra entry point: parses CLI overrides and invokes `main`.
    main()