import torch
import torchmetrics

from tqdm import tqdm
from omegaconf import DictConfig, OmegaConf

from ecgcmr.multimodal.multimodal_dataset.MultiModalWithEvalPyTorch import MultiModalWithEvalDataModule
from ecgcmr.multimodal.multimodal_models.MultiModalCLIPPyTorch_LinearProbe import MultiModalCLIPWithEval

from ecgcmr.utils.misc import move_batch_to_device, log_metrics
from ecgcmr.utils.misc import plot_epoch_heatmap, plot_similarity_vs_time_difference


def log_gradient_norms(model, wandb_logger):
    """Compute and log the L2 norm of every trainable parameter's gradient.

    Iterates over ``model.named_parameters()``, records the 2-norm of each
    gradient under the ``grad_norm/<name>`` key, prints a warning for
    vanishing gradients (norm below ``1e-6``), and forwards all norms to
    ``log_metrics`` when a logger is provided.

    Args:
        model: ``torch.nn.Module`` whose parameter gradients are inspected.
            Parameters with ``grad is None`` or ``requires_grad == False``
            are skipped.
        wandb_logger: Logger handle passed through to ``log_metrics``;
            logging is skipped entirely when ``None``.

    Returns:
        dict: Mapping ``'grad_norm/<param_name>' -> float``. Previously the
        dict was discarded; returning it is backward-compatible (callers
        ignoring the return value are unaffected) and lets callers inspect
        the norms directly.
    """
    gradient_norms = {}
    for name, param in model.named_parameters():
        if param.grad is not None and param.requires_grad:
            grad_norm = param.grad.norm(2).item()
            gradient_norms[f'grad_norm/{name}'] = grad_norm
            if grad_norm < 1e-6:
                print(f'Gradient norm very small for {name}: {grad_norm}')
    if wandb_logger is not None:
        log_metrics(wandb_logger, gradient_norms)
    return gradient_norms


def train_multimodal_contrastive_with_eval(
    cfg: DictConfig,
    save_dir: str,
    devices: int = 1,
    wandb_logger = None,
    image_checkpoint_path: str = None,
    ecg_checkpoint_path: str = None
):
    """Run the full contrastive pre-training + linear-probe evaluation loop.

    Each epoch consists of up to four phases:
      1. Train the contrastive ("main") objective on paired ECG/image batches.
      2. Optionally train the downstream linear probe (only from epoch
         ``cfg.start_downstream_epoch`` on, every ``model.train_every_n_epochs``
         epochs).
      3. Validate the contrastive objective and checkpoint on best val loss.
      4. Optionally validate the downstream probe, tracking best mean R2 and
         best mean MAE, checkpointing on each.

    Args:
        cfg: Experiment configuration; reads ``max_epochs`` and
            ``log_every_n_steps``, and has ``start_downstream_epoch`` injected.
        save_dir: Directory handed to the model for checkpoints/plots.
        devices: Unused in this implementation (single-device loop).
        wandb_logger: Optional logger; all metric logging goes through
            ``log_metrics`` and is a no-op inside it when ``None`` — TODO
            confirm ``log_metrics`` tolerates ``None``.
        image_checkpoint_path: Optional pretrained image-encoder checkpoint.
        ecg_checkpoint_path: Optional pretrained ECG-encoder checkpoint.
    """

    # Build data; 'fit' setup provides both 'main' (contrastive) and
    # 'downstream' (linear probe) train/val dataloaders keyed by name.
    datamodule = MultiModalWithEvalDataModule(cfg=cfg)
    datamodule.setup('fit')

    train_dataloaders = datamodule.train_dataloader()
    val_dataloaders = datamodule.val_dataloader()

    # Step counts are passed to configure_optimizers (e.g. for scheduler length).
    num_main_steps = len(train_dataloaders['main'])
    num_downstream_steps = len(train_dataloaders['downstream'])

    # NOTE(review): the downstream start epoch is hard-coded here rather than
    # read from the config file — confirm this override is intentional.
    OmegaConf.set_struct(cfg, False)  # Temporarily disable struct mode
    cfg.start_downstream_epoch = 5
    OmegaConf.set_struct(cfg, True)  # Re-enable struct mode

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = MultiModalCLIPWithEval(cfg=cfg, save_dir=save_dir,
                                   pretrained_model_name_or_path_ecg=ecg_checkpoint_path,
                                   pretrained_model_name_or_path_image=image_checkpoint_path)
    model = model.to(device)

    # Model owns optimizer construction: one optimizer (+ scheduler) for the
    # main contrastive phase, one for the downstream last layer. The second
    # scheduler is discarded; the last-layer optimizer may later be replaced
    # by prepare_for_downstream_training (see below).
    optimizers, schedulers = model.configure_optimizers(num_main_steps, num_downstream_steps)
    optimizer_main, optimizer_last = optimizers
    main_scheduler, _ = schedulers

    # NOTE(review): forward passes below autocast to bfloat16, for which loss
    # scaling is normally unnecessary (GradScaler is aimed at fp16); also
    # torch.cuda.amp.GradScaler is deprecated in newer torch in favor of
    # torch.amp.GradScaler('cuda') — confirm before changing.
    scaler = torch.cuda.amp.GradScaler()

    # Epoch-level running means for each loss component (train/val).
    train_contrastive_total_loss_metric = torchmetrics.MeanMetric()
    val_contrastive_total_loss_metric = torchmetrics.MeanMetric()

    train_downstream_loss_metric = torchmetrics.MeanMetric()
    val_downstream_loss_metric = torchmetrics.MeanMetric()

    train_contrastive_global_loss_metric = torchmetrics.MeanMetric()
    train_contrastive_local_loss_metric = torchmetrics.MeanMetric()
    val_contrastive_global_loss_metric = torchmetrics.MeanMetric()
    val_contrastive_local_loss_metric = torchmetrics.MeanMetric()

    num_epochs = cfg.max_epochs
    best_val_loss = float('inf')

    # Best-so-far trackers for downstream selection criteria.
    best_mean_R2 = -float('inf')
    best_mean_MAE = float('inf')

    log_every_n_steps = cfg.log_every_n_steps

    for epoch in range(num_epochs):
        print(f"Epoch {epoch + 1}/{num_epochs}")

        train_contrastive_total_loss_metric.reset()
        train_contrastive_global_loss_metric.reset()
        train_contrastive_local_loss_metric.reset()

        # global/local losses may be None (model config dependent); only
        # compute/log their epoch means if at least one batch produced them.
        encountered_global_loss = False
        encountered_local_loss = False

        ### ------------ Train Main -----------------
        # Model hook: sets train/eval state and which params are trainable
        # for the contrastive phase — exact behavior defined in the model.
        model.prepare_for_main_training()

        # Per-batch similarity matrices accumulated for epoch-level heatmaps.
        avg_sample_similarities_accumulator = []
        avg_time_similarities_accumulator_for_projected = []

        with tqdm(total=len(train_dataloaders['main']), desc="Training (Main Task)", leave=False) as pbar_main:
            for batch_idx, batch in enumerate(train_dataloaders['main']):
                # Augmented ECG/image pair; rpeaks/tpfs are optional
                # physiological timing inputs — TODO confirm semantics.
                ecg_aug, image_aug = batch.get('ecg_aug'), batch.get('image_aug')
                rpeaks = batch.get("rpeaks", None)
                tpfs = batch.get("tpfs", None)

                ecg_aug = ecg_aug.to(device, non_blocking=True)
                image_aug = image_aug.to(device, non_blocking=True)

                if rpeaks is not None:
                    rpeaks = rpeaks.to(device, non_blocking=True)
                if tpfs is not None:
                    tpfs = tpfs.to(device, non_blocking=True)

                optimizer_main.zero_grad()

                # NOTE(review): device_type is hard-coded to "cuda" even
                # though `device` may have fallen back to CPU — confirm
                # CPU execution is not a supported path here.
                with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                    losses_dict, similarities_dict = model.forward_contrastive(ecg_aug=ecg_aug, image_aug=image_aug,
                                                                               rpeaks=rpeaks, tpfs=tpfs)

                total_loss = losses_dict["total_loss"]
                global_loss = losses_dict["global_loss"]
                local_loss = losses_dict["local_loss"]

                scaler.scale(total_loss).backward()

                # Unscale gradients before inspecting
                scaler.unscale_(optimizer_main)

                if batch_idx % log_every_n_steps == 0:
                    log_gradient_norms(model, wandb_logger)

                scaler.step(optimizer_main)
                scaler.update()

                total_loss_value = total_loss.detach().item()
                train_contrastive_total_loss_metric.update(total_loss_value)

                if global_loss is not None:
                    encountered_global_loss = True
                    train_contrastive_global_loss_metric.update(global_loss.detach().item())
                if local_loss is not None:
                    encountered_local_loss = True
                    train_contrastive_local_loss_metric.update(local_loss.detach().item())

                pbar_main.set_postfix({"contrastive_loss": total_loss_value})
                pbar_main.update(1)

                avg_sample_similarities_accumulator.append(similarities_dict["batch_sample_similarity"])

                if "batch_time_similarity_for_projected" in similarities_dict:
                    avg_time_similarities_accumulator_for_projected.append(similarities_dict["batch_time_similarity_for_projected"])

                # Scheduler is a Lightning-style dict: {'scheduler': ..., 'interval': ...};
                # step-interval schedulers advance once per optimizer step.
                if main_scheduler['interval'] == 'step':
                    main_scheduler['scheduler'].step()

                if batch_idx % log_every_n_steps == 0:
                    log_metrics(wandb_logger, {"train/contrastive_loss_step": total_loss_value})

                    if global_loss is not None:
                        log_metrics(wandb_logger, {"train/contrastive_global_loss_step": global_loss.detach().item()})
                    if local_loss is not None:
                        log_metrics(wandb_logger, {"train/contrastive_local_loss_step": local_loss.detach().item()})

                    # Assumes each param group carries a 'name' key — set by
                    # the model's configure_optimizers.
                    for param_group in optimizer_main.param_groups:
                        log_metrics(wandb_logger, {f"learning_rate/main_{param_group['name']}" : param_group['lr']})

        # Epoch-level train logging for the contrastive losses.
        mean_total_contrastive_loss = train_contrastive_total_loss_metric.compute()
        log_metrics(wandb_logger, {"train/contrastive_total_loss_epoch": mean_total_contrastive_loss})

        if encountered_global_loss:
            mean_global_contrastive_loss = train_contrastive_global_loss_metric.compute()
            log_metrics(wandb_logger, {"train/contrastive_global_loss_epoch": mean_global_contrastive_loss})

        if encountered_local_loss:
            mean_local_contrastive_loss = train_contrastive_local_loss_metric.compute()
            log_metrics(wandb_logger, {"train/contrastive_local_loss_epoch": mean_local_contrastive_loss})

        # Mean similarity matrices over the epoch (assumes every batch
        # similarity tensor has the same shape — TODO confirm for the last,
        # possibly smaller, batch).
        avg_sample_similarities_epoch = torch.stack(avg_sample_similarities_accumulator).mean(dim=0)

        if len(avg_time_similarities_accumulator_for_projected) > 0:
            avg_time_similarities_accumulator_for_projected_epoch = torch.stack(avg_time_similarities_accumulator_for_projected).mean(dim=0)
            plot_epoch_heatmap(avg_time_similarities_accumulator_for_projected_epoch, stage='train', heatmap_type='time_step_proj_tokens')
            plot_similarity_vs_time_difference(avg_time_similarities_accumulator_for_projected_epoch, stage='train', type='proj')
            avg_time_similarities_accumulator_for_projected.clear()

        # Plot the heatmaps once per epoch
        plot_epoch_heatmap(avg_sample_similarities_epoch, stage='train', heatmap_type='sample')
        avg_sample_similarities_accumulator.clear()

        ### ------------ Train Downstream -----------------
        # Linear-probe training runs only after the warm-up epochs and at the
        # cadence defined by the model (train_every_n_epochs).
        if epoch >= cfg.start_downstream_epoch and epoch % model.train_every_n_epochs == 0:

            # May return a freshly-built optimizer for the probe head; when
            # it does, it replaces the one from configure_optimizers.
            last_layer_optimizer = model.prepare_for_downstream_training(device=device, num_downstream_steps=num_downstream_steps)

            train_downstream_loss_metric.reset()

            if last_layer_optimizer is not None:
                optimizer_last = last_layer_optimizer

            with tqdm(total=len(train_dataloaders['downstream']), desc="Training (Downstream Task)", leave=False) as pbar_downstream:
                for batch_idx, batch in enumerate(train_dataloaders['downstream']):
                    batch = move_batch_to_device(batch, device)

                    optimizer_last.zero_grad()

                    with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                        downstream_dict = model.forward_linear_probe(batch)

                    downstream_loss = downstream_dict["downstream_loss"]
                    features = downstream_dict["features"]
                    preds = downstream_dict["logits"]
                    labels = downstream_dict["labels"]

                    scaler.scale(downstream_loss).backward()
                    scaler.step(optimizer_last)
                    scaler.update()

                    downstream_loss_value = downstream_loss.detach().item()
                    train_downstream_loss_metric.update(downstream_loss_value)

                    if batch_idx % log_every_n_steps == 0:
                        log_metrics(wandb_logger, {"train/downstream_loss_step": downstream_loss_value})

                    pbar_downstream.set_postfix({"downstream_loss": downstream_loss_value})
                    pbar_downstream.update(1)

                    # Accumulate features/labels for the model's sklearn-style
                    # regression evaluator (computed at validation time).
                    model.regression.update(
                        feats=features.detach().cpu().numpy(),
                        labels=labels.detach().cpu().numpy(),
                        stage='train'
                    )

                    if batch_idx % log_every_n_steps == 0:
                        for param_group in optimizer_last.param_groups:
                            log_metrics(wandb_logger, {f"learning_rate/last_layer_{param_group['name']}" : param_group['lr']})

            mean_downstream_loss = train_downstream_loss_metric.compute()
            log_metrics(wandb_logger, {"train/downstream_loss_epoch": mean_downstream_loss})

        ### ------------ Eval Main -----------------
        # Fresh accumulators for validation heatmaps (train ones were cleared).
        avg_sample_similarities_accumulator = []
        avg_time_similarities_accumulator_for_projected = []

        with torch.no_grad():

            model.prepare_for_main_validation()
            val_contrastive_total_loss_metric.reset()
            val_contrastive_global_loss_metric.reset()
            val_contrastive_local_loss_metric.reset()

            encountered_val_global_loss = False
            encountered_val_local_loss = False

            with tqdm(total=len(val_dataloaders['main']), desc="Validation (Main Task)", leave=False) as pbar_val_main:
                for batch_idx, batch in enumerate(val_dataloaders['main']):
                    # Same batch unpacking/device transfer as the train loop.
                    ecg_aug, image_aug = batch.get('ecg_aug'), batch.get('image_aug')
                    rpeaks = batch.get("rpeaks", None)
                    tpfs = batch.get("tpfs", None)

                    ecg_aug = ecg_aug.to(device, non_blocking=True)
                    image_aug = image_aug.to(device, non_blocking=True)

                    if rpeaks is not None:
                        rpeaks = rpeaks.to(device, non_blocking=True)
                    if tpfs is not None:
                        tpfs = tpfs.to(device, non_blocking=True)

                    with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                        losses_dict, similarities_dict = model.forward_contrastive(ecg_aug=ecg_aug, image_aug=image_aug,
                                                                                   rpeaks=rpeaks, tpfs=tpfs)

                    total_loss = losses_dict["total_loss"]
                    global_loss = losses_dict["global_loss"]
                    local_loss = losses_dict["local_loss"]

                    total_loss_value = total_loss.item()
                    val_contrastive_total_loss_metric.update(total_loss_value)

                    if global_loss is not None:
                        encountered_val_global_loss = True
                        val_contrastive_global_loss_metric.update(global_loss.item())
                    if local_loss is not None:
                        encountered_val_local_loss = True
                        val_contrastive_local_loss_metric.update(local_loss.item())

                    if batch_idx % log_every_n_steps == 0:
                        log_metrics(wandb_logger, {"val/contrastive_loss_step": total_loss_value})
                        if global_loss is not None:
                            log_metrics(wandb_logger, {"val/contrastive_global_loss_step": global_loss.item()})
                        if local_loss is not None:
                            log_metrics(wandb_logger, {"val/contrastive_local_loss_step": local_loss.item()})

                    pbar_val_main.set_postfix({"val_masked_loss": total_loss_value})
                    pbar_val_main.update(1)

                    avg_sample_similarities_accumulator.append(similarities_dict["batch_sample_similarity"])

                    if "batch_time_similarity_for_projected" in similarities_dict:
                        avg_time_similarities_accumulator_for_projected.append(similarities_dict["batch_time_similarity_for_projected"])

            mean_val_total_contrastive_loss = val_contrastive_total_loss_metric.compute()
            log_metrics(wandb_logger, {"val/contrastive_total_loss_epoch": mean_val_total_contrastive_loss})

            if encountered_val_global_loss:
                mean_val_global_contrastive_loss = val_contrastive_global_loss_metric.compute()
                log_metrics(wandb_logger, {"val/contrastive_global_loss_epoch": mean_val_global_contrastive_loss})
            if encountered_val_local_loss:
                mean_val_local_contrastive_loss = val_contrastive_local_loss_metric.compute()
                log_metrics(wandb_logger, {"val/contrastive_local_loss_epoch": mean_val_local_contrastive_loss})

            # Checkpoint on best validation contrastive loss.
            if mean_val_total_contrastive_loss < best_val_loss:
                best_val_loss = mean_val_total_contrastive_loss
                model.save_checkpoint(name=f'best_val_loss', main_optimizer=optimizer_main, main_scheduler=main_scheduler, optimizer_last=optimizer_last, epoch=epoch)
                print(f"Saved best model with val loss {best_val_loss:.4f}")

            log_metrics(wandb_logger, {"best_val_loss": best_val_loss})

            avg_sample_similarities_epoch = torch.stack(avg_sample_similarities_accumulator).mean(dim=0)

            if len(avg_time_similarities_accumulator_for_projected) > 0:
                avg_time_similarities_accumulator_for_projected_epoch = torch.stack(avg_time_similarities_accumulator_for_projected).mean(dim=0)
                plot_epoch_heatmap(avg_time_similarities_accumulator_for_projected_epoch, stage='val', heatmap_type='time_step_proj_tokens')
                plot_similarity_vs_time_difference(avg_time_similarities_accumulator_for_projected_epoch, stage='val', type='proj')
                avg_time_similarities_accumulator_for_projected.clear()

            # Plot the heatmaps once per epoch
            plot_epoch_heatmap(avg_sample_similarities_epoch, stage='val', heatmap_type='sample')
            avg_sample_similarities_accumulator.clear()

            ### ------------ Eval Downstream -----------------
            # Same gating as downstream training; still inside torch.no_grad().
            if epoch >= cfg.start_downstream_epoch and epoch % model.train_every_n_epochs == 0:

                model.prepare_for_downstream_validation()

                val_downstream_loss_metric.reset()
                model.downstream_task_metrics.reset()

                with tqdm(total=len(val_dataloaders['downstream']), desc="Validation (Downstream Task)", leave=False) as pbar_val_downstream:
                    for batch_idx, batch in enumerate(val_dataloaders['downstream']):
                        batch = move_batch_to_device(batch, device)

                        with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                            downstream_dict = model.forward_linear_probe(batch)

                        downstream_loss = downstream_dict["downstream_loss"]
                        features = downstream_dict["features"]
                        preds = downstream_dict["logits"]
                        labels = downstream_dict["labels"]

                        downstream_loss_value = downstream_loss.item()
                        val_downstream_loss_metric.update(downstream_loss_value)
                        model.downstream_task_metrics.update(preds=preds, labels=labels)

                        if batch_idx % log_every_n_steps == 0:
                            log_metrics(wandb_logger, {"val/downstream_loss_step": downstream_loss_value})

                        pbar_val_downstream.set_postfix({"val_downstream_loss": downstream_loss_value})
                        pbar_val_downstream.update(1)

                        model.regression.update(
                            feats=features.detach().cpu().numpy(),
                            labels=labels.detach().cpu().numpy(),
                            stage='val'
                        )

                mean_val_downstream_loss = val_downstream_loss_metric.compute()
                log_metrics(wandb_logger, {"val/downstream_loss_epoch": mean_val_downstream_loss})

                # sklearn-style regression over accumulated features; returns
                # whether this epoch is the best by its internal criterion.
                is_best = model.regression.compute()
                metrics, preds, labels = model.downstream_task_metrics.compute()

                # Selection metric: sum of mean R2 over RV and LV targets.
                # Metric keys are parameterized by the model's training scheme
                # and MLP settings.
                mean_R2 = metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_RV_R2'] + metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_LV_R2']
                log_metrics(wandb_logger, {"downstream mean R2 val": mean_R2})

                print(f"Mean R2 RV: {metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_RV_R2']}")
                print(f"Mean R2 LV: {metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_LV_R2']}")
                print(f"downstream_mean_R2 val: {mean_R2}")

                if is_best:
                    model.save_checkpoint(name='best_mean_R2_sklearn', main_optimizer=optimizer_main, main_scheduler=main_scheduler, optimizer_last=optimizer_last, epoch=epoch)
                    print(f"Saved best model with Mean R2 from sklearn evaluation")

                # Checkpoint + snapshot all metrics when linear-probe R2 improves.
                if mean_R2 > best_mean_R2:
                    best_mean_R2 = mean_R2
                    model.save_checkpoint(name='best_mean_R2_linear_probing', main_optimizer=optimizer_main, main_scheduler=main_scheduler, optimizer_last=optimizer_last, epoch=epoch)
                    print(f"Saved best model with Mean R2 {best_mean_R2}")

                    best_metrics = {"best_mean_R2/best_mean_R2": best_mean_R2}
                    for name, value in metrics.items():
                        best_metrics[f"best_mean_R2/{name}"] = value

                    log_metrics(wandb_logger, best_metrics)
                    model.plotter_val.plot_results(preds=preds, labels=labels, prefix='best_mean_R2_plots')

                # Secondary selection metric: sum of mean MAE over RV and LV.
                mean_MAE = metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_RV_MAE'] + metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_LV_MAE']
                log_metrics(wandb_logger, {"downstream mean MAE val": mean_MAE})

                print(f"Mean MAE RV: {metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_RV_MAE']}")
                print(f"Mean MAE LV: {metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_LV_MAE']}")

                if mean_MAE < best_mean_MAE:
                    best_mean_MAE = mean_MAE
                    model.save_checkpoint(name='best_mean_MAE', main_optimizer=optimizer_main, main_scheduler=main_scheduler, optimizer_last=optimizer_last, epoch=epoch)
                    print(f"Saved best model with Mean MAE {best_mean_MAE}")

                    best_metrics = {"best_mean_MAE/best_mean_MAE": best_mean_MAE}
                    for name, value in metrics.items():
                        best_metrics[f"best_mean_MAE/{name}"] = value

                    log_metrics(wandb_logger, best_metrics)
                    model.plotter_val.plot_results(preds=preds, labels=labels, prefix='best_mean_MAE_plots')

    if wandb_logger is not None:
        wandb_logger.finish()
