import torch
import torchmetrics

from tqdm import tqdm
from omegaconf import DictConfig, OmegaConf

from ecgcmr.imaging.img_dataset.MaskedImageWithEvalPyTorch import MaskedImageWithEvalDataModule
from ecgcmr.imaging.img_models.ImageViTMAEPyTorch_LinearProbe import ImageViTMAEPyTorch_LinearProbe
from ecgcmr.imaging.img_utils.imaging_callbacks import ImageLoggerMetricPyTorch

from ecgcmr.utils.misc import move_batch_to_device, log_metrics


def train_imaging_masked_with_eval(
    cfg: DictConfig,
    save_dir: str,
    devices: int = 1,
    wandb_logger = None,
) -> None:
    """Train a masked-image ViT-MAE model with an interleaved linear-probe evaluation.

    Each epoch runs up to four phases:
      1. Main training: masked-reconstruction loss on the 'main' dataloader.
      2. Downstream training (only after ``cfg.start_downstream_epoch`` and every
         ``model.train_every_n_epochs`` epochs): linear probe on the 'downstream' loader.
      3. Main validation: masked loss on the 'main' validation loader; checkpoints
         the best model by mean validation masked loss.
      4. Downstream validation (same gating as phase 2): computes R2/MAE metrics and
         checkpoints the best models by mean R2, by mean MAE, and by the sklearn-based
         regression evaluation.

    Args:
        cfg: Hydra/OmegaConf config. Must provide ``max_epochs``, ``log_every_n_steps``,
            and ``log_images_every_n_epochs``; ``start_downstream_epoch`` is injected below.
        save_dir: Directory passed to the model for checkpointing.
        devices: Number of devices; any value > 0 selects CUDA, otherwise CPU.
        wandb_logger: Optional Weights & Biases logger consumed by ``log_metrics``;
            may be None, in which case logging calls are expected to be no-ops.
    """

    # Build and set up the data module for the 'fit' stage; it yields dicts of
    # dataloaders keyed by 'main' (masked pretraining) and 'downstream' (probe).
    datamodule = MaskedImageWithEvalDataModule(cfg=cfg)
    datamodule.setup('fit')

    train_dataloaders = datamodule.train_dataloader()
    val_dataloaders = datamodule.val_dataloader()

    # Steps per epoch for each task, used by the model to size its LR schedules.
    num_main_steps = len(train_dataloaders['main'])
    num_downstream_steps = len(train_dataloaders['downstream'])

    # NOTE(review): this silently hard-codes the downstream start epoch to 500,
    # overriding whatever the config specified — confirm this is intentional.
    # It also gates the image logging below, so images are only logged after
    # epoch 500 regardless of log_images_every_n_epochs.
    OmegaConf.set_struct(cfg, False)  # Temporarily disable struct mode
    cfg.start_downstream_epoch = 500
    OmegaConf.set_struct(cfg, True)  # Re-enable struct mode

    device = torch.device('cuda' if devices > 0 else 'cpu')

    model = ImageViTMAEPyTorch_LinearProbe(cfg=cfg, save_dir=save_dir)
    model = model.to(device)

    # The model owns optimizer/scheduler construction. The last-layer scheduler
    # is discarded here; NOTE(review): it is never stepped anywhere in this loop
    # (a fresh last-layer optimizer may be created per downstream phase instead).
    optimizers, schedulers = model.configure_optimizers(num_main_steps, num_downstream_steps)
    optimizer_main, optimizer_last = optimizers
    main_scheduler, _ = schedulers

    # NOTE(review): torch.cuda.amp.GradScaler is deprecated in favor of
    # torch.amp.GradScaler("cuda"). Also, gradient scaling is a no-op benefit
    # under bfloat16 autocast (it exists for float16 underflow) — confirm the
    # scaler is actually needed with the bf16 autocast contexts below.
    scaler = torch.cuda.amp.GradScaler()

    # Running epoch means for each loss; reset at the start of each phase.
    train_masked_loss_metric = torchmetrics.MeanMetric()
    train_downstream_loss_metric = torchmetrics.MeanMetric()
    val_masked_loss_metric = torchmetrics.MeanMetric()
    val_downstream_loss_metric = torchmetrics.MeanMetric()

    num_epochs = cfg.max_epochs

    # Best-so-far trackers for the three checkpointing criteria.
    best_val_loss = float('inf')
    best_mean_R2 = -float('inf')
    best_mean_MAE = float('inf')

    log_every_n_steps = cfg.log_every_n_steps
    log_images_every_n_epochs = cfg.log_images_every_n_epochs

    # Callback that renders reconstruction plots from masked-model outputs.
    image_logger = ImageLoggerMetricPyTorch(cfg=cfg, image_encoder=model.image_model)

    for epoch in range(num_epochs):
        print(f"Epoch {epoch + 1}/{num_epochs}")

        ### ------------ Train Main -----------------
        # Puts the model into masked-pretraining mode (presumably sets train()
        # and un/freezes the relevant parameters — behavior lives in the model).
        model.prepare_for_main_training()
        train_masked_loss_metric.reset()

        with tqdm(total=len(train_dataloaders['main']), desc="Training (Main Task)", leave=False) as pbar_main:
            for batch_idx, batch in enumerate(train_dataloaders['main']):
                batch = move_batch_to_device(batch, device)

                optimizer_main.zero_grad()

                # Mixed-precision forward; loss comes from the MAE output object.
                with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                    outputs = model.forward_masked(batch)
                    masked_loss = outputs.loss

                # Scaled backward + step (see GradScaler note above re: bf16).
                scaler.scale(masked_loss).backward()
                scaler.step(optimizer_main)
                scaler.update()

                masked_loss_value = masked_loss.detach().item()
                train_masked_loss_metric.update(masked_loss_value)

                if batch_idx % log_every_n_steps == 0:
                    log_metrics(wandb_logger, {"train/masked_loss_step": masked_loss_value})

                pbar_main.set_postfix({"masked_loss": masked_loss_value})
                pbar_main.update(1)

                # Per-step LR schedule. NOTE(review): if the scheduler's interval
                # is 'epoch' it is never stepped anywhere — confirm all configs
                # use interval='step' for the main scheduler.
                if main_scheduler['interval'] == 'step':
                    main_scheduler['scheduler'].step()

                # NOTE(review): assumes every param group carries a 'name' key
                # (set by the model's configure_optimizers) — verify.
                if batch_idx % log_every_n_steps == 0:
                    for param_group in optimizer_main.param_groups:
                        log_metrics(wandb_logger, {f"learning_rate/main_{param_group['name']}" : param_group['lr']})

        mean_masked_loss = train_masked_loss_metric.compute()
        log_metrics(wandb_logger, {"train/masked_loss_epoch": mean_masked_loss})

        ### ------------ Train Downstream -----------------
        # Linear-probe training only after the (hard-coded) start epoch, and only
        # every model.train_every_n_epochs epochs.
        if epoch >= cfg.start_downstream_epoch and epoch % model.train_every_n_epochs == 0:

            # The model may return a fresh optimizer for the probe head (e.g. a
            # re-initialized last layer); if so, it replaces optimizer_last.
            last_layer_optimizer = model.prepare_for_downstream_training(device=device, num_downstream_steps=num_downstream_steps)
            train_downstream_loss_metric.reset()

            if last_layer_optimizer is not None:
                optimizer_last = last_layer_optimizer

            with tqdm(total=len(train_dataloaders['downstream']), desc="Training (Downstream Task)", leave=False) as pbar_downstream:
                for batch_idx, batch in enumerate(train_dataloaders['downstream']):
                    batch = move_batch_to_device(batch, device)

                    optimizer_last.zero_grad()

                    with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                        downstream_loss, features, preds, labels = model.forward_linear_probe(batch)

                    scaler.scale(downstream_loss).backward()
                    scaler.step(optimizer_last)
                    scaler.update()

                    downstream_loss_value = downstream_loss.detach().item()
                    train_downstream_loss_metric.update(downstream_loss_value)

                    if batch_idx % log_every_n_steps == 0:
                        log_metrics(wandb_logger, {"train/downstream_loss_step": downstream_loss_value})

                    pbar_downstream.set_postfix({"downstream_loss": downstream_loss_value})
                    pbar_downstream.update(1)

                    # Accumulate (features, labels) for the sklearn-style
                    # regression evaluation computed after validation.
                    model.regression.update(
                        feats=features.detach().cpu().numpy(),
                        labels=labels.detach().cpu().numpy(),
                        stage='train'
                    )

                    if batch_idx % log_every_n_steps == 0:
                        for param_group in optimizer_last.param_groups:
                            log_metrics(wandb_logger, {f"learning_rate/last_layer_{param_group['name']}" : param_group['lr']})

            mean_downstream_loss = train_downstream_loss_metric.compute()
            log_metrics(wandb_logger, {"train/downstream_loss_epoch": mean_downstream_loss})

        ### ------------ Eval Main -----------------
        # All validation runs without gradients; mode switching is delegated to
        # the model's prepare_for_*_validation hooks.
        with torch.no_grad():

            val_masked_loss_metric.reset()
            model.prepare_for_main_validation()

            with tqdm(total=len(val_dataloaders['main']), desc="Validation (Main Task)", leave=False) as pbar_val_main:
                for batch_idx, batch in enumerate(val_dataloaders['main']):
                    batch = move_batch_to_device(batch, device)

                    with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                        outputs = model.forward_masked(batch)
                        masked_loss = outputs.loss

                    masked_loss_value = masked_loss.item()
                    val_masked_loss_metric.update(masked_loss_value)

                    # Log reconstruction images from the first validation batch
                    # only. NOTE(review): gated on start_downstream_epoch (500),
                    # so no images are logged before that epoch — confirm intended.
                    if epoch >= cfg.start_downstream_epoch and epoch % log_images_every_n_epochs == 0 and batch_idx == 0:
                        image_logger.update(outputs, batch)
                        image_logger.log_plots()

                    if batch_idx % log_every_n_steps == 0:
                        log_metrics(wandb_logger, {"val/masked_loss_step": masked_loss_value})

                    pbar_val_main.set_postfix({"val_masked_loss": masked_loss_value})
                    pbar_val_main.update(1)

            mean_val_masked_loss = val_masked_loss_metric.compute()
            log_metrics(wandb_logger, {"val/masked_loss_epoch": mean_val_masked_loss})

            # Checkpoint on best mean validation masked loss.
            if mean_val_masked_loss < best_val_loss:
                best_val_loss = mean_val_masked_loss
                model.save_checkpoint(name=f"best_val_loss")
                print(f"Saved best model with val loss {best_val_loss:.4f}")

            log_metrics(wandb_logger, {"best_val_loss": best_val_loss})

            ### ------------ Eval Downstream -----------------
            # Same gating as downstream training, so the probe is evaluated in
            # the same epochs it was (re)trained.
            if epoch >= cfg.start_downstream_epoch and epoch % model.train_every_n_epochs == 0:

                model.prepare_for_downstream_validation()

                val_downstream_loss_metric.reset()
                model.downstream_task_metrics.reset()

                with tqdm(total=len(val_dataloaders['downstream']), desc="Validation (Downstream Task)", leave=False) as pbar_val_downstream:
                    for batch_idx, batch in enumerate(val_dataloaders['downstream']):
                        batch = move_batch_to_device(batch, device)

                        with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                            downstream_loss, features, preds, labels = model.forward_linear_probe(batch)

                        downstream_loss_value = downstream_loss.item()
                        val_downstream_loss_metric.update(downstream_loss_value)
                        model.downstream_task_metrics.update(preds=preds, labels=labels)

                        if batch_idx % log_every_n_steps == 0:
                            log_metrics(wandb_logger, {"val/downstream_loss_step": downstream_loss_value})

                        pbar_val_downstream.set_postfix({"val_downstream_loss": downstream_loss_value})
                        pbar_val_downstream.update(1)

                        model.regression.update(
                            feats=features.detach().cpu().numpy(),
                            labels=labels.detach().cpu().numpy(),
                            stage='val'
                        )

                mean_val_downstream_loss = val_downstream_loss_metric.compute()
                log_metrics(wandb_logger, {"val/downstream_loss_epoch": mean_val_downstream_loss})

                # Sklearn-based regression on accumulated features; returns a
                # flag indicating whether this epoch is the best so far by that
                # evaluation (tracking lives inside model.regression).
                is_best = model.regression.compute()
                metrics, preds, labels = model.downstream_task_metrics.compute()

                # "Mean R2" here is the SUM of RV and LV mean R2 (and likewise
                # for MAE below) — the name is a misnomer but kept for logging
                # continuity.
                mean_R2 = metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_RV_R2'] + metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_LV_R2']
                log_metrics(wandb_logger, {"downstream mean R2 val": mean_R2})

                print(f"Mean R2 RV: {metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_RV_R2']}")
                print(f"Mean R2 LV: {metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_LV_R2']}")
                print(f"downstream_mean_R2 val: {mean_R2}")

                if is_best:
                    model.save_checkpoint(name='best_mean_R2_sklearn')
                    print(f"Saved best model with Mean R2 from sklearn evaluation")

                # Checkpoint + log snapshot of all metrics on a new best R2.
                if mean_R2 > best_mean_R2:
                    best_mean_R2 = mean_R2
                    model.save_checkpoint(name='best_mean_R2')
                    print(f"Saved best model with Mean R2 {best_mean_R2}")

                    best_metrics = {"best_mean_R2/best_mean_R2": best_mean_R2}
                    for name, value in metrics.items():
                        best_metrics[f"best_mean_R2/{name}"] = value

                    log_metrics(wandb_logger, best_metrics)
                    model.plotter_val.plot_results(preds=preds, labels=labels, prefix='best_mean_R2_plots')

                mean_MAE = metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_RV_MAE'] + metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_LV_MAE']
                log_metrics(wandb_logger, {"downstream mean MAE val": mean_MAE})

                print(f"Mean MAE RV: {metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_RV_MAE']}")
                print(f"Mean MAE LV: {metrics[f'val_{model.training_scheme_setting}_{model.use_mlp_setting}/Mean_LV_MAE']}")

                # Checkpoint + log snapshot of all metrics on a new best MAE.
                if mean_MAE < best_mean_MAE:
                    best_mean_MAE = mean_MAE
                    model.save_checkpoint(name='best_mean_MAE')
                    print(f"Saved best model with Mean MAE {best_mean_MAE}")

                    best_metrics = {"best_mean_MAE/best_mean_MAE": best_mean_MAE}
                    for name, value in metrics.items():
                        best_metrics[f"best_mean_MAE/{name}"] = value

                    log_metrics(wandb_logger, best_metrics)
                    model.plotter_val.plot_results(preds=preds, labels=labels, prefix='best_mean_MAE_plots')

    # Finalize the W&B run if a logger was provided.
    if wandb_logger is not None:
        wandb_logger.finish()
