from typing import Optional

from lightning import Trainer
from lightning.pytorch.loggers import WandbLogger
from omegaconf import DictConfig

from ecgcmr.utils.misc import fix_seed
from ecgcmr.imaging.img_dataset.MaskedImagingLightning import MaskedImageDataModule, MaskedImageWithEvalDataModule
from ecgcmr.imaging.img_dataset.DownstreamImagingLightning import DownstreamImageDataModule

from ecgcmr.imaging.img_models.ImageViTMAE import ImageViTMAE
from ecgcmr.imaging.img_models.ImageViTMAEEval import ImageViTEval
from ecgcmr.imaging.img_models.ImageViTMAE_LinearProbe import ImageViTMAE_LinearProbe


def train_imaging_masked(
        cfg: DictConfig,
        wandb_logger: WandbLogger,
        save_dir: str,
        devices: int = 1):
    """Pre-train the masked imaging model (ViT-MAE) with Lightning.

    Args:
        cfg: Experiment configuration; reads ``seed``, ``max_epochs``,
            ``log_every_n_steps`` and ``check_val_every_n_epoch``.
        wandb_logger: Weights & Biases logger; the model is registered
            with it via ``watch``.
        save_dir: Directory used for the model's outputs and as the
            Trainer's default root dir.
        devices: Number of GPUs; more than one switches the strategy
            to DDP.
    """
    fix_seed(seed=cfg.seed)

    data = MaskedImageDataModule(cfg=cfg)
    net = ImageViTMAE(cfg=cfg, save_dir=save_dir)
    wandb_logger.watch(net, log_graph=False)

    trainer = Trainer(
        accelerator="gpu",
        devices=devices,
        precision="bf16-mixed",
        strategy="ddp" if devices > 1 else "auto",
        logger=wandb_logger,
        max_epochs=cfg.max_epochs,
        log_every_n_steps=cfg.log_every_n_steps,
        check_val_every_n_epoch=cfg.check_val_every_n_epoch,
        default_root_dir=save_dir,
        # Sanity validation is skipped; a simple profiler summarizes step timings.
        num_sanity_val_steps=0,
        profiler="simple",
    )

    trainer.fit(model=net, datamodule=data)


def train_imaging_masked_with_eval(
        cfg: DictConfig,
        wandb_logger: WandbLogger,
        save_dir: str,
        devices: int = 1
        ):
    """Pre-train the masked imaging model with an attached linear probe.

    NOTE: marked as not working by the original author — this flow is
    being migrated to plain PyTorch.

    Args:
        cfg: Experiment configuration; reads ``seed``, ``max_epochs``,
            ``log_every_n_steps`` and ``check_val_every_n_epoch``.
        wandb_logger: Weights & Biases logger; the model is registered
            with it via ``watch``.
        save_dir: Directory used for the model's outputs and as the
            Trainer's default root dir.
        devices: Number of GPUs; more than one switches the strategy
            to DDP.
    """
    fix_seed(seed=cfg.seed)

    data = MaskedImageWithEvalDataModule(cfg=cfg)
    net = ImageViTMAE_LinearProbe(cfg=cfg, save_dir=save_dir)
    wandb_logger.watch(net, log_graph=False)

    trainer = Trainer(
        accelerator="gpu",
        devices=devices,
        precision="bf16-mixed",
        strategy="ddp" if devices > 1 else "auto",
        logger=wandb_logger,
        max_epochs=cfg.max_epochs,
        log_every_n_steps=cfg.log_every_n_steps,
        check_val_every_n_epoch=cfg.check_val_every_n_epoch,
        default_root_dir=save_dir,
        # Sanity validation is skipped; a simple profiler summarizes step timings.
        num_sanity_val_steps=0,
        profiler="simple",
    )

    trainer.fit(model=net, datamodule=data)


def fine_tune_masked(cfg: DictConfig,
                     wandb_logger: WandbLogger,
                     save_dir: Optional[str] = None,
                     devices: int = 1,
                     checkpoint_path: Optional[str] = None):
    """Fine-tune a pretrained imaging model on the downstream task.

    Args:
        cfg: Experiment configuration; reads ``seed``,
            ``downstream_task.max_epochs`` and ``log_every_n_steps``.
        wandb_logger: Weights & Biases logger; the model is registered
            with it via ``watch``.
        save_dir: Directory used for the model's outputs and as the
            Trainer's default root dir (may be ``None``).
        devices: Number of GPUs; more than one switches the strategy
            to DDP, matching the pre-training entry points.
        checkpoint_path: Path to the pretrained weights handed to
            ``ImageViTEval`` (may be ``None``).
    """
    # Seed for reproducibility — the other training entry points in this
    # module do the same; previously fine-tuning runs were unseeded.
    fix_seed(seed=cfg.seed)

    # mask_labels=False: labels are needed for supervised fine-tuning.
    datamodule = DownstreamImageDataModule(cfg=cfg, mask_labels=False)

    test_model = ImageViTEval(cfg=cfg, save_dir=save_dir, pretrained_model_name_or_path=checkpoint_path)

    wandb_logger.watch(test_model, log_graph=False)

    # Select DDP for multi-GPU runs, consistent with the masked
    # pre-training functions above (previously always "auto").
    strategy = "ddp" if devices > 1 else "auto"

    trainer = Trainer(
        accelerator="gpu",
        devices=devices,
        precision="bf16-mixed",
        strategy=strategy,
        logger=wandb_logger,
        max_epochs=cfg.downstream_task.max_epochs,
        log_every_n_steps=cfg.log_every_n_steps,
        default_root_dir=save_dir,
        num_sanity_val_steps=0,
    )

    trainer.fit(model=test_model, datamodule=datamodule)