import os

from omegaconf import DictConfig

from lightning import Trainer
from lightning.pytorch.loggers import WandbLogger

from ecgcmr.multimodal.multimodal_dataset.MultiModalLightning import MultiModalDataModule, MultiModalWithEvalDataModule
from ecgcmr.multimodal.multimodal_models.ImageECGClip import ImageECGClip
from ecgcmr.multimodal.multimodal_models.ImageECGClip_LinearProbe import ImageECGClip_LinearProbe


def train_multimodal_contrastive(cfg: DictConfig,
                                 wandb_logger: WandbLogger,
                                 save_dir: str,
                                 image_checkpoint_path: str,
                                 ecg_checkpoint_path: str,
                                 devices: int = 1):
    """Run contrastive image/ECG CLIP pretraining with PyTorch Lightning.

    Args:
        cfg: Hydra/omegaconf config; must provide ``max_epochs``,
            ``log_every_n_steps`` and ``check_val_every_n_epoch``.
        wandb_logger: Weights & Biases logger attached to the trainer.
        save_dir: Directory used both as the model save dir and the
            trainer's ``default_root_dir``.
        image_checkpoint_path: Pretrained image-encoder checkpoint path,
            forwarded verbatim to ``ImageECGClip``.
        ecg_checkpoint_path: Pretrained ECG-encoder checkpoint path,
            forwarded verbatim to ``ImageECGClip``.
        devices: Number of GPUs to train on (DDP when > 1).
    """
    data = MultiModalDataModule(cfg=cfg)

    clip_model = ImageECGClip(
        cfg=cfg,
        save_dir=save_dir,
        pretrained_model_name_or_path_image=image_checkpoint_path,
        pretrained_model_name_or_path_ecg=ecg_checkpoint_path,
    )

    # Track gradients and the model graph in W&B for this run.
    wandb_logger.watch(clip_model, log_graph=True)

    # Multi-GPU runs require DDP explicitly; otherwise let Lightning choose.
    if devices > 1:
        parallel_strategy = "ddp"
    else:
        parallel_strategy = "auto"

    trainer = Trainer(
        accelerator="gpu",
        devices=devices,
        strategy=parallel_strategy,
        precision="bf16-mixed",
        logger=wandb_logger,
        max_epochs=cfg.max_epochs,
        log_every_n_steps=cfg.log_every_n_steps,
        check_val_every_n_epoch=cfg.check_val_every_n_epoch,
        default_root_dir=save_dir,
        num_sanity_val_steps=0,
        profiler="simple",
    )

    trainer.fit(model=clip_model, datamodule=data)


def _find_pretrained_subdir(checkpoint_path: str) -> str:
    """Return the first existing known HF-model subdirectory under *checkpoint_path*.

    Checkpoints may live under different directory names depending on which
    training stage produced them; probe the candidates in priority order
    (best-validation checkpoint first).

    Raises:
        FileNotFoundError: if none of the known subdirectories exists, instead
            of silently yielding ``None`` and failing later at model load time.
    """
    candidates = ["best_val_loss_hf_model", "hf_model", "hf_ecg_model", "hf_mri_model"]
    for name in candidates:
        path = os.path.join(checkpoint_path, name)
        if os.path.exists(path):
            return path
    raise FileNotFoundError(
        f"No pretrained model directory found under {checkpoint_path!r}; "
        f"looked for {candidates}"
    )


def train_multimodal_contrastive_with_eval(cfg: DictConfig,
                                           wandb_logger: WandbLogger,
                                           save_dir: str,
                                           image_checkpoint_path: str,
                                           ecg_checkpoint_path: str,
                                           devices: int = 1):
    """Run contrastive training with a linear-probe evaluation head.

    Unlike ``train_multimodal_contrastive``, the encoder checkpoints are
    resolved to a known HF-model subdirectory under each checkpoint path
    before model construction.

    Args:
        cfg: Hydra/omegaconf config; must provide ``max_epochs``,
            ``log_every_n_steps`` and ``check_val_every_n_epoch``.
        wandb_logger: Weights & Biases logger attached to the trainer.
        save_dir: Directory used both as the model save dir and the
            trainer's ``default_root_dir``.
        image_checkpoint_path: Directory containing the pretrained image
            encoder in one of the known subdirectories.
        ecg_checkpoint_path: Directory containing the pretrained ECG
            encoder in one of the known subdirectories.
        devices: Number of GPUs to train on (DDP when > 1).

    Raises:
        FileNotFoundError: if either checkpoint path holds no known
            HF-model subdirectory.
    """
    datamodule = MultiModalWithEvalDataModule(cfg=cfg)

    pretrained_model_name_or_path_image = _find_pretrained_subdir(image_checkpoint_path)
    pretrained_model_name_or_path_ecg = _find_pretrained_subdir(ecg_checkpoint_path)

    model = ImageECGClip_LinearProbe(cfg=cfg, save_dir=save_dir,
                                     pretrained_model_name_or_path_image=pretrained_model_name_or_path_image,
                                     pretrained_model_name_or_path_ecg=pretrained_model_name_or_path_ecg)

    # log_graph=False here (unlike the pure-contrastive entry point): the
    # probe wrapper's graph is not logged.
    wandb_logger.watch(model, log_graph=False)

    # Multi-GPU runs require DDP explicitly; otherwise let Lightning choose.
    strategy = "ddp" if devices > 1 else "auto"

    trainer = Trainer(
        accelerator="gpu",
        devices=devices,
        strategy=strategy,
        precision="bf16-mixed",
        logger=wandb_logger,
        max_epochs=cfg.max_epochs,
        log_every_n_steps=cfg.log_every_n_steps,
        check_val_every_n_epoch=cfg.check_val_every_n_epoch,
        default_root_dir=save_dir,
        num_sanity_val_steps=0,
        profiler="simple",
    )

    trainer.fit(model=model, datamodule=datamodule)
