import lightning as L
from torch.utils.data import DataLoader
from omegaconf import DictConfig

from ecgcmr.multimodal.multimodal_dataset.MultiModalImagingECGDataset import MultiModalImagingECGDataset
from ecgcmr.multimodal.multimodal_utils.misc import get_collate_fn

from ecgcmr.signal.sig_datasets.DownstreamECGDataset import DownstreamECGDataset


class MultiModalDataModule(L.LightningDataModule):
    """Lightning data module serving paired imaging + ECG samples.

    Builds a train and a val `MultiModalImagingECGDataset` during the `fit`
    stage and exposes them through identically configured dataloaders.
    """

    def __init__(self, cfg: DictConfig) -> None:
        super().__init__()

        self.cfg = cfg
        self.batch_size = cfg.dataset.batch_size
        self.num_workers = cfg.dataset.num_workers

        self.use_peaks_location = cfg.dataset.use_peaks_location
        # A purely global loss never consumes R-peak locations, so disable
        # loading them in that case.
        if cfg.training_mode.loss.type == 'global':
            self.use_peaks_location = False

    def setup(self, stage: str):
        """Instantiate the train/val datasets when entering the `fit` stage."""
        if stage != 'fit':
            return
        # NOTE(review): augmentations are enabled for the val split too —
        # confirm this is intentional (e.g. for contrastive pretraining).
        shared_kwargs = dict(
            cfg=self.cfg,
            use_peaks_location=self.use_peaks_location,
            apply_augmentations=True,
        )
        self.dataset_train = MultiModalImagingECGDataset(mode='train', **shared_kwargs)
        self.dataset_val = MultiModalImagingECGDataset(mode='val', **shared_kwargs)

    def _build_loader(self, dataset, shuffle: bool) -> DataLoader:
        """Construct a loader with the settings shared by both splits.

        NOTE(review): `drop_last=True` also applies to validation — confirm
        partial final batches should be discarded there as well.
        """
        return DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=shuffle,
            num_workers=self.num_workers,
            pin_memory=True,
            drop_last=True,
            collate_fn=get_collate_fn(use_peaks_location=self.use_peaks_location),
        )

    def train_dataloader(self) -> DataLoader:
        """Return the shuffled training dataloader."""
        return self._build_loader(self.dataset_train, shuffle=True)

    def val_dataloader(self) -> DataLoader:
        """Return the (unshuffled) validation dataloader."""
        return self._build_loader(self.dataset_val, shuffle=False)


class MultiModalWithEvalDataModule(L.LightningDataModule):
    """Data module combining the main multimodal dataset with a downstream
    ECG evaluation dataset.

    `train_dataloader` / `val_dataloader` each return a dict with two loaders:
    ``'main'`` (imaging + ECG, custom collate) and ``'downstream'``
    (ECG-only, default collate, no augmentations).
    """

    def __init__(self, cfg: DictConfig) -> None:
        super().__init__()

        self.cfg = cfg

        self.main_batch_size = cfg.dataset.batch_size
        self.main_num_workers = cfg.dataset.num_workers

        self.downstream_batch_size = cfg.downstream_task.batch_size
        self.downstream_num_workers = cfg.downstream_task.num_workers

        self.use_peaks_location = cfg.dataset.use_peaks_location
        # A purely global loss never consumes R-peak locations, so disable
        # loading them in that case.
        if cfg.training_mode.loss.type == 'global':
            self.use_peaks_location = False

    def setup(self, stage: str):
        """Instantiate main and downstream datasets for the `fit` stage."""
        if stage != 'fit':
            return
        # NOTE(review): augmentations are enabled for the main val split —
        # confirm this is intentional (e.g. for contrastive pretraining).
        for split in ('train', 'val'):
            main_ds = MultiModalImagingECGDataset(
                cfg=self.cfg,
                mode=split,
                use_peaks_location=self.use_peaks_location,
                apply_augmentations=True,
            )
            downstream_ds = DownstreamECGDataset(
                cfg=self.cfg, mode=split, apply_augmentations=False
            )
            setattr(self, f'dataset_{split}', main_ds)
            setattr(self, f'dataset_{split}_downstream', downstream_ds)

    def _main_loader(self, dataset, shuffle: bool) -> DataLoader:
        """Loader for the multimodal dataset (custom collate, drops last batch).

        NOTE(review): `drop_last=True` also applies to validation — confirm
        partial final batches should be discarded there as well.
        """
        return DataLoader(
            dataset,
            batch_size=self.main_batch_size,
            shuffle=shuffle,
            num_workers=self.main_num_workers,
            pin_memory=True,
            drop_last=True,
            collate_fn=get_collate_fn(use_peaks_location=self.use_peaks_location),
        )

    def _downstream_loader(self, dataset, shuffle: bool) -> DataLoader:
        """Loader for the downstream ECG dataset (default collate, keeps last batch)."""
        return DataLoader(
            dataset,
            batch_size=self.downstream_batch_size,
            shuffle=shuffle,
            num_workers=self.downstream_num_workers,
            pin_memory=True,
        )

    def train_dataloader(self):
        """Return ``{'main': ..., 'downstream': ...}`` shuffled training loaders."""
        return {
            'main': self._main_loader(self.dataset_train, shuffle=True),
            'downstream': self._downstream_loader(self.dataset_train_downstream, shuffle=True),
        }

    def val_dataloader(self):
        """Return ``{'main': ..., 'downstream': ...}`` unshuffled validation loaders."""
        return {
            'main': self._main_loader(self.dataset_val, shuffle=False),
            'downstream': self._downstream_loader(self.dataset_val_downstream, shuffle=False),
        }
