import lightning as L
from torch.utils.data import DataLoader
from omegaconf import DictConfig

from ecgcmr.imaging.img_dataset.MaskedImagingDataset import MaskedImageDataset
from ecgcmr.imaging.img_dataset.DownstreamImageDataset import DownstreamImageDataset


class MaskedImageDataModule(L.LightningDataModule):
    """Lightning data module serving ``MaskedImageDataset`` train/val splits.

    Batch size and worker count are read once from ``cfg.dataset`` at
    construction time; datasets are instantiated lazily in :meth:`setup`.
    """

    def __init__(self, cfg: DictConfig) -> None:
        """Store the config and cache loader hyper-parameters.

        Args:
            cfg: Hydra/omegaconf config; must provide
                ``cfg.dataset.batch_size`` and ``cfg.dataset.num_workers``.
        """
        super().__init__()
        self.cfg = cfg
        self.batch_size = cfg.dataset.batch_size
        self.num_workers = cfg.dataset.num_workers

    def setup(self, stage: str) -> None:
        """Create train/val datasets; only the ``'fit'`` stage is handled."""
        if stage == 'fit':
            self.dataset_train = MaskedImageDataset(cfg=self.cfg, mode='train', apply_augmentations=True)
            # NOTE(review): augmentations are also enabled for the val split —
            # confirm this is intentional (they are usually disabled for validation).
            self.dataset_val = MaskedImageDataset(cfg=self.cfg, mode='val', apply_augmentations=True)

    def _make_loader(self, dataset, shuffle: bool) -> DataLoader:
        # Single place for the shared DataLoader settings so train/val stay in sync.
        return DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=shuffle,
            num_workers=self.num_workers,
            pin_memory=True,
        )

    def train_dataloader(self) -> DataLoader:
        """Return the shuffled training loader."""
        return self._make_loader(self.dataset_train, shuffle=True)

    def val_dataloader(self) -> DataLoader:
        """Return the (unshuffled) validation loader."""
        return self._make_loader(self.dataset_val, shuffle=False)


class MaskedImageWithEvalDataModule(L.LightningDataModule):
    """Data module pairing the masked-image task with a downstream eval task.

    ``train_dataloader``/``val_dataloader`` each return a dict with keys
    ``'main'`` (masked-image pretraining loader, ``cfg.dataset`` settings) and
    ``'downstream'`` (linear-probe/eval loader, ``cfg.downstream_task`` settings).
    """

    def __init__(self, cfg: DictConfig) -> None:
        """Store the config and cache loader hyper-parameters for both tasks.

        Args:
            cfg: Hydra/omegaconf config; must provide ``batch_size`` and
                ``num_workers`` under both ``cfg.dataset`` and
                ``cfg.downstream_task``.
        """
        super().__init__()
        self.cfg = cfg

        self.main_batch_size = cfg.dataset.batch_size
        self.main_num_workers = cfg.dataset.num_workers

        self.downstream_batch_size = cfg.downstream_task.batch_size
        self.downstream_num_workers = cfg.downstream_task.num_workers

    def setup(self, stage: str) -> None:
        """Create datasets for both tasks; only the ``'fit'`` stage is handled."""
        if stage == 'fit':
            self.dataset_train = MaskedImageDataset(cfg=self.cfg, mode='train', apply_augmentations=True)
            # NOTE(review): augmentations are also enabled for the val split of the
            # main task — confirm this is intentional.
            self.dataset_val = MaskedImageDataset(cfg=self.cfg, mode='val', apply_augmentations=True)

            # Downstream eval uses raw labels (mask_labels=False) and no augmentations.
            self.dataset_train_downstream = DownstreamImageDataset(cfg=self.cfg, mode='train', mask_labels=False, apply_augmentations=False)
            self.dataset_val_downstream = DownstreamImageDataset(cfg=self.cfg, mode='val', mask_labels=False, apply_augmentations=False)

    @staticmethod
    def _make_loader(dataset, batch_size: int, num_workers: int, shuffle: bool) -> DataLoader:
        # Single construction point so the four loaders stay consistent.
        return DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=num_workers,
            pin_memory=True,
        )

    def train_dataloader(self):
        """Return ``{'main': ..., 'downstream': ...}`` shuffled training loaders."""
        return {
            'main': self._make_loader(
                self.dataset_train, self.main_batch_size, self.main_num_workers, shuffle=True
            ),
            'downstream': self._make_loader(
                self.dataset_train_downstream, self.downstream_batch_size,
                self.downstream_num_workers, shuffle=True
            ),
        }

    def val_dataloader(self):
        """Return ``{'main': ..., 'downstream': ...}`` unshuffled validation loaders."""
        return {
            'main': self._make_loader(
                self.dataset_val, self.main_batch_size, self.main_num_workers, shuffle=False
            ),
            'downstream': self._make_loader(
                self.dataset_val_downstream, self.downstream_batch_size,
                self.downstream_num_workers, shuffle=False
            ),
        }
