from typing import Tuple
from omegaconf import DictConfig

import torch
import numpy as np

from ecgcmr.imaging.img_dataset.ImageDataset import BaseImageDataset
from ecgcmr.imaging.img_augmentations.ImageAugmentations import ImageAugmentations

from ecgcmr.utils.misc import filter_ed_labels


class DownstreamImageDataset(BaseImageDataset):
    """
    Dataset for a downstream imaging task.

    Wraps :class:`BaseImageDataset`, restricting it to a pre-computed subset
    of sample indices and pairing each image with downstream-task labels
    loaded from disk. Images are passed through ``ImageAugmentations`` before
    being returned.
    """

    def __init__(
        self,
        cfg: DictConfig,
        mode: str,
        mask_labels: bool = False,
        supervised: bool = False,
        apply_augmentations: bool = False,
    ) -> None:
        """
        Args:
            cfg: Full experiment config; reads the ``cfg.dataset`` and
                ``cfg.downstream_task`` sub-trees.
            mode: Split name (e.g. 'train'/'val'/'test'), used to select the
                indices/labels paths from the config.
            mask_labels: If True (effective for the regression task only),
                keep only the ED-frame label columns chosen by
                ``filter_ed_labels``.
            supervised: If True, use the supervised augmentation config and
                image size (``cfg.augmentations.imaging`` /
                ``cfg.dataset.img_size``); otherwise use the
                downstream-task-specific ones.
            apply_augmentations: Forwarded to ``ImageAugmentations``; controls
                whether augmentations are actually applied.

        Raises:
            ValueError: If ``cfg.downstream_task.type`` is not 'vol' or 'area'.
        """
        super().__init__(cfg=cfg.dataset.paths, mode=mode)

        task = cfg.downstream_task.task
        metrics_type = cfg.downstream_task.type
        indices_path = cfg.downstream_task.paths[f'downstream_task_{mode}_subindices']
        labels_path = cfg.downstream_task.paths[f'downstream_task_{mode}_labels_{metrics_type}']

        # Fail loudly on an unrecognized metrics type; previously an unknown
        # value left `num_classes` unbound and surfaced later as a NameError.
        if metrics_type == 'vol':
            num_classes = cfg.downstream_task.num_classes_vol
        elif metrics_type == 'area':
            num_classes = cfg.downstream_task.num_classes_area
        else:
            raise ValueError(
                f"Unsupported downstream_task.type: {metrics_type!r} "
                "(expected 'vol' or 'area')"
            )

        # Default to keeping every label column. Previously `ids_to_take` was
        # assigned only for the regression task, so any other task crashed
        # with AttributeError when slicing the labels below.
        self.ids_to_take = list(range(num_classes))
        if task == 'regression' and mask_labels:
            # Restrict to ED-frame label columns only.
            self.ids_to_take = filter_ed_labels(cfg.downstream_task.target)

        # mmap_mode='r' avoids loading the full arrays up front; note the
        # fancy column indexing on the labels materializes an in-memory copy.
        self.indices = np.load(indices_path, mmap_mode='r')
        self.labels = np.load(labels_path, mmap_mode='r')[:, self.ids_to_take]

        if supervised:
            self.aug_cfg = cfg.augmentations.imaging
            img_size = cfg.dataset.img_size
        else:
            self.aug_cfg = cfg.downstream_task.augmentations.imaging
            img_size = cfg.downstream_task.img_size

        self.image_augmentations = ImageAugmentations(
            cfg=self.aug_cfg,
            img_size=img_size,
            apply_augmentations=apply_augmentations,
        )

    def __len__(self) -> int:
        """Number of samples in this downstream subset."""
        return len(self.indices)

    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Return ``(augmented_image, labels)`` for subset position ``index``.

        The base dataset is indexed through ``self.indices`` (the downstream
        subset). It appears to return an ``(image, es_time_step)`` pair —
        TODO confirm against BaseImageDataset. When time sampling is enabled,
        both elements are forwarded to the augmentations as a dict; otherwise
        only the image is. Labels are converted to a float32 tensor.
        """
        if self.aug_cfg.time_sample.enable:
            image, es_time_step = super().__getitem__(self.indices[index])
            image_data = {"image": image, "es_time_step": es_time_step}
        else:
            # Take only the image component of the base dataset's return.
            image_data = super().__getitem__(self.indices[index])[0]

        return self.image_augmentations(image_data), torch.from_numpy(self.labels[index]).float()
