from abc import abstractmethod

import pytorch_lightning as pl
import torch

from torchmetrics.image import StructuralSimilarityIndexMeasure, PeakSignalNoiseRatio
from torchmetrics.segmentation import DiceScore, MeanIoU
from torchmetrics.classification import Accuracy, F1Score, AUROC, MatthewsCorrCoef

from einops import rearrange

import loss
from models.multimae3d_utils import (
    unshuffle_patches,
    unpatchify,
    patchify,
    shuffle_patches,
)
from data.transforms import MultiLabelsToBraTSClasses

from util.setup_functions import get_adapter_opts


class TaskModule(pl.LightningModule):
    """Abstract base class for per-task training modules.

    A ``TaskModule`` bundles everything needed to train/evaluate one task of a
    multi-task model: loss construction, metric construction, loss/metric
    computation, and visualization. Subclasses must implement all abstract
    methods; ``_get_loss`` and ``_get_metrics`` are invoked from ``__init__``.
    """

    def __init__(self, cfg, task, loss_cfg, model):
        super().__init__()
        self.cfg = cfg
        self.task = task
        # keep only the loss configuration belonging to this module's task
        self.loss_cfg = loss_cfg[task]
        self.model = model
        self.has_visualization = True
        self.log_metrics_at_epoch_end = False
        self.test_stage = cfg.testing.get("test_stage", False)
        # per-sample metric records accumulated during testing
        self.test_results = []

        self._get_loss()
        self._get_metrics()

    @abstractmethod
    def _get_loss(self):
        """Construct the task loss (must set ``self.loss``)."""

    @abstractmethod
    def _get_metrics(self):
        """Construct the task metrics (must set ``self.metrics``)."""

    @abstractmethod
    def comp_loss(self, outputs: dict, batch: dict) -> torch.Tensor:
        """Compute the task loss from model outputs and a batch."""

    @abstractmethod
    def comp_metrics(
        self, outputs: dict, batch: dict, reconstruction_type: str
    ) -> dict:
        """Compute the task metrics from model outputs and a batch."""

    @abstractmethod
    def visualize(self, batch, input, output, random_idx, axs):
        """Plot ground truth, input and prediction onto the given axes."""


class MultiMAEAutoencoderModule(TaskModule):
    """Task module for image reconstruction (autoencoding) with a MultiMAE model.

    Uses an MSE reconstruction loss — optionally restricted to the masked
    patches — and SSIM/PSNR metrics. Image intensities are assumed to lie in
    [-1, 1] (see the metric ``data_range``).
    """

    def __init__(self, cfg, task, loss_cfg, model, **kwargs):
        super().__init__(cfg, task, loss_cfg, model)

    def _get_loss(self):
        """Instantiate the reconstruction loss (sets ``self.loss``).

        Raises:
            ValueError: if the configured loss name is not supported.
        """
        if self.loss_cfg == 'mse':
            self.loss = torch.nn.MSELoss()
        else:
            # fail fast here instead of raising a confusing AttributeError
            # later when ``self.loss`` is first used in comp_loss
            raise ValueError(f"Unsupported reconstruction loss: {self.loss_cfg!r}")
        self.only_masked_loss = self.cfg.model.get("only_masked_loss", False)

    def _get_metrics(self):
        """Instantiate image-quality metrics for inputs in [-1, 1]."""
        self.metrics = torch.nn.ModuleDict(
            {
                "ssim": StructuralSimilarityIndexMeasure(data_range=(-1.0, 1.0)),
                "psnr": PeakSignalNoiseRatio(data_range=(-1.0, 1.0)),
            }
        )

    def comp_loss(self, outputs: dict, batch: dict) -> torch.Tensor:
        """Compute the reconstruction loss against the matching target layout.

        Depending on the model output, the loss is computed on the full
        reconstructed image, on (selected + masked) patches, or on the masked
        patches only when ``only_masked_loss`` is set.
        """
        task = self.task

        selected_patches = outputs["selected_patches"]
        masked_patches = outputs["masked_patches"]
        perm_indices = outputs["perm_indices"]
        reconstructed = outputs["reconstructed_patches"][task]

        # patches already reconstructed to the original image size
        if reconstructed.shape[-3:] == self.model.img_size:
            return self.loss(reconstructed, batch[task])

        if task in selected_patches:
            # task was an input task: compare against its own patch split
            if self.only_masked_loss:
                # decoder output is ordered [selected..., masked...]; drop the
                # visible (selected) part and score masked patches only
                num_selected = selected_patches[task].shape[1]
                reconstructed = reconstructed[:, num_selected:, ...]
                target = masked_patches[task]
            else:
                target = torch.cat(
                    [selected_patches[task], masked_patches[task]], dim=1
                )
        else:
            if reconstructed.shape[1] == self.model.num_patches:
                reconstructed = unpatchify(
                    reconstructed, self.model.img_size, self.model.patch_size
                )
                target = batch[task]
            else:
                reconstructed = rearrange(reconstructed, "b l c h w d -> (b l) c h w d")
                target = patchify(batch[task], self.model.patch_size)
                target = shuffle_patches(
                    target,
                    patch_size=self.model.patch_size,
                    permutations=perm_indices[task],
                )
                raise NotImplementedError(
                    "Patch-wise dice computation is not implemented yet"
                )

        return self.loss(reconstructed, target)

    def comp_metrics(
        self, outputs, batch, reconstruction_type: str = "reconstructed_patches"
    ):
        """Compute SSIM/PSNR between reconstruction and ground truth.

        During testing, metrics are additionally reported per channel and
        per sample; per-sample records are appended to ``self.test_results``.
        """
        metric_results = {}
        pred = outputs[reconstruction_type][self.task]
        target = batch[self.task]
        for name, metric in self.metrics.items():
            if target.device != metric.device:
                metric = metric.to(target.device)

            if self.test_stage:
                # report each channel (modality) separately
                for i in range(target.shape[1]):
                    metric_results[f"{name}-{i}"] = metric(pred[:, i], target[:, i])
            else:
                metric_results[name] = metric(pred, target)

        if self.test_stage:
            # record per-sample, per-channel values for later export
            for sample_idx in range(pred.shape[0]):
                sample_values = {
                    "dataset": batch["dataset"][sample_idx],
                    "patient_id": batch["patient_id"][sample_idx],
                }
                for name, metric in self.metrics.items():
                    if target.device != metric.device:
                        metric = metric.to(target.device)
                    for channel_idx in range(target.shape[1]):
                        metric_result = (
                            metric(
                                pred[sample_idx][None, channel_idx],
                                target[sample_idx][None, channel_idx],
                            )
                            .detach()
                            .cpu()
                        )
                        sample_values[f"{name}-{channel_idx}"] = metric_result.item()
                self.test_results.append(sample_values)

        return metric_results

    def visualize(self, batch, input, output, random_idx, axs):
        """Plot the mid axial slice of ground truth, input and prediction."""
        batch_ax, input_ax, pred_ax = axs[0], axs[1], axs[2]
        mid_slice = batch[self.task].shape[-1] // 2
        # keep the index valid for any batch size
        random_idx = random_idx % batch[self.task].shape[0]

        for ax, source, title in (
            (batch_ax, batch, "ground truth"),
            (input_ax, input, "input"),
            (pred_ax, output, "pred"),
        ):
            ax.imshow(
                source[self.task][random_idx, 0, ..., mid_slice].cpu().numpy(),
                cmap="viridis",
                vmin=-1,
                vmax=1,
            )
            ax.set_title(f"{title} - {self.task}")

        return axs


class MultiMAESegmentationModule(TaskModule):
    """Task module for semantic segmentation with a MultiMAE model.

    Supports multiclass segmentation (index labels, argmax decoding) and
    multilabel segmentation (one-hot labels, per-channel sigmoid decoding).
    In the multilabel case labels are remapped to BraTS classes for
    visualization only.
    """

    def __init__(self, cfg, task, loss_cfg, model, multilabel: bool = False, **kwargs):
        self.multilabel = multilabel
        adapter_opts = get_adapter_opts(cfg, task, "output")
        # the output adapter's channel count defines the number of classes,
        # falling back to the global model config
        if "out_channels" in adapter_opts:
            self.num_seg_classes = adapter_opts["out_channels"]
        else:
            self.num_seg_classes = cfg.model.model_params.num_seg_classes
        self.label_remapper = MultiLabelsToBraTSClasses() if multilabel else None

        super().__init__(cfg, task, loss_cfg, model)

    def _get_metrics(self):
        """Instantiate Dice and mIoU metrics.

        During testing, Dice is reported per class (``average="none"``);
        otherwise it is macro-averaged.
        """
        average = "none" if self.test_stage else "macro"
        self.metrics = torch.nn.ModuleDict(
            {
                "dice": DiceScore(
                    num_classes=self.num_seg_classes,
                    average=average,
                    input_format="one-hot" if self.multilabel else "index",
                ),
                "mIoU": MeanIoU(
                    num_classes=self.num_seg_classes,
                    input_format="one-hot" if self.multilabel else "index",
                ),
            }
        )

    def _get_loss(self):
        """Instantiate the first configured loss class from the ``loss`` module."""
        self.loss = [getattr(loss, name)(**opt) for name, opt in self.loss_cfg.items()][
            0
        ]
        self.only_masked_loss = self.cfg.model.get("only_masked_loss", False)

    def comp_loss(self, outputs: dict, batch: dict) -> torch.Tensor:
        """Compute the segmentation loss against the matching target layout.

        If the task appears in ``selected_patches`` the model was run as an
        autoencoder on the segmentation map; otherwise segmentation is a pure
        prediction task.
        """
        task = self.task

        selected_patches = outputs["selected_patches"]
        # with sliding-window inference there are no masked patches
        masked_patches = (
            outputs["masked_patches"] if "masked_patches" in outputs else None
        )
        perm_indices = outputs["perm_indices"] if "perm_indices" in outputs else None
        reconstructed_patches = (
            outputs["reconstructed_image"]
            if "reconstructed_image" in outputs
            else outputs["reconstructed_patches"]
        )
        reconstructed = reconstructed_patches[task]

        # output already has the original spatial size: compare directly
        if reconstructed.shape[-3:] == batch[task].shape[-3:]:
            target = batch[task]
            return self.loss(reconstructed, target)

        # autoencoder-style use of the segmentation map, masked patches only
        if self.task in selected_patches.keys() and self.only_masked_loss:
            num_selected = selected_patches[task].shape[1]
            reconstructed = reconstructed[:, num_selected:, ...]
            target = masked_patches[task]

        # prediction task
        else:
            if reconstructed.shape[1] == self.model.num_patches:
                if task in selected_patches:
                    # undo the patch shuffling applied on the input side
                    reconstructed = unshuffle_patches(reconstructed, perm_indices[task])
                reconstructed = unpatchify(
                    reconstructed, self.model.img_size, self.model.patch_size
                )
                target = batch[task]
            else:
                reconstructed = rearrange(reconstructed, "b l c h w d -> (b l) c h w d")
                target = patchify(batch[task], self.model.patch_size)
                target = shuffle_patches(
                    target,
                    patch_size=self.model.patch_size,
                    permutations=perm_indices[task],
                )
                raise NotImplementedError(
                    "Patch-wise dice computation is not implemented yet"
                )

        return self.loss(reconstructed, target)

    def comp_metrics(
        self, outputs, batch, reconstruction_type: str = "reconstructed_patches"
    ) -> dict:
        """Compute Dice/mIoU after decoding logits into label predictions.

        Vector-valued metric results (per-class) are flattened into
        ``"<name>-<i>"`` entries. During testing, per-sample records are
        appended to ``self.test_results``.
        """
        pred = outputs[reconstruction_type][self.task]
        target = batch[self.task].long()
        metric_results = {}
        # multilabel: threshold per-channel sigmoids; multiclass: argmax
        pred = (
            pred.sigmoid() > 0.5
            if self.multilabel
            else pred.argmax(dim=1, keepdim=True)
        )

        for name, metric in self.metrics.items():
            if target.device != metric.device:
                metric = metric.to(target.device)
            metric_result = metric(pred, target)
            if metric_result.dim() == 0:
                metric_results[name] = metric_result
            else:
                for i in range(metric_result.shape[0]):
                    metric_results[f"{name}-{i}"] = metric_result[i]

        if self.test_stage:
            for sample_idx in range(pred.shape[0]):
                sample_values = {}
                sample_values["dataset"] = batch["dataset"][sample_idx]
                sample_values["patient_id"] = batch["patient_id"][sample_idx]
                for name, metric in self.metrics.items():
                    if target.device != metric.device:
                        metric = metric.to(target.device)
                    metric_result = (
                        metric(
                            pred[sample_idx][None, ...], target[sample_idx][None, ...]
                        )
                        .detach()
                        .cpu()
                    )

                    if metric_result.dim() == 0:
                        sample_values[name] = metric_result.item()
                    else:
                        for i in range(metric_result.shape[0]):
                            sample_values[f"{name}-{i}"] = metric_result[i].item()
                self.test_results.append(sample_values)

        return metric_results

    def visualize(self, batch, input, output, random_idx, axs):
        """Plot the mid axial slice of ground-truth, input and predicted labels."""
        batch_ax, input_ax, pred_ax = axs[0], axs[1], axs[2]
        mid_slice = batch[self.task].shape[-1] // 2
        # keep the index valid for any batch size
        random_idx = random_idx % batch[self.task].shape[0]
        cmap = "viridis" if self.num_seg_classes < 5 else "tab20b"

        ground_truth = batch[self.task]
        # segmentation may be prediction-only, i.e. absent from the input
        input = input[self.task] if self.task in input else None
        output = output[self.task]
        if self.multilabel:
            # remap one-hot labels to BraTS classes for display
            ground_truth = self.label_remapper(ground_truth)
            input = self.label_remapper(input) if input is not None else None
            output = self.label_remapper(output.sigmoid() > 0.5)
            ground_truth = ground_truth[random_idx, ..., mid_slice].cpu().numpy()
            input = (
                input[random_idx, ..., mid_slice].cpu().numpy()
                if input is not None
                else None
            )
            output = output[random_idx, ..., mid_slice].cpu().numpy()
        else:
            ground_truth = ground_truth[random_idx, 0, ..., mid_slice].cpu().numpy()
            input = (
                input[random_idx, 0, ..., mid_slice].cpu().numpy()
                if input is not None
                else None
            )
            output = output.argmax(dim=1, keepdim=True)
            output = output[random_idx, 0, ..., mid_slice].cpu().numpy()

        batch_ax.imshow(
            ground_truth,
            cmap=cmap,
            vmin=0,
            vmax=self.num_seg_classes,
        )
        batch_ax.set_title(f"ground truth - {self.task}")

        if input is not None:
            input_ax.imshow(
                input,
                cmap=cmap,
                vmin=0,
                vmax=self.num_seg_classes,
            )
            input_ax.set_title(f"input - {self.task}")
        else:
            input_ax.set_title(f"no input - {self.task}")

        pred_ax.imshow(
            output,
            cmap=cmap,
            vmin=0,
            vmax=self.num_seg_classes,
        )
        pred_ax.set_title(f"pred - {self.task}")

        return axs


class MultiMAEMultilabelSegmentationModule(MultiMAESegmentationModule):
    """Segmentation task module with the multilabel flag fixed to ``True``."""

    def __init__(self, cfg, task, loss_cfg, model, **kwargs):
        # extra keyword arguments are accepted for interface parity but unused
        super().__init__(
            cfg=cfg, task=task, loss_cfg=loss_cfg, model=model, multilabel=True
        )


class MultiMAEClassificationModule(TaskModule):
    """Task module for binary or multiclass classification with a MultiMAE model.

    Predictions and targets are accumulated across batches and the metrics
    (accuracy, F1, AUROC, MCC) are computed once per epoch via
    ``epoch_end_comp_metrics``; ``comp_metrics`` therefore returns an empty
    dict during training/validation.
    """

    def __init__(self, cfg, task, loss_cfg, model, **kwargs):
        adapter_opts = get_adapter_opts(cfg, task, "output")
        # the output adapter's class count takes precedence over the model config
        if "num_classes" in adapter_opts:
            self.num_classes = adapter_opts["num_classes"]
        else:
            self.num_classes = cfg.model.model_params.num_classes
        self.classification_task = "binary" if self.num_classes <= 2 else "multiclass"
        super().__init__(cfg, task, loss_cfg, model)
        self.has_visualization = False
        self.log_metrics_at_epoch_end = True
        # accumulated over an epoch; consumed and reset by epoch_end_comp_metrics
        self.metric_preds = []
        self.metric_targets = []
        self.sample_vals = []

    def _get_loss(self):
        """Instantiate the first configured loss class from the ``loss`` module."""
        self.loss = [getattr(loss, name)(**opt) for name, opt in self.loss_cfg.items()][
            0
        ]

    def _get_metrics(self):
        """Instantiate classification metrics for binary or multiclass tasks."""
        average = "none" if self.classification_task == "binary" else "macro"
        self.metrics = torch.nn.ModuleDict(
            {
                "acc": Accuracy(
                    task=self.classification_task,
                    num_classes=self.num_classes,
                    average=average,
                ),
                "f1": F1Score(
                    task=self.classification_task,
                    num_classes=self.num_classes,
                    average=average,
                ),
                "auroc": AUROC(
                    task=self.classification_task,
                    num_classes=self.num_classes,
                    average=average,
                ),
                "mcc": MatthewsCorrCoef(
                    task=self.classification_task,
                    num_classes=self.num_classes,
                ),
            }
        )

    def comp_loss(self, outputs: dict, batch: dict) -> torch.Tensor:
        """Compute the classification loss on the task's logits."""
        target = batch[self.task]
        # remove the class dim if it is 1 (i.e. binary classification)
        pred = outputs["reconstructed_patches"][self.task].squeeze(dim=1)
        return self.loss(pred, target)

    def _comp_metrics(self, pred: torch.Tensor, target: torch.Tensor) -> dict:
        """Evaluate every configured metric on the given predictions."""
        metric_results = {}
        for name, metric in self.metrics.items():
            if target.device != metric.device:
                metric = metric.to(target.device)
            metric_results[name] = metric(pred, target)
        return metric_results

    def comp_metrics(
        self,
        outputs: dict,
        batch: dict,
        reconstruction_type: str = "reconstructed_patches",
    ) -> dict:
        """Accumulate probabilities/targets for epoch-end metric computation.

        Returns an empty dict; actual metric values are produced by
        ``epoch_end_comp_metrics``. During testing, per-sample hard
        predictions are appended to ``self.test_results``.
        """
        # remove the class dim if it is 1 (i.e. binary classification)
        pred = outputs[reconstruction_type][self.task].squeeze(dim=1)
        if self.classification_task == "binary":
            pred = pred.sigmoid()
        else:
            pred = pred.softmax(dim=1)
        target = batch[self.task].long()
        self.metric_preds.append(pred.detach())
        self.metric_targets.append(target.detach())

        if self.test_stage:
            for sample_idx in range(target.shape[0]):
                sample_values = {}
                if self.classification_task == "binary":
                    sample_values["pred"] = (
                        (pred[sample_idx].detach().cpu() > 0.5).int().item()
                    )
                else:
                    sample_values["pred"] = torch.argmax(
                        pred[sample_idx].detach().cpu()
                    ).item()
                sample_values["target"] = target[sample_idx].detach().cpu().item()
                sample_values["dataset"] = batch["dataset"][sample_idx]
                sample_values["patient_id"] = batch["patient_id"][sample_idx]
                self.test_results.append(sample_values)
        return {}

    def epoch_end_comp_metrics(self) -> tuple:
        """Compute metrics over everything accumulated during the epoch.

        Returns:
            ``(metric_results, n_samples)``: dict of metric name -> value,
            and the number of accumulated samples. The accumulators are
            cleared afterwards.
        """
        pred = torch.cat(self.metric_preds, dim=0)
        n_samples = pred.shape[0]
        target = torch.cat(self.metric_targets, dim=0)
        # AUROC cannot handle concatenated MetaTensors, so convert them to plain tensors
        pred = pred.as_tensor() if hasattr(pred, "as_tensor") else pred
        target = target.as_tensor() if hasattr(target, "as_tensor") else target
        metric_results = self._comp_metrics(pred, target)
        self.metric_preds = []
        self.metric_targets = []
        return metric_results, n_samples