from abc import abstractmethod

import pytorch_lightning as pl
import torch

from torchmetrics.image import StructuralSimilarityIndexMeasure, PeakSignalNoiseRatio
from torchmetrics.segmentation import DiceScore, MeanIoU
from torchmetrics.classification import Accuracy, F1Score, AUROC, MatthewsCorrCoef

from einops import rearrange

import loss
from models.multimae3d_utils import (
    unshuffle_patches,
    unpatchify,
    patchify,
    shuffle_patches,
)
from data.transforms import MultiLabelsToBraTSClasses

from util.setup_functions import get_adapter_opts


class TaskModule:
    """Base class for per-task training modules.

    A task module bundles the loss function, the metric collection and the
    visualization logic for a single task/modality. Concrete subclasses
    implement the abstract hooks below; ``__init__`` calls ``_get_loss`` and
    ``_get_metrics`` so subclasses only need to populate ``self.loss`` /
    ``self.metrics``.

    NOTE(review): the class does not inherit from ``abc.ABC``, so the
    ``@abstractmethod`` decorators are not enforced at instantiation time.
    """

    def __init__(self, cfg, task, loss_cfg, model):
        super().__init__()
        self.cfg = cfg
        self.task = task  # name of the task/modality this module handles
        self.loss_cfg = loss_cfg[task]  # task-specific slice of the loss config
        self.model = model
        self.has_visualization = True
        self.log_metrics_at_epoch_end = False
        self.test_stage = cfg.testing.test_stage
        self.test_results = []  # per-sample metric rows collected during testing

        self._get_loss()
        self._get_metrics()

    @abstractmethod
    def _get_loss(self):
        """Instantiate ``self.loss`` from ``self.loss_cfg``."""

    @abstractmethod
    def _get_metrics(self):
        """Instantiate ``self.metrics`` for this task."""

    @abstractmethod
    def comp_loss(self, outputs: dict, batch: dict) -> torch.Tensor:
        """Compute the training loss from model outputs and a batch."""

    @abstractmethod
    def comp_metrics(
        self, outputs: dict, batch: dict, reconstruction_type: str
    ) -> dict:
        """Compute evaluation metrics; return a ``name -> value`` mapping."""

    @abstractmethod
    def visualize(self, batch, input, output, random_idx, axs):
        """Draw ground truth / input / prediction panels onto ``axs``."""


class MultiMAEAutoencoderModule(TaskModule):
    """Task module for MultiMAE-style masked autoencoding (reconstruction).

    Computes a reconstruction loss between predicted patches (or the full
    reconstructed volume) and the ground-truth modality, and tracks
    SSIM/PSNR metrics. Images are assumed to be normalized to [-1, 1]
    (see the metric ``data_range``).
    """

    def __init__(self, cfg, task, loss_cfg, model, **kwargs):
        super().__init__(cfg, task, loss_cfg, model)

    def _get_loss(self):
        """Instantiate ``self.loss`` from the task's loss configuration."""
        if self.loss_cfg == 'mse':
            self.loss = torch.nn.MSELoss()
        else:
            # Fail fast: the original code silently left ``self.loss`` unset
            # for unknown configs, which surfaced later as a confusing
            # AttributeError during training.
            raise ValueError(
                f"Unsupported loss config for task '{self.task}': {self.loss_cfg}"
            )
        self.only_masked_loss = self.cfg.model.only_masked_loss

    def _get_metrics(self):
        """Instantiate SSIM/PSNR metrics over the [-1, 1] intensity range."""
        self.metrics = torch.nn.ModuleDict(
            {
                "ssim": StructuralSimilarityIndexMeasure(data_range=(-1.0, 1.0)),
                "psnr": PeakSignalNoiseRatio(data_range=(-1.0, 1.0)),
            }
        )

    def comp_loss(self, outputs: dict, batch: dict) -> torch.Tensor:
        """Compute the reconstruction loss for this task's modality.

        Depending on the decoder output shape, the loss is taken against
        the full target volume, only the masked patches, or all (visible +
        masked) patches of the target modality.
        """
        task = self.task

        selected_patches = outputs["selected_patches"]
        masked_patches = outputs["masked_patches"]
        reconstructed = outputs["reconstructed_patches"][task]

        # Full-volume reconstruction: compare directly against the target image.
        if reconstructed.shape[-3:] == self.model.img_size:
            target = batch[task]
            return self.loss(reconstructed, target)

        if task in selected_patches:
            # Task is an input task, so the loss can be computed w.r.t. the
            # selected (visible) and masked patches produced by the encoder.
            if self.only_masked_loss:
                # Only the reconstructed masked patches contribute; the
                # visible patches occupy the first ``num_selected`` slots.
                num_selected = selected_patches[task].shape[1]
                reconstructed = reconstructed[:, num_selected:, ...]
                target = masked_patches[task]
            else:
                target = torch.cat(
                    [selected_patches[task], masked_patches[task]], dim=1
                )
        else:
            if reconstructed.shape[1] == self.model.num_patches:
                reconstructed = unpatchify(
                    reconstructed, self.model.img_size, self.model.patch_size
                )
                target = batch[task]
            else:
                # The original code performed rearrange/patchify/shuffle work
                # here before unconditionally raising; that dead computation
                # has been removed.
                raise NotImplementedError(
                    "Patch-wise dice computation is not implemented yet"
                )

        return self.loss(reconstructed, target)

    def comp_metrics(self, outputs, batch, reconstruction_type="reconstructed_patches"):
        """Compute SSIM/PSNR between the reconstruction and the ground truth.

        During testing (``self.test_stage``), metrics are computed per
        channel, and per-sample rows are appended to ``self.test_results``
        for the final test report.
        """
        metric_results = {}
        pred = outputs[reconstruction_type][self.task]  # predicted volume for this modality
        target = batch[self.task]  # ground-truth volume for this modality

        for name, metric in self.metrics.items():
            # Metrics may live on a different device than the batch tensors.
            if target.device != metric.device:
                metric = metric.to(target.device)

            if self.test_stage:
                # Per-channel metrics during testing.
                for i in range(target.shape[1]):
                    metric_results[f"{name}-{i}"] = metric(pred[:, i], target[:, i])
            else:
                metric_results[name] = metric(pred, target)

        if self.test_stage:
            # Collect per-sample, per-channel results for the test report.
            for sample_idx in range(pred.shape[0]):
                sample_values = {
                    "dataset": batch["dataset"][sample_idx],
                    "patient_id": batch["patient_id"][sample_idx],
                }
                for name, metric in self.metrics.items():
                    if target.device != metric.device:
                        metric = metric.to(target.device)
                    for channel_idx in range(target.shape[1]):
                        metric_result = (
                            metric(
                                pred[sample_idx][None, channel_idx],
                                target[sample_idx][None, channel_idx],
                            )
                            .detach()
                            .cpu()
                        )
                        sample_values[f"{name}-{channel_idx}"] = metric_result.item()
                self.test_results.append(sample_values)

        return metric_results

    def visualize(self, batch, input, output, random_idx, axs):
        """Plot the middle depth slice of ground truth, input and prediction.

        Draws channel 0 of one sample onto the three provided axes and
        returns ``axs``.
        """
        batch_ax, input_ax, pred_ax = axs[0], axs[1], axs[2]
        slice_idx = batch[self.task].shape[-1] // 2  # middle slice along depth
        random_idx = random_idx % batch[self.task].shape[0]  # keep index in range

        panels = (
            (batch_ax, batch, "ground truth"),
            (input_ax, input, "input"),
            (pred_ax, output, "pred"),
        )
        for ax, source, title in panels:
            ax.imshow(
                source[self.task][random_idx, 0, ..., slice_idx].cpu().numpy(),
                cmap="viridis",
                vmin=-1,
                vmax=1,
            )
            ax.set_title(f"{title} - {self.task}")

        return axs

