from random import randint

import torch
import pytorch_lightning as pl
import matplotlib.pyplot as plt
from typing import Dict, Union, List
from omegaconf.dictconfig import DictConfig

# for MultiMAEBaseTrainer
# from models import create_multimae, calc_patchified_dim, interpolate_pos_embed
from util import modular_tasks
from util.sw_inferer import MultiMAESlidingWindowInferer

from models import (
    create_multimae,
    unshuffle_patches,
    unpatchify,
    interpolate_pos_embed,
    MultiMAE3D,
    PatchedInputAdapter,
    SpatialOutputAdapter,
    calc_patchified_dim,
    patchify,
    shuffle_patches,
)
from util.setup_functions import resolve_listconfig_of_dicts

"""
Modular Trainer: 
- for each task in a task list, a new TaskModule is created
- each TaskModule has its own loss function, forward function, metrics, 
    visualization, etc.
- optimizer and scheduler are shared across all TaskModules by the 
    ModularTrainer
- the ModularTrainer is responsible for training, validation, and testing, 
    running the forward step to the model, and then iterates over the 
    TaskModules to compute loss, metrics, ... 
- at init the ModularTrainer creates a list of TaskModules
"""


def CosineSchedulerWithWarmup(optimizer, warmup_epochs, total_epochs, min_lr=1e-6):
    """Linear warmup followed by cosine annealing.

    Args:
        optimizer: optimizer whose learning rate is scheduled.
        warmup_epochs: epochs of linear warmup, starting at 1% of the base LR.
        total_epochs: total number of scheduled epochs (warmup + cosine).
        min_lr: final learning rate at the end of the cosine phase.

    Returns:
        A ``torch.optim.lr_scheduler.SequentialLR`` that runs the warmup for
        ``warmup_epochs`` steps and then the cosine decay for the remainder.
    """
    warmup_scheduler = torch.optim.lr_scheduler.LinearLR(
        optimizer, start_factor=1e-2, total_iters=warmup_epochs
    )
    # BUG FIX: the original passed last_epoch=total_epochs, which constructs
    # the cosine scheduler as if `total_epochs` steps had already elapsed.
    # The default last_epoch=-1 starts it fresh; SequentialLR hands over at
    # the warmup milestone.
    cosine_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=(total_epochs - warmup_epochs), eta_min=min_lr
    )
    scheduler = torch.optim.lr_scheduler.SequentialLR(
        optimizer, [warmup_scheduler, cosine_scheduler], milestones=[warmup_epochs]
    )
    return scheduler

class ModularTrainer(pl.LightningModule):
    """Base LightningModule that shares one optimizer/scheduler across tasks.

    Subclasses are expected to set ``self.model`` and to populate
    ``self.task_modules``; this base class only wires up the optimizer and
    (optionally) the LR scheduler from ``cfg.training``.
    """

    def __init__(self, cfg, **kwargs):
        super().__init__()
        self.cfg = cfg
        self.layerwise_lr_decay_rate = 0.75

        self.task_modules = {}
        self.save_hyperparameters(cfg, ignore="model")

    def configure_optimizers(self) -> dict:
        """Configure the optimizer for your classifier."""

        params = self.model.parameters()
        # cfg.training.optimizer is a single-entry mapping {OptimizerName: kwargs};
        # resolve the torch.optim class from the name and instantiate it.
        opt_name, opt_kwargs = next(iter(self.cfg.training.optimizer.items()))
        self.optimizer = getattr(torch.optim, opt_name)(params, **opt_kwargs)

        if self.cfg.training.scheduler is None:
            self.scheduler_constructor = None
            return self.optimizer

        # cfg.training.scheduler is likewise a single-entry mapping.
        sched_name, sched_kwargs = next(iter(self.cfg.training.scheduler.items()))
        if sched_name == 'CosineSchedulerWithWarmup':
            # local helper, not part of torch.optim.lr_scheduler
            self.scheduler_constructor = lambda opt: CosineSchedulerWithWarmup(opt, **sched_kwargs)
        else:
            self.scheduler_constructor = lambda opt: getattr(torch.optim.lr_scheduler, sched_name)(opt, **sched_kwargs)
        self.scheduler = self.scheduler_constructor(self.optimizer)

        return {
            "optimizer": self.optimizer,
            "lr_scheduler": {
                "scheduler": self.scheduler,
                **self.cfg.training.monitoring,  # for monitor and frequency params
            },
        }


class MultiMAEModularTrainer(ModularTrainer):
    """MultiMAE trainer that delegates per-task work to TaskModules.

    Builds the MultiMAE model and one TaskModule per output task; each
    TaskModule owns its loss, metrics and visualization, while optimizer and
    scheduler handling comes from the ModularTrainer base class.
    """

    def __init__(self, cfg, **kwargs):
        # BUG FIX: the original declared no base class, yet called
        # super().__init__(cfg) and relied on LightningModule methods
        # (register_module, log, named_parameters, device). Inheriting from
        # ModularTrainer restores the evident intent.
        super().__init__(cfg)
        # BUG FIX: self.kwargs is read in _get_task_modules(), but the
        # original commented this assignment out.
        self.kwargs = kwargs

        # input tasks
        self.input_tasks = cfg.model.model_params.input_tasks
        # output tasks
        self.output_tasks = cfg.model.model_params.output_tasks
        # FGSM adversarial perturbation strength; 0.0 disables it
        self.adversarial_eta = cfg.training.adversarial_eta

        # if output_tasks is not just names but names with Tasks or even
        # tasks with opts, this extracts the names from the DictConfig
        if any(isinstance(key_value, Dict) for key_value in self.output_tasks):
            self.output_tasks = [key for key, _ in self.output_tasks.items()]

        print(f"\n{'ModularTrainer: Input Tasks ':=^80}")
        print(f"{self.input_tasks=}")
        print(f"\n{'ModularTrainer: Output Tasks ':=^80}")
        print(f"{self.output_tasks=}")

        print()
        print(f"\n{'ModularTrainer: _get_model() ':=^80}")
        self._get_model()  # build the MultiMAE model
        print(f"\n{'ModularTrainer: _get_task_types() ':=^80}")
        self._get_task_types()
        print(f"\n{'ModularTrainer: _get_task_loss_cfg() ':=^80}")
        self._get_task_loss_cfg()  # per-task loss configuration
        print(f"\n{'ModularTrainer: _get_task_loss_weights() ':=^80}")
        self._get_task_loss_weights()  # per-task loss weights
        print(f"\n{'ModularTrainer: _get_task_modules() ':=^80}")
        self._get_task_modules()
        print(f"\n{'ModularTrainer: _get_ignore_parameters() ':=^80}")
        self._get_ignore_parameters()

        print(f"\n{'ModularTrainer:  ':=^80}")
        self.sliding_window = (
            "sliding_window" in cfg.transforms and cfg.transforms.sliding_window
        )
        if self.sliding_window:
            print("Create Sliding Window inferer")
            # register as module so that lightning transfers it to cuda later
            self.register_module(
                name="inferer",
                module=MultiMAESlidingWindowInferer(**cfg.transforms.sliding_window),
            )
            # shadow the infere() method with the sliding-window path
            self.infere = lambda x: self.inferer(x, self.model)

    def set_test_type(
        self,
        exclude: Union[str, List[str]] = None,
        mask_ratio: Union[float, Dict[str, float]] = None,
        checkpoint_name: str = "",
    ):
        """Configure input masking for a test run.

        Either fully mask the ``exclude`` tasks (leave-one-out style tests)
        or provide explicit per-task ``mask_ratio``; specifying both is an
        error. ``checkpoint_name`` only namespaces the logged test metrics.
        """
        print(f"{exclude=}")

        assert not (
            (exclude is not None) and (mask_ratio is not None)
        ), "Specify either exclude or mask_ratio, not both!"

        # default: no masking on any input task
        mask_ratio = (
            {task: 0.0 for task in self.input_tasks}
            if mask_ratio is None
            else mask_ratio
        )
        self.test_exclude = ""
        self.checkpoint_name = checkpoint_name
        if exclude is not None and exclude != [None]:
            if isinstance(exclude, str):
                exclude = [exclude]
            for task in exclude:
                mask_ratio[task] = 1.0  # fully mask the excluded task
            self.test_exclude = "-".join(["no", *exclude])
        else:
            self.test_exclude = 'full'

        self.model.mask_ratio = mask_ratio
        self.model.leave_one_out = False
        self.model.use_dirichlet = False

    def _get_task_types(self):
        """Map each output task to its TaskModule class name from the config."""
        # e.g. {t1: MultiMAEAutoencoderModule, t2: MultiMAEAutoencoderModule}
        self.task_types = {
            task: t
            for task_type in self.cfg.training.tasks.type
            for task, t in task_type.items()
        }
        # only keep tasks that are in the output_tasks
        self.task_types = {
            task: t for task, t in self.task_types.items() if task in self.output_tasks
        }

    def _get_task_loss_cfg(self):
        """Pair each task with its loss instance config (positionally)."""
        tasks = list(self.task_types.keys())
        losses = list(self.cfg.training.loss.loss_instances)
        # assert that the number of tasks and losses are the same
        assert len(tasks) == len(losses), "Number of tasks and losses must be the same"
        self.task_loss_cfg = dict(zip(tasks, losses))

    def _get_task_loss_weights(self):
        """Map each task to a loss weight (default 1.0, broadcast if single)."""
        tasks = list(self.task_types.keys())
        loss_cfg = self.cfg.training.loss
        # BUG FIX: the original default was the scalar 1.0, which crashes on
        # len() below whenever `loss_weights` is absent from the config.
        loss_weights = getattr(loss_cfg, "loss_weights", None)

        # if loss_weights is not defined, set it to [1.0]
        if loss_weights is None:
            loss_weights = [1.0]

        # a single weight applies to every task
        if len(loss_weights) == 1:
            loss_weights = [loss_weights[0] for _ in range(len(tasks))]

        assert len(tasks) == len(loss_weights), "Number of tasks and loss weights must be the same"
        self.task_loss_weights = dict(zip(tasks, loss_weights))

    def _get_task_modules(self):
        """Instantiate one TaskModule (from util.modular_tasks) per task."""
        self.task_modules: Dict[str, modular_tasks.TaskModule] = {
            task: getattr(modular_tasks, t)(
                self.cfg,
                task=task,
                loss_cfg=self.task_loss_cfg,
                model=self.model,
                **self.kwargs,
            )
            for task, t in self.task_types.items()
        }

    def _get_model(self):
        """Build the MultiMAE model from the config.

        BUG FIX: the original body was ``create_multimae(self.`` — a syntax
        error. Passing the full config is the evident intent
        (NOTE(review): confirm create_multimae's expected argument).
        """
        self.model = create_multimae(self.cfg)

    def _get_ignore_parameters(self):
        """Collect names of fixed (sin-cos) positional embeddings to drop
        from checkpoints — they are deterministic and can be rebuilt."""
        self.ignore_parameters = []
        if 'encoder_pos_embed_type' not in self.cfg.model.model_params:
            return
        if self.cfg.model.model_params.encoder_pos_embed_type == "global_sincos":
            print(f"ignore encoder_pos_embed for checkpoints to save space")
            self.ignore_parameters.append("model.pos_embed")
        for task, decoder in self.model.output_adapters.items():
            if getattr(decoder, 'pos_embed_type', None) == "global_sincos":
                for name, _ in self.named_parameters():
                    if all(w in name for w in ['output_adapters', 'pos_embed', f".{task}."]):
                        print(f"ignore decoder pos_embed for task {task}")
                        self.ignore_parameters.append(name)

    def on_save_checkpoint(self, checkpoint):
        """Strip the ignored (reconstructible) parameters from the checkpoint."""
        for key in self.ignore_parameters:
            # pop instead of del: tolerate keys that are already absent
            checkpoint["state_dict"].pop(key, None)

    def infere(self, batch):
        """Run the model in image space.

        Replaced by the sliding-window inferer in __init__ when enabled.
        """
        return self.model(batch, return_as_image=True, return_as_dict=True)

    def _training_loss(self, outputs: dict, batch: dict) -> tuple:
        """Return (total weighted loss, {task: detached per-task loss})."""
        loss_vals = {}
        # 0-dim CPU tensor; binary ops against device tensors treat it as a scalar
        total_loss = torch.tensor(0, dtype=torch.float32)
        for task, task_module in self.task_modules.items():
            loss_val = task_module.comp_loss(outputs, batch)
            total_loss = total_loss + (self.task_loss_weights[task] * loss_val)
            loss_vals[task] = loss_val.detach()
        return total_loss, loss_vals

    def _generate_adversarial_batch(self, batch, adversarial_eta):
        """FGSM: perturb each input task along the sign of its loss gradient
        and return the adversarial batch."""
        for task in self.input_tasks:
            batch[task] = batch[task].detach().clone().requires_grad_(True)
        outputs = self.model(batch, return_as_image=False, return_as_dict=True)
        loss, _ = self._training_loss(outputs, batch)

        for task in self.input_tasks:
            grad = torch.autograd.grad(loss, batch[task], retain_graph=True)[0]
            # BUG FIX (robustness): the unconditional synchronize crashed
            # CPU-only runs; only synchronize when CUDA is present.
            if torch.cuda.is_available():
                torch.cuda.synchronize()
            task_grad = grad.detach()
            batch[task].requires_grad = False
            task_grad = torch.sign(task_grad)  # we want only the sign value
            task_mask = (batch[task] != -1).float()  # only change foreground
            adv_perturbation = adversarial_eta * task_grad * task_mask
            # renormalize so values stay within the original input range
            adversarial_task = (batch[task] + adv_perturbation) / (1 + adversarial_eta)
            batch[task] = adversarial_task.detach()
        return batch

    # =========================================================================
    # Training
    def training_step(self, batch: dict, batch_idx: int) -> dict:
        """One training step: optional FGSM batch, forward pass in patch
        space, weighted multi-task loss, logging. Returns the total loss."""
        # adversarial_eta > 0.0: train on adversarial examples
        if self.adversarial_eta > 0.0:
            with torch.set_grad_enabled(True):
                batch = self._generate_adversarial_batch(batch, self.adversarial_eta)

        # patch space is sufficient for the training losses
        outputs = self.model(batch, return_as_image=False, return_as_dict=True)

        first_task = list(self.task_modules.keys())[0]
        batch_size = outputs["reconstructed_patches"][first_task].shape[0]

        # compute and log training losses
        total_loss, loss_vals = self._training_loss(outputs, batch)
        for task, loss_val in loss_vals.items():
            self.log(
                f"loss/train/{task}",
                loss_val,
                on_step=False,
                on_epoch=True,
                sync_dist=True,
                batch_size=batch_size,
            )
        self.log(
            "loss/train",
            total_loss,
            on_step=False,
            on_epoch=True,
            sync_dist=True,
            batch_size=batch_size,
        )
        return total_loss

    # =========================================================================
    # Validation
    def on_validation_epoch_start(self) -> None:
        """Pick a random batch/sample index to visualize at epoch end.

        NOTE(review): assumes trainer.val_dataloaders is a list of
        dataloaders (older Lightning API) — confirm for the PL version in use.
        """
        self.random_batch_idx = randint(0, len(self.trainer.val_dataloaders[0]) - 1)
        self.random_idx = randint(0, self.trainer.val_dataloaders[0].batch_size - 1)

    def validation_step(self, batch: dict, batch_idx: int) -> dict:
        """Validate one batch: metrics (every nth epoch), losses, and return
        the visualization tuple only for the randomly chosen batch."""
        reconstruction_type = (
            "reconstructed_image" if self.sliding_window else "reconstructed_patches"
        )

        outputs = self.infere(batch)  # val metrics are calculated in image space

        first_task = list(self.task_modules.keys())[0]
        batch_size = outputs[reconstruction_type][first_task].shape[0]

        # compute validation metrics only every
        # check_val_every_n_epoch * log_metrics_every_nth_val epochs
        if (self.trainer.current_epoch + 1) % (
            self.trainer.check_val_every_n_epoch
            * self.cfg.training.log_metrics_every_nth_val
        ) == 0:
            for task, task_module in self.task_modules.items():
                task_metrics = task_module.comp_metrics(outputs, batch, reconstruction_type)
                # some modules defer metric logging to the epoch end
                if not task_module.log_metrics_at_epoch_end:
                    for name, metric_value in task_metrics.items():
                        self.log(
                            f"val/{name}/{task}",
                            metric_value,
                            on_epoch=True,
                            sync_dist=True,
                            batch_size=batch_size,
                        )

        # compute the weighted validation loss
        total_loss = torch.tensor(0, dtype=torch.float32).to(self.device)
        for task, task_module in self.task_modules.items():
            loss_val = task_module.comp_loss(outputs, batch)
            total_loss += loss_val * self.task_loss_weights[task]
            self.log(
                f"loss/val/{task}",
                loss_val,
                on_step=False,
                on_epoch=True,
                sync_dist=True,
                batch_size=batch_size,
            )
        self.log(
            "loss/val",
            total_loss,
            on_step=False,
            on_epoch=True,
            sync_dist=True,
            batch_size=batch_size,
        )

        # return randomly chosen batch and predictions for visualization
        if batch_idx == self.random_batch_idx:
            selected_with_masked = outputs["selected_patches"]
            return batch, selected_with_masked, outputs[reconstruction_type]

    def validation_epoch_end(self, outputs) -> None:
        """Aggregate validation: log reconstruction images and epoch-end metrics."""
        # BUG FIX: validation_step returns a tuple only for the randomly
        # selected batch and None otherwise, so outputs[0] was not
        # guaranteed to be the visualization tuple — filter Nones first.
        vis_outputs = [out for out in outputs if out is not None]
        # log here on one gpu only
        if self.trainer.is_global_zero:
            # visualize ground truth / input / reconstructed images
            if (self.trainer.current_epoch + 1) % (
                self.trainer.check_val_every_n_epoch
                * self.cfg.training.log_images_every_nth_val
            ) == 0 and vis_outputs:
                batch, inputs, preds = vis_outputs[0]
                fig = self.visualize(batch, inputs, preds)

                if self.logger:
                    self.logger.log_image(key="reconstructions", images=[fig])
            # log metrics for modules that compute them at epoch end
            if (self.trainer.current_epoch + 1) % (
                self.trainer.check_val_every_n_epoch
                * self.cfg.training.log_metrics_every_nth_val
            ) == 0:
                for task, task_module in self.task_modules.items():
                    if task_module.log_metrics_at_epoch_end:
                        task_metrics, n_samples = task_module.epoch_end_comp_metrics()
                        for name, metric_value in task_metrics.items():
                            self.log(
                                f"val/{name}/{task}",
                                metric_value,
                                on_epoch=True,
                                sync_dist=True,
                                batch_size=n_samples,
                            )

    def on_test_epoch_start(self) -> None:
        """Pick a random batch/sample for visualization and reset results.

        NOTE(review): assumes trainer.test_dataloaders is a list of
        dataloaders (older Lightning API) — confirm for the PL version in use.
        """
        self.random_batch_idx = randint(0, len(self.trainer.test_dataloaders[0]) - 1)
        self.random_idx = randint(0, self.trainer.test_dataloaders[0].batch_size - 1)
        self.test_results = []

    def test_step(self, batch: dict, batch_idx: int) -> dict:
        """Test one batch: per-task metrics and losses, accumulated into
        self.test_results; visualizes the randomly chosen batch."""
        reconstruction_type = (
            "reconstructed_image" if self.sliding_window else "reconstructed_patches"
        )

        outputs = self.infere(batch)  # test metrics are calculated in image space

        first_task = list(self.task_modules.keys())[0]
        batch_size = outputs[reconstruction_type][first_task].shape[0]

        batch_results = []
        total_loss = torch.tensor(0, dtype=torch.float32).to(self.device)

        for task, task_module in self.task_modules.items():
            task_results = {"batch_idx": batch_idx, "task": task}
            # compute test metrics
            task_metrics = task_module.comp_metrics(outputs, batch, reconstruction_type)
            for name, metric_value in task_metrics.items():
                task_results[name] = metric_value.item()
                # some modules require logging metrics at epoch end
                # to compute the metrics correctly (e.g. AUROC)
                if not task_module.log_metrics_at_epoch_end:
                    self.log(
                        f"test/{self.checkpoint_name}/{self.test_exclude}/{name}/{task}",
                        metric_value,
                        on_epoch=True,
                        sync_dist=True,
                        batch_size=batch_size,
                    )

            # compute test loss
            loss_val = task_module.comp_loss(outputs, batch)
            total_loss += loss_val * self.task_loss_weights[task]
            # log loss for each task
            task_results["loss"] = loss_val.item()
            self.log(
                f"loss/test/{self.checkpoint_name}/{self.test_exclude}/{task}",
                loss_val,
                on_step=False,
                on_epoch=True,
                sync_dist=True,
                batch_size=batch_size,
            )
            batch_results.append(task_results)

        # log combined (weighted) loss
        batch_results.append(
            {
                "batch_idx": batch_idx,
                "loss": total_loss.item(),
            }
        )
        self.log(
            f"loss/test/{self.checkpoint_name}/{self.test_exclude}",
            total_loss,
            on_step=False,
            on_epoch=True,
            sync_dist=True,
            batch_size=batch_size,
        )

        self.test_results += batch_results

        if batch_idx == self.random_batch_idx:
            self.visualize(batch, outputs["selected_patches"], outputs[reconstruction_type])

    def test_epoch_end(self, outputs):
        """Log epoch-end metrics for modules that require them
        (e.g. classification tasks with AUROC)."""
        full_test_results = []

        for task, task_module in self.task_modules.items():
            if task_module.log_metrics_at_epoch_end:
                task_test_results = {
                    "batch_idx": None,
                    "task": task,
                }
                task_metrics, n_samples = task_module.epoch_end_comp_metrics()
                for name, metric_value in task_metrics.items():
                    task_test_results[name] = metric_value.item()
                    self.log(
                        f"test/{self.checkpoint_name}/{self.test_exclude}/{name}/{task}",
                        metric_value,
                        on_epoch=True,
                        sync_dist=True,
                        batch_size=n_samples,
                    )
                full_test_results.append(task_test_results)
        self.test_results += full_test_results

    def visualize(self, batch, inputs, outputs):
        """Plot ground truth / input / reconstruction rows per task.

        Returns the matplotlib figure (one row per visualizable task).
        """
        # Regression / Classification tasks are not visualized
        all_visualizable_output_tasks = [
            task for task, module in self.task_modules.items() if module.has_visualization
        ]

        # input tasks first, then any visualizable output-only tasks
        all_tasks = list(self.input_tasks) + [
            task for task in all_visualizable_output_tasks if task not in self.input_tasks
        ]

        n_tasks = len(all_tasks)
        fig, axs = plt.subplots(n_tasks, 3, figsize=(5 * 3, 5 * n_tasks), squeeze=False)

        # clamp the pre-chosen sample index to the actual batch size
        random_idx = self.random_idx % inputs[all_tasks[0]].shape[0]

        for task_idx, task in enumerate(all_tasks):
            if task in batch and task not in self.task_modules:
                batch_ax = axs[task_idx, 0]
                input_ax = axs[task_idx, 1]
                # show the central slice of the (3D) volume
                gt_slice = batch[task].shape[-1] // 2
                batch_ax.imshow(
                    batch[task][random_idx, 0, ..., gt_slice].detach().cpu(),
                    cmap="viridis",
                    vmin=-1,
                    vmax=1,
                )
                batch_ax.set_title(f"ground truth - {task}")

                in_slice = inputs[task].shape[-1] // 2
                input_ax.imshow(
                    inputs[task][random_idx, 0, ..., in_slice].detach().cpu(),
                    cmap="viridis",
                    vmin=-1,
                    vmax=1,
                )
                # BUG FIX: the original set this title on batch_ax,
                # overwriting the ground-truth title above
                input_ax.set_title(f"input - {task}")
            else:
                axs[task_idx, :] = self.task_modules[task].visualize(
                    batch, inputs, outputs, self.random_idx, axs[task_idx]
                )

        return fig

    def load_model_checkpoints(
        self, checkpoint_path: str, load_input_adapters=None, load_output_adapters=None
    ):
        """Load a checkpoint into the model, with adapter-level control.

        Weights for adapters missing from the checkpoint keep their random
        initialization; positional embeddings are interpolated when the
        checkpoint was trained at a different patchified resolution.

        Args:
            checkpoint_path: path to a Lightning checkpoint file.
            load_input_adapters: names of input adapters to load (None = all).
            load_output_adapters: names of output adapters to load (None = all).
        """
        # tensors are loaded to the same device they were used in the
        # checkpoint if cuda is available, otherwise to cpu
        map_location = None if torch.cuda.is_available() else torch.device("cpu")

        # get the randomly initialized state_dict
        state_dict = self.state_dict()
        model_patchified_dim = self.model.patchified_dim

        # load the checkpoint file
        checkpoint = torch.load(
            checkpoint_path, map_location=map_location, weights_only=False
        )

        # extract state_dict from checkpoint file
        checkpoint_state_dict = checkpoint["state_dict"]

        # get hyperparameters, needed for positional embedding interpolation
        checkpoint_model_img_size = checkpoint["hyper_parameters"]["model"]["model_params"]["img_size"]
        checkpoint_model_patch_size = checkpoint["hyper_parameters"]["model"]["model_params"]["patch_size"]
        checkpoint_patchified_dim = calc_patchified_dim(
            checkpoint_model_img_size, checkpoint_model_patch_size
        )

        def check_and_interpolate_pos_embed(pos_embed):
            # interpolate only when the patchified resolutions differ
            if not model_patchified_dim == checkpoint_patchified_dim:
                pos_embed = interpolate_pos_embed(
                    pos_embed, checkpoint_patchified_dim, model_patchified_dim
                )
            return pos_embed

        # special loading procedures for those elements, are not automatically loaded
        exclude_loading = ["pos_embed", "loss"]
        exclude_loading += [] if load_input_adapters is None else ["input_adapters"]
        exclude_loading += [] if load_output_adapters is None else ["output_adapters"]

        # iterate over checkpoint state dict and "insert" existing weights into
        # model state dict. Elements / Adapters that do not exist in the
        # checkpoint state dict are used unchanged randomly initialized.
        for key in checkpoint_state_dict.keys():
            # if not in any special rules, just load checkpoint_state into
            # model state dict.
            if not any(ex in key for ex in exclude_loading):
                state_dict[key] = checkpoint_state_dict[key]
            # special rules for input adapters, if specified in
            # load_input_adapters
            elif ("input_adapters" in key) and (load_input_adapters is not None):
                if any(adapter in key for adapter in load_input_adapters):
                    # as long as the adapter checkpoint is a subset of keys
                    # from the model checkpoint, it can be loaded.
                    state_dict[key] = (
                        check_and_interpolate_pos_embed(checkpoint_state_dict[key])
                        if "pos_embed" in key
                        else checkpoint_state_dict[key]
                    )
            # special rules for output adapters, if specified in
            # load_output_adapters
            elif ("output_adapters" in key) and (load_output_adapters is not None):
                if any(adapter in key for adapter in load_output_adapters):
                    state_dict[key] = (
                        check_and_interpolate_pos_embed(checkpoint_state_dict[key])
                        if "pos_embed" in key
                        else checkpoint_state_dict[key]
                    )
            elif "pos_embed" in key:
                state_dict[key] = check_and_interpolate_pos_embed(
                    checkpoint_state_dict[key]
                )
            elif "loss" in key:
                continue
            else:
                raise ValueError(f"Key {key} not found in model state dict")

        self.load_state_dict(state_dict)

    def freeze_weights(self, parts: list):
        """Disable gradients for the named model parts.

        Recognized parts: input_adapters, input_pos_embed, encoder,
        output_adapters, input_adapters_proj, output_adapters_proj.
        """
        print(f"Freeze: {parts}")
        for part in parts:
            if part == "input_adapters":
                for param in self.model.input_adapters.parameters():
                    param.requires_grad = False
            if part == "input_pos_embed":
                self.model.pos_embed.requires_grad = False
            if part == "encoder":
                for param in self.model.encoder.parameters():
                    param.requires_grad = False
            if part == "output_adapters":
                # BUG FIX: the original iterated self.model.parameters(),
                # freezing the entire model instead of the output adapters
                for param in self.model.output_adapters.parameters():
                    param.requires_grad = False
            if part == "input_adapters_proj":
                for name, param in self.model.input_adapters.named_parameters():
                    if "proj" in name:
                        param.requires_grad = False
            if part == "output_adapters_proj":
                for name, param in self.model.output_adapters.named_parameters():
                    if "out_proj" in name:
                        param.requires_grad = False
