import logging

logger = logging.getLogger(__name__)

from typing import Union, List, Tuple, Dict, Optional
import copy

import torch
import torch.nn as nn
from timm.layers import trunc_normal_
from timm.layers.helpers import to_3tuple
from timm.models.vision_transformer import Block
from timm.models.vision_transformer import VisionTransformer
from timm.models.vision_transformer import _load_weights
from timm.models.layers import trunc_normal_, lecun_normal_
from timm.models.layers import DropPath
from einops import rearrange
from collections import OrderedDict
from functools import partial
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from omegaconf.listconfig import ListConfig
import math
from models.multimae3d_utils import (
    calc_patchified_dim,
    patchify,
    unpatchify,
    shuffle_patches,
    unshuffle_patches,
    generate_dirichlet_masked,
    sample_random_key,
    build_position_embedding,
    get_batch_pos_embed,
    mask_data,
)
from util.setup_functions import resolve_listconfig_of_dicts
from models.multimae3d_adapters import (
    PatchedInputAdapter,
    SpatialOutputAdapter,
    SegmenterOutputAdapter,
    SETROutputAdapter,
    UNETROutputAdapter,
    ConvSpatialOutputAdapter,
    PatchConvSpatialOutputAdapter,
    LinearOutputAdapter,
)
from models_mamba import create_block as create_mamaba_block
from models_mamba import Block as mamba_block
from rope import *
import random
try:
    from mamba_ssm.ops.triton.layer_norm import RMSNorm, layer_norm_fn, rms_norm_fn
except ImportError:
    RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None

from models import multimae3d_adapters
from util.setup_functions import get_training_type


# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
def _init_weights(
    module,
    n_layer,
    initializer_range=0.02,  # Now only used for embedding layer.
    rescale_prenorm_residual=True,
    n_residuals_per_layer=1,  # Change to 2 if we have MLP
):
    if isinstance(module, nn.Linear):
        if module.bias is not None:
            if not getattr(module.bias, "_no_reinit", False):
                nn.init.zeros_(module.bias)
    elif isinstance(module, nn.Embedding):
        nn.init.normal_(module.weight, std=initializer_range)

    if rescale_prenorm_residual:
        # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
        #   > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
        #   > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
        #   >   -- GPT-2 :: https://openai.com/blog/better-language-models/
        #
        # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
        for name, p in module.named_parameters():
            if name in ["out_proj.weight", "fc2.weight"]:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                with torch.no_grad():
                    p /= math.sqrt(n_residuals_per_layer * n_layer)


def segm_init_weights(m):
    """Segmentation-head weight init, applied via ``model.apply``.

    Linear: truncated-normal weights (std=0.02), zero bias.
    Conv2d: LeCun-normal weights, zero bias.
    Norm layers (LayerNorm/GroupNorm/BatchNorm2d): weight=1, bias=0.
    """
    if isinstance(m, nn.Linear):
        trunc_normal_(m.weight, std=0.02)
        # (removed a redundant re-check of isinstance(m, nn.Linear) here)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.Conv2d):
        # NOTE conv was left to pytorch default in my original init
        lecun_normal_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    elif isinstance(m, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):
        # guard against non-affine norms (weight/bias are None there)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
        if m.weight is not None:
            nn.init.ones_(m.weight)


# =============================================================================
# MultiMAE for 3D data
# =============================================================================


class MultiMAE3D(torch.nn.Module):
    def __init__(
        self,
        input_adapters: Dict[str, nn.Module],
        output_adapters: Dict[str, nn.Module],
        mask_ratio: Dict[str, float],  # TODO: change to Dirichlet distribution
        global_img_size: Union[int, Tuple[int, int, int]] = (240, 240, 160),
        img_size: Union[int, Tuple[int, int, int]] = (224, 224, 160),
        patch_size: Union[int, Tuple[int, int, int]] = (16, 16, 16),
        num_global_tokens: int = 1,
        embed_dim: int = 768,
        use_dirichlet: bool = False,
        use_seg_masking: bool = False,
        leave_one_out: bool = False,
        dirichlet_alpha: float = 1.0,
        mlp_ratio: float = 4.0,
        pos_embed_type: str = "perceptron",  # TODO: add other positional embedding types
        qkv_bias: bool = True,  # as MultiMAE
        attn_drop_rate: float = 0.0,
        norm_layer: nn.Module = partial(nn.LayerNorm, eps=1e-6),  # as MultiMAE
        token_aggregation: str = None,
        eval_masking: bool = True,
        apply_pos_embed_to_context: bool = False,
        deep_copy_batch: bool = False,
        # =========================== mamba parameters =======================
        mamba_block_depth=24,
        d_state=16,
        channels=3,
        num_classes=1000,
        ssm_cfg=None,
        drop_rate=0.,
        drop_path_rate=0.1,
        norm_epsilon: float = 1e-5,
        rms_norm: bool = True,
        initializer_cfg=None,
        fused_add_norm=True,
        residual_in_fp32=True,
        device=None,
        dtype=None,
        ft_seq_len=None,
        pt_hw_seq_len=14,
        if_bidirectional=False,
        final_pool_type='none',
        if_abs_pos_embed=True,
        if_rope=False,
        if_rope_residual=False,
        flip_img_sequences_ratio=-1.,
        if_bimamba=False,
        bimamba_type="v2",
        if_divide_out=True,
        init_layer_scale=None,
        # --- restored parameters: these were commented out of the signature
        # but still referenced below, so every construction raised NameError.
        if_cls_token=True,
        use_double_cls_token=False,
        use_middle_cls_token=True,
        # encoder depth reported to output adapters; None -> mamba_block_depth
        depth: int = None,
        # accepted for API compatibility (e.g. create_multimae passes it);
        # the Mamba encoder has no attention heads, so it is unused.
        num_heads: int = 12,
        *args,
        **kwargs,
    ) -> None:
        """MultiMAE-style masked autoencoder for 3D volumes with a Mamba encoder.

        Patches of every input task are masked, tokenized by the per-task
        input adapters, concatenated (plus optional learnable global tokens)
        and encoded by a stack of Mamba blocks; output adapters decode the
        per-layer encoder tokens.

        Args:
            input_adapters: OrderedDict mapping task name -> input adapter.
            output_adapters: OrderedDict mapping task name -> output adapter.
            mask_ratio: per-task masking ratio(s).
            global_img_size: full-volume size used for positional embeddings.
            img_size / patch_size: crop and patch sizes (int or 3-tuple).
            num_global_tokens: number of learnable [CLS]-like tokens.
            embed_dim: token embedding dimension.
            mamba_block_depth: number of Mamba encoder blocks (0 -> identity).
            Remaining flags mirror the Vim/Mamba reference implementation.
        """
        super().__init__(*args, **kwargs)

        # =====================================================================
        # assertions
        # Input and output adapters need to have fixed order to guarantee
        # correct tokenization and concatenation of tokens
        assert isinstance(
            input_adapters, OrderedDict
        ), "input_adapters must be an OrderedDict"
        assert isinstance(
            output_adapters, OrderedDict
        ), "output_adapters must be an OrderedDict"

        # =====================================================================
        # store additional parameters here:
        self.img_size = to_3tuple(img_size)
        self.patch_size = to_3tuple(patch_size)
        self.num_global_tokens = num_global_tokens
        self.embed_dim = embed_dim

        self.use_seg_masking = use_seg_masking
        self.use_dirichlet = use_dirichlet
        self.dirichlet_alpha = dirichlet_alpha
        self.leave_one_out = leave_one_out
        self.mlp_ratio = mlp_ratio
        self.mask_ratio = mask_ratio
        self.pos_embed_type = pos_embed_type
        self.token_aggregation = token_aggregation
        self.eval_masking = eval_masking
        # if not None, any task in batch with 100% masking will be replaced by this value
        self.full_masked_val = None
        self.apply_pos_embed_to_context = apply_pos_embed_to_context
        self.deep_copy_batch = deep_copy_batch

        # Encoder depth as exposed to the output adapters (was referenced but
        # never set, raising AttributeError below). NOTE(review): the actual
        # encoder has ``mamba_block_depth`` blocks; a caller-supplied ``depth``
        # that differs may confuse adapters indexing the per-layer token list
        # — confirm intended semantics.
        self.depth = mamba_block_depth if depth is None else depth

        self.patchified_dim = calc_patchified_dim(self.img_size, self.patch_size)
        self.num_patches = (
            self.patchified_dim[0] * self.patchified_dim[1] * self.patchified_dim[2]
        )
        print(
            f"img_size: {img_size}, patch_size: {patch_size}, num_patches: {self.num_patches}"
        )
        print(f"{apply_pos_embed_to_context=}")

        # =====================================================================
        # mamba parameters
        self.residual_in_fp32 = residual_in_fp32
        self.fused_add_norm = fused_add_norm
        self.if_bidirectional = if_bidirectional
        self.final_pool_type = final_pool_type
        self.if_abs_pos_embed = if_abs_pos_embed
        self.if_rope = if_rope
        self.if_rope_residual = if_rope_residual
        self.flip_img_sequences_ratio = flip_img_sequences_ratio
        self.if_cls_token = if_cls_token
        self.use_double_cls_token = use_double_cls_token
        self.use_middle_cls_token = use_middle_cls_token
        self.num_tokens = 1 if if_cls_token else 0

        self.num_classes = num_classes
        self.d_model = self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models

        if if_rope:
            half_head_dim = embed_dim // 2
            # img_size/patch_size were normalized to 3-tuples above, so the
            # original scalar ``img_size // patch_size`` would fail here.
            # NOTE(review): this uses the first spatial axis — confirm against
            # how VisionRotaryEmbeddingFast is applied to 3D token grids.
            hw_seq_len = self.img_size[0] // self.patch_size[0]
            self.rope = VisionRotaryEmbeddingFast(
                dim=half_head_dim,
                pt_seq_len=pt_hw_seq_len,
                ft_seq_len=hw_seq_len
            )

        # =====================================================================
        # init input and output adapters
        for _, input_adapter in input_adapters.items():
            input_adapter.init(embed_dim=embed_dim)
        self.input_adapters = nn.ModuleDict(input_adapters)

        in_channels = (
            self.input_adapters["images"].in_channels
            if "images" in self.input_adapters
            else None
        )

        for _, output_adapter in output_adapters.items():
            output_adapter.init(
                enc_embed_dim=embed_dim,
                enc_depth=self.depth,
                input_tasks=list(self.input_adapters.keys()),
                in_channels=in_channels,
                global_img_size=global_img_size,
            )
        self.output_adapters = nn.ModuleDict(output_adapters)

        # =====================================================================
        # init additional global tokens
        if num_global_tokens > 0:
            self.global_tokens = nn.Parameter(
                torch.randn(num_global_tokens, self.embed_dim)
            )

        # =====================================================================
        # init positional_embeddings
        self.pos_embed, self.global_grid_size = build_position_embedding(
            num_patches=self.num_patches,
            embed_dim=embed_dim,
            pos_embed_type=pos_embed_type,
            patchified_dim=self.patchified_dim,
            patch_size=self.patch_size,
            global_img_size=global_img_size,
        )

        # =====================================================================
        # init encoder mamba
        factory_kwargs = {"device": device, "dtype": dtype}

        # stochastic depth decay rule: linearly increasing drop-path per block
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, mamba_block_depth)]
        inter_dpr = [0.0] + dpr
        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.encoder = (
            nn.Sequential(
                *[
                    create_mamaba_block(
                        embed_dim,
                        d_state=d_state,
                        ssm_cfg=ssm_cfg,
                        norm_epsilon=norm_epsilon,
                        rms_norm=rms_norm,
                        residual_in_fp32=residual_in_fp32,
                        fused_add_norm=fused_add_norm,
                        layer_idx=i,
                        if_bimamba=if_bimamba,
                        bimamba_type=bimamba_type,
                        drop_path=inter_dpr[i],
                        if_divide_out=if_divide_out,
                        init_layer_scale=init_layer_scale,
                        **factory_kwargs,
                    )
                    for i in range(mamba_block_depth)
                ]
            )
            if mamba_block_depth > 0
            else None
        )

        # final normalization before pooling. NOTE(review): RMSNorm is None
        # when mamba_ssm is not installed, so rms_norm=True fails here then.
        self.norm_f = (nn.LayerNorm if not rms_norm else RMSNorm)(
            embed_dim, eps=norm_epsilon, **factory_kwargs
        )

        # mamba init (GPT-2 style residual rescaling, see _init_weights)
        self.apply(
            partial(
                _init_weights,
                n_layer=mamba_block_depth,
                **(initializer_cfg if initializer_cfg is not None else {}),
            )
        )
    

    def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
        return {
            i: layer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)
            for i, layer in enumerate(self.layers)
        }

    @torch.jit.ignore
    def no_weight_decay(self):
        return {"pos_embed", "cls_token", "dist_token", "cls_token_head", "cls_token_tail"}

    @torch.jit.ignore()
    def load_pretrained(self, checkpoint_path, prefix=""):
        """Load ViT-style pretrained weights into this model in place.

        Delegates to timm's ``_load_weights`` helper with an optional
        key ``prefix``. NOTE(review): ``_load_weights`` targets timm
        VisionTransformer attribute names — confirm they match this
        Mamba-based model before relying on it.
        """
        _load_weights(self, checkpoint_path, prefix)

    # =====================================================================

    def forward_features(self, x, inference_params=None, if_random_cls_token_position=False, if_random_token_rank=False):
        # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
        # with slight modifications to add the dist_token
        # x = self.patch_embed(x)
        B, M, _ = x.shape
        # Randomly shuffle the sequence order
        if if_random_token_rank:

            # 生成随机 shuffle 索引
            shuffle_indices = torch.randperm(M)

            if isinstance(token_position, list):
                print("original value: ", x[0, token_position[0], 0], x[0, token_position[1], 0])
            else:
                print("original value: ", x[0, token_position, 0])
            print("original token_position: ", token_position)

            # 执行 shuffle
            x = x[:, shuffle_indices, :]

            if isinstance(token_position, list):
                # 找到 cls token 在 shuffle 之后的新位置
                new_token_position = [torch.where(shuffle_indices == token_position[i])[0].item() for i in range(len(token_position))]
                token_position = new_token_position
            else:
                # 找到 cls token 在 shuffle 之后的新位置
                token_position = torch.where(shuffle_indices == token_position)[0].item()

            if isinstance(token_position, list):
                print("new value: ", x[0, token_position[0], 0], x[0, token_position[1], 0])
            else:
                print("new value: ", x[0, token_position, 0])
            print("new token_position: ", token_position)

        # Random flip sequence
        if_flip_img_sequences = False
        if self.flip_img_sequences_ratio > 0 and (self.flip_img_sequences_ratio - random.random()) > 1e-5:
            x = x.flip([1])
            if_flip_img_sequences = True

        # mamba impl
        residual = None
        hidden_states = x
        if not self.if_bidirectional:
            for layer in self.encoder:

                if if_flip_img_sequences and self.if_rope:
                    hidden_states = hidden_states.flip([1])
                    if residual is not None:
                        residual = residual.flip([1])

                # rope about
                if self.if_rope:
                    hidden_states = self.rope(hidden_states)
                    if residual is not None and self.if_rope_residual:
                        residual = self.rope(residual)

                if if_flip_img_sequences and self.if_rope:
                    hidden_states = hidden_states.flip([1])
                    if residual is not None:
                        residual = residual.flip([1])

                hidden_states, residual = layer(
                    hidden_states, residual, inference_params=inference_params
                )
        else:
            # get two layers in a single for-loop
            for i in range(len(self.layers) // 2):
                if self.if_rope:
                    hidden_states = self.rope(hidden_states)
                    if residual is not None and self.if_rope_residual:
                        residual = self.rope(residual)

                hidden_states_f, residual_f = self.layers[i * 2](
                    hidden_states, residual, inference_params=inference_params
                )
                hidden_states_b, residual_b = self.layers[i * 2 + 1](
                    hidden_states.flip([1]), None if residual == None else residual.flip([1]), inference_params=inference_params
                )
                hidden_states = hidden_states_f + hidden_states_b.flip([1])
                residual = residual_f + residual_b.flip([1])

        if not self.fused_add_norm:
            if residual is None:
                residual = hidden_states
            else:
                residual = residual + self.drop_path(hidden_states)
            hidden_states = self.norm_f(residual.to(dtype=self.norm_f.weight.dtype))
        else:
            # Set prenorm=False here since we don't need the residual
            fused_add_norm_fn = rms_norm_fn if isinstance(self.norm_f, RMSNorm) else layer_norm_fn
            hidden_states = fused_add_norm_fn(
                self.drop_path(hidden_states),
                self.norm_f.weight,
                self.norm_f.bias,
                eps=self.norm_f.eps,
                residual=residual,
                prenorm=False,
                residual_in_fp32=self.residual_in_fp32,
            )

        if self.final_pool_type == 'none':
            return hidden_states[:, -1, :]
        elif self.final_pool_type == 'mean':
            return hidden_states.mean(dim=1)
        elif self.final_pool_type == 'max':
            return hidden_states
        elif self.final_pool_type == 'all':
            return hidden_states
        else:
            raise NotImplementedError

    def mamba_forward(self, x, return_features=False, inference_params=None, if_random_cls_token_position=False, if_random_token_rank=False):
        x = self.forward_features(x, inference_params, if_random_cls_token_position=if_random_cls_token_position, if_random_token_rank=if_random_token_rank)
        if return_features:
            return x
        x = self.head(x)
        if self.final_pool_type == 'max':
            x = x.max(dim=1)[0]
        return x

    # ====================================================================
    def forward(
        self,
        x,
        mask_ratio: Union[float, Dict[str, float]] = None,
        permutations: Dict[str, torch.Tensor] = None,
        return_as_image: bool = False,
        use_dirichlet: bool = None,
        dirichlet_alpha: float = None,
        leave_one_out: bool = None,
        return_as_dict: bool = False,
        only_encode: bool = False,
        token_aggregation: str = None,
        use_seg_masking: bool = None,
        deep_copy_batch: bool = None, 
    ):
        """Mask, tokenize, encode and (optionally) decode a multi-task batch.

        Pipeline: (1) mask each task's patches (uniform / Dirichlet / seg
        masking via ``mask_data``), (2) tokenize the kept patches with the
        input adapters and add positional embeddings, (3) concatenate all
        task tokens plus global tokens and run the encoder, (4) feed the
        per-layer encoder tokens to every output adapter.

        Args:
            x: batch dict mapping task name -> tensor; the indexing below
                (``x[task].shape[2:]``) implies a (B, C, *spatial) layout —
                TODO confirm against the dataloader.
            mask_ratio: per-call masking ratio(s); ``None`` uses the value
                stored on the module.
            permutations: optional pre-computed patch permutations per task.
            return_as_image: un-shuffle/un-patchify outputs back to volumes
                (only adapters flagged ``visualizable``); selected input
                patches are likewise reassembled with zeros at masked spots.
            use_dirichlet / dirichlet_alpha / leave_one_out / use_seg_masking /
                token_aggregation / deep_copy_batch: per-call overrides of the
                corresponding constructor settings (``None`` = use stored).
            return_as_dict: return a dict instead of the 7-tuple.
            only_encode: stop after the encoder and return the (optionally
                aggregated) per-layer encoder tokens.

        Returns:
            A dict or the tuple ``(selected_patches, masked_patches,
            perm_indices, tokens, encoder_tokens, task_ranges,
            reconstructed_patches)``. Selected/masked patches and positional
            embeddings are in shuffled order (see ``perm_indices``).
        """
        # UNETR requires deep copy of input batch for leave-one-out reconstruction
        deep_copy_batch = self.deep_copy_batch if deep_copy_batch is None else deep_copy_batch
        if deep_copy_batch: 
            x = copy.deepcopy(x)

        # selected_patches, masked_patches, selected_pos_emb and masked_pos_emb are shuffled!
        selected_patches = {}
        masked_patches = {}
        reconstructed_patches = {}
        selected_pos_embed = {}
        masked_pos_embed = {}
        perm_indices = {}
        tokens = {}

        input_img_size = None
        input_patchified_dim = None
        # Tokenize input for each task

        # At eval time, disable all masking unless eval_masking was requested.
        if not (self.training) and not (self.eval_masking):
            use_dirichlet = False if use_dirichlet is None else use_dirichlet
            leave_one_out = False if leave_one_out is None else leave_one_out
            mask_ratio = 0.0 if mask_ratio is None else mask_ratio

        # Resolve per-call overrides against the values stored on the module.
        use_seg_masking = (
            self.use_seg_masking if use_seg_masking is None else use_seg_masking
        )
        use_dirichlet = self.use_dirichlet if use_dirichlet is None else use_dirichlet
        dirichlet_alpha = (
                self.dirichlet_alpha if dirichlet_alpha is None else dirichlet_alpha
        )
        leave_one_out = self.leave_one_out if leave_one_out is None else leave_one_out
        token_aggregation = (
            self.token_aggregation if token_aggregation is None else token_aggregation
        )
        # copy the stored dict so per-call mutation cannot leak back into it
        if isinstance(self.mask_ratio, dict):
            mask_ratio = self.mask_ratio.copy() if mask_ratio is None else mask_ratio
        else:
            mask_ratio = self.mask_ratio if mask_ratio is None else mask_ratio

        if use_seg_masking:
            mask_mode = "seg"
        elif use_dirichlet:
            mask_mode = "dirichlet"
        else:
            mask_mode = "uniform"
        input_tasks = [task for task in self.input_adapters]
        selected_patches, masked_patches, perm_indices, mask_ratio, leave_one_out_task = mask_data(
            batch=x,
            tasks=input_tasks,
            mask_ratio=mask_ratio, 
            mode=mask_mode,
            patch_size=self.patch_size,
            leave_one_out=leave_one_out,
            dirichlet_alpha=dirichlet_alpha,
            permutations=permutations,
        )
        # print(f"{mask_ratio=}")

        for task in self.input_adapters:
            selected_shuffled_patches = selected_patches[task]
            num_selected_patches = selected_shuffled_patches.shape[1]
            # masked_shuffled_patches = masked_patches[task]

            # If configured, overwrite fully-masked tasks (ratio == 1.0) with a
            # constant fill value, both in the patches and the raw batch.
            if self.full_masked_val is not None and isinstance(mask_ratio, dict): 
                image_tasks = ['t1', 't1c', 't2', 'fla']
                for mask_task, val in mask_ratio.items(): 
                    if task == mask_task and val == 1.0: 
                        selected_shuffled_patches = torch.full_like(selected_shuffled_patches, self.full_masked_val)
                        x[task] = torch.full_like(x[task], self.full_masked_val) 
                    # the stacked 'images' task keeps one channel per modality
                    if task == 'images' and val == 1.0 and mask_task in image_tasks: 
                        channel_idx = image_tasks.index(mask_task)
                        selected_shuffled_patches[:, :, channel_idx] = self.full_masked_val
                        x['images'][:, channel_idx] = torch.full_like(x['images'][:, channel_idx], self.full_masked_val) 
                            
            perm_idx = perm_indices[task]
            batch_size = selected_shuffled_patches.shape[0]

            # Tokenize selected patches
            if self.input_adapters[task] is None:
                continue

            if task == "seg":
                selected_shuffled_patches = selected_shuffled_patches.float()
            # convert patchs mapping to tokens
            tokens[task] = self.input_adapters[task](selected_shuffled_patches)

            # check if image has same shape as default. If not interpolate positional embeddings
            input_img_size = x[task].shape[2:]
            # (image_size[0]//patch_size[0], image_size[1]//patch_size[1], image_size[2]//patch_size[2])
            input_patchified_dim = calc_patchified_dim(input_img_size, self.patch_size)

            batch_pos_embed, patchified_dim = get_batch_pos_embed(
                batch=x,
                pos_embed=self.pos_embed,
                pos_embed_type=self.pos_embed_type,
                input_patchified_dim=input_patchified_dim,
                global_grid_size=self.global_grid_size,
                patch_size=self.patch_size,
                batch_size=batch_size,
            )

            # Re-order the positional embeddings to match the shuffled patches,
            # then keep only the embeddings of the selected (unmasked) ones.
            batch_pos_embed = batch_pos_embed[
                torch.arange(batch_size)[:, None], perm_idx, ...
            ]
            selected_pos_embed[task] = batch_pos_embed[:, :num_selected_patches]

            tokens[task] = tokens[task] + selected_pos_embed[task]

        # =====================================================================
        # Concatenate tokens for each task
        task_order = list(tokens.keys())
        task_ranges = OrderedDict()  # store the range of tokens for each task

        # Calculate the range of tokens for each task
        lower_bound = self.num_global_tokens
        for task in task_order:
            upper_bound = lower_bound + tokens[task].shape[1]
            task_ranges[task] = (lower_bound, upper_bound)
            lower_bound = upper_bound

        # Stack input tokens (batch_size, all_patchs ,embed_dim)
        input_tokens = torch.cat([tokens[task] for task in task_order], dim=1)

        # Add [CLS] token(s)
        if self.num_global_tokens > 0:
            # Repeat global tokens for each batch element
            batch_global_tokens = self.global_tokens.repeat(input_tokens.shape[0], 1, 1)
            input_tokens = torch.cat([batch_global_tokens, input_tokens], dim=1)

        # =====================================================================
        # Pass through transformer
        # NOTE(review): each encoder block is called with a single argument
        # here, while the Mamba blocks in forward_features take
        # (hidden_states, residual) — confirm the block signature used here.
        encoder_tokens = []
        x_hat = input_tokens
        if self.encoder is None:
            encoder_tokens = [input_tokens]
        else:
            # keep the token sequence after every block for the adapters
            for encoder_block in self.encoder:
                x_hat = encoder_block(x_hat)
                encoder_tokens.append(x_hat)

        if only_encode:
            if token_aggregation is None:
                return encoder_tokens
            if token_aggregation == "cls":
                return [enc[:, : self.num_global_tokens, ...] for enc in encoder_tokens]
            if token_aggregation == "mean":
                return [enc.mean(dim=1) for enc in encoder_tokens]

        # =====================================================================
        # Pass through output adapters
        for task, output_adapter in self.output_adapters.items():
            # # num_masked_tokens = masked_patches[task].shape[1]
            # Adapters either get the full range/permutation dicts or just
            # this task's entries, depending on configuration.
            if self.apply_pos_embed_to_context: 
                perm_index = perm_indices 
                task_range = task_ranges
            else: 
                task_range = task_ranges[task] if task in task_ranges else None
                perm_index = perm_indices[task] if task in perm_indices else None
            if hasattr(output_adapter, 'unetr_use_input'):  
                perm_index = perm_indices if perm_index is None else perm_index
                task_range = task_ranges if task_range is None else task_range
                
            output_patches = output_adapter(
                encoder_tokens,
                task_range,
                perm_index,
                patchified_dim,
                batch=x,
                adapter_task=task,
            )
            num_selected_patches = (
                selected_patches[task].shape[1] if task in selected_patches else 0
            )
            
            num_masked_patches = output_patches.shape[1] - num_selected_patches
            if task in masked_patches:
                masked_patches[task] = masked_patches[task][
                    :, :num_masked_patches
                ]  # remove the patches ignored by the decoder

            task_range = task_ranges[task] if task in task_ranges else None
            perm_index = perm_indices[task] if task in perm_indices else None

            if (
                return_as_image and output_adapter.visualizable
            ):  # only for testing, needs full size
                if perm_index is not None and isinstance(perm_index, torch.Tensor):
                    output_patches = unshuffle_patches(output_patches, perm_index)
                # output_patches = unpatchify(output_patches, self.img_size, self.patch_size)
                output_patches = unpatchify(
                    output_patches, input_img_size, self.patch_size
                )

            reconstructed_patches[task] = output_patches

        # if return_as_image is true, unshuffle and unpatchify the selected
        # input patches with zeros for masked patches
        if return_as_image:
            for task in selected_patches:
                # masked_patches can also be empty but has an entry in the dict
                if task in masked_patches:
                    patches = torch.cat(
                        [
                            selected_patches[task],
                            torch.zeros_like(masked_patches[task]),
                        ],
                        dim=1,
                    )
                    patches = unshuffle_patches(patches, perm_indices[task])
                    patches = unpatchify(patches, input_img_size, self.patch_size)
                    selected_patches[task] = (
                        patches  # overwrite selected patches with full size image of selected and masked (zeros)
                    )

        if return_as_dict:
            return_dict = {
                "selected_patches": selected_patches,
                "masked_patches": masked_patches,
                "perm_indices": perm_indices,
                "tokens": tokens,
                "encoder_tokens": encoder_tokens,
                "task_ranges": task_ranges,
                "reconstructed_patches": reconstructed_patches,
            }
            return return_dict
        else:
            return (
                selected_patches,
                masked_patches,
                perm_indices,
                tokens,
                encoder_tokens,
                task_ranges,
                reconstructed_patches,
            )


# =============================================================================
# Model creation function for MRI case
def create_multimae(
    cfg: OmegaConf,
    img_size: Union[int, Tuple[int, int, int]],
    patch_size: Union[int, Tuple[int, int, int]],
    embed_dim: int,
    mask_ratio: Union[float, Dict[str, float]],
    num_global_tokens: int = 1,
    pos_embed_type: str = "sincos",
    depth: int = 12,
    num_heads: int = 12,
    use_seg_masking: bool = False, 
    use_dirichlet: bool = False,
    dirichlet_alpha: float = 1.0,
    leave_one_out: bool = False,
    global_img_size: List[int] = [240, 240, 160], 
    apply_pos_embed_to_context: bool = False, 
    **kwargs,
):
    """Build a ``MultiMAE3D`` model for MRI data from an OmegaConf config.

    Input/output adapters are instantiated from ``cfg.model.model_params``;
    the encoder positional-embedding type may be overridden there via the
    ``encoder_pos_embed_type`` key.
    """
    # the config may override the positional-embedding type for the encoder
    encoder_pos_embed_type = cfg.model.model_params.get(
        "encoder_pos_embed_type", pos_embed_type
    )

    # build the per-task adapters from the config
    print(f"\nInstantiate input adapters")
    input_adapters = instantiate_input_adapters(cfg, **cfg.model.model_params)
    print(f"\nInstantiate output adapters")
    output_adapters = instantiate_output_adapters(cfg, **cfg.model.model_params)

    print(f"\n{' Instantiate MultiMAE3D '}")
    model_kwargs = dict(
        input_adapters=input_adapters,
        output_adapters=output_adapters,
        img_size=img_size,
        patch_size=patch_size,
        embed_dim=embed_dim,
        mask_ratio=mask_ratio,
        depth=depth,
        num_heads=num_heads,
        num_global_tokens=num_global_tokens,
        pos_embed_type=encoder_pos_embed_type,
        use_seg_masking=use_seg_masking,
        use_dirichlet=use_dirichlet,
        dirichlet_alpha=dirichlet_alpha,
        leave_one_out=leave_one_out,
        global_img_size=global_img_size,
        apply_pos_embed_to_context=apply_pos_embed_to_context,
    )
    return MultiMAE3D(**model_kwargs)


def instantiate_input_adapters(
    cfg: OmegaConf,
    img_size: Union[int, Tuple[int, int, int]],
    patch_size: Union[int, Tuple[int, int, int]],
    in_channels: int = 1,
    input_tasks: Optional[List[str]] = None,
    embed_dim: int = 768,
    input_has_learnable_embed: bool = False,
    **kwargs,  # FIX: was the typo'd catch-all ``**kwawrgs``
):
    """Create one ``PatchedInputAdapter`` per input task.

    Args:
        cfg: Full experiment config; supplies the default task list
            (``cfg.datasets.sequences_to_use``) and segmentation options.
        img_size: Input volume size (scalar or per-axis tuple).
        patch_size: Patch size (scalar or per-axis tuple).
        in_channels: Channels per non-segmentation task.
        input_tasks: Explicit task list; falls back to the config when None.
        embed_dim: Token dimension each adapter projects into.
        input_has_learnable_embed: Whether adapters add a learnable embedding.
        **kwargs: Ignored extras, so callers can splat a larger config dict.

    Returns:
        ``OrderedDict`` mapping task name -> ``PatchedInputAdapter``,
        preserving task order.
    """
    input_tasks = (
        input_tasks if input_tasks is not None else cfg.datasets.sequences_to_use
    )
    training_type = get_training_type(cfg)

    input_adapters = []
    for task in input_tasks:
        # Multilabel segmentation inputs carry one channel per class;
        # every other task uses the scalar ``in_channels``.
        task_in_channels = (
            cfg.model.model_params.num_seg_classes
            if (task == "seg")
            and (
                ("multilabel" in training_type)
                or cfg.model.model_params.get("multilabel", False)
            )
            else in_channels
        )
        adapter = PatchedInputAdapter(
            task_in_channels,
            img_size,
            patch_size,
            embed_dim=embed_dim,
            has_learnable_embed=input_has_learnable_embed,
        )
        input_adapters.append((task, adapter))
    return OrderedDict(input_adapters)


def instantiate_output_adapters(
    cfg: OmegaConf,
    img_size: Union[int, Tuple[int, int, int]],
    patch_size: Union[int, Tuple[int, int, int]],
    out_channels: int = 1,
    output_tasks: Optional[List[str]] = None,
    decoder_embed_dim: int = 256,
    num_seg_classes: int = 3,
    decoder_pos_embed_type: Optional[str] = None,
    decoder_depth: int = 3,
    # NOTE(review): typo'd name kept for backward compatibility — configs may
    # splat this by keyword as ``decoeder_num_heads``.
    decoeder_num_heads: int = 4,
    decoder_num_learnable_tokens: int = 0,  # FIX: was annotated ``bool``
    decoder_has_learnable_embed: bool = False,
    decoder_reconstruct_rate: float = 0.6,
    decoder_reconstruct_all_testing: bool = True,
    attn_type: Optional[str] = None,
    seg_output_adapter_type: str = "spatial",
    output_adapter_type: str = "spatial",
    segmenter_patch_div: int = 1,
    classifier: Optional[str] = None,
    num_classes: Optional[int] = None,  # FIX: was annotated ``Optional[str]``
    unetr_use_input: bool = False,
    setr_interpolation_mode: str = "nearest",
    setr_embed_dim: Optional[int] = None,
    setr_version: int = 1,
    **kwargs,
):
    """Create one output adapter per output task.

    ``output_tasks`` may be a plain list of task names (all tasks get the
    adapter class named by ``output_adapter_type`` / ``seg_output_adapter_type``)
    or a config mapping of task -> adapter type, optionally with per-task
    options.  Shared decoder options act as defaults; per-task options win.

    Args:
        cfg: Full experiment config; supplies the default task list and the
            training type.
        img_size: Input volume size (scalar or per-axis tuple).
        patch_size: Patch size (scalar or per-axis tuple).
        out_channels: Channels per non-segmentation task output.
        output_tasks: Task list or task->adapter mapping; falls back to
            ``cfg.datasets.sequences_to_use`` when None.
        decoder_embed_dim: Decoder token dimension.
        num_seg_classes: Output channels for the "seg" task.
        decoder_pos_embed_type: Decoder positional embedding; falls back to
            the encoder-level ``pos_embed_type`` passed via ``**kwargs``.
        classifier: If ``"linear"``, a label classification head is appended.
        num_classes: Number of classes for the classification head.
        **kwargs: Remaining config entries (e.g. ``pos_embed_type``).

    Returns:
        ``OrderedDict`` mapping task name -> instantiated output adapter,
        with an optional trailing ``"label"`` classifier entry.
    """
    training_type = get_training_type(cfg)
    adapter_opts = {
        "img_size": img_size,
        "patch_size": patch_size,
        "embed_dim": decoder_embed_dim,
        "num_heads": decoeder_num_heads,
        "depth": decoder_depth,
        "num_learnable_tokens": decoder_num_learnable_tokens,
        "has_learnable_embed": decoder_has_learnable_embed,
        "attn_type": attn_type,
        # BUG FIX: the original fell back to an undefined local name
        # ``pos_embed_type`` (NameError whenever decoder_pos_embed_type was
        # None); the encoder-level value arrives via **kwargs, with "sincos"
        # matching the encoder's default.
        "pos_embed_type": (
            decoder_pos_embed_type
            if decoder_pos_embed_type is not None
            else kwargs.get("pos_embed_type", "sincos")
        ),
        "reconstruct_rate": decoder_reconstruct_rate,
        "reconstruct_all_testing": decoder_reconstruct_all_testing,
        "segmenter_patch_div": segmenter_patch_div,
        "unetr_use_input": unetr_use_input,
        "setr_interpolation_mode": setr_interpolation_mode,
        "setr_embed_dim": setr_embed_dim,
        "setr_version": setr_version,
        "num_classes": num_classes,
    }

    # for backwards compatibility: string adapter names -> adapter classes
    ADAPTERS = {
        "spatial": SpatialOutputAdapter,
        "segmenter": SegmenterOutputAdapter,
        "setr": SETROutputAdapter,
        "unetr": UNETROutputAdapter,
        "conv_spatial": ConvSpatialOutputAdapter,
        "patch_conv_spatial": PatchConvSpatialOutputAdapter,
        "linear": LinearOutputAdapter,
    }

    output_tasks = (
        output_tasks if output_tasks is not None else cfg.datasets.sequences_to_use
    )
    output_adapters = []

    task_opts = {}
    # FIX: guard the empty-list case before peeking at output_tasks[0].
    if not output_tasks or isinstance(output_tasks[0], str):
        # Plain list of task names: one adapter type for "seg", one for the rest.
        output_tasks = [
            (
                task,
                ADAPTERS[
                    seg_output_adapter_type if task == "seg" else output_adapter_type
                ],
            )
            for task in output_tasks
        ]
    else:
        # Config mapping of task -> adapter type (optionally with options).
        output_tasks = resolve_listconfig_of_dicts(output_tasks)
        for task, value in output_tasks.items():
            if isinstance(value, ListConfig):
                value = resolve_listconfig_of_dicts(value)
                task_adapter = value["type"]
                task_opts[task] = {k: v for k, v in value.items() if k != "type"}
            else:
                # BUG FIX: the original reset the *whole* ``task_opts`` dict
                # here, discarding options collected for earlier tasks; a
                # task without options is simply left out of ``task_opts``.
                task_adapter = value
            output_tasks[task] = task_adapter
        output_tasks = [
            (task, getattr(multimae3d_adapters, task_adapter))
            for task, task_adapter in output_tasks.items()
        ]

    for task, adapter in output_tasks:
        print(f"- Initialize {task} output adapter:")
        print(f"\t- type: {adapter}")
        print(f"\t- opts: {task_opts.get(task, {})}")
        task_out_channels = num_seg_classes if task == "seg" else out_channels
        task_in_channels = (
            num_seg_classes if "multilabel" in training_type and task == "seg" else 1
        )
        # Shared decoder options act as defaults; per-task options override.
        opts = {**adapter_opts, **task_opts.get(task, {})}
        opts.setdefault("in_channels", task_in_channels)
        opts.setdefault("out_channels", task_out_channels)

        print(f"\t- expanded opts: {opts=}")

        adapter = adapter(**opts)
        output_adapters.append((task, adapter))

    if classifier == "linear":
        print(f"classifier")
        if num_classes is None:
            # FIX: raise instead of ``assert`` so the check survives ``-O``.
            raise ValueError(
                "classification adapter requires number of classes specified"
            )
        adapter = LinearOutputAdapter(
            num_classes=num_classes,
        )
        output_adapters.append(("label", adapter))
    print(f"\nOutput Adapters: {[adapter[0] for adapter in output_adapters]}")

    return OrderedDict(output_adapters)
