import logging

logger = logging.getLogger(__name__)

from typing import Union, List, Tuple, Dict, Optional, Literal

import torch
import torch.nn as nn
from timm.layers import trunc_normal_
from timm.layers.helpers import to_3tuple
from timm.models.vision_transformer import Block
from einops import rearrange
from functools import partial
import math

# from pytorch_lightning.utilities.model_summary import LayerSummary
from models.multimae3d_utils import (
    calc_patchified_dim,
    patchify,
    unpatchify,
    shuffle_patches,
    unshuffle_patches,
    build_3d_sincos_position_embedding,
    build_perceptron_position_embedding,
    interpolate_pos_embed,
    CrossAttention,
    AttentionBlock,
    SpecificLayerNorm,
    FullAttention,
    pop_range,
    get_batch_pos_embed,
    build_position_embedding,
)
import models.multimae3d_unetr as UNETR


# =============================================================================
# Input and output adapters
class PatchedInputAdapter(nn.Module):
    """Adapter that turns a 3D volume into a sequence of patch embeddings.

    The volume is split into non-overlapping 3D patches by a strided Conv3d,
    which acts as an independent linear projection of every patch into
    ``embed_dim`` channels.
    """

    def __init__(
        self,
        in_channels = 1,
        img_size = (160, 176, 144),
        patch_size = 16,
        embed_dim = None,
        has_learnable_embed = False,
        *args,
        **kwargs,
    ) -> None:
        super().__init__(*args, **kwargs)
        self.in_channels = in_channels
        self.img_size = to_3tuple(img_size)
        self.patch_size = to_3tuple(patch_size)
        self.embed_dim = embed_dim
        self.has_learnable_embed = has_learnable_embed

        # patch-grid size per spatial axis and the total token count
        self.patchified_dim = calc_patchified_dim(self.img_size, self.patch_size)
        grid_x, grid_y, grid_z = self.patchified_dim
        self.num_patches = grid_x * grid_y * grid_z

        # build the embedding-dependent layers right away if the size is known
        if self.embed_dim is not None:
            self.init(embed_dim=embed_dim)

    def init(self, embed_dim: int, *args, **kwargs):
        """Create the layers that depend on the embedding size.

        Called by the MultiMAE model so adapters can be constructed before
        the shared embedding dimension is decided.
        """
        self.embed_dim = embed_dim

        if self.has_learnable_embed:
            # task-specific embedding broadcast onto every token
            self.learnable_embed = nn.Parameter(torch.randn(1, 1, embed_dim))

        # strided conv == per-patch linear projection into the embedding dim
        self.proj = torch.nn.Conv3d(
            self.in_channels,
            embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
        )

    def forward(self, x):
        """Embed a (b, t, c, x, y, z) volume into patch tokens."""
        num_samples = x.shape[0]
        # fold the task/time axis into the batch so Conv3d sees 5D input
        x = rearrange(x, "b t c x y z -> (b t) c x y z")
        x = self.proj(x)
        x = rearrange(x, "(b t) c x y z -> b t c x y z", b=num_samples)
        # NOTE(review): squeeze() drops *every* size-1 axis, including the
        # batch axis when the batch size is 1 — confirm the intended shape.
        x = x.squeeze()
        if self.has_learnable_embed:
            x = x + self.learnable_embed
        return x


class SpatialOutputAdapter(nn.Module):
    """Cross-attention decoder head that reconstructs one task's patches.

    Queries (the visible tokens belonging to this task plus one mask token per
    masked patch) cross-attend to the projected encoder tokens; the decoded
    tokens are finally projected back to raw per-patch values.
    """

    def __init__(
        self,
        out_channels = 1,
        img_size = (224, 224, 160),
        patch_size = (16, 16, 16),
        enc_embed_dim = None,
        embed_dim = 768,
        num_heads = 8,  # as MultiMAE
        depth = 2,  # as MultiMAE
        mlp_ratio = 4.0,
        qkv_bias = True,  # as MultiMAE
        drop_path_rate = 0.0,  # as MultiMAE
        attn_drop_rate = 0.0,  # as MultiMAE
        norm_layer = partial(nn.LayerNorm, eps=1e-6),
        use_x_attn_block = False,
        attn_type = None,
        num_learnable_tokens = 0,  # MultiMAE has learnable_pos_emb not learnable_tokens, but I first test if we can use same pos embedding for all modalities
        has_learnable_embed = False,
        pos_embed_type = "perceptron",
        reconstruct_rate = 1.0,  # among the masked tokens, how many are reconstructed
        reconstruct_all_testing = True,  # if True, all tokens are reconstructed during testing
        global_img_size = None,
        *args,
        **kwargs,
    ):
        super().__init__()

        # =====================================================================
        # store additional parameters here:
        self.out_channels = out_channels
        self.img_size = to_3tuple(img_size)
        self.patch_size = to_3tuple(patch_size)
        self.enc_embed_dim = enc_embed_dim
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.drop_path_rate = drop_path_rate
        self.attn_drop_rate = attn_drop_rate
        self.norm_layer = norm_layer
        self.pos_embed_type = pos_embed_type
        self.reconstruct_rate = reconstruct_rate
        self.reconstruct_all_testing = reconstruct_all_testing
        # if attn_type is specified it overrides use_x_attn_block; this keeps
        # backwards compatibility while allowing the newer attention variants.
        # attn_type can be "default", "cross" or "full".
        self.use_x_attn_block = (
            use_x_attn_block if attn_type is None else not (attn_type == "default")
        )
        self.attn_type = attn_type
        self.num_learnable_tokens = num_learnable_tokens
        self.has_learnable_embed = has_learnable_embed
        self.patchified_dim = calc_patchified_dim(self.img_size, self.patch_size)
        self.global_img_size = global_img_size
        self.num_patches = (
            self.patchified_dim[0] * self.patchified_dim[1] * self.patchified_dim[2]
        )
        self.visualizable = True

        # =====================================================================
        # task-level learnable query tokens (similar role to a cls token)
        if self.num_learnable_tokens > 0:
            self.learnable_tokens = nn.Parameter(
                torch.randn(1, self.num_learnable_tokens, embed_dim)
            )

        # embedding that is added to each token for this task
        if self.has_learnable_embed:
            self.learnable_embed = nn.Parameter(torch.randn(1, 1, embed_dim))

        self.mask_token = nn.Parameter(torch.randn(1, 1, embed_dim))
        torch.nn.init.normal_(self.mask_token, std=0.02)  # as MAE

        # =====================================================================
        # initialize main part of model here
        self.init_transformer()

        # number of raw values in each patch
        self.dim_patch = (
            self.patch_size[0]
            * self.patch_size[1]
            * self.patch_size[2]
            * self.out_channels
        )
        self.out_proj = nn.Linear(embed_dim, self.dim_patch)

        if self.enc_embed_dim is not None:
            self.init(enc_embed_dim=enc_embed_dim)

    def init_pos_embed(self):
        """Build the positional embedding for this task's patch grid."""
        self.pos_embed, self.global_grid_size = build_position_embedding(
            num_patches=self.num_patches,
            embed_dim=self.embed_dim,
            pos_embed_type=self.pos_embed_type,
            patchified_dim=self.patchified_dim,
            patch_size=self.patch_size,
            global_img_size=self.global_img_size,
        )

    def init_transformer(self):
        """Build the decoder blocks according to the configured attention type."""
        if self.use_x_attn_block:
            # use the module-level logger instead of the root logging module
            logger.info("Using CrossAttentionBlock")
            # cross-attention in every decoder block
            if self.depth > 0:
                dpr = [
                    x.item() for x in torch.linspace(0, self.drop_path_rate, self.depth)
                ]
                self.blocks = nn.ModuleList(
                    [
                        AttentionBlock(
                            dim=self.embed_dim,
                            num_heads=self.num_heads,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=self.qkv_bias,
                            attn_drop=self.attn_drop_rate,
                            drop_path=dpr[i],
                            act_layer=nn.GELU,  # as MultiMAE
                            norm_layer=self.norm_layer,  # as MultiMAE
                            attn_type=self.attn_type,
                        )
                        for i in range(self.depth)
                    ]
                )
            else:
                self.blocks = nn.Identity()
        else:
            logger.info("Using normal MultiMAE decoder architecture")
            # MultiMAE version: one cross-attention step followed by normal
            # transformer blocks
            self.xattn = CrossAttention(
                dim=self.embed_dim,
                num_heads=self.num_heads,
                qkv_bias=self.qkv_bias,
                attn_drop=self.attn_drop_rate,
                # proj_drop=drop_rate ?
            )
            self.context_norm = self.norm_layer(self.embed_dim)
            self.query_norm = self.norm_layer(self.embed_dim)
            self.out_norm = self.norm_layer(self.embed_dim)

            mlp_hidden_dim = int(self.embed_dim * self.mlp_ratio)
            self.mlp = nn.Sequential(
                nn.Linear(self.embed_dim, mlp_hidden_dim),
                nn.GELU(),
                nn.Linear(mlp_hidden_dim, self.embed_dim),
            )

            if self.depth > 0:
                dpr = [
                    x.item() for x in torch.linspace(0, self.drop_path_rate, self.depth)
                ]
                self.blocks = nn.Sequential(
                    *[
                        Block(
                            dim=self.embed_dim,
                            num_heads=self.num_heads,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=self.qkv_bias,
                            attn_drop=self.attn_drop_rate,
                            drop_path=dpr[i],
                            act_layer=nn.GELU,  # as MultiMAE
                            norm_layer=self.norm_layer,  # as MultiMAE
                        )
                        for i in range(self.depth)
                    ]
                )
            else:
                self.blocks = nn.Identity()

    def init(
        self,
        enc_embed_dim: int,
        global_img_size: Union[int, Tuple[int, int, int]] = None,
        *args,
        **kwargs,
    ):
        """Finish construction once the encoder embedding dim is known."""
        if self.global_img_size is None:
            self.global_img_size = global_img_size
        self.init_pos_embed()
        self.enc_embed_dim = enc_embed_dim
        self.proj_context = nn.Linear(enc_embed_dim, self.embed_dim)

    @staticmethod
    def apply_pos_embed_to_query(
        query, batch_pos_embed, perm_idx: Optional[torch.Tensor]
    ):
        """Add positional embeddings to the query tokens.

        When ``perm_idx`` is given, the embeddings are gathered in the
        (shuffled) patch order of the queries. Returns a new tensor; the
        input is no longer mutated in place (the old ``+=`` modified the
        caller's tensor).
        """
        batch_size = query.shape[0]
        if perm_idx is None:
            return query + batch_pos_embed
        return query + batch_pos_embed[torch.arange(batch_size)[:, None], perm_idx, ...]

    @staticmethod
    def apply_pos_embed_to_context(context, batch_pos_embed, perm_indices, task_ranges):
        """Add positional embeddings to every task's span of context tokens.

        Args:
            context: (b, n, d) encoder tokens, already projected.
            batch_pos_embed: (b, num_patches, d) positional embeddings.
            perm_indices: per-task shuffle permutations; may be None or empty.
            task_ranges: task name -> (start, end) token range in ``context``.

        Tokens before the first task range (e.g. global/cls tokens) are kept
        unchanged and re-prepended.
        """
        batch_size = context.shape[0]
        # tolerate a missing permutation dict instead of crashing on `in None`
        perm_indices = perm_indices if perm_indices is not None else {}
        new_context = []
        for task, (start, end) in task_ranges.items():
            task_context_num_tokens = context[:, start:end].shape[1]
            if task in perm_indices:
                task_perm_idx = perm_indices[task]
                task_batch_pos_embed = batch_pos_embed[
                    torch.arange(batch_size)[:, None], task_perm_idx, ...
                ]
            else:
                task_batch_pos_embed = batch_pos_embed
            # a partially masked task only keeps its first N tokens
            selected_task_batch_pos_embed = task_batch_pos_embed[
                :, :task_context_num_tokens
            ]
            new_context.append(context[:, start:end] + selected_task_batch_pos_embed)
        new_context = torch.concat(new_context, dim=1)

        # BUGFIX: count the leading tokens not covered by any task range along
        # the token dimension (dim 1); the previous code compared batch sizes
        # (dim 0) of two same-batch tensors, which is always 0.
        cls_dims = context.shape[1] - new_context.shape[1]
        new_context = torch.concat([context[:, :cls_dims], new_context], dim=1)
        return new_context

    def _process_tokens(
        self,
        encoder_tokens,
        task_range: Optional[Union[Dict[str, Tuple[int, int]], Tuple[int, int]]],
        perm_idx: Optional[Union[Dict[str, torch.Tensor], torch.Tensor]],
        patchified_dim: Tuple[int, int, int] = None,
        return_all_layers: bool = False,
        batch: Optional[Dict[str, torch.Tensor]] = None,
        adapter_task: str = None,
    ):
        """Decode encoder tokens into per-patch tokens for this task.

        Returns the decoded token sequence, or a list of per-block token
        sequences when ``return_all_layers`` is True.
        """
        # if a list of encoder tokens is given (one entry per encoder block),
        # use the last layer only
        if isinstance(encoder_tokens, list):
            encoder_tokens = encoder_tokens[-1]

        # task_range is either this task's (start, end) range or a dict with
        # the ranges of all input tasks; None means the task is absent
        if isinstance(task_range, tuple):
            query_task_range = task_range
            context_task_range = None
        elif isinstance(task_range, dict):
            query_task_range = task_range.get(adapter_task, None)
            context_task_range = task_range
        else:
            query_task_range = None
            context_task_range = None

        # perm_idx mirrors task_range: one permutation or one per task.
        # BUGFIX: this branch previously tested `isinstance(task_range, dict)`
        # (copy-paste), dispatching on the wrong variable, and left the
        # query/context perm indices undefined when perm_idx was None.
        if isinstance(perm_idx, torch.Tensor):
            query_perm_idx = perm_idx
            context_perm_idx = None
        elif isinstance(perm_idx, dict):
            query_perm_idx = perm_idx.get(adapter_task, None)
            context_perm_idx = perm_idx
        else:
            query_perm_idx = None
            context_perm_idx = None

        # number of visible tokens of this task in the input; can be 0 when
        # the task is part of the input but fully masked
        num_selected_tokens = (
            query_task_range[1] - query_task_range[0]
            if query_task_range is not None
            else 0
        )

        # recompute the patch count when decoding an input of a different
        # size than the configured one
        num_patches = (
            self.num_patches
            if patchified_dim is None
            else patchified_dim[0] * patchified_dim[1] * patchified_dim[2]
        )

        # reconstruct_rate < 1 reduces the number of decoded tokens during
        # training to allow larger batches (experimental; kept for
        # compatibility). reconstruct_all_testing restores full decoding
        # at evaluation time.
        num_masked_tokens = num_patches - num_selected_tokens
        if not self.training and self.reconstruct_all_testing:
            reconstruct_rate = 1.0
        else:
            reconstruct_rate = self.reconstruct_rate
        num_masked_tokens = int(num_masked_tokens * reconstruct_rate)

        batch_size = encoder_tokens.shape[0]

        # project encoder tokens to this task's embedding dim
        context_tokens = self.proj_context(encoder_tokens)

        # one shared mask token per masked input patch; if this task is not
        # part of the encoder input or is 100% masked, all tokens are masked
        masked_tokens = self.mask_token.repeat(batch_size, num_masked_tokens, 1)

        # positional embedding matching the (possibly resized) input grid,
        # repeated for the whole batch
        batch_pos_embed, patchified_dim = get_batch_pos_embed(
            batch=batch,
            pos_embed=self.pos_embed,
            pos_embed_type=self.pos_embed_type,
            input_patchified_dim=(
                self.patchified_dim if patchified_dim is None else patchified_dim
            ),
            global_grid_size=self.global_grid_size,
            patch_size=self.patch_size,
            batch_size=batch_size,
        )

        # queries = this task's visible tokens (if any) + mask tokens.
        # A non-None query_task_range means this adapter's task is part of
        # the input (even when 100% masked) and therefore has a permutation.
        if query_task_range is not None:
            if self.attn_type == "full":
                # "full" attention removes the query tokens from the context
                queries, context_tokens = pop_range(context_tokens, query_task_range)
            else:
                queries = context_tokens[:, query_task_range[0] : query_task_range[1]]
            queries = torch.cat([queries, masked_tokens], dim=1)
        else:
            queries = masked_tokens

        # add positional embedding to queries
        queries = self.apply_pos_embed_to_query(
            queries, batch_pos_embed, query_perm_idx
        )

        # when per-task ranges are known, also add positional embeddings to
        # every task's span inside the context
        if context_task_range is not None:
            context_tokens = self.apply_pos_embed_to_context(
                context_tokens, batch_pos_embed, context_perm_idx, context_task_range
            )

        # prepend the learnable (cls-like) tokens for this task
        if self.num_learnable_tokens > 0:
            learnable_tokens = self.learnable_tokens.repeat(batch_size, 1, 1)
            queries = torch.cat([learnable_tokens, queries], dim=1)

        # task embedding shared by all query tokens
        if self.has_learnable_embed:
            queries = queries + self.learnable_embed

        all_tokens = []
        if self.depth > 0:
            if self.use_x_attn_block:
                # cross-attention in every block instead of a single
                # cross-attention followed by plain transformer blocks
                x = queries
                for block in self.blocks:
                    x = block(x, context_tokens)
                    if return_all_layers:
                        all_tokens.append(x)
            else:
                x = self.xattn(
                    self.query_norm(queries), self.context_norm(context_tokens)
                )
                x = x + self.mlp(self.out_norm(x))

                for block in self.blocks:
                    x = block(x)
                    if return_all_layers:
                        all_tokens.append(x)
            return all_tokens if return_all_layers else x
        else:
            return [queries] if return_all_layers else queries

    def forward(
        self,
        encoder_tokens,
        task_range: Optional[Tuple[int, int]],
        perm_idx: Optional[torch.Tensor],
        patchified_dim: Tuple[int, int, int] = None,
        batch: Optional[Dict[str, torch.Tensor]] = None,
        adapter_task: str = None,
        *args,
        **kwargs,
    ):
        """Reconstruct raw patch values, shape (b, tokens, c, px, py, pz)."""
        x = self._process_tokens(
            encoder_tokens,
            task_range,
            perm_idx,
            patchified_dim,
            batch=batch,
            adapter_task=adapter_task,
        )

        # drop the learnable tokens; only patch tokens are decoded to values
        if self.num_learnable_tokens > 0:
            x = x[:, self.num_learnable_tokens :, ...]

        # project each token to the raw values of one patch
        x = self.out_proj(x)

        # reshape the flat per-patch values back into patch shape
        x = rearrange(
            x,
            "b t (c x y z) -> b t c x y z",
            x=self.patch_size[0],
            y=self.patch_size[1],
            z=self.patch_size[2],
        )

        return x


class SegmenterOutputAdapter(nn.Module):
    """Segmenter-style mask head on top of a SpatialOutputAdapter decoder.

    The inner SpatialOutputAdapter is configured with one learnable token per
    class; masks are predicted as the (L2-normalized) dot product between
    patch tokens and class tokens, following Segmenter's mask transformer.
    """

    def __init__(
        self,
        out_channels: int = 2,
        img_size: Union[int, Tuple[int, int, int]] = (224, 224, 160),
        patch_size: Union[int, Tuple[int, int, int]] = (16, 16, 16),
        enc_embed_dim: Optional[int] = None,
        embed_dim: int = 768,
        num_heads: int = 12,
        depth: int = 2,  # as MultiMAE
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,  # as MultiMAE
        drop_path_rate: float = 0.0,  # as MultiMAE
        attn_drop_rate: float = 0.0,  # as MultiMAE
        norm_layer: nn.Module = partial(nn.LayerNorm, eps=1e-6),
        attn_type: str = None,
        num_learnable_tokens: int = 0,
        has_learnable_embed: bool = False,
        pos_embed_type: str = "perceptron",
        reconstruct_rate: float = 1.0,  # among the masked tokens, how many are reconstructed
        reconstruct_all_testing: bool = True,  # if True, all tokens are reconstructed during testing
        segmenter_patch_div: int = 1,
        global_img_size: Union[int, Tuple[int, int, int]] = None,
        *args,
        **kwargs,
    ):
        super().__init__()

        # =====================================================================
        # store additional parameters here:
        self.out_channels = out_channels
        self.img_size = to_3tuple(img_size)
        self.patch_size = to_3tuple(patch_size)
        self.enc_embed_dim = enc_embed_dim
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.pos_embed_type = pos_embed_type
        self.reconstruct_rate = reconstruct_rate
        self.reconstruct_all_testing = reconstruct_all_testing
        self.attn_type = attn_type
        self.num_learnable_tokens = num_learnable_tokens
        self.has_learnable_embed = has_learnable_embed
        self.patchified_dim = calc_patchified_dim(self.img_size, self.patch_size)
        self.num_patches = (
            self.patchified_dim[0] * self.patchified_dim[1] * self.patchified_dim[2]
        )
        self.visualizable = True
        self.segmenter_patch_div = segmenter_patch_div
        # scale factor for the randomly initialized projection matrices
        self.scale = embed_dim**-0.5

        # =====================================================================
        # projection of patch tokens; segmenter_patch_div**3 subdivides each
        # patch into that many sub-patches
        self.patch_proj = nn.Parameter(
            self.scale
            * torch.randn(self.embed_dim, self.embed_dim * segmenter_patch_div**3)
        )
        # projection of the per-class tokens
        self.classes_proj = nn.Parameter(
            self.scale * torch.randn(self.embed_dim, self.embed_dim)
        )

        self.mask_norm = norm_layer(out_channels)

        # the class tokens are realized as the decoder's learnable tokens
        self.spatial_output_adapter = SpatialOutputAdapter(
            out_channels=out_channels,
            img_size=img_size,
            patch_size=patch_size,
            enc_embed_dim=enc_embed_dim,
            embed_dim=embed_dim,
            num_heads=num_heads,
            depth=depth,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            drop_path_rate=drop_path_rate,
            attn_drop_rate=attn_drop_rate,
            norm_layer=norm_layer,
            attn_type=attn_type,
            num_learnable_tokens=out_channels,
            has_learnable_embed=has_learnable_embed,
            pos_embed_type=pos_embed_type,
            reconstruct_rate=reconstruct_rate,
            reconstruct_all_testing=reconstruct_all_testing,
            global_img_size=global_img_size,
        )

    def init(
        self,
        enc_embed_dim: int,
        global_img_size: Union[int, Tuple[int, int, int]] = None,
        *args,
        **kwargs,
    ):
        """Finish construction once the encoder embedding dim is known."""
        self.enc_embed_dim = enc_embed_dim
        self.spatial_output_adapter.init(
            enc_embed_dim=enc_embed_dim, global_img_size=global_img_size
        )

    def forward(
        self,
        encoder_tokens,
        task_range: Optional[Tuple[int, int]],
        perm_idx: Optional[torch.Tensor],
        patchified_dim: Tuple[int, int, int] = None,
        batch: Optional[Dict[str, torch.Tensor]] = None,
        adapter_task: str = None,
        *args,
        **kwargs,
    ):
        """Predict segmentation masks, returned in patchified (and, when
        perm_idx is given, shuffled) layout matching the targets."""
        x = self.spatial_output_adapter._process_tokens(
            encoder_tokens,
            task_range,
            perm_idx,
            patchified_dim,
            batch=batch,
            adapter_task=adapter_task,
        )

        # during evaluation optionally decode every token
        if not self.training and self.reconstruct_all_testing:
            reconstruct_rate = 1.0
        else:
            reconstruct_rate = self.reconstruct_rate

        num_patches = (
            self.num_patches
            if patchified_dim is None
            else patchified_dim[0] * patchified_dim[1] * patchified_dim[2]
        )

        batch_size = x.shape[0]

        # split the per-class tokens from the patch tokens
        cls_seg_feat = x[:, : self.out_channels, ...]
        patches = x[:, self.out_channels :, ...]

        patches = patches @ self.patch_proj
        cls_seg_feat = cls_seg_feat @ self.classes_proj

        # subdivide each patch into segmenter_patch_div**3 sub-patches
        patches = rearrange(
            patches,
            "b t (s c) -> b (t s) c",
            c=self.embed_dim,
            s=self.segmenter_patch_div**3,
        )

        # cosine-style similarity: L2-normalize patch and class features
        patches = patches / patches.norm(dim=-1, keepdim=True)
        cls_seg_feat = cls_seg_feat / cls_seg_feat.norm(dim=-1, keepdim=True)

        # per-patch class scores
        masks = patches @ cls_seg_feat.transpose(1, 2)
        masks = self.mask_norm(masks)

        # when only a subset of tokens was reconstructed, pad the missing
        # tokens with a one-hot background prediction so the grid is full.
        # Sub-patches are not yet supported here.
        if reconstruct_rate < 1.0:
            if self.segmenter_patch_div > 1:
                raise NotImplementedError
            # BUGFIX: was `self.num_classes` (nonexistent attribute -> crash)
            # and was allocated on the CPU regardless of the input device
            zero_mask = torch.zeros(
                batch_size,
                1,
                self.out_channels,
                device=masks.device,
                dtype=masks.dtype,
            )
            zero_mask[:, :, 0] = 1
            num_pred_tokens = masks.shape[1]
            num_missing_tokens = num_patches - num_pred_tokens
            zero_masks = zero_mask.repeat(1, num_missing_tokens, 1)
            masks = torch.cat([masks, zero_masks], dim=1)

        # NOTE(review): uses self.patchified_dim even when a different
        # patchified_dim argument was passed — confirm variable-size inputs.
        new_img_size = [d * self.segmenter_patch_div for d in self.patchified_dim]
        masks = rearrange(
            masks,
            "b (t x y z) c -> b t c x y z",
            x=self.segmenter_patch_div,
            y=self.segmenter_patch_div,
            z=self.segmenter_patch_div,
        )

        if perm_idx is not None:
            masks = unshuffle_patches(
                masks, perm_idx, patch_size=self.segmenter_patch_div
            )

        masks = unpatchify(
            masks, img_size=new_img_size, patch_size=self.segmenter_patch_div
        )

        # upsample the coarse mask grid to full image resolution
        masks = nn.functional.interpolate(masks, size=self.img_size, mode="trilinear")

        # return in the same patchified/shuffled layout as the targets
        masks = patchify(masks, patch_size=self.patch_size)
        if perm_idx is not None:
            masks, _ = shuffle_patches(
                masks, patch_size=self.patch_size, permutations=perm_idx
            )

        return masks


# SETROutputAdapter:
class SETROutputAdapter(nn.Module):
    def __init__(
        self,
        out_channels: int = 2,
        img_size: Union[int, Tuple[int, int, int]] = (224, 224, 160),
        patch_size: Union[int, Tuple[int, int, int]] = (16, 16, 16),
        enc_embed_dim: Optional[int] = None,
        embed_dim: int = 768,
        setr_embed_dim: int = None,
        num_heads: int = 8,  # as MultiMAE
        depth: int = 2,  # as MultiMAE
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,  # as MultiMAE
        drop_path_rate: float = 0.0,  # as MultiMAE
        attn_drop_rate: float = 0.0,  # as MultiMAE
        norm_layer: nn.Module = partial(nn.LayerNorm, eps=1e-6),
        attn_type: str = None,
        num_learnable_tokens: int = 0,
        has_learnable_embed: bool = False,
        pos_embed_type: str = "perceptron",
        reconstruct_rate: float = 1.0,  # among the masked tokens, how many are reconstructed
        reconstruct_all_testing: bool = True,  # if True, all tokens are reconstructed during testing
        setr_interpolation_mode: str = "nearest",
        setr_version: int = 1,
        global_img_size: Union[int, Tuple[int, int, int]] = None,
        *args,
        **kwargs,
    ):
        super().__init__()

        # =====================================================================
        # store additional parameters here:
        self.out_channels = out_channels
        self.img_size = to_3tuple(img_size)
        self.patch_size = to_3tuple(patch_size)
        self.enc_embed_dim = enc_embed_dim
        self.setr_embed_dim = setr_embed_dim
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.reconstruct_rate = reconstruct_rate
        self.reconstruct_all_testing = reconstruct_all_testing
        self.pos_embed_type = pos_embed_type
        self.attn_type = attn_type
        self.num_learnable_tokens = num_learnable_tokens
        self.has_learnable_embed = has_learnable_embed
        self.patchified_dim = calc_patchified_dim(self.img_size, self.patch_size)
        self.num_patches = (
            self.patchified_dim[0] * self.patchified_dim[1] * self.patchified_dim[2]
        )
        self.visualizable = True
        self.setr_interpolation_mode = setr_interpolation_mode
        self.setr_version = setr_version

        self.spatial_output_adapter = SpatialOutputAdapter(
            out_channels=out_channels,
            img_size=img_size,
            patch_size=patch_size,
            enc_embed_dim=enc_embed_dim,
            embed_dim=embed_dim,
            num_heads=num_heads,
            depth=depth,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            drop_path_rate=drop_path_rate,
            attn_drop_rate=attn_drop_rate,
            norm_layer=norm_layer,
            # use_x_attn_block=use_x_attn_block,
            attn_type=attn_type,
            num_learnable_tokens=num_learnable_tokens,
            has_learnable_embed=has_learnable_embed,
            pos_embed_type=pos_embed_type,
            reconstruct_rate=reconstruct_rate,
            reconstruct_all_testing=reconstruct_all_testing,
            global_img_size=global_img_size,
        )

    def _init_decoder(self):
        input_module = torch.nn.Sequential(
            torch.nn.Conv3d(
                in_channels=self.embed_dim,
                out_channels=self.setr_embed_dim,
                kernel_size=3,
                padding=1,
            ),
            SpecificLayerNorm(dim=1, normalized_shape=self.setr_embed_dim),
            torch.nn.ReLU(),
        )

        if isinstance(self.patch_size, tuple) or isinstance(self.patch_size, list):
            if all([s == self.patch_size[0] for s in self.patch_size]):
                n_upsamples = int(math.log2(self.patch_size[0]))
            else:
                raise NotImplementedError
        elif isinstance(self.patch_size, int):
            n_upsamples = int(math.log2(self.patch_size))
        else:
            raise NotImplementedError

        # intermediate modules
        intermediate_modules = [
            torch.nn.Sequential(
                (
                    torch.nn.Conv3d(
                        in_channels=self.setr_embed_dim,
                        out_channels=self.setr_embed_dim,
                        kernel_size=3,
                        padding=1,
                    )
                    if self.setr_version == 1
                    else torch.nn.Conv3d(
                        in_channels=self.setr_embed_dim // 2**i,
                        out_channels=self.setr_embed_dim // 2 ** (i + 1),
                        kernel_size=3,
                        padding=1,
                    )
                ),
                (
                    SpecificLayerNorm(dim=1, normalized_shape=self.setr_embed_dim)
                    if self.setr_version == 1
                    else SpecificLayerNorm(
                        dim=1, normalized_shape=self.setr_embed_dim // 2 ** (i + 1)
                    )
                ),
                torch.nn.ReLU(),
            )
            for i in range(n_upsamples - 2)
        ]

        output_module = torch.nn.Sequential(
            (
                torch.nn.Conv3d(
                    in_channels=self.setr_embed_dim,
                    out_channels=self.setr_embed_dim,
                    kernel_size=3,
                    padding=1,
                )
                if self.setr_version == 1
                else torch.nn.Conv3d(
                    in_channels=self.setr_embed_dim // 2 ** (n_upsamples - 2),
                    out_channels=self.setr_embed_dim // 2 ** (n_upsamples - 1),
                    kernel_size=3,
                    padding=1,
                )
            ),
            (
                SpecificLayerNorm(dim=1, normalized_shape=self.setr_embed_dim)
                if self.setr_version == 1
                else SpecificLayerNorm(
                    dim=1,
                    normalized_shape=self.setr_embed_dim // 2 ** (n_upsamples - 1),
                )
            ),
            torch.nn.ReLU(),
            (
                torch.nn.Conv3d(
                    in_channels=self.setr_embed_dim,
                    out_channels=self.out_channels,
                    kernel_size=3,
                    padding=1,
                )
                if self.setr_version == 1
                else torch.nn.Conv3d(
                    in_channels=self.setr_embed_dim // 2 ** (n_upsamples - 1),
                    out_channels=self.out_channels,
                    kernel_size=3,
                    padding=1,
                )
            ),
        )

        self.setr_decoder = torch.nn.ModuleList(
            [input_module, *intermediate_modules, output_module]
        )

    def init(
        self,
        enc_embed_dim: int,
        global_img_size: Union[int, Tuple[int, int, int]] = None,
        *args,
        **kwarks,
    ):
        self.enc_embed_dim = enc_embed_dim
        self.setr_embed_dim = (
            self.embed_dim if self.setr_embed_dim is None else self.setr_embed_dim
        )
        self._init_decoder()
        self.spatial_output_adapter.init(enc_embed_dim, global_img_size=global_img_size)

    def forward(
        self,
        encoder_tokens,
        task_range: Optional[Tuple[int, int]],
        perm_idx: Optional[torch.Tensor],
        patchified_dim: Tuple[int, int, int] = None,
        batch: Optional[Dict[str, torch.Tensor]] = None,
        adapter_task: str = None,
        *args,
        **kwargs,
    ):
        patchified_dim = (
            self.patchified_dim if patchified_dim is None else patchified_dim
        )

        x = self.spatial_output_adapter._process_tokens(
            encoder_tokens,
            task_range,
            perm_idx,
            patchified_dim,
            batch=batch,
            adapter_task=adapter_task,
        )

        x = x.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
        if perm_idx is not None:
            x = unshuffle_patches(x, perm_idx, patch_size=1)
        x = unpatchify(x, patchified_dim, patch_size=1)

        for module in self.setr_decoder:
            x = module(x)
            x = torch.functional.F.interpolate(
                x, scale_factor=2, mode=self.setr_interpolation_mode
            )

        x = patchify(x, self.patch_size)
        if perm_idx is not None:
            x, _ = shuffle_patches(x, patch_size=self.patch_size, permutations=perm_idx)

        return x


class ConvSpatialOutputAdapter(nn.Module):
    """Spatial output adapter followed by an image-space conv refinement.

    Wraps a ``SpatialOutputAdapter`` to reconstruct full patches, stitches
    them into a full-resolution image, refines the image with a two-layer
    Conv3d block, and re-patchifies (and re-shuffles) so the output keeps the
    caller's token layout.
    """

    def __init__(
        self,
        out_channels: int = 1,
        img_size: Union[int, Tuple[int, int, int]] = (224, 224, 160),
        patch_size: Union[int, Tuple[int, int, int]] = (16, 16, 16),
        enc_embed_dim: Optional[int] = None,
        embed_dim: int = 768,
        conv_channels: int = 16,
        num_heads: int = 8,  # as MultiMAE
        depth: int = 2,  # as MultiMAE
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,  # as MultiMAE
        drop_path_rate: float = 0.0,  # as MultiMAE
        attn_drop_rate: float = 0.0,  # as MultiMAE
        norm_layer: nn.Module = partial(nn.LayerNorm, eps=1e-6),
        attn_type: str = None,
        num_learnable_tokens: int = 0,
        has_learnable_embed: bool = False,
        pos_embed_type: str = "perceptron",
        reconstruct_rate: float = 1.0,  # among the masked tokens, how many are reconstructed
        reconstruct_all_testing: bool = True,  # if True, all tokens are reconstructed during testing
        global_img_size: Union[int, Tuple[int, int, int]] = None,
        *args,
        **kwargs,
    ):
        super().__init__()

        # partial reconstruction is not supported by this adapter
        if reconstruct_rate < 1.0:
            raise NotImplementedError

        # =====================================================================
        # store additional parameters here:
        self.out_channels = out_channels
        self.img_size = to_3tuple(img_size)
        self.patch_size = to_3tuple(patch_size)
        self.enc_embed_dim = enc_embed_dim
        self.embed_dim = embed_dim
        self.conv_channels = conv_channels
        self.num_heads = num_heads
        self.depth = depth
        self.pos_embed_type = pos_embed_type
        self.reconstruct_rate = reconstruct_rate
        self.reconstruct_all_testing = reconstruct_all_testing
        self.attn_type = attn_type
        self.num_learnable_tokens = num_learnable_tokens
        self.has_learnable_embed = has_learnable_embed
        self.patchified_dim = calc_patchified_dim(self.img_size, self.patch_size)
        self.num_patches = (
            self.patchified_dim[0] * self.patchified_dim[1] * self.patchified_dim[2]
        )
        self.visualizable = True

        self.spatial_output_adapter = SpatialOutputAdapter(
            out_channels=out_channels,
            img_size=img_size,
            patch_size=patch_size,
            enc_embed_dim=enc_embed_dim,
            embed_dim=embed_dim,
            num_heads=num_heads,
            depth=depth,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            drop_path_rate=drop_path_rate,
            attn_drop_rate=attn_drop_rate,
            norm_layer=norm_layer,
            attn_type=attn_type,
            num_learnable_tokens=num_learnable_tokens,
            has_learnable_embed=has_learnable_embed,
            pos_embed_type=pos_embed_type,
            reconstruct_rate=reconstruct_rate,
            reconstruct_all_testing=reconstruct_all_testing,
            global_img_size=global_img_size,
        )

        # refinement on the stitched full-resolution image;
        # NOTE(review): LayerNorm over [C, D, H, W] normalizes across channel
        # AND spatial dims jointly — presumably intentional, confirm
        self.out_conv = torch.nn.Sequential(
            torch.nn.Conv3d(
                in_channels=out_channels,
                out_channels=conv_channels,
                kernel_size=3,
                padding=1,
            ),
            torch.nn.LayerNorm([conv_channels, *self.img_size]),
            torch.nn.ReLU(),
            torch.nn.Conv3d(
                in_channels=conv_channels,
                out_channels=out_channels,
                kernel_size=3,
                padding=1,
            ),
        )

    def init(
        self,
        enc_embed_dim: int,
        global_img_size: Union[int, Tuple[int, int, int]] = None,
        *args,
        **kwarks,
    ):
        """Late initialization: record encoder dim and init the wrapped adapter."""
        self.enc_embed_dim = enc_embed_dim
        self.spatial_output_adapter.init(enc_embed_dim, global_img_size=global_img_size)

    def forward(
        self,
        encoder_tokens,
        task_range: Optional[Tuple[int, int]],
        perm_idx: Optional[torch.Tensor],
        patchified_dim: Tuple[int, int, int] = None,
        batch: Optional[Dict[str, torch.Tensor]] = None,
        adapter_task: str = None,
        *args,
        **kwargs,
    ):
        """Reconstruct patches, refine them as a full image, re-patchify.

        ``perm_idx`` is optional (consistent with the other adapters in this
        file): when ``None``, patches are assumed to already be in spatial
        order and no (un)shuffling is performed — previously a ``None`` here
        was passed straight into ``unshuffle_patches``.
        """
        x = self.spatial_output_adapter.forward(
            encoder_tokens, task_range, perm_idx, patchified_dim, batch=batch
        )
        if perm_idx is not None:
            x = unshuffle_patches(x, perm_idx, self.patch_size)
        x = unpatchify(x, self.img_size, self.patch_size)
        x = self.out_conv(x)
        x = patchify(x, patch_size=self.patch_size)
        if perm_idx is not None:
            x, _ = shuffle_patches(x, patch_size=self.patch_size, permutations=perm_idx)

        return x


class PatchConvSpatialOutputAdapter(nn.Module):
    """Spatial output adapter with a per-patch transposed-conv decoder head.

    Processes encoder tokens with a ``SpatialOutputAdapter``, then expands
    each per-patch embedding to a full patch via ``ConvTranspose3d`` followed
    by a Conv3d refinement, applied per patch (not on the stitched image).
    """

    def __init__(
        self,
        out_channels: int = 1,
        img_size: Union[int, Tuple[int, int, int]] = (224, 224, 160),
        patch_size: Union[int, Tuple[int, int, int]] = (16, 16, 16),
        enc_embed_dim: Optional[int] = None,
        embed_dim: int = 768,
        conv_channels: int = 16,
        num_heads: int = 8,  # as MultiMAE
        depth: int = 2,  # as MultiMAE
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,  # as MultiMAE
        drop_path_rate: float = 0.0,  # as MultiMAE
        attn_drop_rate: float = 0.0,  # as MultiMAE
        norm_layer: nn.Module = partial(nn.LayerNorm, eps=1e-6),
        attn_type: str = None,
        num_learnable_tokens: int = 0,
        has_learnable_embed: bool = False,
        pos_embed_type: str = "perceptron",
        reconstruct_rate: float = 1.0,  # among the masked tokens, how many are reconstructed
        reconstruct_all_testing: bool = True,  # if True, all tokens are reconstructed during testing
        global_img_size: Union[int, Tuple[int, int, int]] = None,
        *args,
        **kwargs,
    ):
        super().__init__()

        # partial reconstruction is not supported by this adapter
        if reconstruct_rate < 1.0:
            raise NotImplementedError

        # =====================================================================
        # store additional parameters here:
        self.out_channels = out_channels
        self.img_size = to_3tuple(img_size)
        self.patch_size = to_3tuple(patch_size)
        self.enc_embed_dim = enc_embed_dim
        self.embed_dim = embed_dim
        self.conv_channels = conv_channels
        self.num_heads = num_heads
        self.depth = depth
        self.pos_embed_type = pos_embed_type
        self.reconstruct_rate = reconstruct_rate
        self.reconstruct_all_testing = reconstruct_all_testing
        self.attn_type = attn_type
        self.num_learnable_tokens = num_learnable_tokens
        self.has_learnable_embed = has_learnable_embed
        self.patchified_dim = calc_patchified_dim(self.img_size, self.patch_size)
        self.num_patches = (
            self.patchified_dim[0] * self.patchified_dim[1] * self.patchified_dim[2]
        )
        self.visualizable = True

        self.spatial_output_adapter = SpatialOutputAdapter(
            out_channels=out_channels,
            img_size=img_size,
            patch_size=patch_size,
            enc_embed_dim=enc_embed_dim,
            embed_dim=embed_dim,
            num_heads=num_heads,
            depth=depth,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            drop_path_rate=drop_path_rate,
            attn_drop_rate=attn_drop_rate,
            norm_layer=norm_layer,
            # use_x_attn_block=use_x_attn_block,
            attn_type=attn_type,
            num_learnable_tokens=num_learnable_tokens,
            has_learnable_embed=has_learnable_embed,
            pos_embed_type=pos_embed_type,
            reconstruct_rate=reconstruct_rate,
            reconstruct_all_testing=reconstruct_all_testing,
            global_img_size=global_img_size,
        )

        # maps one token embedding (1x1x1 "voxel") to a full patch
        self.out_conv = torch.nn.Sequential(
            torch.nn.ConvTranspose3d(
                in_channels=embed_dim,
                out_channels=conv_channels,
                kernel_size=self.patch_size,
                padding=0,
            ),
            torch.nn.LayerNorm(self.patch_size),
            torch.nn.ReLU(),
            torch.nn.Conv3d(
                in_channels=conv_channels,
                out_channels=out_channels,
                kernel_size=3,
                padding=1,
            ),
        )

    def init(
        self,
        enc_embed_dim: int,
        global_img_size: Union[int, Tuple[int, int, int]] = None,
        *args,
        **kwarks,
    ):
        """Late initialization: record encoder dim and init the wrapped adapter."""
        self.enc_embed_dim = enc_embed_dim
        self.spatial_output_adapter.init(enc_embed_dim, global_img_size=global_img_size)

    def forward(
        self,
        encoder_tokens,
        task_range: Optional[Tuple[int, int]],
        perm_idx: Optional[torch.Tensor],
        patchified_dim: Tuple[int, int, int] = None,
        batch: Optional[Dict[str, torch.Tensor]] = None,
        adapter_task: str = None,
        *args,
        **kwargs,
    ):
        """Decode tokens to patch volumes of shape (b, t, c, *patch_size)."""
        x = self.spatial_output_adapter._process_tokens(
            encoder_tokens,
            task_range,
            perm_idx,
            patchified_dim,
            batch=batch,
            adapter_task=adapter_task,
        )

        # ConvTranspose3d can only take tensors with 4 or 5 dimensions, not 6,
        # therefore we treat each combination of batch and token as its own
        # batch.
        batch_size = x.shape[0]

        x = rearrange(x, "b t c -> (b t) c")
        x = x.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
        x = self.out_conv(x)
        x = rearrange(x, "(b t) c x y z-> b t c x y z", b=batch_size)

        return x


class UNETROutputAdapter(nn.Module):
    """UNETR-style decoder over intermediate encoder token lists.

    ``forward`` expects a list of per-block encoder outputs, taps them at
    evenly spaced depths, projects each tapped set into a low-resolution
    volume, and fuses the volumes through a UNETR upsampling pyramid down to
    ``out_channels`` maps at full image resolution. When ``unetr_use_input``
    is set, conv features of the raw input volume are concatenated before the
    final head.
    """

    def __init__(
        self,
        out_channels: int = 2,
        img_size: Union[int, Tuple[int, int, int]] = (224, 224, 160),
        patch_size: Union[int, Tuple[int, int, int]] = (16, 16, 16),
        enc_embed_dim: Optional[int] = None,
        enc_depth: Optional[int] = None,
        embed_dim: int = 768,
        num_heads: int = 8,  # as MultiMAE
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,  # as MultiMAE
        drop_path_rate: float = 0.0,  # as MultiMAE
        attn_drop_rate: float = 0.0,  # as MultiMAE
        norm_layer: nn.Module = partial(nn.LayerNorm, eps=1e-6),
        attn_type: str = None,
        num_learnable_tokens: int = 0,
        has_learnable_embed: bool = False,
        pos_embed_type: str = "perceptron",
        reconstruct_rate: float = 1.0,  # among the masked tokens, how many are reconstructed
        reconstruct_all_testing: bool = True,  # if True, all tokens are reconstructed during testing
        unetr_use_input: bool = False,
        global_img_size: Union[int, Tuple[int, int, int]] = None,
        *args,
        **kwargs,
    ):
        super().__init__()

        # partial reconstruction is not supported by this adapter
        if reconstruct_rate < 1.0:
            raise NotImplementedError

        # =====================================================================
        # store additional parameters here:
        self.out_channels = out_channels
        self.img_size = to_3tuple(img_size)
        self.patch_size = to_3tuple(patch_size)
        self.enc_embed_dim = enc_embed_dim
        self.enc_depth = enc_depth
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.drop_path_rate = drop_path_rate
        self.attn_drop_rate = attn_drop_rate
        self.norm_layer = norm_layer
        self.attn_type = attn_type
        self.has_learnable_embed = has_learnable_embed
        self.num_learnable_tokens = num_learnable_tokens
        self.pos_embed_type = pos_embed_type
        self.reconstruct_rate = reconstruct_rate
        self.reconstruct_all_testing = reconstruct_all_testing
        self.unetr_use_input = unetr_use_input
        self.patchified_dim = calc_patchified_dim(self.img_size, self.patch_size)
        self.num_patches = (
            self.patchified_dim[0] * self.patchified_dim[1] * self.patchified_dim[2]
        )
        self.global_img_size = global_img_size
        self.visualizable = True

        assert all(
            p == self.patch_size[0] for p in self.patch_size
        ), "Currently only square patches are supported"
        # each decoder stage doubles the resolution, so a patch side of 2^k
        # needs k stages to reach full resolution
        self.depth = int(math.log2(self.patch_size[0]))
        logger.debug("setting depth to %s", self.depth)
        logger.debug("unetr img_size: %s", self.img_size)
        logger.debug("unetr patch_size: %s", self.patch_size)
        logger.debug("unetr out_channels: %s", self.out_channels)

        # eager initialization when the encoder dims are already known;
        # previously this raised TypeError because init() required an
        # `input_tasks` positional argument that was not passed here
        if (enc_embed_dim is not None) and (enc_depth is not None):
            self.init(enc_embed_dim, enc_depth)

    def init(
        self,
        enc_embed_dim: int,
        enc_depth: int,
        input_tasks=None,
        in_channels: int = None,
        global_img_size: Tuple[int, int, int] = None,
        *args,
        **kwarks,
    ):
        """Build the decoder once the encoder's dimensions are known.

        Args:
            enc_embed_dim: embedding dim of the encoder tokens.
            enc_depth: number of encoder blocks; must be a multiple of the
                decoder depth so intermediate outputs can be tapped evenly.
            input_tasks: task names of the encoder inputs; used to derive
                ``in_channels`` (one channel per non-"seg" task) when
                ``in_channels`` is not given explicitly.
            in_channels: explicit channel count of the raw input volume
                (only used by the ``unetr_use_input`` branch).
            global_img_size: accepted for interface symmetry with the other
                adapters; not used by this decoder.
        """
        # assert that the encoder depth is a multiple of the decoder depth:
        assert (
            enc_depth % self.depth == 0
        ), "Encoder Depth must be a multiple of decoder depth"

        self.enc_embed_dim = enc_embed_dim
        self.enc_depth = enc_depth
        self.input_tasks = input_tasks

        blocks_per_decoder_adapter = int(enc_depth / self.depth)
        indices = list(
            range(blocks_per_decoder_adapter - 1, enc_depth, blocks_per_decoder_adapter)
        )
        logger.debug("initializing UNETR - indices=%s", indices)

        self.proj_context = nn.Linear(enc_embed_dim, self.embed_dim)

        # resolve the raw-input channel count
        if in_channels is not None:
            self.in_channels = in_channels
        elif input_tasks is not None:
            self.in_channels = len([task for task in input_tasks if task != "seg"])
        elif not self.unetr_use_input:
            # the raw-input branch (decoders[0]) is never invoked in forward
            # when unetr_use_input is False, so its input width is irrelevant
            self.in_channels = 1
        else:
            raise ValueError(
                "UNETROutputAdapter.init requires `input_tasks` or `in_channels` "
                "when unetr_use_input=True"
            )
        base_dim = 64
        decoder_dim = base_dim * 2 ** (self.depth - 1)
        logger.debug("decoder_dim=%s", decoder_dim)

        # decoders[0] processes the raw input volume (only used when
        # unetr_use_input is True); decoders[1:] process tapped token volumes
        self.decoders = nn.ModuleList(
            [
                nn.Sequential(
                    UNETR.Conv3DBlock(self.in_channels, base_dim // 2, 3),
                    UNETR.Conv3DBlock(base_dim // 2, base_dim, 3),
                )
            ]
        )
        self.decoder_upsamplers = nn.ModuleList()

        for i in range(1, self.depth):
            # deconv chain that brings a token volume up to the resolution of
            # upsampler stage i
            decoder = nn.Sequential(
                UNETR.Deconv3DBlock(self.embed_dim, decoder_dim),
                *[
                    UNETR.Deconv3DBlock(
                        decoder_dim // 2**j, decoder_dim // 2 ** (j + 1)
                    )
                    for j in range(self.depth - 1 - i)
                ],
            )

            target_dim = int(base_dim * 2 ** (i - 1))

            logger.debug("target_dim=%s", target_dim)
            # fuses the skip volume with the coarser path, then upsamples x2
            upsampler = nn.Sequential(
                UNETR.Conv3DBlock(target_dim * 4, target_dim * 2),
                UNETR.Conv3DBlock(target_dim * 2, target_dim * 2),
                UNETR.SingleDeconv3DBlock(target_dim * 2, target_dim),
            )

            self.decoders.append(decoder)
            self.decoder_upsamplers.append(upsampler)

        # the last upsampler starts the pyramid from the deepest token volume
        self.decoder_upsamplers.append(
            UNETR.SingleDeconv3DBlock(self.embed_dim, decoder_dim)
        )

        self.decoder_header_with_input = UNETR.Conv3DBlock(128, 64)
        self.decoder_header_no_input = UNETR.Conv3DBlock(64, 64)

        self.decoder_header = nn.Sequential(
            # input is 64 channels (not 128) because the raw-input features
            # are already fused by decoder_header_with_input when used
            UNETR.Conv3DBlock(64, 64),
            UNETR.SingleConv3DBlock(64, self.out_channels, 1),
        )

    def forward(
        self,
        encoder_tokens,
        task_range: Optional[Tuple[int, int]],
        perm_idx: Optional[torch.Tensor],
        patchified_dim: Tuple[int, int, int] = None,
        batch: Optional[Dict[str, torch.Tensor]] = None,
        adapter_task: str = None,
        *args,
        **kwargs,
    ):
        """Decode the list of per-block encoder outputs into patchified maps.

        ``task_range``/``perm_idx`` come in two flavours: dicts keyed by task
        (multi-task case; per-task tokens are unshuffled and averaged) or a
        single (start, end) tuple with an optional permutation tensor.
        Returns the output patchified (and re-shuffled when ``perm_idx`` is a
        tensor) to match the caller's token layout.
        """
        assert isinstance(
            encoder_tokens, list
        ), f"encoder_tokens must be list, found {type(encoder_tokens)}"
        blocks_per_decoder_adapter = int(self.enc_depth / self.depth)
        indices = list(
            range(
                blocks_per_decoder_adapter - 1,
                self.enc_depth,
                blocks_per_decoder_adapter,
            )
        )

        patchified_dim = (
            self.patchified_dim if patchified_dim is None else patchified_dim
        )

        z = []
        for i in range(self.depth):
            # for each token, average over all modalities.
            # For UNETR only one modality i.e. no averaging
            # Tokens must be unshuffled.
            if (not isinstance(perm_idx, torch.Tensor)) and (perm_idx is not None):
                # dict case: perm_idx maps task -> permutation, task_range
                # maps task -> (start, end)
                task_embeddings = []
                for task, (start, end) in task_range.items():
                    task_tokens = encoder_tokens[indices[i]][:, start:end, ...]
                    if task_tokens.shape[1] == 0:
                        continue
                    p = perm_idx.get(task, None)
                    if p is not None:
                        task_tokens = unshuffle_patches(task_tokens, p)
                    task_embeddings.append(task_tokens[..., None])
                x = torch.concat(task_embeddings, dim=-1).mean(-1)
            else:
                x = encoder_tokens[indices[i]][
                    :, task_range[0] : task_range[1], ...
                ]
                if isinstance(perm_idx, torch.Tensor):
                    x = unshuffle_patches(x, perm_idx, patch_size=1)
            x = self.proj_context(x)
            # lay the tokens out as a low-resolution volume (one per voxel)
            x = x.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
            x = unpatchify(x, patchified_dim, patch_size=1)
            z.append(x)

        # fuse the tapped volumes coarse-to-fine through the upsampler pyramid
        zb = self.decoder_upsamplers[-1](z[-1])
        for i in range(1, self.depth):
            za = self.decoders[self.depth - i](z[-i - 1])
            zb = self.decoder_upsamplers[self.depth - i - 1](torch.cat([za, zb], dim=1))
        if self.unetr_use_input:
            if "images" in batch:
                # if modalities are already stacked, there is a 'images' object in input_images
                # and it does not need to be restacked.
                batch = batch["images"]
            else:
                # otherwise stack modalities in correct order
                batch = torch.cat(
                    [batch[task] for task in self.input_tasks if task != "seg"],
                    dim=1,
                )
            za = self.decoders[0](batch)
            # za = processed input
            # zb = processed tokens
            x = torch.cat([za, zb], dim=1)
            x = self.decoder_header_with_input(x)
            x = self.decoder_header(x)
        else:
            x = self.decoder_header_no_input(zb)
            x = self.decoder_header(x)

        x = patchify(x, patch_size=self.patch_size)

        if isinstance(perm_idx, torch.Tensor):
            x, _ = shuffle_patches(x, patch_size=self.patch_size, permutations=perm_idx)
        return x

# Copied from original MultiMAE repository, slightly modified 
# https://github.com/EPFL-VILAB/MultiMAE
class LinearOutputAdapter(nn.Module):
    def __init__(
        self,
        num_classes: int = 2,
        enc_embed_dim: Optional[int] = None,
        norm_layer: nn.Module = partial(nn.LayerNorm, eps=1e-6),
        init_scale: float = 1.0,
        num_global_tokens: int = 1,
        token_aggregation: Literal["cls", "cls-mean", "mean"] = "cls",
        *args,
        **kwargs,
    ):
        super().__init__()

        # binary classification is done with 1 class, not 2 classes
        self.num_classes = 1 if num_classes == 2 else num_classes
        # self.num_classes = num_classes
        self.enc_embed_dim = enc_embed_dim
        self.norm_layer = norm_layer
        self.init_scale = init_scale  # copied from MultiMAE code
        self.num_global_tokens = num_global_tokens
        self.token_aggregation = token_aggregation
        self.visualizable = False

        if enc_embed_dim is not None:
            self.init(enc_embed_dim=enc_embed_dim)

    def init(self, enc_embed_dim: int, *args, **kwargs):
        self.enc_embed_dim = enc_embed_dim

        self.norm = self.norm_layer(enc_embed_dim)
        self.head = (
            nn.Linear(enc_embed_dim, self.num_classes)
            if self.num_classes > 0
            else nn.Identity()
        )

        self.apply(self._init_weights)
        self.head.weight.data.mul_(self.init_scale)
        self.head.bias.data.mul_(self.init_scale)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, encoder_tokens: torch.Tensor, *args, **kwargs):
        if isinstance(encoder_tokens, list):
            encoder_tokens = encoder_tokens[-1]

            # CLS tokens at the beginning
            if self.token_aggregation == "cls":
                x = encoder_tokens[:, 0]
            if self.token_aggregation == "cls-mean":
                x = encoder_tokens[:, : self.num_global_tokens].mean(dim=1)
            if self.token_aggregation == "mean":
                x = encoder_tokens.mean(dim=1)
        # x = encoder_tokens[:, 0]
        x = self.head(self.norm(x))
        return x
