import logging

logger = logging.getLogger(__name__)

from typing import Union, List, Tuple, Dict, Optional
import copy

import torch
import torch.nn as nn
from timm.layers.helpers import to_3tuple
from collections import OrderedDict
from omegaconf.listconfig import ListConfig
import math
from models.multimae3d_utils import (
    calc_patchified_dim,
    unpatchify,
    unshuffle_patches,
    build_position_embedding,
    get_batch_pos_embed,
    mask_data,
)
from util.setup_functions import resolve_listconfig_of_dicts
from models.multimae3d_adapters import (
    PatchedInputAdapter,
    SpatialOutputAdapter,
    SegmenterOutputAdapter,
    SETROutputAdapter,
    UNETROutputAdapter,
    ConvSpatialOutputAdapter,
    PatchConvSpatialOutputAdapter,
    LinearOutputAdapter,
)
from models import multimae3d_adapters
from models.vim_encoder import create_vision_mamaba_encoder


# =============================================================================
# MultiMAE for 3D data
# =============================================================================


class MultiMAE3D(torch.nn.Module):
    """Multi-modal Masked Autoencoder (MultiMAE) for 3D volumetric data.

    Adapted for 3D from the original MultiMAE repository
    (https://github.com/EPFL-VILAB/MultiMAE). Each input modality ("task")
    is patchified and partially masked, tokenized by its own input adapter,
    concatenated with optional global ([CLS]) tokens, passed through a
    shared Vision-Mamba encoder, and decoded per task by the configured
    output adapters.
    """

    def __init__(
        self,
        input_adapters,
        output_adapters,
        mask_ratio = 0.75,
        global_img_size = (240, 240, 160),
        img_size = (160, 176, 144),
        patch_size =  (16, 16, 16),
        num_global_tokens = 1,
        embed_dim = 768,
        use_dirichlet = True,
        use_seg_masking  = False,
        leave_one_out = False,
        dirichlet_alpha = 1.0,
        pos_embed_type = "sincos",
        token_aggregation = None,
        eval_masking = True,
        apply_pos_embed_to_context = False,
        deep_copy_batch = False,
        depth = 12,
        tasks = ['t1','t1ce', 't2', 'flair'],
        in_channels = 1,
        *args,
        **kwargs,
    ) -> None:
        """Build the model.

        Args:
            input_adapters: OrderedDict task name -> input adapter; the fixed
                order guarantees deterministic token concatenation.
            output_adapters: OrderedDict task name -> output adapter.
            mask_ratio: float, or dict mapping task name -> mask ratio.
            global_img_size: reference full-volume size used when building the
                global positional-embedding grid.
            img_size: 3D crop size fed to the model (3-tuple).
            patch_size: 3D patch size (3-tuple).
            num_global_tokens: number of learnable [CLS]-style tokens; 0
                disables them.
            embed_dim: encoder token dimension.
            use_dirichlet: sample per-task mask ratios from a Dirichlet
                distribution with concentration ``dirichlet_alpha``.
            use_seg_masking: use segmentation-driven masking instead.
            leave_one_out: fully mask one task for cross-modal reconstruction.
            dirichlet_alpha: Dirichlet concentration parameter.
            pos_embed_type: positional-embedding type (e.g. "sincos").
            token_aggregation: pooling used when ``forward(only_encode=True)``:
                None (raw tokens), "cls" or "mean".
            eval_masking: if False, masking defaults to off outside training.
            apply_pos_embed_to_context: if True, decoders receive the full
                multi-task ranges/permutations instead of a single task's.
            deep_copy_batch: deep-copy the input batch in forward (needed by
                UNETR-style leave-one-out reconstruction).
            depth: encoder depth reported to the output adapters.
            tasks: NOTE(review): this parameter is currently unused here;
                the effective task list comes from ``input_adapters``.
            in_channels: channels per modality, forwarded to output adapters.
        """
        super().__init__(*args, **kwargs)

        # =====================================================================
        # assertions
        # Input and output adapters need to have fixed order to guarantee
        # correct tokenization and concatenation of tokens
        assert isinstance(input_adapters, OrderedDict), "input_adapters must be an OrderedDict"
        assert isinstance(output_adapters, OrderedDict), "output_adapters must be an OrderedDict"

        # =====================================================================
        # store additional parameters here:

        self.img_size = to_3tuple(img_size)
        self.patch_size = to_3tuple(patch_size)
        self.num_global_tokens = num_global_tokens
        self.embed_dim = embed_dim
        self.use_seg_masking = use_seg_masking
        self.use_dirichlet = use_dirichlet
        self.dirichlet_alpha = dirichlet_alpha
        self.leave_one_out = leave_one_out
        self.mask_ratio = mask_ratio
        self.pos_embed_type = pos_embed_type
        self.depth = depth
        self.token_aggregation = token_aggregation
        self.eval_masking = eval_masking
        self.full_masked_val = None # if not None, any task in batch with 100% masking will be replaced by this value
        self.apply_pos_embed_to_context = apply_pos_embed_to_context
        self.deep_copy_batch = deep_copy_batch


        # Patches per axis, e.g. img_size/patch_size = (10, 11, 9)
        self.patchified_dim = calc_patchified_dim(self.img_size, self.patch_size)
        self.num_patches = (self.patchified_dim[0] * self.patchified_dim[1] * self.patchified_dim[2]) # total number of patches

        print(f"img_size: {img_size}, patch_size: {patch_size}, num_patches: {self.num_patches}")
        print(f"{apply_pos_embed_to_context=}")

        # =====================================================================
        # Initialize the input and output adapters (deferred init so that the
        # adapters learn the encoder embedding dimension and task list here).
        for _, input_adapter in input_adapters.items():
            input_adapter.init(embed_dim=embed_dim)
        self.input_adapters = nn.ModuleDict(input_adapters)

        for _, output_adapter in output_adapters.items():
            output_adapter.init(
                enc_embed_dim=embed_dim,
                enc_depth=self.depth,
                input_tasks=list(self.input_adapters.keys()),
                in_channels=in_channels,
                global_img_size=global_img_size,
            )
        self.output_adapters = nn.ModuleDict(output_adapters)

        # =====================================================================
        # Initialize the learnable global ([CLS]-style) tokens.
        if num_global_tokens > 0:
            self.global_tokens = nn.Parameter(
                torch.randn(num_global_tokens, self.embed_dim)
            )

        # =====================================================================
        # Build the positional embedding (e.g. sincos) over the global grid,
        # e.g. self.global_grid_size = (10, 11, 9).
        self.pos_embed, self.global_grid_size = build_position_embedding(
            num_patches=self.num_patches,
            embed_dim=embed_dim,
            pos_embed_type=pos_embed_type,
            patchified_dim=self.patchified_dim,
            patch_size=self.patch_size,
            global_img_size=global_img_size,
        )

        # =====================================================================
        # Initialize our own parameters; run BEFORE the backbone is created so
        # the backbone keeps its own initialization scheme.
        self.initialize_weights()

        # =====================================================================
        # Initialize the encoder backbone.
        self.encoder = create_vision_mamaba_encoder()

    # =====================================================================
    # Copied from original MultiMAE repository, modified for 3D application
    # https://github.com/EPFL-VILAB/MultiMAE
    def initialize_weights(self):
        """Initialize global tokens, linear/layer-norm weights and conv projections."""
        if self.num_global_tokens > 0:
            nn.init.normal_(self.global_tokens, std=0.02)

        self.apply(self._init_weights)
        for name, m in self.named_modules():
            if isinstance(m, nn.Linear):
                if "qkv" in name:
                    # treat the weights of Q, K, V separately
                    val = math.sqrt(
                        6.0 / float(m.weight.shape[0] // 3 + m.weight.shape[1])
                    )
                    nn.init.uniform_(m.weight, -val, val)
                elif "kv" in name:
                    # treat the weights of K, V separately
                    val = math.sqrt(
                        6.0 / float(m.weight.shape[0] // 2 + m.weight.shape[1])
                    )
                    nn.init.uniform_(m.weight, -val, val)
            if isinstance(m, nn.Conv3d):
                if ".proj" in name:
                    # From MAE, initialize projection like nn.Linear (instead of nn.Conv2d)
                    w = m.weight.data
                    nn.init.xavier_uniform_(w.view([w.shape[0], -1]))

    def _init_weights(self, m):
        """Default per-module init: xavier for Linear, unit/zero for LayerNorm."""
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)


    def forward(
        self,
        x,
        mask_ratio: Union[float, Dict[str, float]] = None,
        permutations: Dict[str, torch.Tensor] = None,
        return_as_image: bool = False,
        use_dirichlet: bool = None,
        dirichlet_alpha: float = None,
        leave_one_out: bool = None,
        return_as_dict: bool = False,
        only_encode: bool = False,
        token_aggregation: str = None,
        use_seg_masking: bool = None,
        deep_copy_batch: bool = None,
    ):
        """Mask, tokenize, encode and decode a multi-modal batch.

        Args:
            x: dict mapping task name -> batch tensor (assumed
                (B, C, D, H, W) given the patchification calls — TODO confirm).
            mask_ratio, use_dirichlet, dirichlet_alpha, leave_one_out,
            token_aggregation, use_seg_masking, deep_copy_batch: per-call
                overrides; None falls back to the constructor value.
            permutations: optional fixed patch permutations per task.
            return_as_image: unshuffle/unpatchify outputs (and inputs) back to
                image space for visualization (only for visualizable adapters).
            return_as_dict: return a dict instead of a tuple.
            only_encode: return encoder tokens only (optionally pooled).

        Returns:
            Tuple or dict of (selected_patches, masked_patches, perm_indices,
            tokens, encoder_tokens, task_ranges, reconstructed_patches), or
            the (pooled) encoder tokens when ``only_encode`` is set.
        """
        # UNETR requires deep copy of input batch for leave-one-out reconstruction
        deep_copy_batch = self.deep_copy_batch if deep_copy_batch is None else deep_copy_batch
        if deep_copy_batch:
            x = copy.deepcopy(x)

        # selected_patches, masked_patches, selected_pos_emb and masked_pos_emb are shuffled!
        selected_patches = {}
        masked_patches = {}
        reconstructed_patches = {}
        selected_pos_embed = {}
        masked_pos_embed = {}
        perm_indices = {}
        tokens = {}

        input_img_size = None
        input_patchified_dim = None
        # Tokenize input for each task

        # Outside training, disable masking by default unless eval_masking is on.
        if not (self.training) and not (self.eval_masking):
            use_dirichlet = False if use_dirichlet is None else use_dirichlet
            leave_one_out = False if leave_one_out is None else leave_one_out
            mask_ratio = 0.0 if mask_ratio is None else mask_ratio

        use_seg_masking = ( self.use_seg_masking if use_seg_masking is None else use_seg_masking )
        use_dirichlet = self.use_dirichlet if use_dirichlet is None else use_dirichlet
        dirichlet_alpha = ( self.dirichlet_alpha if dirichlet_alpha is None else dirichlet_alpha )
        leave_one_out = self.leave_one_out if leave_one_out is None else leave_one_out
        token_aggregation = ( self.token_aggregation if token_aggregation is None else token_aggregation )

        # If mask_ratio is a dict, a different mask ratio is used per modality.
        if isinstance(self.mask_ratio, dict):
            mask_ratio = self.mask_ratio.copy() if mask_ratio is None else mask_ratio
        else:
            mask_ratio = self.mask_ratio if mask_ratio is None else mask_ratio

        # Masking-strategy precedence: seg > dirichlet > uniform.
        if use_seg_masking:
            mask_mode = "seg"
        elif use_dirichlet:
            mask_mode = "dirichlet"
        else:
            mask_mode = "uniform"
        # List of input task names (adapter order is fixed, see __init__).
        input_tasks = [task for task in self.input_adapters]

        # perm_indices are the per-task indices used to shuffle the patches.
        selected_patches, masked_patches, perm_indices, mask_ratio, leave_one_out_task = mask_data(
            batch=x,
            tasks=input_tasks,
            mask_ratio=mask_ratio,
            mode=mask_mode,
            patch_size=self.patch_size,
            leave_one_out=leave_one_out,
            dirichlet_alpha=dirichlet_alpha,
            permutations=permutations,
        )

        for task in self.input_adapters:
            selected_shuffled_patches = selected_patches[task]
            num_selected_patches = selected_shuffled_patches.shape[1]

            # If full_masked_val is set and mask_ratio is a dict, overwrite any
            # 100%-masked task's patches (and its raw input) with that value.
            # NOTE(review): the task names here ('t1c', 'fla') differ from the
            # constructor defaults ('t1ce', 'flair') — confirm which naming the
            # data pipeline uses.
            if self.full_masked_val is not None and isinstance(mask_ratio, dict):
                image_tasks = ['t1', 't1c', 't2', 'fla']
                for mask_task, val in mask_ratio.items():
                    if task == mask_task and val == 1.0:
                        selected_shuffled_patches = torch.full_like(selected_shuffled_patches, self.full_masked_val)
                        x[task] = torch.full_like(x[task], self.full_masked_val)
                    if task == 'images' and val == 1.0 and mask_task in image_tasks:
                        # 'images' stacks the modalities along dim 2 of the
                        # patches / dim 1 of the raw batch — TODO confirm layout.
                        channel_idx = image_tasks.index(mask_task)
                        selected_shuffled_patches[:, :, channel_idx] = self.full_masked_val
                        x['images'][:, channel_idx] = torch.full_like(x['images'][:, channel_idx], self.full_masked_val)

            perm_idx = perm_indices[task]
            batch_size = selected_shuffled_patches.shape[0]

            # Tokenize selected patches
            if self.input_adapters[task] is None:
                print("==================================")
                print(f'input adapted is not exist {task}')
                print("==================================")
                continue

            if task == "seg":
                selected_shuffled_patches = selected_shuffled_patches.float()

            # Map patches to tokens: feed the task's patches through its adapter.
            tokens[task] = self.input_adapters[task](selected_shuffled_patches)

            # check if image has same shape as default. If not interpolte positional embeddings
            input_img_size = x[task].shape[2:]
            # (image_size[0]//patch_size[0], image_size[1]//patch_size[1], image_size[2]//patch_size[2])
            input_patchified_dim = calc_patchified_dim(input_img_size, self.patch_size)

            # Replicate the positional embedding over the batch dimension so it
            # can be added to the tokens directly.
            batch_pos_embed, patchified_dim = get_batch_pos_embed(
                batch=x,
                pos_embed=self.pos_embed,
                pos_embed_type=self.pos_embed_type,
                input_patchified_dim=input_patchified_dim,
                global_grid_size=self.global_grid_size,
                patch_size=self.patch_size,
                batch_size=batch_size,
            )

            # Shuffle the positional embedding with the same permutation used
            # for the patches, keeping token/position alignment.
            batch_pos_embed = batch_pos_embed[ torch.arange(batch_size)[:, None], perm_idx, ... ]
            selected_pos_embed[task] = batch_pos_embed[:, :num_selected_patches]
            # Add the positional embedding to the task tokens.
            tokens[task] = tokens[task] + selected_pos_embed[task]

        # =====================================================================
        # Concatenate tokens for each task
        task_order = list(tokens.keys())
        task_ranges = OrderedDict()  # token index range of each modality

        # Calculate the range of tokens for each task
        lower_bound = self.num_global_tokens
        for task in task_order:
            upper_bound = lower_bound + tokens[task].shape[1]
            task_ranges[task] = (lower_bound, upper_bound)
            lower_bound = upper_bound

        # Stack input tokens (batch_size, all_patches, embed_dim): concatenate
        # every modality along the patch dimension.
        input_tokens = torch.cat([tokens[task] for task in task_order], dim=1)

        # Add [CLS] token(s): when num_global_tokens > 0 they are prepended.
        if self.num_global_tokens > 0:
            # Repeat global tokens for each batch element
            batch_global_tokens = self.global_tokens.repeat(input_tokens.shape[0], 1, 1)
            input_tokens = torch.cat([batch_global_tokens, input_tokens], dim=1)

        # =====================================================================
        # Pass through encoder
        encoder_tokens = []
        x_hat = input_tokens

        if self.encoder is None:
            encoder_tokens = [input_tokens]
        else:
            encoder_tokens = [*self.encoder(x_hat)]

        # Encode-only path: return (optionally pooled) encoder tokens.
        # NOTE(review): an unrecognized token_aggregation value falls through
        # to the decoding path below — confirm this is intended.
        if only_encode:
            if token_aggregation is None:
                return encoder_tokens
            if token_aggregation == "cls":
                return [enc[:, : self.num_global_tokens, ...] for enc in encoder_tokens]
            if token_aggregation == "mean":
                return [enc.mean(dim=1) for enc in encoder_tokens]

        # =====================================================================
        # Pass through output adapters
        # NOTE(review): patchified_dim / input_img_size below come from the
        # LAST iteration of the tokenization loop — all tasks are assumed to
        # share the same spatial size.
        for task, output_adapter in self.output_adapters.items():

            if self.apply_pos_embed_to_context:  # decode against the full multi-task context
                perm_index = perm_indices
                task_range = task_ranges
            else: # decode only this modality's token range
                task_range = task_ranges[task] if task in task_ranges else None
                perm_index = perm_indices[task] if task in perm_indices else None

            # UNETR-style adapters always need the full dicts as fallback.
            if hasattr(output_adapter, 'unetr_use_input'):
                perm_index = perm_indices if perm_index is None else perm_index
                task_range = task_ranges if task_range is None else task_range

            output_patches = output_adapter(
                encoder_tokens,
                task_range,
                perm_index,
                patchified_dim,
                batch=x,
                adapter_task=task
            )

            num_selected_patches = (
                selected_patches[task].shape[1] if task in selected_patches else 0
            )

            num_masked_patches = output_patches.shape[1] - num_selected_patches

            if task in masked_patches:
                masked_patches[task] = masked_patches[task][
                    :, :num_masked_patches
                ]  # remove the patches ignored by the decoder

            task_range = task_ranges[task] if task in task_ranges else None
            perm_index = perm_indices[task] if task in perm_indices else None

            if (
                return_as_image and output_adapter.visualizable
            ):  # only for testing, needs full size
                if perm_index is not None and isinstance(perm_index, torch.Tensor):
                    output_patches = unshuffle_patches(output_patches, perm_index)
                output_patches = unpatchify(
                    output_patches, input_img_size, self.patch_size
                )

            reconstructed_patches[task] = output_patches

        # if return_as_image is true, unshuffle and unpatchify the selected
        # input patches with zeros for masked patches
        if return_as_image:
            for task in selected_patches:
                # masked_patches can also be empty but has an entry in the dict
                if task in masked_patches:
                    patches = torch.cat(
                        [
                            selected_patches[task],
                            torch.zeros_like(masked_patches[task]),
                        ],
                        dim=1,
                    )
                    patches = unshuffle_patches(patches, perm_indices[task])
                    patches = unpatchify(patches, input_img_size, self.patch_size)
                    selected_patches[task] = (
                        patches  # overwrite selected patches with full size image of selected and masked (zeros)
                    )

        if return_as_dict:
            return_dict = {
                "selected_patches": selected_patches,
                "masked_patches": masked_patches,
                "perm_indices": perm_indices,
                "tokens": tokens,
                "encoder_tokens": encoder_tokens,
                "task_ranges": task_ranges,
                "reconstructed_patches": reconstructed_patches,
            }
            return return_dict
        else:
            return (
                selected_patches,
                masked_patches,
                perm_indices,
                tokens,
                encoder_tokens,
                task_ranges,
                reconstructed_patches,
            )


# =============================================================================
# Model creation function for MRI case
def create_multimae(
    cfg,
    img_size,
    patch_size,
    embed_dim,
    mask_ratio,
    num_global_tokens = 1,
    pos_embed_type = "sincos",
    use_seg_masking = False,
    use_dirichlet = False,
    dirichlet_alpha = 1.0,
    leave_one_out = False,
    global_img_size = None,
    apply_pos_embed_to_context = False
):
    """Build a MultiMAE3D model for the MRI use case.

    Input/output adapters are instantiated from ``cfg.model.model_params``
    (see config/model/vit-B.yaml), while the model itself uses the explicitly
    passed ``img_size`` / ``patch_size`` / ``embed_dim`` / ``mask_ratio``.
    NOTE(review): the adapters read img_size/patch_size from cfg while the
    model uses the arguments — callers must keep the two in sync.

    Args:
        cfg: OmegaConf-style config exposing ``cfg.model.model_params``.
        img_size, patch_size, embed_dim, mask_ratio: model geometry and
            masking configuration (see MultiMAE3D).
        num_global_tokens: number of learnable global ([CLS]) tokens.
        pos_embed_type: encoder positional-embedding type.
        use_seg_masking, use_dirichlet, dirichlet_alpha, leave_one_out:
            masking-strategy options (see MultiMAE3D).
        global_img_size: reference full-volume size; defaults to
            [240, 240, 160].
        apply_pos_embed_to_context: see MultiMAE3D.

    Returns:
        A configured MultiMAE3D instance.
    """
    # Fix: avoid a mutable default argument (a list default is shared across
    # calls); the effective default value is unchanged.
    if global_img_size is None:
        global_img_size = [240, 240, 160]

    params = cfg.model.model_params

    print(f"\nInstantiate input adapters")
    input_adapters = instantiate_input_adapters(
        img_size=params.img_size,
        patch_size=params.patch_size,
        input_tasks=params.input_tasks,
        embed_dim=params.embed_dim,
        input_has_learnable_embed=False,
        in_channels=1,
    )

    print(f"\nInstantiate output adapters")
    output_adapters = instantiate_output_adapters(
        img_size=params.img_size,
        patch_size=params.patch_size,
        out_channels=1,
        output_tasks=params.output_tasks,
        num_seg_classes=3,
        decoder_pos_embed_type=params.decoder_pos_embed_type,
        decoder_depth=params.decoder_depth,
        decoder_num_heads=params.decoder_num_heads,
        decoder_num_learnable_tokens=0,
        decoder_has_learnable_embed=False,
        decoder_reconstruct_rate=params.decoder_reconstruct_rate,
        decoder_reconstruct_all_testing=True,
        attn_type=params.attn_type,
        seg_output_adapter_type="spatial",
        output_adapter_type="spatial",
        segmenter_patch_div=1,
        classifier=None,
        num_classes=None,
        unetr_use_input=False,
        setr_interpolation_mode="nearest",
        setr_embed_dim=None,
        setr_version=1,
    )

    print(f"\n{' Instantiate MultiMAE3D '}")
    return MultiMAE3D(
        input_adapters=input_adapters,
        output_adapters=output_adapters,
        img_size=img_size,
        patch_size=patch_size,
        embed_dim=embed_dim,
        mask_ratio=mask_ratio,
        num_global_tokens=num_global_tokens,
        pos_embed_type=pos_embed_type,
        use_seg_masking=use_seg_masking,
        use_dirichlet=use_dirichlet,
        dirichlet_alpha=dirichlet_alpha,
        leave_one_out=leave_one_out,
        global_img_size=global_img_size,
        apply_pos_embed_to_context=apply_pos_embed_to_context,
    )


def instantiate_input_adapters(
    # cfg: OmegaConf,
    img_size = None,
    patch_size = 16,
    in_channels: int = 1,
    input_tasks = None,
    embed_dim: int = 768,
    input_has_learnable_embed: bool = False,
    **kwargs,
):
    """Create one PatchedInputAdapter per input task.

    Args:
        img_size: 3D crop size; defaults to [160, 176, 144].
        patch_size: patch edge length (int or 3-tuple).
        in_channels: channels per modality.
        input_tasks: task names; defaults to ['t1', 't1c', 't2', 'fla'].
        embed_dim: token embedding dimension.
        input_has_learnable_embed: whether each adapter carries a learnable
            embedding.
        **kwargs: ignored; kept for call-site compatibility (fixes the
            previous ``**kwawrgs`` typo).

    Returns:
        OrderedDict mapping task name -> PatchedInputAdapter, preserving
        task order (required by MultiMAE3D).
    """
    # Fix: avoid mutable default arguments (lists are shared across calls);
    # the effective defaults are unchanged.
    if img_size is None:
        img_size = [160, 176, 144]
    if input_tasks is None:
        input_tasks = ['t1', 't1c', 't2', 'fla']
    assert input_tasks is not None

    input_adapters = OrderedDict()
    for task in input_tasks:
        input_adapters[task] = PatchedInputAdapter(
            in_channels,
            img_size,
            patch_size,
            embed_dim=embed_dim,
            has_learnable_embed=input_has_learnable_embed,
        )
    return input_adapters


def instantiate_output_adapters(
    # cfg: OmegaConf,
    img_size = None,
    patch_size = 16,
    out_channels = 1,
    output_tasks = None,
    decoder_embed_dim = 384,
    num_seg_classes = 3,
    decoder_pos_embed_type = 'sincos',
    decoder_depth = 3,
    decoder_num_heads = 4,
    decoder_num_learnable_tokens = 0,
    decoder_has_learnable_embed = False,
    decoder_reconstruct_rate = 0.6,
    decoder_reconstruct_all_testing = True,
    attn_type = None,
    seg_output_adapter_type = "spatial",
    output_adapter_type = "spatial",
    segmenter_patch_div = 1,
    classifier = None,
    num_classes = None,
    unetr_use_input= False,
    setr_interpolation_mode = "nearest",
    setr_embed_dim = None,
    setr_version = 1,
    **kwargs,
):
    """Instantiate the per-task output adapters.

    ``output_tasks`` must be a mapping of
    ``task -> {"type": <adapter class name in models.multimae3d_adapters>,
               "opts": <dict of per-task overrides>}``.
    Per-task opts take precedence; missing keys are filled from the shared
    decoder options assembled below.

    Returns:
        OrderedDict mapping task name -> instantiated output adapter, plus a
        trailing ("label", LinearOutputAdapter) entry when
        ``classifier == "linear"``.
    """
    # Fix: avoid a mutable default list for img_size; effective default
    # unchanged. output_tasks now defaults to None: the previous list default
    # was unusable (it crashed on ``.items()``), so requiring a dict is safe.
    if img_size is None:
        img_size = [160, 176, 144]

    adapter_opts = {
        "img_size": img_size,
        "patch_size": patch_size,
        "embed_dim": decoder_embed_dim,
        "num_heads": decoder_num_heads,
        "depth": decoder_depth,
        "num_learnable_tokens": decoder_num_learnable_tokens,
        "has_learnable_embed": decoder_has_learnable_embed,
        "attn_type": attn_type,
        # Fix: the previous fallback referenced an undefined name
        # ``pos_embed_type`` (NameError whenever decoder_pos_embed_type was
        # None); fall back to a caller-provided kwarg or "sincos" instead.
        "pos_embed_type": (
            decoder_pos_embed_type
            if decoder_pos_embed_type is not None
            else kwargs.get("pos_embed_type", "sincos")
        ),
        "reconstruct_rate": decoder_reconstruct_rate,
        "reconstruct_all_testing": decoder_reconstruct_all_testing,
        "segmenter_patch_div": segmenter_patch_div,
        "unetr_use_input": unetr_use_input,
        "setr_interpolation_mode": setr_interpolation_mode,
        "setr_embed_dim": setr_embed_dim,
        "setr_version": setr_version,
        "num_classes": num_classes,
    }

    # for backwards compatibility
    # NOTE(review): this mapping is currently unused — adapters are resolved
    # by name via getattr(multimae3d_adapters, ...) below; kept as a reference
    # of the known adapter types.
    ADAPTERS = {
        "spatial": SpatialOutputAdapter,
        "segmenter": SegmenterOutputAdapter,
        "setr": SETROutputAdapter,
        "unetr": UNETROutputAdapter,
        "conv_spatial": ConvSpatialOutputAdapter,
        "patch_conv_spatial": PatchConvSpatialOutputAdapter,
        "linear": LinearOutputAdapter,
    }
    assert output_tasks is not None ,'output task is not None'

    # Resolve adapter classes and collect per-task opts.
    # Fix: the previous code rebound ``task_opts`` on every iteration so only
    # the LAST task's opts survived; it also mutated the caller's
    # ``output_tasks`` dict. Build fresh structures instead.
    task_opts = {}
    resolved_tasks = []
    for task, value in output_tasks.items():
        task_opts[task] = value['opts']
        resolved_tasks.append((task, getattr(multimae3d_adapters, value['type'])))

    output_adapters = []
    for task, adapter in resolved_tasks:
        print(f"- Initialize {task} output adapter:")
        print(f"\t- type: {adapter}")
        print(f"\t- opts: {task_opts.get(task, {})}")
        # Segmentation decodes one channel per class; image tasks decode
        # ``out_channels`` channels.
        task_out_channels = num_seg_classes if task == "seg" else out_channels
        task_in_channels = 1
        # Copy so the caller's opts dict is not mutated when filling defaults.
        opts = dict(task_opts.get(task, {}))
        for key, value in adapter_opts.items():
            opts.setdefault(key, value)
        opts.setdefault("in_channels", task_in_channels)
        opts.setdefault("out_channels", task_out_channels)

        print(f"\t- expanded opts: {opts=}")

        output_adapters.append((task, adapter(**opts)))

    if classifier == "linear":
        print(f"classifier")
        assert (
            num_classes is not None
        ), "classification adapter requires number of classes specified"
        output_adapters.append(
            ("label", LinearOutputAdapter(num_classes=num_classes))
        )
    print(f"\nOutput Adapters: {[adapter[0] for adapter in output_adapters]}")

    return OrderedDict(output_adapters)



if __name__ == '__main__':
    # Import-only module: no standalone CLI behavior is provided.
    pass