import logging

logger = logging.getLogger(__name__)

from typing import Union, List, Tuple, Dict, Optional
import copy

import torch
import torch.nn as nn
from timm.layers import trunc_normal_
from timm.layers.helpers import to_3tuple
from timm.models.vision_transformer import Block
from einops import rearrange
from collections import OrderedDict
from functools import partial
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from omegaconf.listconfig import ListConfig
import math
from models.multimae3d_utils import (
    calc_patchified_dim,
    patchify,
    unpatchify,
    shuffle_patches,
    unshuffle_patches,
    generate_dirichlet_masked,
    sample_random_key,
    build_position_embedding,
    get_batch_pos_embed,
    mask_data,
)
from util.setup_functions import resolve_listconfig_of_dicts
from models.multimae3d_adapters import (
    PatchedInputAdapter,
    SpatialOutputAdapter,
    SegmenterOutputAdapter,
    SETROutputAdapter,
    UNETROutputAdapter,
    ConvSpatialOutputAdapter,
    PatchConvSpatialOutputAdapter,
    LinearOutputAdapter,
)
from models import multimae3d_adapters
from util.setup_functions import get_training_type
from vim_encoder import create_vim_encoder
# =============================================================================
# MultiMAE for 3D data
# =============================================================================


class MultiMAE3D(torch.nn.Module):
    """Multi-modal Masked Autoencoder for 3D (volumetric) data.

    Forward pipeline:
      1. Each input modality ("task") is masked at patch level
         (uniform / Dirichlet / segmentation-guided, see ``mask_data``).
      2. The visible ("selected") patches of each task are tokenized by the
         task's input adapter and positional embeddings are added.
      3. Tokens of all tasks are concatenated (optionally prefixed by
         learnable global/[CLS] tokens) and passed through the encoder.
      4. Each output adapter decodes its task from the encoder token list.

    Adapted for 3D from the original MultiMAE repository:
    https://github.com/EPFL-VILAB/MultiMAE
    """

    def __init__(
        self,
        input_adapters,  # OrderedDict: task name -> input adapter (order fixes token concat order)
        output_adapters,  # OrderedDict: task name -> output adapter
        mask_ratio,  # float or dict(task -> float); TODO: change to Dirichlet distribution
        global_img_size = (240, 240, 160),  # full-resolution size used for pos-embed grid
        img_size = (160, 176, 144),
        patch_size = (16, 16, 16),
        num_global_tokens = 1,  # number of learnable [CLS]-style tokens prepended
        embed_dim = 768,
        num_heads = 12,
        depth = 12,
        use_dirichlet = False,  # sample per-task mask ratios from a Dirichlet distribution
        use_seg_masking = False,  # segmentation-guided masking
        leave_one_out = False,  # fully mask one task; reconstruct it from the others
        dirichlet_alpha = 1.0,
        mlp_ratio = 4.0,
        pos_embed_type = "perceptron",  # TODO: add other positional embedding types
        qkv_bias = True,  # as MultiMAE
        drop_path_rate = 0.0,
        attn_drop_rate = 0.0,
        norm_layer = partial(nn.LayerNorm, eps=1e-6),  # as MultiMAE
        token_aggregation = None,  # None | "cls" | "mean" (used with only_encode)
        eval_masking = True,  # if False, masking defaults to off in eval mode
        apply_pos_embed_to_context = False,
        deep_copy_batch = False,  # deep-copy input batch (UNETR leave-one-out needs this)
        # return_as_image: bool = False,
        *args,
        **kwargs,
    ) -> None:
        super().__init__(*args, **kwargs)

        # =====================================================================
        # assertions
        # Input and output adapters need to have fixed order to guarantee
        # correct tokenization and concatenation of tokens
        assert isinstance( input_adapters, OrderedDict ), "input_adapters must be an OrderedDict"
        assert isinstance( output_adapters, OrderedDict ), "output_adapters must be an OrderedDict"

        # =====================================================================
        # store additional parameters here:
        # NOTE(review): num_heads / mlp_ratio / drop_path_rate / attn_drop_rate /
        # norm_layer / qkv_bias are stored or accepted but the encoder below is
        # built by create_vim_encoder() without arguments — confirm intended.
        self.img_size = to_3tuple(img_size)
        self.patch_size = to_3tuple(patch_size)
        self.num_global_tokens = num_global_tokens
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.use_seg_masking = use_seg_masking
        self.use_dirichlet = use_dirichlet
        self.dirichlet_alpha = dirichlet_alpha
        self.leave_one_out = leave_one_out
        self.mlp_ratio = mlp_ratio
        self.mask_ratio = mask_ratio
        self.pos_embed_type = pos_embed_type
        self.qkv_bias = qkv_bias
        # self.return_as_image = return_as_image
        self.token_aggregation = token_aggregation
        self.eval_masking = eval_masking
        # if not None, any task in batch with 100% masking will be replaced by this value
        self.full_masked_val = None
        self.apply_pos_embed_to_context = apply_pos_embed_to_context
        self.deep_copy_batch = deep_copy_batch

        # Patch-grid dimensions and total patch count for the default img_size.
        self.patchified_dim = calc_patchified_dim(self.img_size, self.patch_size)
        self.num_patches = ( self.patchified_dim[0] * self.patchified_dim[1] * self.patchified_dim[2] )
        print(
            f"img_size: {img_size}, patch_size: {patch_size}, num_patches: {self.num_patches}"
        )
        print(f"{apply_pos_embed_to_context=}")

        # =====================================================================
        # init input and output adapters
        for _, input_adapter in input_adapters.items():
            input_adapter.init(embed_dim=embed_dim)
        self.input_adapters = nn.ModuleDict(input_adapters)

        # Channel count of the stacked-image modality (None if not present);
        # forwarded to the output adapters at init time.
        in_channels = (
            self.input_adapters["images"].in_channels
            if "images" in self.input_adapters
            else None
        )

        for _, output_adapter in output_adapters.items():
            output_adapter.init(
                enc_embed_dim=embed_dim,
                enc_depth=self.depth,
                input_tasks=list(self.input_adapters.keys()),
                in_channels=in_channels,
                global_img_size=global_img_size,
            )
        self.output_adapters = nn.ModuleDict(output_adapters)

        # =====================================================================
        # init additional global tokens ([CLS]-style, prepended to the sequence)
        if num_global_tokens > 0:
            self.global_tokens = nn.Parameter(torch.randn(num_global_tokens, self.embed_dim))

        # =====================================================================
        # init positional_embeddings (built on the global grid so embeddings can
        # be cropped/interpolated for smaller inputs at forward time)
        self.pos_embed, self.global_grid_size = build_position_embedding(
            num_patches=self.num_patches,
            embed_dim=embed_dim,
            pos_embed_type=pos_embed_type,
            patchified_dim=self.patchified_dim,
            patch_size=self.patch_size,
            global_img_size=global_img_size,
        )

        # TODO: Weight initialization procedures
        self.initialize_weights()

        # =====================================================================
        # init encoder transformer blocks
        self.encoder = create_vim_encoder()

    # =====================================================================
    # Copied from original MultiMAE repository, modified for 3D application
    # https://github.com/EPFL-VILAB/MultiMAE
    def initialize_weights(self):
        """Initialize global tokens and apply MultiMAE/MAE-style init.

        qkv/kv linear layers are re-initialized with a xavier-uniform bound
        computed per projection (Q, K, V treated separately); Conv3d patch
        projections are initialized like nn.Linear (MAE convention).
        """
        if self.num_global_tokens > 0:
            nn.init.normal_(self.global_tokens, std=0.02)

        self.apply(self._init_weights)
        for name, m in self.named_modules():
            if isinstance(m, nn.Linear):
                if "qkv" in name:
                    # treat the weights of Q, K, V separately
                    val = math.sqrt(
                        6.0 / float(m.weight.shape[0] // 3 + m.weight.shape[1])
                    )
                    nn.init.uniform_(m.weight, -val, val)
                elif "kv" in name:
                    # treat the weights of K, V separately
                    val = math.sqrt(
                        6.0 / float(m.weight.shape[0] // 2 + m.weight.shape[1])
                    )
                    nn.init.uniform_(m.weight, -val, val)
            if isinstance(m, nn.Conv3d):
                if ".proj" in name:
                    # From MAE, initialize projection like nn.Linear (instead of nn.Conv2d)
                    w = m.weight.data
                    nn.init.xavier_uniform_(w.view([w.shape[0], -1]))

    def _init_weights(self, m):
        """Default init applied to all submodules via self.apply()."""
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    # =====================================================================

    def forward(
        self,
        x,
        mask_ratio = None,
        permutations = None,
        return_as_image = False,
        use_dirichlet = None,
        dirichlet_alpha = None,
        leave_one_out = None,
        return_as_dict = False,
        only_encode = False,
        token_aggregation = None,
        use_seg_masking = None,
        deep_copy_batch = None,
    ):
        """Mask, encode and decode a multi-task batch.

        Args:
            x: dict mapping task name -> batch tensor. Assumed layout is
               (B, C, D, H, W) since spatial size is read from shape[2:] —
               TODO confirm against callers.
            mask_ratio / use_dirichlet / dirichlet_alpha / leave_one_out /
            token_aggregation / use_seg_masking / deep_copy_batch:
               per-call overrides; None falls back to the constructor value.
            permutations: optional precomputed patch permutations for mask_data.
            return_as_image: unshuffle + unpatchify outputs (and inputs) back
               to full image size for visualization.
            return_as_dict: return a dict instead of the 7-tuple.
            only_encode: skip output adapters and return encoder tokens
               (optionally aggregated via ``token_aggregation``).

        Returns:
            (selected_patches, masked_patches, perm_indices, tokens,
             encoder_tokens, task_ranges, reconstructed_patches) as a tuple
            or dict; or the encoder token list when ``only_encode``.
        """
        # UNETR requires deep copy of input batch for leave-one-out reconstruction
        deep_copy_batch = self.deep_copy_batch if deep_copy_batch is None else deep_copy_batch
        if deep_copy_batch:
            x = copy.deepcopy(x)

        # selected_patches, masked_patches, selected_pos_emb and masked_pos_emb are shuffled!
        selected_patches = {}
        masked_patches = {}
        reconstructed_patches = {}
        selected_pos_embed = {}
        masked_pos_embed = {}
        perm_indices = {}
        tokens = {}

        input_img_size = None
        input_patchified_dim = None
        # Tokenize input for each task

        # In eval mode with eval_masking disabled, default to no masking
        # (explicit per-call arguments still win).
        if not (self.training) and not (self.eval_masking):
            use_dirichlet = False if use_dirichlet is None else use_dirichlet
            leave_one_out = False if leave_one_out is None else leave_one_out
            mask_ratio = 0.0 if mask_ratio is None else mask_ratio

        # Resolve remaining per-call overrides against constructor defaults.
        use_seg_masking = (
            self.use_seg_masking if use_seg_masking is None else use_seg_masking
        )
        use_dirichlet = self.use_dirichlet if use_dirichlet is None else use_dirichlet
        dirichlet_alpha = (
                self.dirichlet_alpha if dirichlet_alpha is None else dirichlet_alpha
            )
        leave_one_out = self.leave_one_out if leave_one_out is None else leave_one_out
        token_aggregation = (
            self.token_aggregation if token_aggregation is None else token_aggregation
        )
        # Copy dict-valued mask ratios so mask_data cannot mutate the attribute.
        if isinstance(self.mask_ratio, dict):
            mask_ratio = self.mask_ratio.copy() if mask_ratio is None else mask_ratio
        else:
            mask_ratio = self.mask_ratio if mask_ratio is None else mask_ratio

        # Precedence: seg masking > Dirichlet > uniform.
        if use_seg_masking:
            mask_mode = "seg"
        elif use_dirichlet:
            mask_mode = "dirichlet"
        else:
            mask_mode = "uniform"
        input_tasks = [task for task in self.input_adapters]
        # Shuffle patches and split into selected (visible) vs masked per task.
        selected_patches, masked_patches, perm_indices, mask_ratio, leave_one_out_task = mask_data(
            batch=x,
            tasks=input_tasks,
            mask_ratio=mask_ratio,
            mode=mask_mode,
            patch_size=self.patch_size,
            leave_one_out=leave_one_out,
            dirichlet_alpha=dirichlet_alpha,
            permutations=permutations,
        )
        # print(f"{mask_ratio=}")

        for task in self.input_adapters:
            selected_shuffled_patches = selected_patches[task]
            num_selected_patches = selected_shuffled_patches.shape[1]
            # masked_shuffled_patches = masked_patches[task]

            # Replace fully-masked tasks with a constant fill value (and also
            # overwrite the raw batch in place so decoders see the fill too).
            if self.full_masked_val is not None and isinstance(mask_ratio, dict):
                image_tasks = ['t1', 't1ce', 't2', 'flair']
                for mask_task, val in mask_ratio.items():
                    if task == mask_task and val == 1.0:
                        selected_shuffled_patches = torch.full_like(selected_shuffled_patches, self.full_masked_val)
                        x[task] = torch.full_like(x[task], self.full_masked_val)
                    # For the stacked "images" task, fill only the channel that
                    # corresponds to the fully-masked sequence.
                    if task == 'images' and val == 1.0 and mask_task in image_tasks:
                        channel_idx = image_tasks.index(mask_task)
                        selected_shuffled_patches[:, :, channel_idx] = self.full_masked_val
                        x['images'][:, channel_idx] = torch.full_like(x['images'][:, channel_idx], self.full_masked_val)

            perm_idx = perm_indices[task]
            batch_size = selected_shuffled_patches.shape[0]

            # Tokenize selected patches
            if self.input_adapters[task] is None:
                continue

            # seg labels may be integer-typed; adapters expect float input
            if task == "seg":
                selected_shuffled_patches = selected_shuffled_patches.float()
            tokens[task] = self.input_adapters[task](selected_shuffled_patches)

            # check if image has same shape as default. If not interpolate positional embeddings
            input_img_size = x[task].shape[2:]
            input_patchified_dim = calc_patchified_dim(input_img_size, self.patch_size)

            batch_pos_embed, patchified_dim = get_batch_pos_embed(
                batch=x,
                pos_embed=self.pos_embed,
                pos_embed_type=self.pos_embed_type,
                input_patchified_dim=input_patchified_dim,
                global_grid_size=self.global_grid_size,
                patch_size=self.patch_size,
                batch_size=batch_size,
            )

            # Reorder the positional embeddings with the same per-sample
            # permutation used to shuffle the patches, then keep the part
            # corresponding to the selected (visible) patches.
            batch_pos_embed = batch_pos_embed[
                torch.arange(batch_size)[:, None], perm_idx, ...
            ]
            selected_pos_embed[task] = batch_pos_embed[:, :num_selected_patches]

            tokens[task] = tokens[task] + selected_pos_embed[task]

        # =====================================================================
        # Concatenate tokens for each task
        task_order = list(tokens.keys())
        task_ranges = OrderedDict()  # store the range of tokens for each task

        # Calculate the range of tokens for each task
        # (offsets start after the global tokens)
        lower_bound = self.num_global_tokens
        for task in task_order:
            upper_bound = lower_bound + tokens[task].shape[1]
            task_ranges[task] = (lower_bound, upper_bound)
            lower_bound = upper_bound

        # Stack input tokens
        input_tokens = torch.cat([tokens[task] for task in task_order], dim=1)

        # Add [CLS] token(s)
        if self.num_global_tokens > 0:
            # Repeat global tokens for each batch element
            batch_global_tokens = self.global_tokens.repeat(input_tokens.shape[0], 1, 1)
            input_tokens = torch.cat([batch_global_tokens, input_tokens], dim=1)

        # =====================================================================
        # Pass through transformer
        # encoder_tokens is a list of per-layer token tensors (one entry per
        # encoder stage); with no encoder it degenerates to the raw inputs.
        encoder_tokens = []
        x_hat = input_tokens
        if self.encoder is None:
            encoder_tokens = [input_tokens]
        else:
            # previous per-block loop, kept for reference:
            # for encoder_block in self.encoder:
            #     x_hat = encoder_block(x_hat)
            #     encoder_tokens.append(x_hat)
            encoder_tokens = [*self.encoder(x_hat)]

        if only_encode:
            if token_aggregation is None:
                return encoder_tokens
            if token_aggregation == "cls":
                return [enc[:, : self.num_global_tokens, ...] for enc in encoder_tokens]
            if token_aggregation == "mean":
                return [enc.mean(dim=1) for enc in encoder_tokens]

        # =====================================================================
        # Pass through output adapters
        for task, output_adapter in self.output_adapters.items():
            # # num_masked_tokens = masked_patches[task].shape[1]
            # Adapters that need full cross-task context get the complete
            # range/permutation dicts instead of this task's entries only.
            if self.apply_pos_embed_to_context:
                perm_index = perm_indices
                task_range = task_ranges
            else:
                task_range = task_ranges[task] if task in task_ranges else None
                perm_index = perm_indices[task] if task in perm_indices else None
            if hasattr(output_adapter, 'unetr_use_input'):
                perm_index = perm_indices if perm_index is None else perm_index
                task_range = task_ranges if task_range is None else task_range

            output_patches = output_adapter(
                encoder_tokens,
                task_range,
                perm_index,
                patchified_dim,
                batch=x,
                adapter_task=task,
            )
            num_selected_patches = (
                selected_patches[task].shape[1] if task in selected_patches else 0
            )
            num_masked_patches = output_patches.shape[1] - num_selected_patches
            if task in masked_patches:
                masked_patches[task] = masked_patches[task][
                    :, :num_masked_patches
                ]  # remove the patches ignored by the decoder

            task_range = task_ranges[task] if task in task_ranges else None
            perm_index = perm_indices[task] if task in perm_indices else None

            if (
                return_as_image and output_adapter.visualizable
            ):  # only for testing, needs full size
                if perm_index is not None and isinstance(perm_index, torch.Tensor):
                    output_patches = unshuffle_patches(output_patches, perm_index)
                # output_patches = unpatchify(output_patches, self.img_size, self.patch_size)
                output_patches = unpatchify(
                    output_patches, input_img_size, self.patch_size
                )

            reconstructed_patches[task] = output_patches

        # if return_as_image is true, unshuffle and unpatchify the selected
        # input patches with zeros for masked patches
        if return_as_image:
            for task in selected_patches:
                # masked_patches can also be empty but has an entry in the dict
                if task in masked_patches:
                    patches = torch.cat(
                        [
                            selected_patches[task],
                            torch.zeros_like(masked_patches[task]),
                        ],
                        dim=1,
                    )
                    patches = unshuffle_patches(patches, perm_indices[task])
                    patches = unpatchify(patches, input_img_size, self.patch_size)
                    selected_patches[task] = (
                        patches  # overwrite selected patches with full size image of selected and masked (zeros)
                    )

        if return_as_dict:
            return_dict = {
                "selected_patches": selected_patches,
                "masked_patches": masked_patches,
                "perm_indices": perm_indices,
                "tokens": tokens,
                "encoder_tokens": encoder_tokens,
                "task_ranges": task_ranges,
                "reconstructed_patches": reconstructed_patches,
            }
            return return_dict
        else:
            return (
                selected_patches,
                masked_patches,
                perm_indices,
                tokens,
                encoder_tokens,
                task_ranges,
                reconstructed_patches,
            )


# =============================================================================
# Model creation function for MRI case
def create_multimae(
    cfg,
    img_size,
    patch_size,
    embed_dim,
    mask_ratio,
    num_global_tokens = 1,
    pos_embed_type = "sincos",
    depth = 12,
    num_heads = 12,
    use_seg_masking = False,
    use_dirichlet = False,
    dirichlet_alpha = 1.0,
    leave_one_out = False,
    global_img_size = [240, 240, 160],
    apply_pos_embed_to_context = False,
    **kwargs,  # accepted for config-driven call sites; intentionally unused
):
    """Build a MultiMAE3D model from a config for the MRI use case.

    Instantiates the input/output adapters from ``cfg.model.model_params``
    and wires them into a MultiMAE3D instance. The encoder positional
    embedding type may be overridden via the
    ``encoder_pos_embed_type`` key of the model params; otherwise
    ``pos_embed_type`` is used.

    Returns:
        A fully constructed ``MultiMAE3D`` module.
    """
    encoder_pos_embed_type = cfg.model.model_params.get(
        "encoder_pos_embed_type", pos_embed_type
    )

    # prepare input and output adapters from the config
    print("\nInstantiate input adapters")
    input_adapters = instantiate_input_adapters(cfg, **cfg.model.model_params)
    print("\nInstantiate output adapters")
    output_adapters = instantiate_output_adapters(cfg, **cfg.model.model_params)

    print("\n Instantiate MultiMAE3D ")
    return MultiMAE3D(
        input_adapters=input_adapters,
        output_adapters=output_adapters,
        img_size=img_size,
        patch_size=patch_size,
        embed_dim=embed_dim,
        mask_ratio=mask_ratio,
        depth=depth,
        num_heads=num_heads,
        num_global_tokens=num_global_tokens,
        pos_embed_type=encoder_pos_embed_type,
        use_seg_masking=use_seg_masking,
        use_dirichlet=use_dirichlet,
        dirichlet_alpha=dirichlet_alpha,
        leave_one_out=leave_one_out,
        global_img_size=global_img_size,
        apply_pos_embed_to_context=apply_pos_embed_to_context,
    )


def instantiate_input_adapters(
    cfg,
    img_size,
    patch_size,
    in_channels = 1,
    input_tasks = None,
    embed_dim = 768,
    input_has_learnable_embed = False,
    **kwargs,  # fixed typo: was **kwawrgs; absorbs unused model_params keys
):
    """Create one ``PatchedInputAdapter`` per input task.

    Args:
        cfg: experiment config; ``cfg.datasets.sequences_to_use`` supplies the
            task list when ``input_tasks`` is None.
        img_size / patch_size / embed_dim: adapter geometry parameters.
        in_channels: channels per task; the "seg" task instead uses
            ``cfg.model.model_params.num_seg_classes`` when training is
            multilabel.
        input_has_learnable_embed: forwarded to every adapter.

    Returns:
        OrderedDict mapping task name -> PatchedInputAdapter, preserving
        the task order (required by MultiMAE3D).
    """
    input_tasks = ( input_tasks if input_tasks is not None else cfg.datasets.sequences_to_use )

    training_type = get_training_type(cfg)

    input_adapters = []
    for task in input_tasks:
        # "seg" inputs are one channel per class in multilabel setups.
        task_in_channels = (
            cfg.model.model_params.num_seg_classes
            if (task == "seg")
            and (
                ("multilabel" in training_type)
                or cfg.model.model_params.get("multilabel", False)
            )
            else in_channels
        )
        adapter = PatchedInputAdapter(
            task_in_channels,
            img_size,
            patch_size,
            embed_dim=embed_dim,
            has_learnable_embed=input_has_learnable_embed,
        )
        input_adapters.append((task, adapter))
    return OrderedDict(input_adapters)


def instantiate_output_adapters(
    cfg,
    img_size,
    patch_size,
    out_channels = 1,
    output_tasks = None,
    decoder_embed_dim = 384,
    num_seg_classes = 3,
    decoder_pos_embed_type = None,
    decoder_depth = 3,
    decoeder_num_heads = 4,  # NOTE: typo kept — configs pass this keyword name
    decoder_num_learnable_tokens = 0,
    decoder_has_learnable_embed = False,
    decoder_reconstruct_rate = 0.6,
    decoder_reconstruct_all_testing = True,
    attn_type = None,
    seg_output_adapter_type = "spatial",  # kept for interface compat (unused)
    output_adapter_type = "spatial",  # kept for interface compat (unused)
    segmenter_patch_div = 1,
    classifier = None,
    num_classes = None,
    unetr_use_input = False,
    setr_interpolation_mode = "nearest",
    setr_embed_dim = None,
    setr_version = 1,
    pos_embed_type = "sincos",  # BUGFIX: was referenced but undefined -> NameError
    **kwargs,
):
    """Create the output adapters declared by the config.

    ``output_tasks`` (or ``cfg.datasets.sequences_to_use``) is expected to be
    a mapping of task name -> {"type": <adapter class name>, "opts": {...}};
    the class is resolved from ``models.multimae3d_adapters``. Per-task opts
    take precedence; missing keys are filled from the shared decoder options.
    Optionally appends a linear classification head under the "label" task.

    Returns:
        OrderedDict mapping task name -> instantiated output adapter.

    Raises:
        AttributeError: if a declared adapter type does not exist in
            ``models.multimae3d_adapters``.
    """
    # Shared defaults for every adapter; per-task opts override these.
    adapter_opts = {
        "img_size": img_size,
        "patch_size": patch_size,
        "embed_dim": decoder_embed_dim,
        "num_heads": decoeder_num_heads,
        "depth": decoder_depth,
        "num_learnable_tokens": decoder_num_learnable_tokens,
        "has_learnable_embed": decoder_has_learnable_embed,
        "attn_type": attn_type,
        # decoder-specific pos-embed type wins; otherwise fall back to the
        # encoder's pos_embed_type (previously raised NameError here)
        "pos_embed_type": (
            decoder_pos_embed_type
            if decoder_pos_embed_type is not None
            else pos_embed_type
        ),
        "reconstruct_rate": decoder_reconstruct_rate,
        "reconstruct_all_testing": decoder_reconstruct_all_testing,
        "segmenter_patch_div": segmenter_patch_div,
        "unetr_use_input": unetr_use_input,
        "setr_interpolation_mode": setr_interpolation_mode,
        "setr_embed_dim": setr_embed_dim,
        "setr_version": setr_version,
        "num_classes": num_classes,
    }

    output_tasks = (
        output_tasks if output_tasks is not None else cfg.datasets.sequences_to_use
    )

    # Resolve each task's adapter class and per-task option overrides.
    # BUGFIX: task_opts was rebuilt ({task: opts}) on every iteration, so only
    # the last task kept its opts; accumulate per task instead. Also avoid
    # mutating the caller's output_tasks mapping in place.
    task_opts = {}
    resolved_tasks = []
    for task, value in output_tasks.items():
        task_opts[task] = value['opts']
        resolved_tasks.append((task, getattr(multimae3d_adapters, value['type'])))

    output_adapters = []
    for task, adapter_cls in resolved_tasks:
        print(f"- Initialize {task} output adapter:")
        print(f"\t- type: {adapter_cls}")
        print(f"\t- opts: {task_opts.get(task, {})}")
        task_out_channels = num_seg_classes if task == "seg" else out_channels
        task_in_channels = 1
        opts = task_opts.get(task, {})
        # Fill missing keys from the shared defaults (per-task opts win).
        for key, default_val in adapter_opts.items():
            if key not in opts:
                opts[key] = default_val
        if "in_channels" not in opts:
            opts["in_channels"] = task_in_channels
        if "out_channels" not in opts:
            opts["out_channels"] = task_out_channels

        print(f"\t- expanded opts: {opts=}")

        output_adapters.append((task, adapter_cls(**opts)))

    # Optional linear classification head on top of the encoder tokens.
    if classifier == "linear":
        print("classifier")
        assert (
            num_classes is not None
        ), "classification adapter requires number of classes specified"
        adapter = LinearOutputAdapter(
            num_classes=num_classes,
        )
        output_adapters.append(("label", adapter))
    print(f"\nOutput Adapters: {[adapter[0] for adapter in output_adapters]}")

    return OrderedDict(output_adapters)
