import logging

logger = logging.getLogger(__name__)

from typing import Union, List, Tuple, Dict, Optional
import copy

import torch
import torch.nn as nn
from timm.layers import trunc_normal_
from timm.layers.helpers import to_3tuple
from timm.models.vision_transformer import Block
from einops import rearrange
from collections import OrderedDict
from functools import partial
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from omegaconf.listconfig import ListConfig
import math
from models.multimae3d_utils import (
    calc_patchified_dim,
    patchify,
    unpatchify,
    shuffle_patches,
    unshuffle_patches,
    generate_dirichlet_masked,
    sample_random_key,
    build_position_embedding,
    get_batch_pos_embed,
    mask_data,
)
from util.setup_functions import resolve_listconfig_of_dicts
from models.multimae3d_adapters import (
    PatchedInputAdapter,
    SpatialOutputAdapter,
    SegmenterOutputAdapter,
    SETROutputAdapter,
    UNETROutputAdapter,
    ConvSpatialOutputAdapter,
    PatchConvSpatialOutputAdapter,
    LinearOutputAdapter,
)
from models import multimae3d_adapters
from util.setup_functions import get_training_type



class TransorformerEncoder(torch.nn.Module):
    """Plain ViT-style transformer encoder stack.

    Stacks ``depth`` timm ``Block`` modules and, unlike a vanilla ViT,
    returns the token sequence after *every* block (not just the last),
    so downstream decoders can tap intermediate features. With
    ``depth == 0`` no blocks are built and ``forward`` returns the input
    tokens unchanged.

    NOTE(review): the class name contains a typo ("Transorformer") but is
    kept as-is for backward compatibility with existing callers and
    checkpoints.
    """

    def __init__(
        self,
        embed_dim: int = 768,
        num_heads: int = 12,
        depth: int = 12,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,  # as MultiMAE
        drop_path_rate: float = 0.0,
        attn_drop_rate: float = 0.0,
        norm_layer: nn.Module = partial(nn.LayerNorm, eps=1e-6),
        *args,
        **kwargs,
    ) -> None:
        """Build the encoder blocks and initialize their weights.

        Args:
            embed_dim: token embedding dimension of every block.
            num_heads: number of attention heads per block.
            depth: number of transformer blocks; 0 yields an identity encoder.
            mlp_ratio: MLP hidden dim as a multiple of ``embed_dim``.
            qkv_bias: add bias to the QKV projections (as MultiMAE).
            drop_path_rate: max stochastic-depth rate (linearly scaled per block).
            attn_drop_rate: attention dropout probability.
            norm_layer: normalization layer factory (as MultiMAE).
        """
        super().__init__(*args, **kwargs)

        # =====================================================================
        # init encoder transformer blocks
        # Stochastic depth: drop-path rate grows linearly from 0 to
        # drop_path_rate across the blocks (standard MAE/ViT schedule).
        dpr = torch.linspace(0, drop_path_rate, depth).tolist()
        self.encoder = (
            nn.Sequential(
                *[
                    Block(
                        dim=embed_dim,
                        num_heads=num_heads,
                        mlp_ratio=mlp_ratio,
                        qkv_bias=qkv_bias,
                        attn_drop=attn_drop_rate,
                        drop_path=dpr[i],
                        act_layer=nn.GELU,  # as MultiMAE
                        norm_layer=norm_layer,  # as MultiMAE
                    )
                    for i in range(depth)
                ]
            )
            if depth > 0
            else None
        )

        # TODO: Weight initialization procedures
        self.initialize_weights()

    # =====================================================================
    # Copied from original MultiMAE repository, modified for 3D application
    # https://github.com/EPFL-VILAB/MultiMAE
    def initialize_weights(self):
        """Apply MultiMAE-style weight initialization to all submodules.

        First runs the generic ``_init_weights`` over every module, then
        applies special-cased init to attention projections (treating the
        fused Q/K/V weight as separate matrices when sizing the uniform
        range) and to Conv3d patch projections (xavier on the flattened
        weight, like nn.Linear, following MAE).
        """
        self.apply(self._init_weights)
        for name, m in self.named_modules():
            if isinstance(m, nn.Linear):
                if "qkv" in name:
                    # treat the weights of Q, K, V separately
                    val = math.sqrt(
                        6.0 / float(m.weight.shape[0] // 3 + m.weight.shape[1])
                    )
                    nn.init.uniform_(m.weight, -val, val)
                elif "kv" in name:
                    # treat the weights of K, V separately
                    val = math.sqrt(
                        6.0 / float(m.weight.shape[0] // 2 + m.weight.shape[1])
                    )
                    nn.init.uniform_(m.weight, -val, val)
            if isinstance(m, nn.Conv3d):
                if ".proj" in name:
                    # From MAE, initialize projection like nn.Linear (instead of nn.Conv2d)
                    w = m.weight.data
                    nn.init.xavier_uniform_(w.view([w.shape[0], -1]))

    def _init_weights(self, m):
        """Default init: xavier-uniform Linear weights, zero biases, unit LayerNorm."""
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            # The original code re-checked isinstance(m, nn.Linear) here,
            # which is already guaranteed by the enclosing branch.
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    # =====================================================================

    def forward(self, input_tokens):
        """Run tokens through every encoder block.

        Args:
            input_tokens: token tensor — presumably (batch, num_tokens,
                embed_dim); TODO confirm against callers.

        Returns:
            List of tensors, one per encoder block (output of each block in
            order). When ``depth == 0`` the list contains only
            ``input_tokens`` unchanged.
        """
        # =====================================================================
        # Pass through transformer
        encoder_tokens = []
        x_hat = input_tokens
        if self.encoder is None:
            encoder_tokens = [input_tokens]
        else:
            for encoder_block in self.encoder:
                x_hat = encoder_block(x_hat)
                encoder_tokens.append(x_hat)

        return encoder_tokens


def create_vit_encoder(**kwargs) -> "TransorformerEncoder":
    """Factory for a ViT-style transformer encoder.

    Keyword arguments are forwarded to ``TransorformerEncoder`` (e.g.
    ``embed_dim``, ``num_heads``, ``depth``). Calling with no arguments
    preserves the previous behavior of building a default-configured
    encoder.
    """
    return TransorformerEncoder(**kwargs)