# --------------------------------------------------------
# References:
# https://github.com/jxhe/unify-parameter-efficient-tuning
# --------------------------------------------------------
import torch
import torch.nn as nn

# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
from timm.models import register_model
from .vit import Block
from utils.toolkit import NamespaceDict
from .vit_multi_SideNet import VisionTransformer as _VisionTransformer
from .utils import initialize_vit_model


class TransformerSideNet(nn.Module):
    def __init__(
        self,
        config: NamespaceDict,
        depth: int,
        device: torch.device,
    ):
        """
        Side network built from ViT Transformer blocks, replacing the
        original SideNet.  The blocks can be initialized from a pretrained
        backbone via :meth:`load_pretrained_weights`.

        Args:
            config: configuration; must provide ``fuse_layer_idx`` (indices
                of backbone features to fuse) and may provide ``keep_raw``.
            depth: number of side Transformer blocks (assumed >= 1).
            device: device on which intermediate feature buffers live.
        """
        super().__init__()
        self._device = device

        self.fuse_layer_idx = [int(i) for i in config.fuse_layer_idx]
        self.depth = depth
        # Whether to keep the raw patch-embedding feature (index 0 of the
        # incoming stack: 13 features kept vs. 12 when stripped).
        # Default is False (a stale comment previously claimed True).
        self.keep_raw = config.get("keep_raw", False)
        self.norm = nn.LayerNorm(768, eps=1e-6)

        self.__init_layers()
        self.__init_weights()

    def __init_layers(self):
        # Pretrained-style Transformer blocks as the core processing
        # modules, one per side-network layer.
        self.layers = nn.ModuleList(
            [
                Block(
                    dim=768,
                    num_heads=12,
                    mlp_ratio=4.0,
                    qkv_bias=True,
                )
                for _ in range(self.depth)
            ]
        )

    def __init_weights(self):
        # Xavier initialization; typically overwritten afterwards by
        # load_pretrained_weights().
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)
            elif isinstance(module, nn.LayerNorm):
                nn.init.ones_(module.weight)
                nn.init.zeros_(module.bias)

    def forward(self, ptm_feats):
        """
        Process pretrained-model features with the side Transformer blocks.

        Args:
            ptm_feats: [batch, 1 + nb_layers, len, embed_dim]

        Returns:
            dict: {"output": [batch, len, embed_dim] normalized final
                feature, "hidden": [batch, depth, len, embed_dim]
                per-layer block outputs}
        """
        # Drop the raw patch-embedding feature (index 0) unless requested.
        if not self.keep_raw:
            ptm_feats = ptm_feats[:, 1:]

        # Explicit validation instead of `assert` (asserts vanish under -O).
        if ptm_feats.shape[1] <= self.depth:
            raise ValueError(
                f"Need more than depth={self.depth} feature layers, "
                f"got {ptm_feats.shape[1]}"
            )

        B, _, L, D = ptm_feats.shape
        popro_feats = torch.zeros([B, self.depth, L, D], device=self._device)

        # depth >= 1 is assumed, so the loop always runs and binds o_side
        # (the previously FIXME'd pre-loop initialization was removed).
        for ly_idx, layer in enumerate(self.layers):
            o_side = ptm_feats[:, self.fuse_layer_idx[ly_idx], ...]
            o_side = layer(o_side)
            popro_feats[:, ly_idx, ...] = o_side
            # Fuse in the next backbone feature; explicit bounds check
            # replaces the former try/except IndexError.
            # NOTE(review): the fused value is overwritten at the top of
            # the next iteration, so it only influences the final output
            # after the last layer — confirm this is intended.
            if ly_idx + 1 < len(self.fuse_layer_idx):
                next_idx = self.fuse_layer_idx[ly_idx + 1]
            else:
                next_idx = self.fuse_layer_idx[-1] + 1
            o_side = o_side + ptm_feats[:, next_idx, ...]

        o_side = self.norm(o_side)

        return {"output": o_side, "hidden": popro_feats}

    def freeze_norm(self):
        """Freeze the weights of every LayerNorm in the side network."""
        for layer in self.layers:
            if hasattr(layer, "norm1"):
                layer.norm1.weight.requires_grad = False
                layer.norm1.bias.requires_grad = False
            if hasattr(layer, "norm2"):
                layer.norm2.weight.requires_grad = False
                layer.norm2.bias.requires_grad = False

        self.norm.weight.requires_grad = False
        self.norm.bias.requires_grad = False

    def load_pretrained_weights(self, pretrained_blocks, selected_layer_indices=None):
        """
        Load weights from pretrained Transformer blocks.

        Args:
            pretrained_blocks: pretrained transformer blocks
                (nn.Sequential or nn.ModuleList).
            selected_layer_indices: which pretrained layers initialize each
                side layer; if None, the last ``depth`` pretrained layers
                are selected in order.
        """
        if selected_layer_indices is None:
            # Default: take the last `depth` pretrained layers.
            total_pretrained_layers = len(pretrained_blocks)
            start_idx = max(0, total_pretrained_layers - self.depth)
            selected_layer_indices = list(range(start_idx, total_pretrained_layers))

        print(f"Loading pretrained weights from layers {selected_layer_indices}")

        for i, layer_idx in enumerate(selected_layer_indices[: self.depth]):
            # BUGFIX: was `i <= len(self.layers)`, which allows
            # `self.layers[len(self.layers)]` — one past the end.
            if i < len(self.layers) and layer_idx < len(pretrained_blocks):
                source_block = pretrained_blocks[layer_idx]
                target_block = self.layers[i]

                # Copy attention weights.
                self._copy_attention_weights(source_block.attn, target_block.attn)

                # Copy MLP weights according to the blocks' actual layout:
                # source blocks expose fc1/fc2 directly, while target blocks
                # may wrap them in an `mlp` submodule.
                if hasattr(target_block, "mlp"):
                    self._copy_mlp_weights_from_block_to_module(
                        source_block, target_block.mlp
                    )
                else:
                    self._copy_mlp_weights_from_block_to_block(
                        source_block, target_block
                    )

                # Copy LayerNorm weights.
                self._copy_layernorm_weights(source_block.norm1, target_block.norm1)
                self._copy_layernorm_weights(source_block.norm2, target_block.norm2)

                print(f"  Loaded weights: layer {i} <- pretrained layer {layer_idx}")

    def _copy_attention_weights(self, source_attn, target_attn):
        """Copy attention-module weights (separate q/k/v projections)."""
        # The source attention uses split q_proj/k_proj/v_proj weights,
        # matching the target — copy them directly.
        target_attn.q_proj.weight.data.copy_(source_attn.q_proj.weight.data)
        target_attn.k_proj.weight.data.copy_(source_attn.k_proj.weight.data)
        target_attn.v_proj.weight.data.copy_(source_attn.v_proj.weight.data)

        # Copy biases only when present on both source and target.
        if hasattr(source_attn.q_proj, "bias") and source_attn.q_proj.bias is not None:
            if (
                hasattr(target_attn.q_proj, "bias")
                and target_attn.q_proj.bias is not None
            ):
                target_attn.q_proj.bias.data.copy_(source_attn.q_proj.bias.data)
                target_attn.k_proj.bias.data.copy_(source_attn.k_proj.bias.data)
                target_attn.v_proj.bias.data.copy_(source_attn.v_proj.bias.data)

        # Copy the output-projection weights.
        target_attn.proj.weight.data.copy_(source_attn.proj.weight.data)
        if hasattr(source_attn.proj, "bias") and source_attn.proj.bias is not None:
            if hasattr(target_attn.proj, "bias") and target_attn.proj.bias is not None:
                target_attn.proj.bias.data.copy_(source_attn.proj.bias.data)

    def _copy_mlp_weights_from_block_to_module(self, source_block, target_mlp):
        """Copy fc1/fc2 weights from a block to an mlp-like module."""
        # fc1 (source block's fc1 -> target's fc1).
        if hasattr(source_block, "fc1") and hasattr(target_mlp, "fc1"):
            target_mlp.fc1.weight.data.copy_(source_block.fc1.weight.data)
            if hasattr(source_block.fc1, "bias") and source_block.fc1.bias is not None:
                target_mlp.fc1.bias.data.copy_(source_block.fc1.bias.data)

        # fc2 (source block's fc2 -> target's fc2).
        if hasattr(source_block, "fc2") and hasattr(target_mlp, "fc2"):
            target_mlp.fc2.weight.data.copy_(source_block.fc2.weight.data)
            if hasattr(source_block.fc2, "bias") and source_block.fc2.bias is not None:
                target_mlp.fc2.bias.data.copy_(source_block.fc2.bias.data)

    def _copy_mlp_weights_from_block_to_block(self, source_block, target_block):
        """Copy fc1/fc2 weights from one block directly to another."""
        # Identical logic to the module variant (both targets expose
        # fc1/fc2 attributes), so delegate instead of duplicating it.
        self._copy_mlp_weights_from_block_to_module(source_block, target_block)

    def _copy_layernorm_weights(self, source_norm, target_norm):
        """Copy LayerNorm weight and bias."""
        target_norm.weight.data.copy_(source_norm.weight.data)
        target_norm.bias.data.copy_(source_norm.bias.data)

class VisionTransformer(_VisionTransformer):
    def before_task(self):
        """Prepare the side network for a new task: seed it with the
        backbone's pretrained block weights and freeze its LayerNorms."""
        super().before_task()

        print("Initializing TransformerSideNet with pretrained weights...")

        side = self.cur_module
        # The backbone layers selected by fuse_layer_idx initialize the
        # corresponding side-network blocks.
        side.load_pretrained_weights(
            self.blocks,
            selected_layer_indices=side.fuse_layer_idx,
        )
        # The side network's final norm mirrors the backbone's norm.
        side.norm.weight.data.copy_(self.norm.weight.data)
        side.norm.bias.data.copy_(self.norm.bias.data)

        print("Successfully initialized TransformerSideNet with pretrained weights!")

        side.requires_grad_(True)
        side.freeze_norm()

    def get_new_module(self):
        """Create a fresh TransformerSideNet adapter for each new task."""
        side_cfg = self.config.sidenet_cfg
        self.cur_module = TransformerSideNet(
            side_cfg,
            depth=side_cfg.depth,
            device=self._device,
        )
        self.cur_module.requires_grad_(True)
        self.cur_module.freeze_norm()


"""
Only last block

"""


@register_model
def vit_base_patch16_224_sTrans(pretrained=False, pretrained_cfg=None, **kwargs):
    """timm entry point: ViT-B/16 (224, in1k) with TransformerSideNet.

    ``pretrained`` and ``pretrained_cfg`` are accepted for timm's factory
    interface but intentionally ignored; ``initialize_vit_model`` handles
    weight loading itself.  ``pretrained_cfg`` now defaults to ``None``
    instead of a mutable ``{}`` default.
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224",
        VisionTransformer,
        **kwargs,
    )


@register_model
def vit_base_patch16_224_in21k_sTrans(pretrained=False, pretrained_cfg=None, **kwargs):
    """timm entry point: ViT-B/16 (224, in21k) with TransformerSideNet.

    ``pretrained`` and ``pretrained_cfg`` are accepted for timm's factory
    interface but intentionally ignored; ``initialize_vit_model`` handles
    weight loading itself.  ``pretrained_cfg`` now defaults to ``None``
    instead of a mutable ``{}`` default.
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224.augreg_in21k",
        VisionTransformer,
        **kwargs,
    )
