# --------------------------------------------------------
# References:
# https://github.com/jxhe/unify-parameter-efficient-tuning
# --------------------------------------------------------
import torch
import torch.nn as nn

# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
import copy
import math
from timm.models.registry import register_model

from utils.toolkit import NamespaceDict
from .vit_multi_SideNet import VisionTransformer as _VisionTransformer
from models.components.backbone.vit import Attention
from models.components.backbone.utils import initialize_vit_model


class KVCacheAttention(Attention):
    """Attention whose keys and values are learned parameters, not projections.

    Unlike standard self-attention, ``k`` and ``v`` are trainable tensors of
    shape ``[1, 1, num_heads, head_dim]`` that are tiled over the batch and
    sequence dimensions; only the query is computed from the input ``x``.

    Args:
        dim: embedding dimension (must be divisible by ``num_heads``).
        num_heads: number of attention heads.
        qkv_bias: whether the query projection carries a bias term.
        attn_drop: dropout probability on the attention weights.
        proj_drop: dropout probability on the output projection.
    """

    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__(dim, num_heads, qkv_bias, attn_drop, proj_drop)
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        # Standard 1/sqrt(head_dim) attention scaling.
        self.scale = self.head_dim**-0.5

        self.q_proj = nn.Linear(dim, dim, bias=qkv_bias)
        # Learned key/value "cache": [1, 1, num_heads, head_dim], broadcast
        # (via repeat) to every batch element and sequence position in forward.
        # torch.empty is the documented way to allocate uninitialized storage
        # (the legacy torch.Tensor(*sizes) constructor is discouraged); values
        # are filled by __init_weights below.
        self.k = nn.Parameter(torch.empty(1, 1, num_heads, self.head_dim))
        self.v = nn.Parameter(torch.empty(1, 1, num_heads, self.head_dim))

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.__init_weights()

    def __init_weights(self):
        # Xavier-uniform init for the projections and the learned k/v.
        nn.init.xavier_uniform_(self.q_proj.weight)
        nn.init.xavier_uniform_(self.k)
        nn.init.xavier_uniform_(self.v)
        nn.init.xavier_uniform_(self.proj.weight)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        """Reshape [bsz, seq_len, dim] -> [bsz, num_heads, seq_len, head_dim]."""
        return (
            tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
            .transpose(1, 2)
            .contiguous()
        )

    def forward(self, x):
        """Attend the projected queries over the learned key/value parameters.

        Args:
            x: input tensor of shape [B, N, C] with C == num_heads * head_dim.

        Returns:
            Tensor of shape [B, N, C].
        """
        B, N, C = x.shape

        q = self.q_proj(x)
        # Tile the learned k/v over batch and sequence, then fold heads into
        # the batch dimension for bmm: [B * num_heads, N, head_dim].
        k = self._shape(self.k.repeat(B, N, 1, 1), N, B).view(
            B * self.num_heads, -1, self.head_dim
        )
        v = self._shape(self.v.repeat(B, N, 1, 1), N, B).view(
            B * self.num_heads, -1, self.head_dim
        )
        q = self._shape(q, N, B).view(B * self.num_heads, -1, self.head_dim)

        # attn = (q @ k.transpose(-2, -1)) * self.scale
        attn_weights = torch.bmm(q, k.transpose(1, 2)) * self.scale

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        attn_probs = self.attn_drop(attn_weights)
        attn_output = torch.bmm(attn_probs, v)

        # Unfold heads back out and merge them into the channel dimension.
        attn_output = attn_output.view(B, self.num_heads, N, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(B, N, C)

        x = self.proj(attn_output)
        x = self.proj_drop(x)

        return x


class DownSideNetModule(nn.Module):
    """Bottleneck adapter: down-project, attend in the low-rank space, up-project.

    The module computes ``x + scale * up(atten(down(LN?(x))))`` with an
    optional LayerNorm applied either before the down-projection ("in") or
    after the up-projection ("out").
    """

    def __init__(
        self,
        peft_cfg: NamespaceDict,
        atten_cfg: NamespaceDict,
        device: torch.device,
    ):
        """
        Attri:
            peft_cfg: ex. {
                            "embed_dim": 768,
                            "ffn_rank": 64,
                            "adapter_layernorm_option": "in",
                            "adapter_scalar": 0.1,
                            }
            atten_cfg: ex. {
                            "dim": 64,
                            "num_heads": 16,
                            "qkv_bias": "False",
                            "attn_drop": 0.0,
                            "proj_drop": 0.0,
                            }
        """
        super().__init__()
        self.peft_cfg = peft_cfg
        self.atten_cfg = atten_cfg
        self._device = device
        # The attention operates inside the bottleneck, so its width must
        # equal the adapter rank.
        assert (
            self.atten_cfg.dim == self.peft_cfg.ffn_rank
        ), "atten dim != peft rank"

        # --- adapter (PEFT) pieces -------------------------------------
        self.embed_dim = self.peft_cfg.embed_dim
        self.down_size = self.peft_cfg.ffn_rank
        self.adapter_layernorm_option = self.peft_cfg.adapter_layernorm_option
        scalar_cfg = self.peft_cfg.adapter_scalar

        if self.adapter_layernorm_option in ("in", "out"):
            self.adapter_layer_norm = nn.LayerNorm(self.embed_dim)
            self.adapter_layer_norm.reset_parameters()

        if scalar_cfg == "learnable_scalar":
            # Trainable scalar gate on the adapter branch.
            self.scale = nn.Parameter(torch.ones(1))
        else:
            self.scale = float(scalar_cfg)

        self.down_proj = nn.Linear(self.embed_dim, self.down_size, bias=True)
        self.up_proj = nn.Linear(self.down_size, self.embed_dim, bias=True)

        # --- bottleneck attention --------------------------------------
        self.atten = Attention(**self.atten_cfg)
        self.act_func = nn.GELU()
        # self.act_func = nn.Identity()

        with torch.no_grad():
            nn.init.kaiming_uniform_(self.down_proj.weight, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.up_proj.weight, a=math.sqrt(5))

    def forward(self, x):
        """Apply the adapter with a residual connection.

        Args:
            x: input tensor [batch, len, embed_dim]
        """
        shortcut = x

        if self.adapter_layernorm_option == "in":
            x = self.adapter_layer_norm(x)

        bottleneck = self.atten(self.down_proj(x))
        # bottleneck = self.act_func(bottleneck)
        branch = self.up_proj(bottleneck) * self.scale

        if self.adapter_layernorm_option == "out":
            branch = self.adapter_layer_norm(branch)

        return branch + shortcut


class SideNet(nn.Module):
    """Side network: a stack of DownSideNetModule adapters fed by selected
    pretrained-transformer (PTM) block features."""

    def __init__(
        self,
        config: NamespaceDict,
        depth: int,
        device: torch.device,
    ):
        """
        Attri:
            peft_cfg: ex. {
                            "embed_dim": 768,
                            "ffn_rank": 64,
                            "adapter_layernorm_option": "in",
                            "adapter_scalar": 0.1,
                            }
            atten_cfg: ex. {
                            "dim": 64,
                            "num_heads": 16,
                            "qkv_bias": "False",
                            "attn_drop": 0.0,
                            "proj_drop": 0.0,
                            }
            fuse_layer_idx: start from 0, where 0-th represents the feature before
                            the first block, ex. [3,6,9,10,11,12]
        """
        super().__init__()
        self.peft_cfg = config.peft_cfg
        self.atten_cfg = config.atten_cfg
        self._device = device

        self.fuse_layer_idx = [int(i) for i in config.fuse_layer_idx]
        self.embed_dim = self.peft_cfg.embed_dim
        self.depth = depth

        assert (
            self.atten_cfg.dim == config.peft_cfg.ffn_rank
        ), "atten dim != peft dim"
        # One side layer per fused PTM feature.
        assert self.depth == len(
            self.fuse_layer_idx
        ), "depth != len(fuse_layer_idx)"

        self.layers = self._init_layers()
        self.keep_raw = True  # if the raw feat is in the ptm_feats

    def _init_layers(self):
        """Build one DownSideNetModule per side-network layer."""
        layers = nn.ModuleList(
            [
                DownSideNetModule(self.peft_cfg, self.atten_cfg, self._device)
                for _ in range(self.depth)
            ]
        )
        return layers

    def forward(self, ptm_feats):
        """Fuse the selected PTM features through the side layers.

        Each side layer receives the running side output plus the matching
        PTM feature; the chain is seeded with raw + last PTM feature.

        Args:
            ptm_feats: features from transformer layers
                [batch, 1+nb_layers, len, embed_dim]

        Returns:
            dict with "output" (final side feature [B, L, D]) and "hidden"
            (per-layer side features [B, depth, L, D]).
        """
        # Keep the raw (index 0) feature plus the configured fuse layers.
        ptm_feats = ptm_feats[:, [0, *self.fuse_layer_idx]]
        B, _, L, D = ptm_feats.shape
        # NOTE(review): allocated on self._device — assumes ptm_feats lives on
        # the same device; verify against the caller.
        popro_feats = torch.zeros([B, self.depth, L, D], device=self._device)
        # [B, L, D] — seed: raw feature + last fused feature.
        o_ptm = ptm_feats[:, 0, ...] + ptm_feats[:, -1, ...]
        # clone(), not copy.deepcopy(): deepcopy raises for non-leaf tensors
        # (anything carrying a grad_fn), while clone keeps autograd intact.
        o_side = o_ptm.clone()
        for ly_idx, layer in enumerate(self.layers):
            # When the raw feature occupies slot 0, layer i fuses feature i+1.
            feat_idx = ly_idx + 1 if self.keep_raw else ly_idx
            o_fuse = o_side + ptm_feats[:, feat_idx, ...]
            o_side = layer(o_fuse)

            popro_feats[:, ly_idx, ...] = o_side
        # popro_feats[:, -1, ...] = popro_feats[:, -1, ...] + ptm_feats[:, -1, ...]
        return {"output": o_side, "hidden": popro_feats}


class VisionTransformer(_VisionTransformer):
    """ViT variant that spawns a fresh SideNet module per task."""

    def get_new_module(self):
        """Reset self.cur_adapter for every new task."""
        side_cfg = self.config.sidenet_cfg
        new_module = SideNet(
            side_cfg,
            depth=side_cfg.depth,
            device=self._device,
        )
        new_module.requires_grad_(True)
        self.cur_module = new_module


@register_model
def vit_base_patch16_224_downSideNet(
    pretrained=False, pretrained_cfg=None, **kwargs
):
    """timm factory: build the SideNet VisionTransformer on vit_base_patch16_224.

    ``pretrained``/``pretrained_cfg`` are accepted for timm-registry
    compatibility but intentionally ignored — weight loading is handled by
    initialize_vit_model. The default is ``None`` rather than a mutable ``{}``
    to avoid the shared-mutable-default pitfall.
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224",
        VisionTransformer,
        **kwargs,
    )


@register_model
def vit_base_patch16_224_in21k_downSideNet(
    pretrained=False, pretrained_cfg=None, **kwargs
):
    """timm factory: build the SideNet VisionTransformer on vit_base_patch16_224_in21k.

    ``pretrained``/``pretrained_cfg`` are accepted for timm-registry
    compatibility but intentionally ignored — weight loading is handled by
    initialize_vit_model. The default is ``None`` rather than a mutable ``{}``
    to avoid the shared-mutable-default pitfall.
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224_in21k",
        VisionTransformer,
        **kwargs,
    )
