# --------------------------------------------------------
# References:
# https://github.com/jxhe/unify-parameter-efficient-tuning
# --------------------------------------------------------
import torch
import torch.nn as nn

# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
from timm.models import register_model

from utils.toolkit import NamespaceDict
from .vit_multi_SideNet import VisionTransformer as _VisionTransformer
from .utils import initialize_vit_model


class SideNet(nn.Module):
    """Low-rank side network that fuses intermediate transformer features.

    Selected layer features are down-projected to ``ffn_rank`` dimensions,
    merged into the last selected layer's ("anchor") feature with a
    parameter-free cross attention, and added back onto the raw anchor
    feature as a residual.
    """

    def __init__(
        self,
        config: NamespaceDict,
        depth: int,
        device: torch.device,
    ):
        """
        Attri:
            peft_cfg: ex. {
                            "embed_dim": 768,
                            "ffn_rank": 64,
                            "adapter_layernorm_option": "in",
                            "adapter_scalar": 0.1,
                            }
            atten_cfg: ex. {
                            "dim": 64,
                            "num_heads": 16,
                            "qkv_bias": "False",
                            "attn_drop": 0.0,
                            "proj_drop": 0.0,
                            }
            fuse_layer_idx: start from 0, where 0-th represents the feature
                            before the first block, ex. [3,6,9,10,11,12]
        """
        super().__init__()
        self.peft_cfg = config.peft_cfg
        self.atten_cfg = config.atten_cfg
        self._device = device

        # Backbone feature width and the low-rank side-network width.
        self.embed_dim = self.peft_cfg.embed_dim
        self.down_size = self.peft_cfg.ffn_rank

        self.fuse_layer_idx = [int(i) for i in config.fuse_layer_idx]
        self.depth = depth

        # Sanity checks: the attention width must equal the low-rank width,
        # and one fuse index is required per side layer.
        assert (
            self.atten_cfg.dim == config.peft_cfg.ffn_rank
        ), "atten dim != peft dim"
        assert self.depth == len(
            self.fuse_layer_idx
        ), "depth != len(fuse_layer_idx)"

        self.__init_layers()
        self.__init_weights()

    def __init_layers(self):
        """Build the down-/up-projection, fusion, and gating layers."""
        # Down-projection for the anchor (last selected layer) feature.
        self.side_downsample_anchor = nn.Sequential(
            nn.Linear(self.embed_dim, self.down_size),
            nn.LayerNorm(self.down_size),
        )
        # One down-projection per fused layer feature.
        self.side_downsamples = nn.ModuleList(
            [
                nn.Sequential(
                    nn.Linear(self.embed_dim, self.down_size),
                    nn.LayerNorm(self.down_size),
                )
                for _ in range(self.depth)
            ]
        )
        # Bottleneck MLP refining the attention-merged low-rank features.
        self.side_projection = nn.Sequential(
            nn.Linear(self.down_size, self.embed_dim),
            nn.ReLU(),
            nn.Linear(self.embed_dim, self.down_size),
            nn.LayerNorm(self.down_size),
        )
        # Maps the fused low-rank feature back to the backbone width.
        self.side_final_upsample = nn.Sequential(
            nn.Linear(self.down_size, self.embed_dim)
        )
        # Produces one scalar confidence logit per fused layer.
        self.side_gate_layer = nn.Linear(self.down_size, 1)

    def __init_weights(self):
        """Initialize all side-network parameters (all kept trainable)."""
        for p in self.parameters():
            p.requires_grad_(True)
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
            else:
                # NOTE(review): this also zeroes 1-D LayerNorm weights
                # (normally initialized to ones), so each LayerNorm output —
                # and hence the whole side branch — starts at zero.
                # Presumably a deliberate zero-init of the side path; confirm.
                nn.init.zeros_(p)

    def forward(self, ptm_feats):
        """The last three layers of features correspond to the last three blocks,
        and the previous features correspond to the remaining blocks through interpolation

        Args:
            ptm_feats: features from transformer layers
                [batch, 1+nb_layers, len, embed_dim]

        Returns:
            dict with key "output": fused features, [batch, len, embed_dim].
        """
        # Keep the pre-block feature (index 0) plus the selected fuse layers.
        # [B, 1+nb_layers, L, D]
        ptm_feats = ptm_feats[:, [0, *self.fuse_layer_idx]]
        B, _, L, D = ptm_feats.shape
        # Buffer for the low-rank projections; the last slot holds the anchor.
        all_downsample_features = torch.zeros(
            [B, self.depth + 1, L, self.down_size], device=self._device
        )
        assert all_downsample_features.size(1) == self.depth + 1

        # The last selected feature goes through the dedicated anchor projection.
        all_downsample_features[:, -1, ...] = self.side_downsample_anchor(
            ptm_feats[:, -1, ...]
        )
        for ly_idx in range(self.depth):
            downsample_features = self.side_downsamples[ly_idx](
                ptm_feats[:, ly_idx, ...]
            )
            all_downsample_features[:, ly_idx, ...] = downsample_features

        confidence_set = []
        merged_features_set = []
        # select the transformer's last block's feature as anchor feature
        raw_anchor_features = ptm_feats[:, -1]
        anchor_features = all_downsample_features[:, -1]
        # Token-mean summary of the anchor, used for confidence gating.
        anchor = self.aggregate_feature(anchor_features)
        for ly_idx in range(self.depth):
            # NOTE(review): the +1 shift makes targets run over slots
            # 1..depth, skipping slot 0 (whose downsample is computed above
            # but never fused) and including the anchor slot itself as the
            # final target — confirm this offset is intended.
            ly_idx += 1
            # [B, L, D]
            target_features = all_downsample_features[:, ly_idx]
            target = self.aggregate_feature(target_features)
            merged_features, _ = self.cross_attention(
                anchor_features, target_features
            )
            merged_features_set.append(merged_features)
            # Gate logit from the element-wise product of the two summaries.
            confidence_set.append(self.side_gate_layer(anchor * target))

        # Softmax over the layer axis -> per-layer fusion weights.
        confidence_norm = torch.softmax(torch.cat(confidence_set, dim=1), dim=1)
        all_merged_features = torch.stack(merged_features_set, dim=1)
        merged_features = torch.sum(
            all_merged_features * confidence_norm[:, :, None, None], dim=1
        )
        # Residual bottleneck refinement in the low-rank space ...
        output_features = anchor_features + self.side_projection(
            merged_features
        )

        # ... then a residual up-projection onto the raw anchor feature.
        output = raw_anchor_features + self.side_final_upsample(
            output_features
        )
        return {"output": output}

    def l1norm(self, X, dim, eps=1e-7):
        """L1-normalize X along `dim`; `eps` guards against division by zero."""
        norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps
        X = torch.div(X, norm)
        return X

    def aggregate_feature(self, features, mask=None):
        """Mean-pool token features along dim 1.

        Args:
            features: [B, L, D] token features.
            mask: optional [B, L] validity mask; if given, the mean is taken
                over the masked-in tokens only.

        Returns:
            [B, D] pooled features.
        """
        if mask is None:
            agg_feature = features.mean(dim=1)
        else:
            agg_mask = mask.unsqueeze(-1).float()
            features = features * agg_mask
            agg_feature = features.sum(dim=1) / agg_mask.sum(dim=1)

        return agg_feature

    def cross_attention(
        self, query, context, mask=None, residual=True, **kwargs
    ):
        """Parameter-free cross attention from `query` tokens to `context`.

        Weights are ReLU'd dot products, L1-normalized over context tokens;
        with ``residual=True`` an identity matrix is added so each query
        token also keeps its positionally matching context token.
        NOTE(review): the identity add assumes query and context share the
        same token length — TODO confirm.

        Args:
            query: [B, Lq, D].
            context: [B, Lc, D].
            mask: optional [B, Lc] mask multiplied into the raw weights.
            residual: whether to add the identity to the normalized weights.

        Returns:
            (weighted context [B, Lq, D], attention weights [B, Lq, Lc]).
        """
        cross_weights = torch.matmul(query, context.permute(0, 2, 1))

        if mask is not None:
            cross_weights = cross_weights * mask.float().unsqueeze(1)

        # ReLU keeps only positively correlated pairs before normalizing.
        cross_weights = self.l1norm(torch.relu(cross_weights), dim=-1)

        if residual:
            cross_weights += torch.eye(cross_weights.size(-1)).to(
                cross_weights.device
            )

        wcontext = torch.matmul(cross_weights, context)

        return wcontext, cross_weights


class VisionTransformer(_VisionTransformer):
    """ViT variant whose per-task trainable module is a :class:`SideNet`."""

    def get_new_module(self):
        """Build a fresh SideNet for the new task and install it as
        ``self.cur_module`` with all parameters trainable."""
        side_cfg = self.config.sidenet_cfg
        side_net = SideNet(
            side_cfg,
            depth=side_cfg.depth,
            device=self._device,
        )
        side_net.requires_grad_(True)
        self.cur_module = side_net


@register_model
def vit_base_patch16_224_UniPT(pretrained=False, pretrained_cfg=None, **kwargs):
    """timm factory: ViT-B/16 (ImageNet-1k) backbone with a UniPT SideNet.

    Args:
        pretrained: ignored; weight loading is handled inside
            ``initialize_vit_model``. Kept for timm factory compatibility.
        pretrained_cfg: ignored; kept for timm factory compatibility.
            Defaults to ``None`` instead of the mutable ``{}``.
        **kwargs: forwarded to ``initialize_vit_model``.

    Returns:
        An initialized :class:`VisionTransformer`.
    """
    # Both arguments are required by the timm factory protocol but unused
    # here; discard them explicitly to make that intent visible.
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224",
        VisionTransformer,
        **kwargs,
    )


@register_model
def vit_base_patch16_224_in21k_UniPT(
    pretrained=False, pretrained_cfg=None, **kwargs
):
    """timm factory: ViT-B/16 (ImageNet-21k) backbone with a UniPT SideNet.

    Args:
        pretrained: ignored; weight loading is handled inside
            ``initialize_vit_model``. Kept for timm factory compatibility.
        pretrained_cfg: ignored; kept for timm factory compatibility.
            Defaults to ``None`` instead of the mutable ``{}``.
        **kwargs: forwarded to ``initialize_vit_model``.

    Returns:
        An initialized :class:`VisionTransformer`.
    """
    # Both arguments are required by the timm factory protocol but unused
    # here; discard them explicitly to make that intent visible.
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224.augreg_in21k",
        VisionTransformer,
        **kwargs,
    )
