# --------------------------------------------------------
# References:
# https://github.com/jxhe/unify-parameter-efficient-tuning
# --------------------------------------------------------
import torch
import torch.nn as nn

# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
import copy
from timm.models.registry import register_model
from utils.toolkit import NamespaceDict

from .vit_multi import VisionTransformer as _VisionTransformer
from models.components.backbone.vit import Block
from models.components.backbone.utils import initialize_vit_model


class SideNet(nn.Module):
    """Side network that refines features from a frozen pre-trained
    transformer by fusing selected intermediate layers through a stack of
    lightweight transformer ``Block``s.
    """

    def __init__(
        self,
        config: NamespaceDict,
        depth: int,
        device: torch.device,
    ):
        """
        Args:
            config: expects two attributes —
                block_cfg: kwargs forwarded verbatim to ``Block``, e.g. {
                                "dim": 768,
                                "num_heads": 4,
                                "mlp_ratio": 1.0,
                                "qkv_bias": False,
                                "drop": 0.0,
                                "attn_drop": 0.0,
                                "drop_path": 0.0,
                                }
                fuse_layer_idx: backbone layer indices to fuse, starting
                                from 0, where 0 represents the feature
                                before any block, ex. [3,6,9,10,11,12]
            depth: number of side blocks; must equal len(fuse_layer_idx).
            device: device on which the hidden-feature buffer is allocated.
        """
        super().__init__()
        self.embed_dim = config.block_cfg.dim
        self.block_cfg = config.block_cfg
        self.fuse_layer_idx = [int(i) for i in config.fuse_layer_idx]
        self.depth = depth
        self._device = device
        assert self.depth == len(self.fuse_layer_idx), "depth != len(fuse_layer_idx)"

        self.layers = self._init_layers()
        # When True, fusion at step i reads the i-th selected feature
        # (including the raw pre-block one at index 0); when False it
        # skips index 0 and reads feature i+1.
        self.keep_raw = False

    def _init_layers(self):
        """Build one side ``Block`` per fusion step."""
        return nn.ModuleList([Block(**self.block_cfg) for _ in range(self.depth)])

    def forward(self, ptm_feats):
        """Fuse backbone features layer by layer through the side blocks.

        Args:
            ptm_feats: features from transformer layers,
                [batch, 1+nb_layers, len, embed_dim]; index 0 along dim 1
                is the embedding before any block.

        Returns:
            dict with:
                "output": final side feature, [batch, len, embed_dim]
                "hidden": per-step side features, [batch, depth, len, embed_dim]
        """
        # Keep the pre-block feature (index 0) plus the selected layers.
        ptm_feats = ptm_feats[:, [0, *self.fuse_layer_idx]]
        B, _, L, D = ptm_feats.shape
        popro_feats = torch.zeros([B, self.depth, L, D], device=self._device)
        # Seed the side path with the deepest selected backbone feature.
        # clone() instead of copy.deepcopy(): deepcopy raises a
        # RuntimeError on non-leaf tensors that require grad, while
        # clone() is safe and autograd-aware.
        o_side = ptm_feats[:, -1, ...].clone()
        for ly_idx, layer in enumerate(self.layers):
            feat_idx = ly_idx if self.keep_raw else ly_idx + 1
            o_fuse = o_side + ptm_feats[:, feat_idx, ...]
            o_side = layer(o_fuse)
            popro_feats[:, ly_idx, ...] = o_side

        return {"output": o_side, "hidden": popro_feats}


class VisionTransformer(_VisionTransformer):
    """Vision Transformer with support for global average pooling.

    Runs the (frozen) backbone once to collect per-layer features, then
    feeds them to one ``SideNet`` per task.
    """

    def _extract_ptm_feats(self, x):
        """Run the backbone and stack the feature after every block.

        Args:
            x: input images, [batch, C, H, W].

        Returns:
            Detached features [batch, 1+nb_layers, len, embed_dim], where
            index 0 along dim 1 is the embedding before any block.
        """
        B = x.shape[0]

        x = self.patch_embed(x)
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)

        # Collect then cat once: avoids O(n^2) repeated concatenation and
        # the deepcopy of a (possibly non-leaf) tensor, which raises for
        # grad-requiring tensors.
        feats = [x[:, None, ...]]
        for blk in self.blocks:
            x = blk(x, adapt=None)
            feats.append(x[:, None, ...])
        # Detach so the side modules never backprop into the backbone.
        return torch.cat(feats, dim=1).detach()

    def forward_cur_feats(self, x):
        """Feature of the current task's side module.

        Returns:
            [batch, 1, len, embed_dim] output of ``self.cur_module``.
        """
        ptm_feats = self._extract_ptm_feats(x)
        o_feats = self.cur_module(ptm_feats)
        out = o_feats["output"]
        return out[:, None, :]

    def forward_all_feats(self, x):
        """Features of all past side modules (no grad) plus the current one.

        Returns:
            [batch, nb_tasks, len, embed_dim] stacked task features.
        """
        ptm_feats = self._extract_ptm_feats(x)

        # Past tasks' modules are frozen; run them without autograd.
        # ptm_feats is detached and SideNet.forward never mutates its
        # input in place, so no per-module copy is needed.
        task_feats = []
        with torch.no_grad():
            for module in self.module_list:
                tmp_feats = module(ptm_feats)
                task_feats.append(tmp_feats["output"][:, None, ...])

        o_feats = self.cur_module(ptm_feats)
        task_feats.append(o_feats["output"][:, None, ...])

        return torch.cat(task_feats, dim=1)

    def get_new_module(self):
        """Reset self.cur_module with a fresh trainable SideNet for a new task."""
        self.cur_module = SideNet(
            self.config.sidenet_cfg,
            depth=self.config.sidenet_cfg.depth,
            device=self._device,
        )
        self.cur_module.requires_grad_(True)


@register_model
def vit_base_patch16_224_SideNet(pretrained=False, pretrained_cfg=None, **kwargs):
    """Registry factory for the base/16 SideNet ViT (ImageNet-1k weights).

    ``pretrained``/``pretrained_cfg`` exist only to satisfy the timm
    registry calling convention and are ignored; ``None`` replaces the
    mutable ``{}`` default to avoid a shared default object.
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224",
        VisionTransformer,
        **kwargs,
    )


@register_model
def vit_base_patch16_224_in21k_SideNet(pretrained=False, pretrained_cfg=None, **kwargs):
    """Registry factory for the base/16 SideNet ViT (ImageNet-21k weights).

    ``pretrained``/``pretrained_cfg`` exist only to satisfy the timm
    registry calling convention and are ignored; ``None`` replaces the
    mutable ``{}`` default to avoid a shared default object.
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224_in21k",
        VisionTransformer,
        **kwargs,
    )
