# --------------------------------------------------------
# References:
# https://github.com/jxhe/unify-parameter-efficient-tuning
# --------------------------------------------------------
import torch

# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
import copy
from timm.models.registry import register_model

from .vit_multi import VisionTransformer as _VisionTransformer
from models.components.backbone.utils import initialize_vit_model


class VisionTransformer(_VisionTransformer):
    """Multi-adapter ViT that stacks per-task features along a task dimension."""

    def forward_all_feats(self, x):
        """Run one forward pass per adapter set and stack the results.

        Args:
            x: input image batch, consumed by ``self.patch_embed``.

        Returns:
            Tensor of shape [B, T, L, D], where T = 1 (adapter-free backbone)
            + len(self.module_list) (frozen old-task adapter sets)
            + 1 (current-task adapter set), in that order.
        """
        B = x.shape[0]
        x = self.patch_embed(x)

        # Prepend the CLS token and add positional embeddings.
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x_init = self.pos_drop(x)

        features = []
        # Backbone feature (no adapters), computed without gradients.
        with torch.no_grad():
            # clone() instead of copy.deepcopy(): deepcopy raises on non-leaf
            # tensors that carry a grad_fn, and a data copy is all that is
            # needed to keep x_init untouched between passes.
            x = x_init.clone()
            for blk in self.blocks:
                x = blk(x, adapt=None)
            features.append(self.norm(x))
        # Old-task features: one frozen forward pass per stored adapter set.
        with torch.no_grad():
            for adapters in self.module_list:  # type: ignore
                x = x_init.clone()
                # One adapter per transformer block, applied in order.
                for blk, adapt in zip(self.blocks, adapters):
                    x = blk(x, adapt)
                features.append(self.norm(x))
        # Current-task feature (the only pass that may carry gradients).
        x = x_init.clone()
        for blk, adapt in zip(self.blocks, self.cur_module):
            x = blk(x, adapt)
        features.append(self.norm(x))
        # [B, T, L, D]
        return torch.stack(features, dim=1)

    # Alias: callers requesting only current feats receive the full stack here.
    forward_cur_feats = forward_all_feats


@register_model
def vit_base_patch16_224_multiNetWbb(pretrained=False, pretrained_cfg=None, **kwargs):
    """timm registry entry: multi-adapter ViT-B/16 (ImageNet-1k config).

    ``pretrained`` and ``pretrained_cfg`` are accepted for timm API
    compatibility but ignored; ``initialize_vit_model`` handles setup.
    ``pretrained_cfg`` defaults to ``None`` (never a mutable ``{}`` default).
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224",
        VisionTransformer,
        **kwargs,
    )


@register_model
def vit_base_patch16_224_in21k_multiNetWbb(
    pretrained=False, pretrained_cfg=None, **kwargs
):
    """timm registry entry: multi-adapter ViT-B/16 (ImageNet-21k config).

    ``pretrained`` and ``pretrained_cfg`` are accepted for timm API
    compatibility but ignored; ``initialize_vit_model`` handles setup.
    ``pretrained_cfg`` defaults to ``None`` (never a mutable ``{}`` default).
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224_in21k",
        VisionTransformer,
        **kwargs,
    )
