# --------------------------------------------------------
# References:
# https://github.com/jxhe/unify-parameter-efficient-tuning
# --------------------------------------------------------
from typing import List
import torch
import torch.nn as nn

# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
import copy
from utils.toolkit import NamespaceDict
import timm
from functools import partial
from timm.models.vision_transformer import PatchEmbed
from timm.models.registry import register_model

from models.components.backbone.vit import interpolate_positional_embeddings
from models.components.backbone.vit import VisionTransformer as _VisionTransformer
from models.components.backbone._PEFTs import _PEFT_dict


class VisionTransformer(_VisionTransformer):
    """Vision Transformer with one PEFT adapter per block, per task (EASE-style).

    ``cur_adapter`` holds the trainable adapters for the current task (one per
    transformer block).  ``adapter_list`` archives a frozen deep copy of the
    adapters of every finished task; at test time, features are extracted once
    per archived adapter set plus once with the current adapters.
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        num_classes=1000,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        representation_size=None,
        distilled=False,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        embed_layer=PatchEmbed,
        norm_layer=None,
        act_layer=None,
        weight_init="",
        global_pool=False,
        config=None,
    ):
        # Avoid the mutable-default-argument pitfall: the original default
        # ``config=NamespaceDict()`` was evaluated once and shared by every
        # instance.  Each call now gets its own fresh NamespaceDict.
        if config is None:
            config = NamespaceDict()
        super(VisionTransformer, self).__init__(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            num_classes=num_classes,
            embed_dim=embed_dim,
            depth=depth,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            representation_size=representation_size,
            distilled=distilled,
            drop_rate=drop_rate,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=drop_path_rate,
            embed_layer=embed_layer,
            norm_layer=norm_layer,
            act_layer=act_layer,
            weight_init=weight_init,
            global_pool=global_pool,
            config=config,
        )

        # ####### Adapter begins #########
        # Factory building a single adapter module for one block.
        self.peft_func = _PEFT_dict[config.peft_name]
        # Frozen adapter sets of finished tasks.  NOTE(review): a plain list,
        # so these are NOT registered as submodules and are excluded from
        # self.parameters() / state_dict — presumably intentional; confirm.
        self.adapter_list = []
        # Trainable adapters for the current task, one per block.
        self.cur_adapter = nn.ModuleList()
        self.get_new_adapter()

    # TODO(review): original author marked this forward as unfinished.
    def forward(self, x, mode="") -> torch.Tensor:
        """Dispatch on ``mode``.

        'cur'  -> features from the current task's adapters (forward_train).
        'all'  -> [CLS] embeddings from every task's adapters concatenated
                  along the embedding dim: (B, num_tasks * embed_dim).

        Raises:
            ValueError: for any other ``mode``.
        """
        if mode == "cur":
            return self.forward_train(x)
        if mode == "all":
            feats = self.forward_test(x)
            # One cat over the [CLS] tokens replaces the original empty-tensor
            # accumulator (which was float32 and could dtype-mismatch
            # half-precision features) and the loop that shadowed ``x``.
            return torch.cat([f[:, 0, :] for f in feats], dim=1)
        raise ValueError("mode should be 'cur' or 'all'")

    def forward_train(self, x) -> torch.Tensor:
        # forward_token is provided by the parent class — presumably runs the
        # backbone with the current adapters; TODO confirm against
        # models.components.backbone.vit.
        return self.forward_token(x)

    def forward_feats(self, x):
        """Run the backbone with the current task's adapters.

        Returns the full token sequence (B, 1 + num_patches, embed_dim)
        after the final norm.
        """
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)

        for idx, blk in enumerate(self.blocks):
            x = blk(x, adapt=self.cur_adapter[idx])

        return self.norm(x)

    def forward_test(self, x) -> List[torch.Tensor]:
        """Extract one normed feature map per adapter set.

        Archived task adapters come first (oldest to newest), the current
        task's adapters last.
        """
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x_init = self.pos_drop(x)

        features = []
        # Frozen adapter sets of previous tasks.
        for task_adapters in self.adapter_list:
            # clone() instead of copy.deepcopy(): deepcopy raises on non-leaf
            # tensors when autograd is tracking, and a fresh copy of the
            # activations is all that is needed here.
            h = x_init.clone()
            for j, blk in enumerate(self.blocks):
                h = blk(h, task_adapters[j])
            features.append(self.norm(h))

        # Current task's adapters.
        h = x_init.clone()
        for j, blk in enumerate(self.blocks):
            h = blk(h, self.cur_adapter[j])
        features.append(self.norm(h))

        return features

    def forward_proto(self, x, adapt_index):
        """Return [CLS] embeddings (B, embed_dim) using one adapter set.

        adapt_index == -1 selects the un-adapted pretrained backbone;
        0 <= adapt_index < len(adapter_list) selects an archived task;
        any larger index selects the current adapters.
        """
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x_init = self.pos_drop(x)

        # The init PTM's feature: run the blocks without any adapter.
        # NOTE(review): self.blocks is called directly here, so it is
        # presumably an nn.Sequential in the parent class — confirm.
        if adapt_index == -1:
            h = self.blocks(x_init.clone())
            h = self.norm(h)
            return h[:, 0, :]

        # Hoist the loop-invariant archived-vs-current decision.
        use_archived = adapt_index < len(self.adapter_list)
        h = x_init.clone()
        for j, blk in enumerate(self.blocks):
            if use_archived:
                adapt = self.adapter_list[adapt_index][j]
            else:
                adapt = self.cur_adapter[j]
            h = blk(h, adapt)
        h = self.norm(h)
        return h[:, 0, :]

    def after_task(self):
        """Finish the current task: freeze the model, archive the current
        adapters, and create a fresh trainable set for the next task."""
        self.freeze()
        self.add_adapter_to_list()

    def get_new_adapter(self):
        """(Re)create ``cur_adapter`` with one trainable adapter per block.

        No-op (with a console notice) when ``config.ffn_adapt`` is falsy.
        """
        config = self.config
        self.cur_adapter = nn.ModuleList()
        if config.ffn_adapt:
            for _ in range(len(self.blocks)):
                adapter = self.peft_func(
                    self.config,
                    embed_dim=config.embed_dim,
                    bottleneck=config.ffn_rank,
                    dropout=0.1,
                    adapter_scalar=config.ffn_adapter_scalar,
                    adapter_layernorm_option=config.ffn_adapter_layernorm_option,
                ).to(self._device)  # self._device presumably set by parent — confirm
                self.cur_adapter.append(adapter)
            self.cur_adapter.requires_grad_(True)
        else:
            print("====Not use adapter===")

    def add_adapter_to_list(self):
        """Archive a frozen deep copy of the current adapters, then start a
        fresh trainable set via get_new_adapter()."""
        self.adapter_list.append(copy.deepcopy(self.cur_adapter.requires_grad_(False)))
        self.get_new_adapter()

    def freeze(self):
        """Freeze every registered parameter except the current adapters."""
        for param in self.parameters():
            param.requires_grad = False

        self.cur_adapter.requires_grad_(True)


@register_model
def vit_base_patch16_224_ease(pretrained=False, **kwargs):
    """ViT-B/16 EASE backbone initialized from timm ImageNet-1k weights.

    Builds the adapter-enabled VisionTransformer, remaps a pretrained
    ``vit_base_patch16_224`` checkpoint onto its layer names (split fused
    qkv, strip the ``mlp.`` prefix), loads it non-strictly, and freezes
    every checkpoint-derived parameter so only the adapters train.

    Note: ``pretrained`` is accepted for timm-registry compatibility but
    ignored — the timm checkpoint is always downloaded and loaded.
    """
    model = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )

    # Donor checkpoint whose weights are remapped onto this model.
    checkpoint_model = timm.create_model(
        "vit_base_patch16_224", pretrained=True, num_classes=0
    )
    state_dict = checkpoint_model.state_dict()

    # 1) Split each fused qkv tensor into separate q/k/v projections.
    #    chunk(3, dim=0) yields exactly the original [:768] / [768:1536] /
    #    [1536:] slices while generalizing to any embed_dim.
    for key in list(state_dict.keys()):
        if "qkv.weight" in key or "qkv.bias" in key:
            q, k, v = state_dict.pop(key).chunk(3, dim=0)
            state_dict[key.replace("qkv.", "q_proj.")] = q
            state_dict[key.replace("qkv.", "k_proj.")] = k
            state_dict[key.replace("qkv.", "v_proj.")] = v

    # 2) Rename mlp.fc* -> fc* to match this model's layer names.
    for key in list(state_dict.keys()):
        if "mlp.fc" in key:
            state_dict[key.replace("mlp.", "")] = state_dict.pop(key)

    # Resize positional embeddings for the high-resolution variant.
    if model.img_size == 1024:
        state_dict["pos_embed"] = interpolate_positional_embeddings(
            state_dict["pos_embed"]
        )

    msg = model.load_state_dict(state_dict, strict=False)
    print("Missing keys: ", msg.missing_keys)
    print("Unexpected keys: ", msg.unexpected_keys)

    # Freeze everything that came from the checkpoint; keep only the
    # parameters absent from it (i.e. the adapters) trainable.
    for name, p in model.named_parameters():
        p.requires_grad = name in msg.missing_keys
    return model


@register_model
def vit_base_patch16_224_in21k_ease(pretrained=False, **kwargs):
    """ViT-B/16 EASE backbone initialized from timm ImageNet-21k weights.

    Builds the adapter-enabled VisionTransformer, remaps a pretrained
    ``vit_base_patch16_224_in21k`` checkpoint onto its layer names (split
    fused qkv, strip the ``mlp.`` prefix), loads it non-strictly, and
    freezes every checkpoint-derived parameter so only the adapters train.

    Note: ``pretrained`` is accepted for timm-registry compatibility but
    ignored — the timm checkpoint is always downloaded and loaded.
    """
    model = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )

    # Donor checkpoint whose weights are remapped onto this model.
    checkpoint_model = timm.create_model(
        "vit_base_patch16_224_in21k", pretrained=True, num_classes=0
    )
    state_dict = checkpoint_model.state_dict()

    # 1) Split each fused qkv tensor into separate q/k/v projections.
    #    chunk(3, dim=0) yields exactly the original [:768] / [768:1536] /
    #    [1536:] slices while generalizing to any embed_dim.
    for key in list(state_dict.keys()):
        if "qkv.weight" in key or "qkv.bias" in key:
            q, k, v = state_dict.pop(key).chunk(3, dim=0)
            state_dict[key.replace("qkv.", "q_proj.")] = q
            state_dict[key.replace("qkv.", "k_proj.")] = k
            state_dict[key.replace("qkv.", "v_proj.")] = v

    # 2) Rename mlp.fc* -> fc* to match this model's layer names.
    for key in list(state_dict.keys()):
        if "mlp.fc" in key:
            state_dict[key.replace("mlp.", "")] = state_dict.pop(key)

    # Resize positional embeddings for the high-resolution variant.
    if model.img_size == 1024:
        state_dict["pos_embed"] = interpolate_positional_embeddings(
            state_dict["pos_embed"]
        )

    msg = model.load_state_dict(state_dict, strict=False)
    print("Missing keys: ", msg.missing_keys)
    print("Unexpected keys: ", msg.unexpected_keys)

    # Freeze everything that came from the checkpoint; keep only the
    # parameters absent from it (i.e. the adapters) trainable.
    for name, p in model.named_parameters():
        p.requires_grad = name in msg.missing_keys
    return model
