# --------------------------------------------------------
# References:
# https://github.com/jxhe/unify-parameter-efficient-tuning
# --------------------------------------------------------
import torch
import torch.nn as nn

# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
import copy
from timm.models import register_model
from utils.toolkit import NamespaceDict
from typing import Callable

from .vit_multi import VisionTransformer as _VisionTransformer
from .utils import initialize_vit_model


class MicroNC(nn.Module):
    """Micro side-network that refines stacked backbone layer features.

    Holds one pair of PEFT modules per transformer layer (a token-dim
    variant and an embed-dim variant) and runs the recurrence
    ``ao_(i + 1) = PEFT(lo_i + ao_i) + ao_i`` over the last ``depth``
    layers of the backbone features.
    """

    def __init__(
        self,
        config: NamespaceDict,
        peft_func: Callable,
        depth: int = 12,
        transpose: bool = True,
    ):
        super().__init__()
        self.config = config
        self.peft_func = peft_func
        self._device = config["_device"]

        self.embed_dim = config.embed_dim
        self.transpose = transpose
        self.depth = depth

        def _peft_kwargs(dim):
            # The two PEFT variants share every hyper-parameter except the
            # dimension they operate on.
            return {
                "config": config,
                "embed_dim": dim,
                "bottleneck": config["ffn_rank"],
                "dropout": 0.2,
                "adapter_scalar": config["ffn_adapter_scalar"],
                "adapter_layernorm_option": config["ffn_adapter_layernorm_option"],
            }

        # 197 — presumably the token count (CLS + 196 patches) for the
        # transposed (token-mixing) branch; TODO confirm against patch size.
        self.trans_kwargs = _peft_kwargs(197)
        self.ad_kwargs = _peft_kwargs(self.embed_dim)

        self.layers = self._init_layers()
        self.norm = nn.LayerNorm(self.embed_dim) if config.mnc_norm else nn.Identity()
        if isinstance(self.norm, nn.LayerNorm):
            self.norm.reset_parameters()

    def _init_layers(self):
        """Build one (token-mixing, channel-mixing) PEFT pair per layer."""
        pairs = []
        for _ in range(self.depth):
            pairs.append(
                nn.ModuleList(
                    [
                        self.peft_func(**self.trans_kwargs),
                        self.peft_func(**self.ad_kwargs),
                    ]
                )
            )
        return nn.ModuleList(pairs)

    def forward(self, ptm_feats):
        """Run the side-network recurrence over backbone features.

        Args:
            ptm_feats: features from transformer layers,
                shape [batch, nb_layers, len, embed_dim].

        Returns:
            dict with "output" (final side-stream tensor, normalized) and
            "hidden" (per-layer side-stream outputs, same shape as input).
        """
        refined = torch.zeros_like(ptm_feats)
        ptm_feats = ptm_feats[:, -self.depth:]
        # Seed the side stream with the final backbone layer's output.
        side = copy.deepcopy(ptm_feats[:, -1, ...])
        for idx, pair in enumerate(self.layers):
            merged = side + ptm_feats[:, idx, ...]
            # The token-mixing (transposed attention) branch pair[0] is
            # currently disabled; only the FFN adapter pair[1] is applied.
            out = pair[1](merged, add_residual=True, residual=merged)  # type: ignore
            side = out
            refined[:, idx, ...] = out

        side = self.norm(side)
        return {"output": side, "hidden": refined}


class VisionTransformer(_VisionTransformer):
    """Vision Transformer with support for global average pooling."""

    def _extract_ptm_feats(self, x):
        """Run the backbone and stack the per-block outputs.

        Shared by forward_cur_feats / forward_all_feats (previously the
        same ~10 lines were duplicated in both).

        Args:
            x: input image batch, shape [B, C, H, W].

        Returns:
            Detached tensor of per-layer features,
            shape [batch, nb_blocks, len, embed_dim].
        """
        B = x.shape[0]
        x = self.patch_embed(x)
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x_init = self.pos_drop(x)

        x = copy.deepcopy(x_init)
        # Collect block outputs in a list and concatenate once; the old
        # incremental torch.cat per block copied the buffer every step.
        feats = []
        for block in self.blocks:
            x = block(x, adapt=None)
            feats.append(x[:, None, ...])
        # Detach: the backbone is used as a frozen feature extractor here.
        return torch.cat(feats, dim=1).clone().detach()

    def forward_cur_feats(self, x):
        """Return the current task module's output feature, shape [B, 1, D]."""
        ptm_feats = self._extract_ptm_feats(x)
        o_feats = self.cur_module(ptm_feats)
        out = o_feats["output"]
        return out[:, None, :]

    def forward_all_feats(self, x):
        """Return stacked task features from all past modules plus the current one.

        Shape: [batch, nb_tasks, len, embed_dim] (past modules under no_grad).
        """
        ptm_feats = self._extract_ptm_feats(x)

        task_feats = []
        # Past task modules are frozen; no gradients needed for them.
        with torch.no_grad():
            for module in self.module_list:
                hidden_feats = copy.deepcopy(ptm_feats)
                tmp_feats = module(hidden_feats)
                task_feats.append(tmp_feats["output"][:, None, ...])

        hidden_feats = copy.deepcopy(ptm_feats)
        o_feats = self.cur_module(hidden_feats)
        task_feats.append(o_feats["output"][:, None, ...])

        return torch.cat(task_feats, dim=1)

    def get_new_module(self):
        """Reset self.cur_module to a fresh trainable MicroNC for a new task."""
        tuning_config = self.config
        mnc = MicroNC(
            tuning_config,
            self.peft_func,
            depth=12,  # NOTE(review): hard-coded; a depth=int(self.depth) variant was commented out
            transpose=True,
        )
        self.cur_module = mnc
        self.cur_module.requires_grad_(True)


@register_model
def vit_base_patch16_224_mnc(pretrained=False, pretrained_cfg=None, **kwargs):
    """timm factory: ViT-B/16 (ImageNet-1k) backbone wrapped for MNC modules.

    `pretrained` / `pretrained_cfg` exist only for timm registry API
    compatibility and are ignored; weight loading is handled inside
    initialize_vit_model. (The old `pretrained_cfg={}` mutable default
    is replaced with None.)
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224",
        VisionTransformer,
        **kwargs,
    )


@register_model
def vit_base_patch16_224_in21k_mnc(
    pretrained=False, pretrained_cfg=None, **kwargs
):
    """timm factory: ViT-B/16 (ImageNet-21k, augreg) backbone wrapped for MNC.

    `pretrained` / `pretrained_cfg` exist only for timm registry API
    compatibility and are ignored; weight loading is handled inside
    initialize_vit_model. (The old `pretrained_cfg={}` mutable default
    is replaced with None.)
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224.augreg_in21k",
        VisionTransformer,
        **kwargs,
    )
