# --------------------------------------------------------
# References:
# https://github.com/jxhe/unify-parameter-efficient-tuning
# --------------------------------------------------------
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.vision_transformer import PatchEmbed
from timm.models.registry import register_model
import copy

from utils.toolkit import NamespaceDict
from models.components.backbone.vit import VisionTransformer as _VisionTransformer
from models.components.backbone._PEFTs import _PEFT_dict
from models.components.backbone.utils import initialize_vit_model


class MoPefts(nn.Module):
    """Mixture of PEFT adapters with a cosine-similarity router.

    One expert adapter is kept per task. After a task finishes, ``update()``
    freezes the live adapter and its router vector and appends a fresh
    trainable pair, so only the newest expert ever receives gradients.

    Routing: token features are compared (cosine similarity) against one
    router vector per expert — ``router_w_o`` holds the frozen vectors of
    past tasks (absent before the first ``update()``), ``router_w_n`` the
    trainable vector of the current task.
    """

    def __init__(
        self,
        config: "NamespaceDict",
    ):
        super().__init__()
        self.config = config
        self.n_embd = config.embed_dim
        self._device = self.config._device
        # Trainable router vector for the current (newest) expert.
        self.router_w_n = nn.Parameter(torch.Tensor(1, self.n_embd))
        nn.init.kaiming_normal_(self.router_w_n)

        # PEFT factory selected by name; reused in update() for new experts.
        self.peft_func = _PEFT_dict[config.peft_name]
        new_adapter = self.peft_func(
            self.config,
            embed_dim=config.embed_dim,
            bottleneck=config.ffn_rank,
            dropout=0.1,
            adapter_scalar=self.config.ffn_adapter_scalar,
            adapter_layernorm_option=self.config.ffn_adapter_layernorm_option,
        ).to(self._device)
        self.adapter_list = nn.ModuleList([new_adapter])

    def forward(self, x, add_residual=False, residual=None):
        # Default routing granularity is per-token (sequence) routing.
        return self.forward_seq(x, add_residual, residual)

    def forward_cls(self, x, add_residual=False, residual=None):
        """Per-sample routing: weight experts by CLS-token similarity.

        Args:
            x: [B, L, D] token features (position 0 assumed to be CLS).
            add_residual, residual: forwarded unchanged to every expert.

        Returns:
            [B, L, D] similarity-weighted sum of the expert outputs.
        """
        n_experts = len(self.adapter_list)
        score = self._cal_sim_cls(x)  # [B, E]
        # With a single expert softmax would collapse to a constant 1.0;
        # use the angular probability instead so the gate stays informative.
        sim = F.softmax(score, dim=-1) if n_experts > 1 else self._prob(score)
        features = torch.stack(
            [adapter(x, add_residual, residual) for adapter in self.adapter_list],
            dim=1,
        )  # [B, E, L, D]
        return torch.einsum("bk,bkld->bld", sim, features)

    def forward_seq(self, x, add_residual=False, residual=None):
        """Per-token routing: every sequence position gets its own gate.

        Args:
            x: [B, L, D] token features.
            add_residual, residual: forwarded unchanged to every expert.

        Returns:
            [B, L, D] similarity-weighted sum of the expert outputs.
        """
        n_experts = len(self.adapter_list)
        score = self._cal_sim_seq(x)  # [B, L, E]
        # Same single-expert special case as forward_cls.
        sim = F.softmax(score, dim=-1) if n_experts > 1 else self._prob(score)
        features = torch.stack(
            [adapter(x, add_residual, residual) for adapter in self.adapter_list],
            dim=2,
        )  # [B, L, E, D]
        return torch.einsum("blk,blkd->bld", sim, features)

    def _cal_sim_cls(self, x):
        """Cosine similarity between the CLS token and all router vectors.

        Args:
            x: [B, L, D]; position 0 is taken as the CLS token.

        Returns:
            [B, E] scores, frozen experts first, current expert last.
        """
        cls_feat = F.normalize(x[:, 0, :], dim=-1, p=2)
        scores = []
        if getattr(self, "router_w_o", None) is not None:
            scores.append(
                torch.einsum(
                    "bd,kd->bk",
                    cls_feat,
                    F.normalize(self.router_w_o, dim=-1, p=2),
                )
            )
        scores.append(
            torch.einsum(
                "bd,kd->bk",
                cls_feat,
                F.normalize(self.router_w_n, dim=-1, p=2),
            )
        )
        return torch.cat(scores, dim=-1)

    def _cal_sim_seq(self, x):
        """Cosine similarity between every token and all router vectors.

        Args:
            x: [B, L, D] token features.

        Returns:
            [B, L, E] scores, frozen experts first, current expert last.
        """
        feat = F.normalize(x, dim=-1, p=2)
        scores = []
        if getattr(self, "router_w_o", None) is not None:
            scores.append(
                torch.einsum(
                    "bld,kd->blk",
                    feat,
                    F.normalize(self.router_w_o, dim=-1, p=2),
                )
            )
        scores.append(
            torch.einsum(
                "bld,kd->blk",
                feat,
                F.normalize(self.router_w_n, dim=-1, p=2),
            )
        )
        return torch.cat(scores, dim=-1)

    def update(self):
        """Close the current task: freeze live expert/router, add fresh ones.

        Moves ``router_w_n`` into the frozen bank ``router_w_o``,
        re-initialises ``router_w_n``, freezes all existing adapters, and
        appends a new trainable adapter to ``adapter_list``.
        """
        # Pass embed_dim explicitly, matching __init__ (it was previously
        # omitted here, which relied on the PEFT factory's default width).
        new_adapter = self.peft_func(
            self.config,
            embed_dim=self.config.embed_dim,
            bottleneck=self.config.ffn_rank,
            dropout=0.1,
            adapter_scalar=self.config.ffn_adapter_scalar,
            adapter_layernorm_option=self.config.ffn_adapter_layernorm_option,
        ).to(self._device)

        # Fold the current router vector into the frozen bank. Build the
        # Parameter from a tensor already on the target device: re-assigning
        # `param.to(device)` to a registered parameter raises TypeError on a
        # real device move because `.to()` returns a plain Tensor.
        if getattr(self, "router_w_o", None) is not None:
            frozen = torch.cat(
                [self.router_w_o.clone().detach(), self.router_w_n.clone().detach()]
            )
            del self.router_w_o
        else:
            frozen = self.router_w_n.clone().detach()
        self.router_w_o = nn.Parameter(
            frozen.to(self.config._device), requires_grad=False
        )

        # Re-initialise the trainable router vector in place (via .data) so
        # its parameter registration and any optimizer references survive.
        self.router_w_n.data = torch.empty(
            1, self.n_embd, device=self.config._device
        )
        nn.init.kaiming_normal_(self.router_w_n)

        # Freeze-and-snapshot every old expert; append the new trainable one.
        adapter_list = nn.ModuleList()
        if getattr(self, "adapter_list", None) is not None:
            for old_adapter in self.adapter_list:
                adapter_list.append(
                    copy.deepcopy(old_adapter.requires_grad_(False))
                )
            del self.adapter_list
        adapter_list.append(new_adapter.requires_grad_(True))
        self.adapter_list = adapter_list.to(self.config._device)

    @staticmethod
    def _prob(x):
        """Map cosine similarity in [-1, 1] to a gate value in [0, 1].

        Normalised complementary angle: 1.0 for perfectly aligned vectors,
        0.5 for orthogonal, 0.0 for opposite.

        Args:
            x: tensor of cosine similarities.
        """
        return (math.pi - torch.acos(x)) / math.pi


class VisionTransformer(_VisionTransformer):
    """Vision Transformer whose blocks are augmented with MoPefts adapters.

    One ``MoPefts`` mixture-of-adapters module is attached per transformer
    block; only those adapters stay trainable (see ``freeze``).
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        num_classes=1000,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        representation_size=None,
        distilled=False,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        embed_layer=PatchEmbed,
        norm_layer=None,
        act_layer=None,
        weight_init="",
        global_pool=False,
        config: "NamespaceDict" = None,
    ):
        # None sentinel instead of a mutable default: a `NamespaceDict()`
        # default would be created once at import time and shared by every
        # instance that omits `config`.
        if config is None:
            config = NamespaceDict()
        super(VisionTransformer, self).__init__(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            num_classes=num_classes,
            embed_dim=embed_dim,
            depth=depth,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            representation_size=representation_size,
            distilled=distilled,
            drop_rate=drop_rate,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=drop_path_rate,
            embed_layer=embed_layer,
            norm_layer=norm_layer,
            act_layer=act_layer,
            weight_init=weight_init,
            global_pool=global_pool,
            config=config,
        )
        # One adapter mixture per transformer block; these are the only
        # parameters left trainable by freeze().
        self.cur_adapter = nn.ModuleList(
            [MoPefts(config) for _ in range(depth)]
        )
        self.cur_adapter.requires_grad_(True)

    def forward_feats(self, x):
        """Embed patches, prepend CLS, and run blocks with their adapters.

        Args:
            x: [B, C, H, W] image batch.

        Returns:
            [B, 1 + num_patches, embed_dim] normalised token features.
        """
        x = self.patch_embed(x)
        cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = self.pos_drop(x + self.pos_embed)

        # Each block receives its matching MoPefts adapter.
        for blk, adapter in zip(self.blocks, self.cur_adapter):
            x = blk(x, adapter)

        return self.norm(x)

    def freeze(self):
        """Freeze the whole backbone, keeping only the adapters trainable."""
        for param in self.parameters():
            param.requires_grad = False

        self.cur_adapter.requires_grad_(True)

    def after_task(self):
        """Per-task bookkeeping: freeze everything, then grow each adapter."""
        self.freeze()

        for adapter in self.cur_adapter:
            adapter.update()  # type: ignore


@register_model
def vit_base_patch16_224_moe(pretrained=False, pretrained_cfg=None, **kwargs):
    """timm registry entry: MoE-adapter ViT-B/16 (ImageNet-1k weights).

    `pretrained` / `pretrained_cfg` exist only to satisfy the timm factory
    signature; weight loading is handled by `initialize_vit_model`. The
    default is `None` rather than `{}` to avoid a mutable default argument.
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224",
        VisionTransformer,
        **kwargs,
    )


@register_model
def vit_base_patch16_224_in21k_moe(
    pretrained=False, pretrained_cfg=None, **kwargs
):
    """timm registry entry: MoE-adapter ViT-B/16 (ImageNet-21k weights).

    `pretrained` / `pretrained_cfg` exist only to satisfy the timm factory
    signature; weight loading is handled by `initialize_vit_model`. The
    default is `None` rather than `{}` to avoid a mutable default argument.
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224_in21k",
        VisionTransformer,
        **kwargs,
    )
