# --------------------------------------------------------
# References:
# https://github.com/jxhe/unify-parameter-efficient-tuning
# --------------------------------------------------------
import argparse
import torch
import torch.nn as nn
from timm.models.vision_transformer import PatchEmbed
from timm.models import register_model
import copy
from .utils import initialize_vit_model
from .vit import VisionTransformer as _VisionTransformer
from utils.toolkit import NamespaceDict
from ._PEFTs import Adapter as Lora


class MultiLora(nn.Module):
    """Container holding one LoRA-style adapter per task.

    ``update()`` is called after each task: every existing adapter is
    frozen (``requires_grad_(False)``) and a fresh trainable adapter is
    appended, so only the newest adapter learns during the next task.
    ``forward`` averages all adapters' outputs uniformly.
    """

    def __init__(
        self,
        tuning_config: argparse.Namespace,
    ):
        super().__init__()
        self.config = tuning_config
        self.n_embd = tuning_config.embed_dim
        self._device = self.config._device

        # Start with a single trainable adapter for the first task.
        self.adapter_list = nn.ModuleList([self._build_adapter()])

    def _build_adapter(self):
        """Construct a fresh adapter from the tuning config on the target device."""
        return Lora(
            self.config,
            dropout=0.1,
            bottleneck=self.config.ffn_rank,
            adapter_scalar=self.config.ffn_adapter_scalar,
            adapter_layernorm_option=self.config.ffn_adapter_layernorm_option,
        ).to(self._device)

    def forward(self, x, add_residual=False, residual=None):
        return self.forward_mean(x, add_residual, residual)

    def forward_mean(self, x, add_residual=False, residual=None):
        """Uniform average of all adapters' outputs.

        Args:
            x: [B L D] input features.
            add_residual, residual: forwarded unchanged to each adapter.

        Returns:
            o: [B L D] averaged adapter output.
        """
        # Stack once instead of repeated torch.cat against an empty tensor:
        # avoids O(E^2) copies and the dtype mismatch that torch.cat raises
        # when x is half precision (torch.Tensor() is always float32).
        features = torch.stack(
            [adapter(x, add_residual, residual) for adapter in self.adapter_list],
            dim=1,
        )  # [B E L D]
        # The original einsum used uniform weights sim = ones(B, E) / E,
        # which is exactly the mean over the adapter dimension.
        return features.mean(dim=1)

    def update(self):
        """Freeze all current adapters and append a new trainable one."""
        adapter_list = nn.ModuleList()
        if getattr(self, "adapter_list", None) is not None:
            # Deep-copy the frozen adapters so the old module list can be
            # discarded without aliasing.
            for old_adapter in self.adapter_list:
                adapter_list.append(
                    copy.deepcopy(old_adapter.requires_grad_(False))
                )
            del self.adapter_list
        # A freshly built adapter is already a new object: no deepcopy needed.
        adapter_list.append(self._build_adapter().requires_grad_(True))
        self.adapter_list = adapter_list.to(self.config._device)


class VisionTransformer(_VisionTransformer):
    """Vision Transformer whose blocks are augmented with per-layer MultiLora adapters.

    One ``MultiLora`` is attached to each transformer block; only the
    adapters are trainable after ``freeze()``.
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        num_classes=1000,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        representation_size=None,
        distilled=False,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        embed_layer=PatchEmbed,
        norm_layer=None,
        act_layer=None,
        weight_init="",
        global_pool=False,
        tuning_config: NamespaceDict = None,
    ):
        # Avoid a mutable default argument: a single NamespaceDict() default
        # would be shared across every instance created without an explicit
        # config. Construct a fresh one per call instead.
        if tuning_config is None:
            tuning_config = NamespaceDict()
        super(VisionTransformer, self).__init__(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            num_classes=num_classes,
            embed_dim=embed_dim,
            depth=depth,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            representation_size=representation_size,
            distilled=distilled,
            drop_rate=drop_rate,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=drop_path_rate,
            embed_layer=embed_layer,
            norm_layer=norm_layer,
            act_layer=act_layer,
            weight_init=weight_init,
            global_pool=global_pool,
            tuning_config=tuning_config,
        )
        # One adapter stack per transformer block; all start trainable.
        self.cur_adapter = nn.ModuleList(
            [MultiLora(tuning_config) for _ in range(depth)]
        )
        self.cur_adapter.requires_grad_(True)

    def forward_feats(self, x):
        """Embed patches, prepend CLS token, and run all blocks with adapters.

        Args:
            x: input image batch accepted by ``self.patch_embed``.

        Returns:
            Normalized token sequence [B, 1 + num_patches, D].
        """
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)

        # Each block receives its own per-layer adapter.
        for idx, blk in enumerate(self.blocks):
            x = blk(x, self.cur_adapter[idx])

        x = self.norm(x)

        return x

    def freeze(self):
        """Freeze the whole backbone, then re-enable gradients for adapters only."""
        for param in self.parameters():
            param.requires_grad = False

        self.cur_adapter.requires_grad_(True)

    def after_task(self):
        """End-of-task hook: freeze everything and grow each adapter stack."""
        self.freeze()

        for adapter in self.cur_adapter:
            adapter.update()  # type: ignore


@register_model
def vit_base_patch16_224_baseMulti(
    pretrained=False, pretrained_cfg=None, **kwargs
):
    """timm registry entry: ImageNet-1k ViT-B/16 wrapped with MultiLora adapters.

    ``pretrained``/``pretrained_cfg`` are accepted for timm API compatibility
    but ignored — weight loading is handled inside ``initialize_vit_model``.
    ``pretrained_cfg`` defaults to ``None`` instead of ``{}`` to avoid a
    shared mutable default.
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224",
        VisionTransformer,
        **kwargs,
    )


@register_model
def vit_base_patch16_224_in21k_baseMulti(
    pretrained=False, pretrained_cfg=None, **kwargs
):
    """timm registry entry: ImageNet-21k ViT-B/16 wrapped with MultiLora adapters.

    ``pretrained``/``pretrained_cfg`` are accepted for timm API compatibility
    but ignored — weight loading is handled inside ``initialize_vit_model``.
    ``pretrained_cfg`` defaults to ``None`` instead of ``{}`` to avoid a
    shared mutable default.
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224.augreg_in21k",
        VisionTransformer,
        **kwargs,
    )
