import torch
import torch.nn as nn
import copy
from timm.models.vision_transformer import PatchEmbed
from timm.models.registry import register_model

from utils.toolkit import NamespaceDict
from models.components.backbone.vit import VisionTransformer as _VisionTransformer
from models.components.backbone._PEFTs import _PEFT_dict
from models.components.backbone.utils import initialize_vit_model


class VisionTransformer(_VisionTransformer):
    """Vision Transformer with per-task PEFT (adapter) modules.

    ``cur_module`` holds one trainable adapter per transformer block for the
    task currently being learned; ``module_list`` archives a frozen deep copy
    of the adapters of every finished task.  ``forward_cur_*`` runs only the
    current adapters, while ``forward_all_*`` runs every archived adapter set
    (without gradients) plus the current one and stacks the results.
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        num_classes=1000,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        representation_size=None,
        distilled=False,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        embed_layer=PatchEmbed,
        norm_layer=None,
        act_layer=None,
        weight_init="",
        global_pool=False,
        config: NamespaceDict = None,
    ):
        # Avoid the mutable-default-argument pitfall: a ``NamespaceDict()``
        # default instance would be shared by every construction of this class.
        if config is None:
            config = NamespaceDict()
        super().__init__(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            num_classes=num_classes,
            embed_dim=embed_dim,
            depth=depth,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            representation_size=representation_size,
            distilled=distilled,
            drop_rate=drop_rate,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=drop_path_rate,
            embed_layer=embed_layer,
            norm_layer=norm_layer,
            act_layer=act_layer,
            weight_init=weight_init,
            global_pool=global_pool,
            config=config,
        )

        # ####### Adapter begins #########
        # NOTE: the original ``except KeyError or AttributeError`` only caught
        # KeyError (``or`` of two classes evaluates to the first one), and it
        # merely printed a message, leaving ``self.peft_func`` unset so the
        # model crashed later in ``get_new_module``.  Fail fast instead.
        try:
            self.peft_func = _PEFT_dict[config.peft_name]
        except (KeyError, AttributeError) as err:
            raise ValueError(
                f"Invalid peft_name: {getattr(config, 'peft_name', None)!r}"
            ) from err

        """Don't use List which results in error in torch
        """
        # nn.ModuleList (not a plain Python list) so torch registers the
        # sub-modules for parameters(), .to(device), state_dict(), etc.
        self.module_list = nn.ModuleList()  # frozen adapters of past tasks
        self.cur_module = nn.ModuleList()  # trainable adapters, current task
        self.get_new_module()

    def forward(self, x, mode=""):
        """Dispatch to ``forward_<mode>_token`` based on ``mode``.

        Args:
            x: input image batch.
            mode: e.g. ``"cur"`` or ``"all"``; case-insensitive.

        Raises:
            ValueError: if no ``forward_<mode>_token`` method exists.
        """
        mode = mode.lower()
        method_name = f"forward_{mode}_token"  # Create the method name dynamically
        if hasattr(self, method_name):
            method = getattr(self, method_name)  # Get the method reference
            return method(x)  # Call the method
        else:
            raise ValueError(f"Invalid mode: {mode}. Method {method_name} not found.")

    def forward_cur_token(self, x):
        """Return the [CLS] token features using only the current adapters."""
        x = self.forward_cur_feats(x)
        outcome = x[:, :, 0]  # CLS token is the first token of each sequence
        return outcome

    def forward_all_token(self, x):
        """Return the [CLS] token features from every adapter set (past + current)."""
        x = self.forward_all_feats(x)
        outcome = x[:, :, 0]  # CLS token is the first token of each sequence
        return outcome

    def forward_cur_feats(self, x):
        """Run the backbone with the current task's adapters.

        Returns:
            Tensor of shape [B, 1, L, D] (a singleton "task" axis is inserted
            so the output aligns with ``forward_all_feats``).
        """
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)
        for idx, blk in enumerate(self.blocks):
            x = blk(x, adapt=self.cur_module[idx])
        x = self.norm(x)
        return x[:, None, :]

    def forward_all_feats(self, x):
        """Run the backbone once per archived adapter set plus the current one.

        Past-task passes are gradient-free; only the final pass (current
        adapters) participates in autograd.

        Returns:
            Tensor of shape [B, T, L, D], T = number of tasks so far
            (len(module_list) + 1).
        """
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x_init = self.pos_drop(x)

        features = []
        with torch.no_grad():
            for i in range(len(self.module_list)):
                # clone() instead of copy.deepcopy(): deepcopy raises on
                # non-leaf tensors that require grad, clone never does.
                x = x_init.clone()
                for j in range(len(self.blocks)):
                    adapt = self.module_list[i][j]  # type: ignore
                    x = self.blocks[j](x, adapt)
                x = self.norm(x)
                features.append(x)

        x = x_init.clone()
        for i in range(len(self.blocks)):
            adapt = self.cur_module[i]
            x = self.blocks[i](x, adapt)
        x = self.norm(x)
        features.append(x)
        # [B, T, L, D]
        return torch.stack(features, dim=1)

    def after_task(self):
        """End-of-task hook: freeze everything, archive current adapters,
        and allocate a fresh trainable set is NOT done here — only the
        archived copy is appended; the (same) cur_module is re-enabled."""
        self.freeze()
        self.add_module_to_list()
        self.cur_module.requires_grad_(True)

    def add_module_to_list(self):
        """Archive a frozen deep copy of the current adapters."""
        self.module_list.append(copy.deepcopy(self.cur_module).requires_grad_(False))

    def get_new_module(self):
        """Create a fresh trainable adapter for every transformer block."""
        config = self.config
        self.cur_module = nn.ModuleList()
        for _ in range(len(self.blocks)):
            adapter = self.peft_func(
                config=self.config,
                embed_dim=config.embed_dim,
                bottleneck=config.ffn_rank,
                dropout=0.1,
                adapter_scalar=config.ffn_adapter_scalar,
                adapter_layernorm_option=config.ffn_adapter_layernorm_option,
            ).to(self._device)  # _device presumably set by the parent class — TODO confirm
            self.cur_module.append(adapter)
        self.cur_module.requires_grad_(True)

    def freeze(self):
        """Freeze the whole network except the current task's adapters."""
        for param in self.parameters():
            param.requires_grad = False
        self.cur_module.requires_grad_(True)


@register_model
def vit_base_patch16_224_multiNet(pretrained=False, pretrained_cfg=None, **kwargs):
    """timm registry factory for the ImageNet-1k-pretrained multi-adapter ViT-B/16.

    ``pretrained`` / ``pretrained_cfg`` are part of the timm factory signature
    but unused here (weight loading is handled by ``initialize_vit_model``).
    The previous ``pretrained_cfg={}`` mutable default is replaced by ``None``.
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224",
        VisionTransformer,
        **kwargs,
    )


@register_model
def vit_base_patch16_224_in21k_multiNet(pretrained=False, pretrained_cfg=None, **kwargs):
    """timm registry factory for the ImageNet-21k-pretrained multi-adapter ViT-B/16.

    ``pretrained`` / ``pretrained_cfg`` are part of the timm factory signature
    but unused here (weight loading is handled by ``initialize_vit_model``).
    The previous ``pretrained_cfg={}`` mutable default is replaced by ``None``.
    """
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224_in21k",
        VisionTransformer,
        **kwargs,
    )
