# --------------------------------------------------------
# References:
# https://github.com/jxhe/unify-parameter-efficient-tuning
# --------------------------------------------------------
import torch
import torch.nn as nn
from timm.layers import DropPath

# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
from functools import partial
from collections import OrderedDict
from timm.models.vision_transformer import PatchEmbed
from timm.models import register_model
import argparse
from typing import Optional
import torch.nn.functional as F
import logging
import math
from .utils import initialize_vit_model
from utils.toolkit import NamespaceDict

_logger = logging.getLogger(__name__)


class Attention(nn.Module):
    """Multi-head self-attention with separate q/k/v projection layers.

    Unlike timm's fused-qkv ``Attention``, the query/key/value projections
    are independent ``nn.Linear`` modules so they can be tuned or adapted
    separately. ``dim`` must be divisible by ``num_heads``.
    """

    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.num_heads = num_heads
        # Per-head channel width, computed once (the original assigned it twice).
        self.head_dim = dim // num_heads
        self.scale = self.head_dim**-0.5

        self.q_proj = nn.Linear(dim, dim, bias=qkv_bias)
        self.v_proj = nn.Linear(dim, dim, bias=qkv_bias)
        self.k_proj = nn.Linear(dim, dim, bias=qkv_bias)

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.__init_weights()

    def __init_weights(self):
        # Xavier init on all projection weights; biases keep nn.Linear defaults.
        nn.init.xavier_uniform_(self.q_proj.weight)
        nn.init.xavier_uniform_(self.k_proj.weight)
        nn.init.xavier_uniform_(self.v_proj.weight)
        nn.init.xavier_uniform_(self.proj.weight)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        """Reshape (B, N, C) -> (B, num_heads, seq_len, head_dim), contiguous."""
        return (
            tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
            .transpose(1, 2)
            .contiguous()
        )

    def forward(self, x):
        """Apply scaled dot-product self-attention to ``x`` of shape (B, N, C)."""
        B, N, C = x.shape

        # Project, then fold heads into the batch dim: (B*H, N, head_dim).
        # q is handled the same way as k/v (the original projected it separately).
        q = self._shape(self.q_proj(x), N, B).view(
            B * self.num_heads, -1, self.head_dim
        )
        k = self._shape(self.k_proj(x), -1, B).view(
            B * self.num_heads, -1, self.head_dim
        )
        v = self._shape(self.v_proj(x), -1, B).view(
            B * self.num_heads, -1, self.head_dim
        )

        # Equivalent to (q @ k.transpose(-2, -1)) * self.scale on folded heads.
        attn_weights = torch.bmm(q, k.transpose(1, 2)) * self.scale

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        attn_probs = self.attn_drop(attn_weights)
        attn_output = torch.bmm(attn_probs, v)

        # Unfold heads back: (B*H, N, head_dim) -> (B, N, C).
        attn_output = attn_output.view(B, self.num_heads, N, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(B, N, C)

        x = self.proj(attn_output)
        x = self.proj_drop(x)

        return x


class Block(nn.Module):
    """Pre-norm transformer encoder block with optional adapter support.

    The MLP is kept as explicit ``fc1``/``fc2`` layers (instead of timm's Mlp
    module) so an adapter can be applied around it, either ``"sequential"``
    (after the MLP output) or ``"parallel"`` (alongside the MLP), selected by
    ``config.ffn_option``.
    """

    # Position of this block within the encoder stack.
    layer_id: int

    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        config: Optional[argparse.Namespace] = None,
        layer_id=0,
    ):
        super().__init__()
        # A fresh Namespace per instance avoids the mutable-default-argument
        # pitfall of the original signature (`config=argparse.Namespace()`).
        self.config = config if config is not None else argparse.Namespace()
        self.layer_id = layer_id

        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.fc1 = nn.Linear(dim, mlp_hidden_dim)
        self.fc2 = nn.Linear(mlp_hidden_dim, dim)
        self.act = act_layer()
        self.mlp_drop = nn.Dropout(drop)

        self.ffn_option = getattr(self.config, "ffn_option", None)
        # Invalid options are reported but tolerated (the adapter is then simply
        # never applied in forward). Use the module logger instead of the
        # original function-local `import sys` + stderr print.
        if self.ffn_option not in (None, "sequential", "parallel"):
            _logger.error(
                "FFN option must be either 'sequential' or 'parallel', rather than {}".format(
                    self.ffn_option
                )
            )

    def forward(self, x, adapt: Optional[nn.Module] = None):
        """Run attention then MLP over ``x``, optionally routing through ``adapt``."""
        x = x + self.drop_path(self.attn(self.norm1(x)))
        residual = x

        # Parallel adapters see the MLP input, so compute before the MLP runs.
        adapt_x = adapt(x, add_residual=False) if adapt is not None else None

        x = self.mlp_drop(self.act(self.fc1(self.norm2(x))))
        x = self.drop_path(self.mlp_drop(self.fc2(x)))

        if adapt is not None:
            if self.ffn_option == "sequential":
                x = adapt(x)
            elif self.ffn_option == "parallel":
                x = x + adapt_x

        x = residual + x

        return x


class VisionTransformer(nn.Module):
    """Vision Transformer with support for global average pooling.

    Mirrors timm's ViT, but builds the encoder from the local adapter-aware
    ``Block`` and exposes ``before_task``/``after_task``-style lifecycle
    hooks for task-incremental training. Despite the docstring inherited
    from MAE, ``global_pool`` is not actually supported (asserted below);
    the ``[CLS]`` token is used as the image representation.
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        num_classes=1000,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        representation_size=None,
        distilled=False,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        embed_layer=PatchEmbed,
        norm_layer=None,
        act_layer=None,
        weight_init="",  # NOTE(review): accepted but never used in this implementation
        global_pool=False,
        config: NamespaceDict = NamespaceDict(),  # NOTE(review): mutable default, shared across calls
    ):
        """Build the ViT backbone; ``config`` must provide ``cam_visual`` and ``_device``."""
        super().__init__()
        # custom
        self.out_dim: int = embed_dim
        self.depth: int = depth
        # CAM visualization presumably needs high-resolution inputs, hence the
        # forced 1024 — TODO(review): confirm `config` always defines `cam_visual`.
        self.img_size: int = img_size if not config.cam_visual else 1024
        print("I'm using ViT with adapters.")
        self.config = config
        self._device = config._device
        self.global_pool = global_pool
        assert global_pool is False, "Global pooling is not supported in this model."

        self.num_classes = num_classes
        self.num_features = self.embed_dim = (
            embed_dim  # num_features for consistency with other models
        )
        # Two prefix tokens ([CLS] + distillation) when distilled, else just [CLS].
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU

        self.patch_embed = embed_layer(
            img_size=self.img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = (
            nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
        )
        self.pos_embed = nn.Parameter(
            torch.zeros(1, num_patches + self.num_tokens, embed_dim)
        )
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, depth)
        ]  # stochastic depth decay rule
        # nn.Sequential is used only as a container; forward_feats iterates the
        # blocks manually so it can pass the extra `adapt` argument.
        self.blocks = nn.Sequential(
            *[
                Block(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer,  # type: ignore
                    act_layer=act_layer,
                    config=config,  # type: ignore
                    layer_id=i,
                )
                for i in range(depth)
            ]
        )
        self.norm = norm_layer(embed_dim)
        # ######## MAE begins ############
        # Representation layer
        if representation_size and not distilled:
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(
                OrderedDict(
                    [
                        ("fc", nn.Linear(embed_dim, representation_size)),
                        ("act", nn.Tanh()),
                    ]
                )
            )
        else:
            self.pre_logits = nn.Identity()

        # Classifier head(s)
        self.head = (
            nn.Linear(self.num_features, num_classes)
            if num_classes > 0
            else nn.Identity()
        )
        self.head_dist = None
        if distilled:
            self.head_dist = (
                nn.Linear(self.embed_dim, self.num_classes)
                if num_classes > 0
                else nn.Identity()
            )

        # NOTE(review): the head is deleted right after construction, so callers
        # presumably invoke `reset_classifier` before using it; accessing
        # `self.head` (e.g. via `get_classifier`) before that raises AttributeError.
        del self.head

    def init_weights(self, mode=""):
        # Weight init is handled externally (e.g. by loading pretrained weights).
        raise NotImplementedError()

    @torch.jit.ignore  # type: ignore
    def no_weight_decay(self):
        """Parameter names excluded from weight decay by the optimizer setup."""
        return {"pos_embed", "cls_token", "dist_token"}

    def get_classifier(self):
        """Return the classification head, or a (head, head_dist) pair if distilled."""
        if self.dist_token is None:
            return self.head
        else:
            return self.head, self.head_dist

    def reset_classifier(self, num_classes, global_pool=""):
        """(Re)create the classifier head(s) for ``num_classes`` outputs."""
        self.num_classes = num_classes
        self.head = (
            nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        )
        if self.num_tokens == 2:
            self.head_dist = (
                nn.Linear(self.embed_dim, self.num_classes)
                if num_classes > 0
                else nn.Identity()
            )

    def forward(self, x):
        # Backbone-only forward: returns the [CLS] feature, not logits.
        return self.forward_token(x)

    def forward_token(self, x):
        """Return the [CLS] token embedding after the encoder."""
        x = self.forward_feats(x)
        outcome = x[:, 0]

        return outcome

    def forward_feats(self, x):
        """Patchify, add positions, run all blocks, and return all token features.

        NOTE(review): only the [CLS] token is prepended here; the distillation
        token (if any) is not inserted, even though pos_embed is sized for it.
        """
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)

        # Iterate manually (rather than calling self.blocks(x)) so each block
        # receives the adapter argument; no adapter is used in the base model.
        for _, blk in enumerate(self.blocks):
            x = blk(x, adapt=None)

        x = self.norm(x)

        return x

    def freeze(self):
        """Disable gradients for every parameter in the backbone."""
        for param in self.parameters():
            param.requires_grad = False

    def before_task(self):
        """
        Prepare the model structure for a new task.
        Configure parameter trainability.
        Initialize task-specific components.
        Dynamically adjust model architecture if necessary.
        """
        pass

    def before_train(self):
        """
        Prepare the model for training mode.
        Initialize pre-training state.
        Set specific training configuration parameters.
        Ensure trainable parameters are correctly configured.
        """
        pass

    def before_medium(self):
        # Hook between task preparation and training; no-op in the base model.
        pass

    def after_train(self):
        """
        Process model state after training.
        Compute or update representation vectors.
        Clean up temporary resources from training.
        Prepare for evaluation phase.
        """
        pass

    def after_medium(self):
        # Hook between training and task finalization; no-op in the base model.
        pass

    def after_task(self):
        """
        Save or integrate the current model state into overall knowledge.
        Freeze parameters to prevent catastrophic forgetting.
        Prepare model structure for the next task.
        Manage model resources.
        """
        self.freeze()
class SLCA(VisionTransformer):
    """SLCA variant: the whole backbone stays trainable across tasks."""

    def before_task(self):
        # Unfreeze every parameter at the start of each task (the inverse of
        # the base class, which freezes after a task).
        for p in self.parameters():
            p.requires_grad = True

    def after_task(self):
        # Deliberately skip the base-class freeze so weights remain trainable.
        pass


@register_model
def vit_base_patch16_224_slca(pretrained=False, pretrained_cfg=None, **kwargs):
    """timm factory: ImageNet-1k ViT-B/16 configured for SLCA-style fine-tuning."""
    # The timm registry passes these two; this factory ignores them. The
    # default is None instead of the original mutable `{}`.
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224",
        VisionTransformer,
        ft=True,
        **kwargs,
    )


@register_model
def vit_base_patch16_224_vanila(pretrained=False, pretrained_cfg=None, **kwargs):
    """timm factory: ImageNet-1k ViT-B/16 without fine-tuning (frozen backbone)."""
    # The timm registry passes these two; this factory ignores them. The
    # default is None instead of the original mutable `{}`.
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224",
        VisionTransformer,
        ft=False,
        **kwargs,
    )


@register_model
def vit_base_patch16_224_in21k_slca(pretrained=False, pretrained_cfg=None, **kwargs):
    """timm factory: ImageNet-21k ViT-B/16 configured for SLCA-style fine-tuning."""
    # The timm registry passes these two; this factory ignores them. The
    # default is None instead of the original mutable `{}`.
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224.augreg_in21k",
        VisionTransformer,
        ft=True,
        **kwargs,
    )


@register_model
def vit_base_patch16_224_in21k_vanila(pretrained=False, pretrained_cfg=None, **kwargs):
    """timm factory: ImageNet-21k ViT-B/16 without fine-tuning (frozen backbone)."""
    # The timm registry passes these two; this factory ignores them. The
    # default is None instead of the original mutable `{}`.
    del pretrained
    del pretrained_cfg
    return initialize_vit_model(
        "vit_base_patch16_224.augreg_in21k",
        VisionTransformer,
        ft=False,
        **kwargs,
    )


def interpolate_positional_embeddings(old_pos_embed, patch_size=16, image_size=1024):
    """Bilinearly resize ViT positional embeddings to a new image resolution.

    Args:
        old_pos_embed: tensor of shape [1, 1 + G*G, D] — a [CLS] position
            followed by a square G x G patch grid of positions.
        patch_size: patch side length in pixels.
        image_size: target (square) image side length in pixels.

    Returns:
        Tensor of shape [1, 1 + G'*G', D] with G' = image_size // patch_size;
        the [CLS] position is carried over unchanged.
    """
    # Keep the [CLS] position aside; only the patch grid gets interpolated.
    cls_token = old_pos_embed[:, :1, :]
    grid_embed = old_pos_embed[:, 1:, :]

    # Recover the old grid side from the patch-token count alone. The original
    # code took sqrt of the count INCLUDING the [CLS] token and relied on int()
    # truncation to land on the right value; this computes it directly.
    old_num_patches = grid_embed.shape[1]
    old_grid = int(round(old_num_patches**0.5))
    new_grid = image_size // patch_size

    # [1, G*G, D] -> [1, D, G, G] for spatial interpolation.
    grid_embed = grid_embed.reshape(1, old_grid, old_grid, -1).permute(0, 3, 1, 2)
    grid_embed = F.interpolate(
        grid_embed,
        size=(new_grid, new_grid),
        mode="bilinear",
    )
    # [1, D, G', G'] -> [1, G'*G', D].
    grid_embed = grid_embed.permute(0, 2, 3, 1).reshape(1, new_grid * new_grid, -1)

    # Reinsert the [CLS] position in front of the resized grid.
    return torch.cat([cls_token, grid_embed], dim=1)


# TODO update pos_emb resize
def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
    """Rescale a grid of position embeddings to match a target state_dict shape.

    Adapted from
    https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
    """
    _logger.info("Resized position embedding: %s to %s", posemb.shape, posemb_new.shape)
    grid_len = posemb_new.shape[1]
    if num_tokens:
        # Split off the prefix tokens ([CLS]/dist); only the grid is resized.
        tok_embed = posemb[:, :num_tokens]
        grid_embed = posemb[0, num_tokens:]
        grid_len -= num_tokens
    else:
        tok_embed = posemb[:, :0]
        grid_embed = posemb[0]
    gs_old = int(math.sqrt(len(grid_embed)))
    if not len(gs_new):  # backwards compatibility
        gs_new = [int(math.sqrt(grid_len))] * 2
    assert len(gs_new) >= 2
    _logger.info("Position embedding grid-size from %s to %s", [gs_old, gs_old], gs_new)
    # [G*G, D] -> [1, D, G, G], resize spatially, then flatten back.
    grid_embed = grid_embed.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    grid_embed = F.interpolate(
        grid_embed, size=gs_new, mode="bicubic", align_corners=False
    )
    grid_embed = grid_embed.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
    return torch.cat([tok_embed, grid_embed], dim=1)
