""" Vision Transformer (ViT) in PyTorch

A PyTorch implementation of Vision Transformers as described in:

'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
    - https://arxiv.org/abs/2010.11929

`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
    - https://arxiv.org/abs/2106.10270

The official jax code is released and available at https://github.com/google-research/vision_transformer

Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert

Hacked together by / Copyright 2020, Ross Wightman
"""

import math
import logging
from functools import partial
import copy
from collections import OrderedDict
from re import S
from typing import Optional

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint

from src.models.subm.mol import MoLoRaMLP, MoLoRaAttention, LoRaMLP
from src.models.routing.soft_router import (
    ClsRouterWeights,
    RouterWeights,
    SoftRouter,
)

from timm.data import (
    # IMAGENET_DEFAULT_MEAN,
    # IMAGENET_DEFAULT_STD,
    IMAGENET_INCEPTION_MEAN,
    IMAGENET_INCEPTION_STD,
)
from timm.models.helpers import (
    build_model_with_cfg,
    named_apply,
    adapt_input_conv,
    resolve_pretrained_cfg,
    checkpoint_seq,
)
from timm.models.layers import (
    DropPath,
    trunc_normal_,
    lecun_normal_,
    _assert,
)
from timm.models.layers.helpers import to_2tuple
from timm.models.registry import register_model


_logger = logging.getLogger(__name__)


def _cfg(url="", **kwargs):
    """Build a default pretrained-config dict for ViT models.

    Any entries given in ``kwargs`` override the defaults below.
    """
    cfg = {
        "url": url,
        "num_classes": 1000,
        "input_size": (3, 224, 224),
        "pool_size": None,
        "crop_pct": 0.9,
        "interpolation": "bicubic",
        "fixed_input_size": True,
        "mean": IMAGENET_INCEPTION_MEAN,
        "std": IMAGENET_INCEPTION_STD,
        "first_conv": "patch_embed.proj",
        "classifier": "head",
    }
    cfg.update(kwargs)
    return cfg


class FeatSetCls(nn.Module):
    """Classify a stacked set of per-block features.

    Input:  [batch, depth(=12), num_patches, hidden_dim]
    Output: [batch, num_classes]

    A small stack of 7x7 convolutions aggregates the per-block feature maps
    (e.g. [batch, 12, 197, 768]) into a single 512-d vector, which is then
    mapped to class logits by a linear layer.
    """

    def __init__(self, num_classes):
        super().__init__()
        # (in_channels, out_channels, stride) for each 7x7 conv stage.
        stages = [
            (12, 64, (1, 3)),
            (64, 128, (1, 2)),
            (128, 256, (3, 2)),
            (256, 512, (3, 3)),
        ]
        layers = []
        for in_ch, out_ch, stride in stages:
            layers.extend(
                [
                    nn.Conv2d(in_ch, out_ch, 7, stride=stride, bias=False),
                    nn.BatchNorm2d(out_ch),
                    nn.GELU(),
                ]
            )
        layers.append(nn.AdaptiveAvgPool2d((1, 1)))
        self.convnet = nn.Sequential(*layers)
        self.shortcut = nn.Sequential()
        self.fc = nn.Linear(512, num_classes)

    def forward(self, x):
        pooled = self.convnet(x)
        flat = pooled.flatten(1)
        return self.fc(flat)


class Mlp(nn.Module):
    """MLP as used in Vision Transformer, MLP-Mixer and related networks,
    optionally augmented with a low-rank branch on the first projection.

    ``tuning_mode`` selects the branch added to ``fc1``'s output:
      * "molh"     - mixture-of-LoRA experts with a token-wise soft router
                     (falls back to a single plain LoRA when ``num_experts``
                     is 1)
      * "mol_cls"  - mixture-of-LoRA experts routed by the class token
      * "mol_base" - a single LoRA pair (A: kaiming init, B: zeros)
      * anything else - plain two-layer MLP, no extra branch
    """

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        bias=True,
        drop=0.0,
        tuning_mode="molh",
        num_experts=20,
        rank=8,
        alpha=16,
        top_k=None,
        jitter_noise=1e-1,
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        fc_bias = to_2tuple(bias)
        fc_drop = to_2tuple(drop)

        self.fc1 = nn.Linear(in_features, hidden_features, bias=fc_bias[0])
        self.act = act_layer()
        self.drop1 = nn.Dropout(fc_drop[0])  # type: ignore
        self.fc2 = nn.Linear(hidden_features, out_features, bias=fc_bias[1])
        self.drop2 = nn.Dropout(fc_drop[1])  # type: ignore

        self.tuning_mode = tuning_mode
        self.num_experts = num_experts
        if self.tuning_mode == "molh":
            if num_experts > 1:
                router = SoftRouter(
                    RouterWeights(
                        input_dim=in_features,
                        num_experts=num_experts,
                    ),
                    input_dim=in_features,
                    num_experts=num_experts,
                    top_k=top_k,
                    jitter_noise=jitter_noise,
                )
                self.mol = MoLoRaMLP(
                    router=router,
                    hidden_dim=in_features,
                    output_dim=hidden_features,
                    num_experts=num_experts,
                    rank=rank,
                    alpha=alpha,
                    dtype=torch.float32,
                )
            else:
                # A single expert makes routing pointless; use a plain LoRA.
                self.mol = LoRaMLP(
                    hidden_dim=in_features,
                    output_dim=hidden_features,
                    rank=rank,
                )
        elif self.tuning_mode == "mol_cls":
            router = SoftRouter(
                ClsRouterWeights(
                    input_dim=in_features, num_experts=num_experts
                ),
                input_dim=in_features,
                num_experts=num_experts,
                top_k=top_k,
                jitter_noise=jitter_noise,
            )
            self.mol = MoLoRaMLP(
                router=router,
                hidden_dim=in_features,
                output_dim=hidden_features,
                num_experts=num_experts,
                rank=rank,
                alpha=alpha,
                dtype=torch.float32,
            )
        elif self.tuning_mode == "mol_base":
            self.mol_linear_a = nn.Linear(in_features, rank, bias=False)
            self.mol_linear_b = nn.Linear(rank, hidden_features, bias=False)
            nn.init.kaiming_uniform_(self.mol_linear_a.weight)
            nn.init.zeros_(self.mol_linear_b.weight)

    def forward(self, x):
        hidden = self.fc1(x)
        # Add the low-rank update (if any) to the first projection's output.
        if self.tuning_mode == "mol_base":
            hidden = hidden + self.mol_linear_b(self.mol_linear_a(x))
        elif self.tuning_mode in ("molh", "mol_cls"):
            hidden = hidden + self.mol(x)["o"]

        hidden = self.drop1(self.act(hidden))
        return self.drop2(self.fc2(hidden))


class Attention(nn.Module):
    """Multi-head self-attention with an optional mixture-of-LoRA branch on
    the query and value projections.

    The LoRA branch is only built/used when ``tuning_mode`` equals the
    sentinel string "molkkkkkk" — which looks like a deliberately mangled
    flag, i.e. the branch is effectively disabled for the usual modes.
    """

    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        attn_drop=0.0,
        proj_drop=0.0,
        tuning_mode="molh",
        num_experts=20,
        rank=8,
        alpha=16,
    ):
        super().__init__()
        assert dim % num_heads == 0, "dim should be divisible by num_heads"

        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim**-0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.tuning_mode = tuning_mode
        if tuning_mode == "molkkkkkk":
            # One independent router + expert bank per tuned projection
            # (queries first, then values — order matters for RNG parity).
            for branch in ("mol_q", "mol_v"):
                weights = RouterWeights(
                    input_dim=self.dim, num_experts=num_experts
                )
                router = SoftRouter(
                    weights,
                    input_dim=self.dim,
                    num_experts=num_experts,
                )
                setattr(
                    self,
                    branch,
                    MoLoRaAttention(
                        router,
                        hidden_dim=dim,
                        output_dim=dim,
                        num_heads=self.num_heads,
                        num_experts=num_experts,
                        rank=rank,
                        alpha=alpha,
                    ),
                )

    def forward(self, x):
        B, N, C = x.shape

        if self.tuning_mode == "molkkkkkk":
            qkv = self.qkv(x)  # [B, N, 3 * C]
            delta_q = self.mol_q(x)
            delta_v = self.mol_v(x)

            qkv = qkv.reshape(B, N, 3, self.num_heads, C // self.num_heads)
            # unbind keeps torchscript happy (cannot use tensor as tuple)
            q, k, v = qkv.unbind(2)
            q = (q + delta_q).transpose(1, 2)
            k = k.transpose(1, 2)
            v = (v + delta_v).transpose(1, 2)
        else:
            qkv = (
                self.qkv(x)
                .reshape(B, N, 3, self.num_heads, C // self.num_heads)
                .permute(2, 0, 3, 1, 4)
            )
            # unbind keeps torchscript happy (cannot use tensor as tuple)
            q, k, v = qkv.unbind(0)

        scores = (q @ k.transpose(-2, -1)) * self.scale
        scores = self.attn_drop(scores.softmax(dim=-1))

        out = (scores @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))


class LayerScale(nn.Module):
    """Per-channel learnable scaling of the residual branch (CaiT-style).

    ``gamma`` starts at ``init_values``; with ``inplace=True`` the input
    tensor is scaled in place.
    """

    def __init__(self, dim, init_values=1e-5, inplace=False):
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x):
        if self.inplace:
            return x.mul_(self.gamma)
        return x * self.gamma


class Adapter(nn.Module):
    """Bottleneck adapter (Houlsby-style): down-project, GELU, up-project,
    residual add.

    The up-projection is zero-initialized so the adapter starts as an exact
    identity mapping.  The down-projection keeps a standard non-zero init:
    zero-initializing *both* layers (as the previous version did) makes the
    gradients of both weight matrices identically zero — GELU(0) = 0 kills
    the up-projection weight's gradient, and the zero up-projection weight
    kills the down-projection's gradient — so the module could never learn
    anything beyond a constant bias.

    Args:
        ebed_dim: width of the token embeddings entering the adapter.
        reduction_factor: bottleneck ratio; the hidden width is
            ``ebed_dim // reduction_factor``.
    """

    def __init__(self, ebed_dim, reduction_factor):
        super().__init__()
        bottleneck = ebed_dim // reduction_factor
        self.adapter_downsample = nn.Linear(ebed_dim, bottleneck)
        self.adapter_upsample = nn.Linear(bottleneck, ebed_dim)
        self.adapter_act_fn = nn.GELU()

        # Non-zero init for the down-projection keeps its gradient alive
        # (matches nn.Linear's default reset scheme).
        nn.init.kaiming_uniform_(
            self.adapter_downsample.weight, a=math.sqrt(5)
        )
        nn.init.zeros_(self.adapter_downsample.bias)

        # Zero init => adapter contributes 0 at start, i.e. exact identity.
        nn.init.zeros_(self.adapter_upsample.weight)
        nn.init.zeros_(self.adapter_upsample.bias)

    def forward(self, x):
        """Return ``x + up(GELU(down(x)))``."""
        adpt = self.adapter_downsample(x)
        adpt = self.adapter_act_fn(adpt)
        adpt = self.adapter_upsample(adpt)
        return adpt + x


class AdBlock(nn.Module):
    """Pre-norm transformer encoder block that inserts a bottleneck adapter
    after the MLP when ``tuning_mode`` contains "molh".

    Note the Attention here is built *without* any tuning arguments, so only
    the MLP/adapter path carries the parameter-efficient-tuning modules.
    """

    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        drop=0.0,
        attn_drop=0.0,
        init_values=None,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        tuning_mode="molh",
        num_experts=5,
        rank=8,
        alpha=8,
        top_k=None,
        jitter_noise=1e-1,
    ):
        super().__init__()
        self.dim = dim
        self.norm1 = norm_layer(dim)

        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop,
        )

        if "molh" in tuning_mode:
            # Bottleneck width = rank (dim // (dim // rank)).
            self.mol_adapter_module = Adapter(dim, dim // rank)

        self.tuning_mode = tuning_mode

        def _layer_scale():
            if init_values:
                return LayerScale(dim, init_values=init_values)
            return nn.Identity()

        def _stochastic_depth():
            # NOTE: drop path for stochastic depth on the residual branches.
            if drop_path > 0.0:
                return DropPath(drop_path)
            return nn.Identity()

        self.ls1 = _layer_scale()
        self.drop_path1 = _stochastic_depth()

        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=int(dim * mlp_ratio),
            act_layer=act_layer,
            drop=drop,
            tuning_mode=tuning_mode,
            num_experts=num_experts,
            rank=rank,
            alpha=alpha,
            top_k=top_k,
            jitter_noise=jitter_noise,
        )
        self.ls2 = _layer_scale()
        self.drop_path2 = _stochastic_depth()

    def forward(self, x):
        # Attention sub-block (pre-norm, residual) — identical in both modes.
        x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x))))

        if "molh" in self.tuning_mode:
            residual = x
            out = self.ls2(self.mlp(self.norm2(x)))
            out = self.mol_adapter_module(out)  # adapter after the MLP
            x = residual + self.drop_path2(out)
        else:
            x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))

        return x


class Block(nn.Module):
    """Standard pre-norm transformer encoder block.

    Both the Attention and the Mlp sub-modules receive the mixture-of-LoRA
    tuning configuration (``tuning_mode``, ``num_experts``, ``rank``,
    ``alpha``, ...).
    """

    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        drop=0.0,
        attn_drop=0.0,
        init_values=None,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        tuning_mode="molh",
        num_experts=20,
        rank=8,
        alpha=16,
        top_k=None,
        jitter_noise=1e-3,
    ):
        super().__init__()
        self.dim = dim
        self.norm1 = norm_layer(dim)

        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop,
            tuning_mode=tuning_mode,
            num_experts=num_experts,
            rank=rank,
            alpha=alpha,
        )

        self.tuning_mode = tuning_mode

        if init_values:
            self.ls1 = LayerScale(dim, init_values=init_values)
        else:
            self.ls1 = nn.Identity()
        # NOTE: drop path for stochastic depth on the residual branches.
        if drop_path > 0.0:
            self.drop_path1 = DropPath(drop_path)
        else:
            self.drop_path1 = nn.Identity()

        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=int(dim * mlp_ratio),
            act_layer=act_layer,
            drop=drop,
            tuning_mode=tuning_mode,
            num_experts=num_experts,
            rank=rank,
            alpha=alpha,
            top_k=top_k,
            jitter_noise=jitter_noise,
        )
        if init_values:
            self.ls2 = LayerScale(dim, init_values=init_values)
        else:
            self.ls2 = nn.Identity()
        if drop_path > 0.0:
            self.drop_path2 = DropPath(drop_path)
        else:
            self.drop_path2 = nn.Identity()

    def forward(self, x):
        """Pre-norm attention then MLP, each with a residual connection."""
        x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x))))
        return x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))


class PatchEmbed(nn.Module):
    """2D Image to Patch Embedding via a strided convolution.

    Each ``patch_size`` tile of the input image becomes one token of width
    ``embed_dim``; with ``flatten=True`` the output is [B, num_patches, C].
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        norm_layer=None,
        flatten=True,
    ):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        grid_h = img_size[0] // patch_size[0]  # type: ignore
        grid_w = img_size[1] // patch_size[1]  # type: ignore
        self.grid_size = (grid_h, grid_w)
        self.num_patches = grid_h * grid_w
        self.flatten = flatten
        self.norm_layer = norm_layer

        # kernel == stride == patch size: one conv step embeds one patch.
        self.proj = nn.Conv2d(
            in_chans,
            embed_dim,
            kernel_size=patch_size,
            stride=patch_size,
        )
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        # Fixed-size model: reject inputs that don't match img_size exactly.
        _assert(
            H == self.img_size[0],  # type: ignore
            f"Input image height ({H}) doesn't match model ({self.img_size[0]}).",  # type: ignore
        )
        _assert(
            W == self.img_size[1],  # type: ignore
            f"Input image width ({W}) doesn't match model ({self.img_size[1]}).",  # type: ignore
        )

        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        return self.norm(x)


class VisionTransformer_molh(nn.Module):
    """Vision Transformer

    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929

    Mixture-of-LoRA ("molh") experiment variant: ``forward_features`` records
    every block's output in ``self.interm``, and the default ``forward_head``
    classifies the stacked intermediates with the ``FeatSetCls`` conv head
    (``self.mol_bt``).  The conventional pooled-token + linear head is only
    reachable via ``forward_bp_head`` / ``forward(..., use_bp=True)``.
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        num_classes=1000,
        global_pool="token",
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        init_values=None,
        class_token=True,
        fc_norm=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        weight_init="",
        embed_layer=PatchEmbed,
        norm_layer=None,
        act_layer=None,
        block_fn=Block,
        # cus args
        insertlength=12,
        tuning_mode="molh",
        rank=8,
        alpha=16,
        num_experts=20,
        top_k=None,
        jitter_noise=1e-1,
    ):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            global_pool (str): type of global pooling for final sequence (default: 'token')
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            init_values: (float): layer-scale init values
            class_token (bool): use class token
            fc_norm (Optional[bool]): pre-fc norm after pool, set if global_pool == 'avg' if None (default: None)
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            weight_init (str): weight init scheme
            embed_layer (nn.Module): patch embedding layer
            norm_layer: (nn.Module): normalization layer
            act_layer: (nn.Module): MLP activation layer
            block_fn: block class used for the encoder (Block or AdBlock)
            insertlength (int): intended number of trailing blocks to tune;
                currently ineffective — see NOTE(review) below
            tuning_mode (str): tuning flavor; also currently overridden per
                block — see NOTE(review) below
            rank (int): LoRA rank passed to the blocks
            alpha (int): LoRA scaling passed to the blocks
            top_k (Optional[int]): router top-k (None = fully soft routing)
            jitter_noise (float): router jitter noise
        """
        super().__init__()

        assert global_pool in ("", "avg", "token")
        assert class_token or global_pool != "token"
        # Default: apply the final norm after pooling only for avg-pooling.
        use_fc_norm = (
            global_pool == "avg" if fc_norm is None else fc_norm
        )
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU

        self.num_classes = num_classes
        self.global_pool = global_pool
        self.num_features = self.embed_dim = (
            embed_dim  # num_features for consistency with other models
        )
        self.num_tokens = 1 if class_token else 0
        self.grad_checkpointing = False

        self.patch_embed = embed_layer(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        num_patches = self.patch_embed.num_patches

        self.cls_token = (
            nn.Parameter(torch.zeros(1, 1, embed_dim))
            if self.num_tokens > 0
            else None
        )
        self.pos_embed = nn.Parameter(
            torch.randn(1, num_patches + self.num_tokens, embed_dim)
            * 0.02
        )
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, depth)
        ]  # stochastic depth decay rule

        # Intended configuration: tune only the last `insertlength` blocks.
        tuning_mode_list = (depth - insertlength) * [
            None
        ] + insertlength * [tuning_mode]
        nb_experts_list = (depth - insertlength) * [
            None
        ] + insertlength * [num_experts]

        # tuning_mode_list = (
        #     [tuning_mode, "mol_base", tuning_mode]
        #     + [tuning_mode, "mol_base", tuning_mode]
        #     + [tuning_mode, "mol_base", tuning_mode]
        #     + [tuning_mode, "mol_base", tuning_mode]
        # )
        # NOTE(review): the hard-coded lists below overwrite the two lists
        # computed above, so the `insertlength`, `tuning_mode` and
        # `num_experts` constructor arguments never reach the blocks (every
        # block gets tuning_mode=None here). Confirm this experiment-specific
        # override is intentional before reusing this class.
        tuning_mode_list = (
            [None, None, None]
            + [None, None, None]
            + [None, None, None]
            + [None, None, None]
        )
        nb_experts_list = (
            [6, 1, 4] + [4, 1, 2] + [2, 1, 4] + [4, 1, 6]
        )
        # Conv head that classifies the stack of all block outputs.
        self.mol_bt = FeatSetCls(self.num_classes)

        # if tuning_mode == "molh" or tuning_mode == "mol_base":
        # NOTE(review): when "molh" is not a substring of tuning_mode,
        # self.blocks is never created and forward_features() will fail.
        if "molh" in tuning_mode:
            self.blocks = nn.ModuleList(
                [
                    block_fn(
                        dim=embed_dim,
                        num_heads=num_heads,
                        mlp_ratio=mlp_ratio,
                        qkv_bias=qkv_bias,
                        init_values=init_values,
                        drop=drop_rate,
                        attn_drop=attn_drop_rate,
                        drop_path=dpr[i],
                        norm_layer=norm_layer,  # type: ignore
                        act_layer=act_layer,
                        tuning_mode=tuning_mode_list[i],  # type: ignore
                        num_experts=nb_experts_list[i],  # type: ignore
                        rank=rank,
                        alpha=alpha,
                        top_k=top_k,
                        jitter_noise=jitter_noise,
                    )
                    for i in range(depth)
                ]
            )

        # Final encoder norm (skipped when the norm sits after pooling).
        self.norm = (
            norm_layer(embed_dim)
            if not use_fc_norm
            else nn.Identity()
        )

        # Classifier Head
        self.fc_norm = (
            norm_layer(embed_dim) if use_fc_norm else nn.Identity()
        )
        self.head = (
            nn.Linear(self.embed_dim, num_classes)
            if num_classes > 0
            else nn.Identity()
        )

        if weight_init != "skip":
            self.init_weights(weight_init)

    def init_weights(self, mode=""):
        """Initialize pos_embed / cls_token, then apply the per-module scheme
        named by ``mode`` ("jax", "jax_nlhb", "moco" or "" = timm default)."""
        assert mode in ("jax", "jax_nlhb", "moco", "")
        # "nlhb": negative-log head bias so initial logits match a uniform prior.
        head_bias = (
            -math.log(self.num_classes) if "nlhb" in mode else 0.0
        )
        trunc_normal_(self.pos_embed, std=0.02)
        if self.cls_token is not None:
            nn.init.normal_(self.cls_token, std=1e-6)
        named_apply(get_init_weights_vit(mode, head_bias), self)

    def _init_weights(self, m):
        # this fn left here for compat with downstream users
        init_weights_vit_timm(m)

    @torch.jit.ignore()  # type: ignore
    def load_pretrained(self, checkpoint_path, prefix=""):
        """Load weights from an official JAX .npz checkpoint."""
        _load_weights(self, checkpoint_path, prefix)

    @torch.jit.ignore  # type: ignore
    def no_weight_decay(self):
        """Parameter names to exclude from weight decay."""
        return {"pos_embed", "cls_token", "dist_token"}

    @torch.jit.ignore  # type: ignore
    def group_matcher(self, coarse=False):
        """Regex grouping of parameters for layer-wise LR decay / freezing."""
        return dict(
            stem=r"^cls_token|pos_embed|patch_embed",  # stem and embed
            blocks=[(r"^blocks\.(\d+)", None), (r"^norm", (99999,))],
        )

    @torch.jit.ignore  # type: ignore
    def set_grad_checkpointing(self, enable=True):
        # NOTE(review): the flag is stored but forward_features() below never
        # consults it (checkpoint_seq is imported yet unused) — confirm.
        self.grad_checkpointing = enable

    @torch.jit.ignore  # type: ignore
    def get_classifier(self):
        """Return the (token-pool) classification head module."""
        return self.head

    def reset_classifier(self, num_classes: int, global_pool=None):
        """Replace ``self.head`` with a fresh linear head for ``num_classes``
        (identity when ``num_classes`` == 0); optionally switch pooling."""
        self.num_classes = num_classes
        if global_pool is not None:
            assert global_pool in ("", "avg", "token")
            self.global_pool = global_pool
        self.head = (
            nn.Linear(self.embed_dim, num_classes)
            if num_classes > 0
            else nn.Identity()
        )

    def update_classifier(self, nb_tgt: int, inplace=False):
        """Create ``self.bp_head`` initialized with the first ``nb_tgt`` rows
        of the current head; with ``inplace=True`` also replace ``self.head``.

        Raises:
            ValueError: if ``self.head`` is not an ``nn.Linear``.
        """
        self.bp_head = nn.Linear(self.embed_dim, nb_tgt)
        with torch.no_grad():
            if isinstance(self.head, nn.Linear):
                current_weights: torch.Tensor = self.head.weight.data
                current_bias: torch.Tensor = self.head.bias.data
                self.bp_head.weight.data.copy_(
                    copy.deepcopy(current_weights[:nb_tgt])
                )
                self.bp_head.bias.data.copy_(
                    copy.deepcopy(current_bias[:nb_tgt])
                )
                if inplace:
                    self.head = copy.deepcopy(self.bp_head)
            else:
                raise ValueError("head is not nn.Linear")

    def forward_features(self, x):
        """Run the encoder, recording every block's output in ``self.interm``.

        NOTE(review): ``self.interm`` is per-call mutable state on the module
        (reset at the top of each forward); the clones keep values and
        autograd graph for all `depth` blocks in memory simultaneously.
        """
        self.interm = []
        x = self.patch_embed(x)
        if self.cls_token is not None:
            x = torch.cat(
                (self.cls_token.expand(x.shape[0], -1, -1), x), dim=1
            )
        x = self.pos_drop(x + self.pos_embed)
        for block in self.blocks:
            x = block(x)
            self.interm.append(x.clone())

        x = self.norm(x)

        return x

    def forward_head(self, x, pre_logits: bool = False):
        """Classify from the stacked per-block features via ``self.mol_bt``.

        NOTE(review): ``x`` and ``pre_logits`` are currently ignored — the
        pooled-token path below is commented out and the logits come solely
        from the conv head over ``self.interm``.
        """
        # if self.global_pool:
        #     x = (
        #         x[:, self.num_tokens :].mean(dim=1)
        #         if self.global_pool == "avg"
        #         else x[:, 0]
        #     )
        # x = self.fc_norm(x) + self.mol_bt(torch.stack(self.interm, dim=1))
        # return x if pre_logits else self.head(x)
        x = self.mol_bt(torch.stack(self.interm, dim=1))
        return x

    def forward_bp_head(self, x, pre_logits: bool = False):
        """Conventional head path: pool, fc_norm, then ``self.bp_head``
        (requires a prior ``update_classifier`` call)."""
        if self.global_pool:
            x = (
                x[:, self.num_tokens :].mean(dim=1)
                if self.global_pool == "avg"
                else x[:, 0]
            )
        x = self.fc_norm(x)
        return x if pre_logits else self.bp_head(x)

    def forward(self, x, use_bp=False):
        """Encode ``x`` and classify via the conv head (default) or the
        pooled ``bp_head`` when ``use_bp`` is True."""
        x = self.forward_features(x)
        if use_bp:
            o = self.forward_bp_head(x)
        else:
            o = self.forward_head(x)
        return o


def init_weights_vit_timm(module: nn.Module, name: str = ""):
    """ViT weight initialization, original timm impl (for reproducibility).

    Linear layers get truncated-normal weights (std=0.02) and zero bias;
    modules exposing ``init_weights`` are delegated to; anything else is
    left untouched.
    """
    if isinstance(module, nn.Linear):
        trunc_normal_(module.weight, std=0.02)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
        return
    if hasattr(module, "init_weights"):
        module.init_weights()


def init_weights_vit_jax(
    module: nn.Module, name: str = "", head_bias: float = 0.0
):
    """ViT weight initialization, matching JAX (Flax) impl.

    * head linear: zero weight, constant bias ``head_bias``
    * other linear: xavier-uniform weight; mlp bias ~ N(0, 1e-6), rest zero
    * conv: lecun-normal weight, zero bias
    * modules exposing ``init_weights``: delegated
    """
    if isinstance(module, nn.Linear):
        if name.startswith("head"):
            nn.init.zeros_(module.weight)
            nn.init.constant_(module.bias, head_bias)
            return
        nn.init.xavier_uniform_(module.weight)
        if module.bias is None:
            return
        if "mlp" in name:
            nn.init.normal_(module.bias, std=1e-6)
        else:
            nn.init.zeros_(module.bias)
    elif isinstance(module, nn.Conv2d):
        lecun_normal_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif hasattr(module, "init_weights"):
        module.init_weights()


def init_weights_vit_moco(module: nn.Module, name: str = ""):
    """ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed.

    qkv projections get a uniform init whose bound treats the stacked Q, K, V
    rows as three separate matrices (hence the ``// 3`` in the fan term);
    other linears get xavier-uniform.  Linear biases are zeroed, and modules
    exposing ``init_weights`` are delegated to.
    """
    if isinstance(module, nn.Linear):
        if "qkv" in name:
            # treat the weights of Q, K, V separately
            fan = module.weight.shape[0] // 3 + module.weight.shape[1]
            bound = math.sqrt(6.0 / float(fan))
            nn.init.uniform_(module.weight, -bound, bound)
        else:
            nn.init.xavier_uniform_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif hasattr(module, "init_weights"):
        module.init_weights()


def get_init_weights_vit(mode="jax", head_bias: float = 0.0):
    """Select the per-module init callable for ``mode``.

    Any mode containing "jax" gets the Flax scheme (with ``head_bias``
    baked in), any mode containing "moco" the moco-v3 scheme, and anything
    else falls back to the original timm scheme.
    """
    if "jax" in mode:
        return partial(init_weights_vit_jax, head_bias=head_bias)
    if "moco" in mode:
        return init_weights_vit_moco
    return init_weights_vit_timm


@torch.no_grad()
def _load_weights(
    model: VisionTransformer_molh,
    checkpoint_path: str,
    prefix: str = "",
):
    """Load weights from .npz checkpoints for official Google Brain Flax implementation

    Copies the patch embedding, class token, position embedding, every
    encoder block, the final norm, and (when output sizes match) the
    classifier head from the Flax parameter tree at ``checkpoint_path``
    into ``model`` in place. ``prefix`` selects a sub-tree inside the
    archive (auto-detected for optimizer-wrapped checkpoints below).
    """
    import numpy as np

    def _n2p(w, t=True):
        # Convert a Flax numpy array to a torch tensor. With t=True the
        # axes are transposed from JAX layout (HWIO conv kernels,
        # in-features-first linear kernels) to PyTorch layout.
        if (
            w.ndim == 4
            and w.shape[0] == w.shape[1] == w.shape[2] == 1
        ):
            # 1x1x1xC arrays (e.g. norm params stored 4-D) become vectors.
            w = w.flatten()
        if t:
            if w.ndim == 4:
                w = w.transpose([3, 2, 0, 1])
            elif w.ndim == 3:
                w = w.transpose([2, 0, 1])
            elif w.ndim == 2:
                w = w.transpose([1, 0])
        return torch.from_numpy(w)

    w = np.load(checkpoint_path)
    if not prefix and "opt/target/embedding/kernel" in w:
        # Checkpoint was saved with optimizer state; params live under opt/target/.
        prefix = "opt/target/"

    if hasattr(model.patch_embed, "backbone"):
        # hybrid
        backbone = model.patch_embed.backbone
        stem_only = not hasattr(backbone, "stem")
        stem = backbone if stem_only else backbone.stem  # type: ignore
        # type: ignore
        stem.conv.weight.copy_(  # type: ignore
            adapt_input_conv(  # type: ignore
                stem.conv.weight.shape[1],  # type: ignore
                _n2p(w[f"{prefix}conv_root/kernel"]),
            )
        )
        stem.norm.weight.copy_(_n2p(w[f"{prefix}gn_root/scale"]))  # type: ignore
        stem.norm.bias.copy_(_n2p(w[f"{prefix}gn_root/bias"]))  # type: ignore
        if not stem_only:
            # Copy the ResNet-style stages: 3 conv/groupnorm pairs per
            # block plus an optional downsample projection.
            for i, stage in enumerate(backbone.stages):  # type: ignore
                for j, block in enumerate(stage.blocks):
                    bp = f"{prefix}block{i + 1}/unit{j + 1}/"
                    for r in range(3):
                        getattr(block, f"conv{r + 1}").weight.copy_(
                            _n2p(w[f"{bp}conv{r + 1}/kernel"])
                        )
                        getattr(block, f"norm{r + 1}").weight.copy_(
                            _n2p(w[f"{bp}gn{r + 1}/scale"])
                        )
                        getattr(block, f"norm{r + 1}").bias.copy_(
                            _n2p(w[f"{bp}gn{r + 1}/bias"])
                        )
                    if block.downsample is not None:
                        block.downsample.conv.weight.copy_(
                            _n2p(w[f"{bp}conv_proj/kernel"])
                        )
                        block.downsample.norm.weight.copy_(
                            _n2p(w[f"{bp}gn_proj/scale"])
                        )
                        block.downsample.norm.bias.copy_(
                            _n2p(w[f"{bp}gn_proj/bias"])
                        )
        embed_conv_w = _n2p(w[f"{prefix}embedding/kernel"])
    else:
        # Plain ViT: adapt the patch-embed conv to the model's input channels.
        embed_conv_w = adapt_input_conv(
            model.patch_embed.proj.weight.shape[1],
            _n2p(w[f"{prefix}embedding/kernel"]),
        )
    model.patch_embed.proj.weight.copy_(embed_conv_w)
    model.patch_embed.proj.bias.copy_(  # type: ignore
        _n2p(w[f"{prefix}embedding/bias"])
    )
    model.cls_token.copy_(_n2p(w[f"{prefix}cls"], t=False))  # type: ignore
    pos_embed_w = _n2p(
        w[f"{prefix}Transformer/posembed_input/pos_embedding"],
        t=False,
    )
    if pos_embed_w.shape != model.pos_embed.shape:
        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights
            pos_embed_w,
            model.pos_embed,
            getattr(model, "num_tokens", 1),
            model.patch_embed.grid_size,
        )
    model.pos_embed.copy_(pos_embed_w)
    model.norm.weight.copy_(
        _n2p(w[f"{prefix}Transformer/encoder_norm/scale"])  # type: ignore
    )
    model.norm.bias.copy_(
        _n2p(w[f"{prefix}Transformer/encoder_norm/bias"])  # type: ignore
    )
    # Only load the classifier head when its output size matches the checkpoint.
    if (
        isinstance(model.head, nn.Linear)
        and model.head.bias.shape[0]
        == w[f"{prefix}head/bias"].shape[-1]
    ):
        model.head.weight.copy_(_n2p(w[f"{prefix}head/kernel"]))
        model.head.bias.copy_(_n2p(w[f"{prefix}head/bias"]))
    # NOTE representation layer has been removed, not used in latest 21k/1k pretrained weights
    # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
    #     model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
    #     model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
    for i, block in enumerate(model.blocks.children()):
        block_prefix = f"{prefix}Transformer/encoderblock_{i}/"
        mha_prefix = block_prefix + "MultiHeadDotProductAttention_1/"
        block.norm1.weight.copy_(
            _n2p(w[f"{block_prefix}LayerNorm_0/scale"])
        )
        block.norm1.bias.copy_(
            _n2p(w[f"{block_prefix}LayerNorm_0/bias"])
        )
        # Flax stores Q, K, V as separate (dim, heads, head_dim) kernels;
        # flatten and stack them into the fused qkv projection.
        block.attn.qkv.weight.copy_(
            torch.cat(
                [
                    _n2p(w[f"{mha_prefix}{n}/kernel"], t=False)
                    .flatten(1)
                    .T
                    for n in ("query", "key", "value")
                ]
            )
        )
        block.attn.qkv.bias.copy_(
            torch.cat(
                [
                    _n2p(w[f"{mha_prefix}{n}/bias"], t=False).reshape(
                        -1
                    )
                    for n in ("query", "key", "value")
                ]
            )
        )
        block.attn.proj.weight.copy_(
            _n2p(w[f"{mha_prefix}out/kernel"]).flatten(1)
        )
        block.attn.proj.bias.copy_(_n2p(w[f"{mha_prefix}out/bias"]))
        # MLP: Dense_0 -> fc1, Dense_1 -> fc2.
        for r in range(2):
            getattr(block.mlp, f"fc{r + 1}").weight.copy_(
                _n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/kernel"])
            )
            getattr(block.mlp, f"fc{r + 1}").bias.copy_(
                _n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/bias"])
            )
        block.norm2.weight.copy_(
            _n2p(w[f"{block_prefix}LayerNorm_2/scale"])
        )
        block.norm2.bias.copy_(
            _n2p(w[f"{block_prefix}LayerNorm_2/bias"])
        )


def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
    """Rescale a grid of position embeddings to a new token count.

    Adapted from the official jax checkpoint loader:
    https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
    """
    _logger.info(
        "Resized position embedding: %s to %s",
        posemb.shape,
        posemb_new.shape,
    )
    ntok_new = posemb_new.shape[1]
    if num_tokens:
        # Split off the class/distillation tokens; they are never resized.
        posemb_tok = posemb[:, :num_tokens]
        grid = posemb[0, num_tokens:]
        ntok_new -= num_tokens
    else:
        posemb_tok = posemb[:, :0]
        grid = posemb[0]
    gs_old = int(math.sqrt(len(grid)))
    if not len(gs_new):  # backwards compatibility
        gs_new = [int(math.sqrt(ntok_new))] * 2
    assert len(gs_new) >= 2
    _logger.info(
        "Position embedding grid-size from %s to %s",
        [gs_old, gs_old],
        gs_new,
    )
    # (N, D) -> (1, D, H, W) so F.interpolate can resample spatially.
    grid = grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    grid = F.interpolate(
        grid, size=gs_new, mode="bicubic", align_corners=False
    )
    # Back to (1, H*W, D) and re-attach the prefix tokens.
    grid = grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
    return torch.cat([posemb_tok, grid], dim=1)


def checkpoint_filter_fn(state_dict, model):
    """convert patch embedding weight from manual patchify + linear proj to conv"""
    if "model" in state_dict:
        # For deit models
        state_dict = state_dict["model"]
    filtered = {}
    for key, value in state_dict.items():
        if "patch_embed.proj.weight" in key and len(value.shape) < 4:
            # For old models that I trained prior to conv based patchification
            out_c, _, kh, kw = model.patch_embed.proj.weight.shape
            value = value.reshape(out_c, -1, kh, kw)
        elif key == "pos_embed" and value.shape != model.pos_embed.shape:
            # To resize pos embedding when using model at different size from pretrained weights
            value = resize_pos_embed(
                value,
                model.pos_embed,
                getattr(model, "num_tokens", 1),
                model.patch_embed.grid_size,
            )
        elif "pre_logits" in key:
            # NOTE representation layer removed as not used in latest 21k/1k pretrained weights
            continue
        filtered[key] = value
    return filtered


def _create_vision_transformer_molh(
    variant, pretrained=False, **kwargs
):
    """Build a VisionTransformer_molh via timm's model factory helpers."""
    if kwargs.get("features_only", None):
        raise RuntimeError(
            "features_only not implemented for Vision Transformer models."
        )

    cfg = resolve_pretrained_cfg(
        variant, pretrained_cfg=kwargs.pop("pretrained_cfg", None)
    )
    # Official jax checkpoints (.npz) require the custom numpy loader.
    return build_model_with_cfg(
        VisionTransformer_molh,
        variant,
        pretrained,
        pretrained_cfg=cfg,
        pretrained_filter_fn=checkpoint_filter_fn,
        pretrained_custom_load="npz" in cfg["url"],
        **kwargs,
    )


@register_model
def vit_base_patch16_224_in21k_molh(pretrained=False, **kwargs):
    """ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
    """
    # ViT-B/16 architecture hyperparameters.
    return _create_vision_transformer_molh(
        "vit_base_patch16_224_in21k",
        pretrained=pretrained,
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        **kwargs,
    )


@register_model
def vit_large_patch16_224_in21k_molh(pretrained=False, **kwargs):
    """ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
    """
    # ViT-L/16 architecture hyperparameters.
    return _create_vision_transformer_molh(
        "vit_large_patch16_224_in21k",
        pretrained=pretrained,
        patch_size=16,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        **kwargs,
    )


@register_model
def vit_huge_patch14_224_in21k_molh(pretrained=False, **kwargs):
    """ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
    """
    # ViT-H/14 architecture hyperparameters.
    model_kwargs = dict(
        patch_size=14,
        embed_dim=1280,
        depth=32,
        num_heads=16,
        **kwargs,
    )
    model = _create_vision_transformer_molh(
        "vit_huge_patch14_224_in21k",
        pretrained=pretrained,
        **model_kwargs,
    )
    return model
