# --------------------------------------------------------
# References:
# https://github.com/jxhe/unify-parameter-efficient-tuning
# --------------------------------------------------------
import argparse
import torch
import torch.nn as nn
from timm.layers import DropPath
import timm
import math
from functools import partial
from collections import OrderedDict
from timm.models.vision_transformer import PatchEmbed
from timm.models import register_model
import copy
from typing import Optional
import torch.nn.functional as F


class TaskSpace(nn.Module):
    def __init__(
        self,
        tuning_config: argparse.Namespace,
        embed_dim: Optional[int] = None,
        bottleneck: Optional[int] = None,
        dropout: float = 0.0,
        adapter_scalar="1.0",
        adapter_layernorm_option="none",
    ):
        super().__init__()
        self._device = tuning_config._device
        self.n_embd = (
            tuning_config.embed_dim if embed_dim is None else embed_dim
        )
        self.numb_expert = (
            tuning_config.attn_bn if bottleneck is None else bottleneck
        )

        # layernorm
        self.adapter_layernorm_option = adapter_layernorm_option

        if (
            adapter_layernorm_option == "in"
            or adapter_layernorm_option == "out"
        ):
            self.adapter_layer_norm = nn.LayerNorm(self.n_embd)

        if adapter_scalar == "learnable_scalar":
            self.scale = nn.Parameter(torch.ones(1))
        else:
            self.scale = torch.tensor(float(adapter_scalar))

        self.feature_space = nn.Parameter(
            torch.empty(self.numb_expert, self.n_embd)
        )
        self.eign = nn.Parameter(torch.empty(self.numb_expert))
        self.task_space = nn.Parameter(
            torch.empty(self.numb_expert, self.n_embd)
        )

        self.dropout = dropout

        with torch.no_grad():
            nn.init.kaiming_uniform_(self.feature_space, a=math.sqrt(5))
            nn.init.uniform_(self.eign)
            nn.init.zeros_(self.task_space)

    def forward(self, x, add_residual=False, residual=None):
        return self._forward(x, add_residual, residual)

    def _update(self):
        print("Update Lora: Not Implemented")

    def _forward(self, x, add_residual=False, residual=None):
        residual = x if residual is None else residual
        if self.adapter_layernorm_option == "in":
            x = self.adapter_layer_norm(x)

        feature_proj = nn.functional.linear(x, self.feature_space)
        eign_fp = feature_proj * self.eign[None, None, :]
        task_proj = nn.functional.linear(eign_fp, self.task_space)

        task_proj = task_proj * self.scale

        if self.adapter_layernorm_option == "out":
            task_proj = self.adapter_layer_norm(task_proj)

        if add_residual:
            output = task_proj + residual
        else:
            output = task_proj

        return output


class SubspaceDecompositionAndMerge(nn.Module):
    """
    MoE Adapter for training, merged to MoEAdapter after task training.
    Freeze MoEAdapter, train self.adapter and self.ider only:
        create adapter list with old task adapters frozen,
        create router weights with old id tokens frozen.

    Each task contributes one expert, stored across three parallel
    ParameterLists (feature_space / eign / task_space). Only the last
    entry of each list is trainable; :meth:`update` freezes the rest.
    """

    def __init__(
        self,
        tuning_config: argparse.Namespace,
    ):
        super().__init__()
        self.config = tuning_config
        self.topk = tuning_config.topk
        self.n_embd = tuning_config.embed_dim
        self._device = tuning_config._device

        # A freshly initialised TaskSpace provides the first (trainable)
        # expert's parameters.
        ts = TaskSpace(
            self.config,
            dropout=0.1,
            bottleneck=self.config.ffn_rank,
            adapter_scalar=self.config.ffn_adapter_scalar,
            adapter_layernorm_option=self.config.ffn_adapter_layernorm_option,
        ).to(self._device)
        self.feature_space = nn.ParameterList(
            [copy.deepcopy(ts.feature_space).requires_grad_(True)]
        )
        self.eign = nn.ParameterList(
            [copy.deepcopy(ts.eign).requires_grad_(True)]
        )
        self.task_space = nn.ParameterList(
            [copy.deepcopy(ts.task_space).requires_grad_(True)]
        )

        if not ts.scale.requires_grad:
            self.scale = ts.scale
        else:
            # Bug fix: the original constructed this exception without
            # `raise`, silently continuing with self.scale unset and
            # deferring the failure to the first forward pass.
            raise NotImplementedError(
                "Scale should not be trainable/Add new Implementation!"
            )

    @staticmethod
    def _add_residual(task_proj, add_residual, residual):
        """Optionally add the residual branch to the adapter output.

        Shared by all forward variants (previously each one defined its
        own identical nested helper).
        """
        return task_proj + residual if add_residual else task_proj

    def forward(self, x, add_residual=False, residual=None):
        """Default path: sparse (top-k) mixture over all experts."""
        return self.forward_seq_sparse(x, add_residual, residual)

    def forward_seq_soft(self, x, add_residual=False, residual=None):
        # TODO linear? Cosine Similarity?
        """Soft mixture: weighted sum over ALL experts' rank directions.

        Args:
            x: [B L D]

        Returns:
            o: [B L D]
        """
        residual = x if residual is None else residual

        eign_fp_list = []
        task_space_list = []

        for ifp, ie, itp in zip(self.feature_space, self.eign, self.task_space):
            """
            ifp [R, D]
            ie [R]
            itp [R, D]
            """
            # [B L R] projection onto this expert's feature space
            fp = nn.functional.linear(x, ifp)
            # [B L R] scaled by exp(eigenvalue) per rank direction
            eign_fp_list.append(fp * torch.exp(ie[None, None, :]))
            task_space_list.append(itp)

        # [B L R*T]
        eign_fp = torch.cat(eign_fp_list, dim=-1)
        # [R*T D]
        task_space = torch.cat(task_space_list, dim=0)

        # [B L D]
        o = torch.einsum("blr,rd->bld", eign_fp, task_space) * self.scale

        return self._add_residual(o, add_residual, residual)

    def forward_seq_sparse(self, x, add_residual=False, residual=None):
        # TODO linear? Cosine Similarity?
        """Sparse mixture: only the top-k rank directions (by scaled
        activation) across all experts contribute to the output.

        Args:
            x: [B L D]

        Returns:
            o: [B L D]
        """
        residual = x if residual is None else residual

        eign_fp_list = []
        task_space_list = []

        for ifp, ie, itp in zip(self.feature_space, self.eign, self.task_space):
            """
            ifp [R, D]
            ie [R]
            itp [R, D]
            """
            # [B L R]
            fp = nn.functional.linear(x, ifp)
            # [B L R]
            eign_fp_list.append(fp * torch.exp(ie[None, None, :]))
            task_space_list.append(itp)

        # [B L R*T] / [R*T D]
        eign_fp = torch.cat(eign_fp_list, dim=-1)
        task_space = torch.cat(task_space_list, dim=0)

        # Select the k strongest directions per token:
        # values [B L K], indices into the concatenated rank dimension.
        v_topk_eign_fp, in_topk_eign_fp = torch.topk(eign_fp, self.topk, dim=-1)
        # Gather the matching up-projection rows: [B L K D].
        v_topk_task_space = task_space[in_topk_eign_fp]

        # [B L D]
        o = (
            torch.einsum("blk,blkd->bld", v_topk_eign_fp, v_topk_task_space)
            * self.scale
        )

        return self._add_residual(o, add_residual, residual)

    def update(self):
        """Start a new task: freeze all existing experts and append a
        freshly initialised trainable one to each ParameterList."""
        new_ts = TaskSpace(
            self.config,
            dropout=0.1,
            bottleneck=self.config.ffn_rank,
            adapter_scalar=self.config.ffn_adapter_scalar,
            adapter_layernorm_option=self.config.ffn_adapter_layernorm_option,
        ).to(self._device)
        self._update_parameter_list("feature_space", new_ts.feature_space)
        self._update_parameter_list("eign", new_ts.eign)
        self._update_parameter_list("task_space", new_ts.task_space)

    def _update_parameter_list(self, attr_name, new_component):
        """Rebuild ParameterList ``attr_name``: existing entries become
        frozen copies and ``new_component`` is appended as the sole
        trainable entry."""
        param_list = nn.ParameterList()
        old_component = getattr(self, attr_name, None)

        if old_component is not None:
            for param in old_component:
                # Append the existing parameters as non-trainable (requires_grad=False)
                param_list.append(copy.deepcopy(param).requires_grad_(False))
            delattr(self, attr_name)  # Clean up the old attribute

        # Append the new component's parameter as trainable
        param_list.append(copy.deepcopy(new_component).requires_grad_(True))
        setattr(
            self, attr_name, param_list.to(self._device)
        )  # Set and move to device

    def compute_ortho_loss(self):
        """Orthogonality penalty between the current (last) expert and all
        previous experts, for feature and task spaces respectively."""
        # ortho_loss_fp, ortho_loss_tp = self._ortho_cos()
        ortho_loss_fp, ortho_loss_tp = self._ortho_matmul()
        return ortho_loss_fp, ortho_loss_tp

    def _ortho_cos(self):
        """Compute the Orthogonal Loss for self.feature_space and self.task_space, respectively.

        Cosine-similarity variant: rows are L2-normalised before the
        Frobenius norm of the cross-Gram matrices is taken.
        """
        ortho_loss_fp = ortho_loss_tp = 0
        fp = self.feature_space[-1]
        for idx in range(len(self.feature_space) - 1):
            # Cross-similarity with each frozen expert should vanish.
            cos_matrix = torch.nn.functional.linear(
                torch.nn.functional.normalize(fp, p=2, dim=-1),
                torch.nn.functional.normalize(
                    self.feature_space[idx], p=2, dim=-1
                ),
            )
            ortho_loss_fp += torch.norm(cos_matrix, p="fro")

        # Self-similarity should approach the identity (orthonormal rows).
        I = torch.eye(fp.shape[0], device=self._device)
        cos_matrix = torch.nn.functional.linear(
            torch.nn.functional.normalize(fp, p=2, dim=-1),
            torch.nn.functional.normalize(fp, p=2, dim=-1),
        )
        ortho_loss_fp += torch.norm(cos_matrix - I, p="fro")
        ortho_loss_fp /= len(self.feature_space) * self.config.ffn_rank

        tp = self.task_space[-1]
        for idx in range(len(self.task_space) - 1):
            cos_matrix = torch.nn.functional.linear(
                torch.nn.functional.normalize(tp, p=2, dim=-1),
                torch.nn.functional.normalize(
                    self.task_space[idx], p=2, dim=-1
                ),
            )
            ortho_loss_tp += torch.norm(cos_matrix, p="fro")

        I = torch.eye(tp.shape[0], device=self._device)
        cos_matrix = torch.nn.functional.linear(
            torch.nn.functional.normalize(tp, p=2, dim=-1),
            torch.nn.functional.normalize(tp, p=2, dim=-1),
        )
        ortho_loss_tp += torch.norm(cos_matrix - I, p="fro")
        ortho_loss_tp /= len(self.task_space) * self.config.ffn_rank

        return ortho_loss_fp, ortho_loss_tp

    def _ortho_matmul(self):
        """Compute the Orthogonal Loss for self.feature_space and self.task_space, respectively.

        Raw matmul variant (no row normalisation): penalises the Frobenius
        norm of cross-Gram matrices and the deviation of the self-Gram
        matrix from the identity.
        """
        ortho_loss_fp = ortho_loss_tp = 0
        fp = self.feature_space[-1]
        for ifp in self.feature_space[:-1]:
            ortho_loss_fp += torch.norm(fp @ ifp.T, p="fro")
        I = torch.eye(fp.shape[0], device=self._device)
        ortho_loss_fp += torch.norm(fp @ fp.T - I, p="fro")
        ortho_loss_fp /= len(self.feature_space) * self.config.ffn_rank

        tp = self.task_space[-1]
        for itp in self.task_space[:-1]:
            ortho_loss_tp += torch.norm(tp @ itp.T, p="fro")
        I = torch.eye(tp.shape[0], device=self._device)
        ortho_loss_tp += torch.norm(tp @ tp.T - I, p="fro")
        ortho_loss_tp /= len(self.task_space) * self.config.ffn_rank

        return ortho_loss_fp, ortho_loss_tp

    def __forward_test(self, x, add_residual=False, residual=None):
        """Debug-only dense forward kept for experimentation.

        NOTE(review): this path looks broken as written — `tp` is a
        [B, L, D] tensor concatenated into `task_space` along dim 0, and
        `task_space.T` on a 3-D tensor is deprecated; it is never called
        by production code. Left in place intentionally.

        Args:
            x: [B L D]

        Returns:
            o: [B L D]
        """
        residual = x if residual is None else residual

        # [B L R*T]
        feature_space = torch.Tensor().to(self._device)
        # [B L R*T]
        eign_fp = torch.Tensor().to(self._device)
        # [R*T D]
        task_space = torch.Tensor().to(self._device)

        for ifp, ie, itp in zip(self.feature_space, self.eign, self.task_space):
            """
            ifp [R, D]
            ie [R]
            itp [R, D]
            """
            # [B L R]
            fp = nn.functional.linear(x, ifp)
            # [B L R]
            efp = fp * torch.exp(ie[None, None, :])
            tp = nn.functional.linear(efp, itp.T)

            feature_space = torch.cat([feature_space, fp], dim=-1)
            eign_fp = torch.cat([eign_fp, efp], dim=-1)
            task_space = torch.cat([task_space, tp], dim=0)

        o = nn.functional.linear(eign_fp, task_space.T)
        return self._add_residual(o, add_residual, residual)


class Attention(nn.Module):
    """Multi-head self-attention with separate q/k/v projection layers.

    Equivalent to a fused-qkv attention, but q/k/v are three distinct
    Linear modules, so fused pretrained weights must be split before
    loading (see the model factory functions).
    """

    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        # Standard 1/sqrt(head_dim) attention temperature.
        self.scale = self.head_dim ** -0.5

        self.q_proj = nn.Linear(dim, dim, bias=qkv_bias)
        self.v_proj = nn.Linear(dim, dim, bias=qkv_bias)
        self.k_proj = nn.Linear(dim, dim, bias=qkv_bias)

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        """Reshape [B, L, D] -> [B, heads, L, head_dim], contiguous."""
        reshaped = tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
        return reshaped.transpose(1, 2).contiguous()

    def forward(self, x):
        bsz, seq_len, dim = x.shape
        folded = (bsz * self.num_heads, -1, self.head_dim)

        # Project, then fold the head axis into the batch axis for bmm.
        q = self._shape(self.q_proj(x), seq_len, bsz).view(*folded)
        k = self._shape(self.k_proj(x), -1, bsz).view(*folded)
        v = self._shape(self.v_proj(x), -1, bsz).view(*folded)

        scores = torch.bmm(q, k.transpose(1, 2)) * self.scale
        probs = self.attn_drop(nn.functional.softmax(scores, dim=-1))
        context = torch.bmm(probs, v)

        # Unfold heads: [B*H, L, hd] -> [B, L, D].
        context = context.view(bsz, self.num_heads, seq_len, self.head_dim)
        context = context.transpose(1, 2).reshape(bsz, seq_len, dim)

        return self.proj_drop(self.proj(context))


class Block(nn.Module):
    """Transformer encoder block with an optional parallel adapter branch.

    The adapter (when given) reads the post-attention activations and its
    output is added to the MLP branch before the final residual sum.
    """

    layer_id: int

    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        config: argparse.Namespace = argparse.Namespace(),
        layer_id=0,
    ):
        super().__init__()
        self.config = config
        self.layer_id = layer_id

        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        # Stochastic depth on the residual branches; identity when disabled.
        self.drop_path = (
            DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        )

        self.norm2 = norm_layer(dim)
        hidden_dim = int(dim * mlp_ratio)
        self.fc1 = nn.Linear(dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, dim)
        self.act = act_layer()
        self.mlp_drop = nn.Dropout(drop)

    def forward(self, x, adapt=None):
        # Attention sub-block with residual connection.
        x = x + self.drop_path(self.attn(self.norm1(x)))

        # The adapter runs in parallel to the MLP on the same input.
        adapt_x = adapt(x, add_residual=False) if adapt is not None else None

        residual = x
        h = self.mlp_drop(self.act(self.fc1(self.norm2(x))))
        h = self.drop_path(self.mlp_drop(self.fc2(h)))

        if adapt_x is not None and self.config.ffn_adapt:
            if self.config.ffn_option == "parallel":
                h = h + adapt_x
            else:
                raise ValueError(self.config.ffn_adapt)

        return residual + h


class VisionTransformer(nn.Module):
    """Vision Transformer with support for global average pooling and one
    SubspaceDecompositionAndMerge (SDM) adapter attached to every block."""

    def __init__(
        self,
        global_pool=False,
        img_size=224,
        patch_size=16,
        in_chans=3,
        num_classes=1000,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        representation_size=None,
        distilled=False,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        embed_layer=PatchEmbed,
        norm_layer=None,
        act_layer=None,
        weight_init="",
        tuning_config: argparse.Namespace = argparse.Namespace(),
    ):
        super().__init__()
        self.out_dim = embed_dim
        self.sequential = True
        # CAM visualisation forces a fixed 1024px input resolution.
        self.img_size = img_size if not tuning_config.cam_visual else 1024

        print("I'm using ViT with MoEAdapters.")
        self.tuning_config = tuning_config
        self.num_classes = num_classes
        self.num_features = self.embed_dim = (
            embed_dim  # num_features for consistency with other models
        )
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU

        self.patch_embed = embed_layer(
            img_size=self.img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(
            torch.zeros(1, num_patches + self.num_tokens, embed_dim)
        )
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, depth)
        ]  # stochastic depth decay rule
        self.blocks = nn.Sequential(
            *[
                Block(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer,  # type: ignore
                    act_layer=act_layer,
                    config=tuning_config,  # type: ignore
                    layer_id=i,
                )
                for i in range(depth)
            ]
        )
        self.norm = norm_layer(embed_dim)

        # Representation layer
        if representation_size and not distilled:
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(
                OrderedDict(
                    [
                        ("fc", nn.Linear(embed_dim, representation_size)),
                        ("act", nn.Tanh()),
                    ]
                )
            )
        else:
            self.pre_logits = nn.Identity()

        # Classifier head(s); head_dist stays None unless distilled.
        self.head = (
            nn.Linear(self.num_features, num_classes)
            if num_classes > 0
            else nn.Identity()
        )
        self.head_dist = None
        if distilled:
            self.head_dist = (
                nn.Linear(self.embed_dim, self.num_classes)
                if num_classes > 0
                else nn.Identity()
            )

        self.global_pool = global_pool
        if self.global_pool:
            self.fc_norm = norm_layer(embed_dim)

            del self.norm  # remove the original norm

        self.config = tuning_config
        self._device = tuning_config._device
        # One SDM adapter per transformer block.
        self.cur_adapter = nn.ModuleList(
            [SubspaceDecompositionAndMerge(tuning_config) for _ in range(depth)]
        )

    def init_weights(self, mode=""):
        raise NotImplementedError()

    @torch.jit.ignore  # type:ignore
    def no_weight_decay(self):
        # "dist_token" kept for timm compatibility even though this class
        # never registers such a parameter.
        return {"pos_embed", "cls_token", "dist_token"}

    def get_classifier(self):
        # Bug fix: the original tested `self.dist_token`, an attribute this
        # class never defines, so every call raised AttributeError. Use
        # head_dist, which is None unless the model was built distilled.
        if self.head_dist is None:
            return self.head
        else:
            return self.head, self.head_dist

    def reset_classifier(self, num_classes, global_pool=""):
        """Replace the classification head(s) for `num_classes` classes."""
        self.num_classes = num_classes
        self.head = (
            nn.Linear(self.embed_dim, num_classes)
            if num_classes > 0
            else nn.Identity()
        )
        if self.num_tokens == 2:
            self.head_dist = (
                nn.Linear(self.embed_dim, self.num_classes)
                if num_classes > 0
                else nn.Identity()
            )

    def forward_token(self, x):
        """Run the backbone and return the [CLS] token embedding [B, D]."""
        x = self.forward_feats(x)
        outcome = x[:, 0]

        return outcome

    def forward(self, x):
        x = self.forward_token(x)
        return x

    def forward_feats(self, x):
        """Patchify, add positional embeddings, run all adapter-augmented
        blocks, and normalise. Returns [B, 1 + N, D]."""
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)

        for idx, blk in enumerate(self.blocks):
            x = blk(x, self.cur_adapter[idx])

        # Bug fix: with global_pool=True, __init__ deletes self.norm and
        # creates self.fc_norm, so the original unconditional self.norm(x)
        # raised AttributeError. LayerNorm acts per token, so applying
        # fc_norm here is equivalent to normalising after token selection.
        if self.global_pool:
            x = self.fc_norm(x)
        else:
            x = self.norm(x)

        return x

    def freeze(self):
        pass

    def after_task(self):
        """Advance every per-block adapter to the next task (freeze old
        experts, append a new trainable one)."""
        for adapter in self.cur_adapter:
            adapter.update()  # type: ignore

    def compute_loss(self, alpha_fp=0.5, alpha_tp=0.5):
        """Mean orthogonality loss over all adapters.

        Args:
            alpha_fp: weight for the feature-space term.
            alpha_tp: weight for the task-space term.
        """
        loss = 0
        for adapter in self.cur_adapter:
            ol_fp, ol_tp = adapter.compute_ortho_loss()  # type: ignore
            loss += alpha_fp * ol_fp + alpha_tp * ol_tp
        return loss / len(self.cur_adapter)


def vit_base_patch16_224_sdm(pretrained=False, **kwargs):
    """Build a ViT-B/16 with SDM adapters, initialised from timm's
    ImageNet-1k `vit_base_patch16_224` checkpoint.

    Args:
        pretrained: unused; kept for timm factory-signature compatibility.
        **kwargs: forwarded to :class:`VisionTransformer`.

    Returns:
        A VisionTransformer whose backbone weights are frozen; only the
        parameters missing from the checkpoint (the adapters) train.
    """
    model = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs
    )

    # checkpoint_model = torch.load('./pretrained_models/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz')
    reference = timm.create_model(
        "vit_base_patch16_224", pretrained=True, num_classes=0
    )
    state_dict = reference.state_dict()

    # Our Attention module uses three separate q/k/v Linear layers, so each
    # fused qkv tensor from the checkpoint is split into three entries.
    dim = 768
    for key in list(state_dict):
        for kind in ("weight", "bias"):
            fused_name = f"qkv.{kind}"
            if fused_name in key:
                fused = state_dict.pop(key)
                for i, proj in enumerate(("q_proj", "k_proj", "v_proj")):
                    chunk = fused[i * dim : (i + 1) * dim]
                    state_dict[key.replace(fused_name, f"{proj}.{kind}")] = chunk

    # Drop the "mlp." prefix so fc1/fc2 line up with our Block layout.
    for key in list(state_dict):
        if "mlp.fc" in key:
            state_dict[key.replace("mlp.", "")] = state_dict.pop(key)

    # High-resolution variant needs resized positional embeddings.
    if model.img_size == 1024:
        state_dict["pos_embed"] = interpolate_positional_embeddings(
            state_dict["pos_embed"]
        )

    msg = model.load_state_dict(state_dict, strict=False)
    print("Missing keys: ", msg.missing_keys)
    print("Unexpected keys: ", msg.unexpected_keys)

    # Freeze the backbone: only keys absent from the checkpoint (adapters)
    # remain trainable.
    for name, p in model.named_parameters():
        p.requires_grad = name in msg.missing_keys
    return model


@register_model
def vit_base_patch16_224_in21k_sdm(pretrained=False, **kwargs):
    """Build a ViT-B/16 with SDM adapters, initialised from timm's
    ImageNet-21k `vit_base_patch16_224.augreg_in21k` checkpoint.

    Args:
        pretrained: unused; kept for timm registry compatibility.
        **kwargs: forwarded to :class:`VisionTransformer`.

    Returns:
        A VisionTransformer whose backbone weights are frozen; only the
        parameters missing from the checkpoint (the adapters) train.
    """
    model = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs
    )

    # checkpoint_model = torch.load('./pretrained_models/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz')
    reference = timm.create_model(
        "vit_base_patch16_224.augreg_in21k", pretrained=True, num_classes=0
    )
    state_dict = reference.state_dict()

    # Our Attention module uses three separate q/k/v Linear layers, so each
    # fused qkv tensor from the checkpoint is split into three entries.
    dim = 768
    for key in list(state_dict):
        for kind in ("weight", "bias"):
            fused_name = f"qkv.{kind}"
            if fused_name in key:
                fused = state_dict.pop(key)
                for i, proj in enumerate(("q_proj", "k_proj", "v_proj")):
                    chunk = fused[i * dim : (i + 1) * dim]
                    state_dict[key.replace(fused_name, f"{proj}.{kind}")] = chunk

    # Drop the "mlp." prefix so fc1/fc2 line up with our Block layout.
    for key in list(state_dict):
        if "mlp.fc" in key:
            state_dict[key.replace("mlp.", "")] = state_dict.pop(key)

    # High-resolution variant needs resized positional embeddings.
    if model.img_size == 1024:
        state_dict["pos_embed"] = interpolate_positional_embeddings(
            state_dict["pos_embed"]
        )

    msg = model.load_state_dict(state_dict, strict=False)
    print("Missing keys: ", msg.missing_keys)
    print("Unexpected keys: ", msg.unexpected_keys)

    # Freeze the backbone: only keys absent from the checkpoint (adapters)
    # remain trainable.
    for name, p in model.named_parameters():
        p.requires_grad = name in msg.missing_keys
    return model


def interpolate_positional_embeddings(
    old_pos_embed, patch_size=16, image_size=1024
):
    """Resize ViT positional embeddings to a new image resolution.

    Args:
        old_pos_embed: [1, 1 + G*G, D] positional embeddings with the
            [CLS] token at index 0.
        patch_size: patch edge length in pixels.
        image_size: target (square) image edge length in pixels.

    Returns:
        [1, 1 + G'*G', D] embeddings: the [CLS] entry is copied unchanged
        and the patch grid is resized with bicubic interpolation.

    Raises:
        ValueError: if the patch tokens do not form a square grid.
    """
    # Keep the [CLS] token aside; only the patch grid is interpolated.
    cls_token = old_pos_embed[:, 0, :].unsqueeze(1)
    patch_pos = old_pos_embed[:, 1:, :]

    # Bug fix: the original derived the grid size from the token count
    # INCLUDING [CLS] (e.g. 197) and only worked because int() truncated
    # sqrt(197) down to 14. Compute it from the actual patch-token count.
    num_patches = patch_pos.shape[1]
    old_grid = math.isqrt(num_patches)
    if old_grid * old_grid != num_patches:
        raise ValueError(
            f"pos_embed has {num_patches} patch tokens, "
            "which is not a square grid"
        )
    new_grid = image_size // patch_size

    # [1, G*G, D] -> [1, D, G, G] for F.interpolate.
    patch_pos = patch_pos.reshape(1, old_grid, old_grid, -1)
    patch_pos = patch_pos.permute(0, 3, 1, 2)
    # Bicubic resize of the positional grid, e.g. 14x14 -> 64x64.
    patch_pos = F.interpolate(
        patch_pos, size=(new_grid, new_grid), mode="bicubic"
    )
    # [1, D, G', G'] -> [1, G'*G', D].
    patch_pos = patch_pos.permute(0, 2, 3, 1)
    patch_pos = patch_pos.reshape(1, new_grid * new_grid, -1)

    # Reinsert the [CLS] token at the front.
    return torch.cat([cls_token, patch_pos], dim=1)
