import torch
import torch.nn as nn
import logging
import math
import timm
from functools import partial
import torch.nn.functional as F

_logger = logging.getLogger(__name__)


def initialize_vit_model(pretrained_model, backbone_fun, **kwargs):
    """Create a ViT backbone and initialize it from a timm pretrained model.

    The timm checkpoint stores attention weights as a fused ``qkv``
    projection and MLP weights under an ``mlp.`` prefix, while the target
    backbone expects separate ``q_proj``/``k_proj``/``v_proj`` modules and
    un-prefixed ``fc`` layers — so the checkpoint state dict is remapped
    before loading. After loading, only parameters missing from the
    checkpoint (the adapter) stay trainable, unless ``ft`` is truthy, in
    which case the whole model is unfrozen.

    Args:
        pretrained_model: timm model name to download pretrained weights from.
        backbone_fun: callable building the target ViT backbone; receives
            ViT-Base hyperparameters plus the remaining ``kwargs``.
        **kwargs: forwarded to ``backbone_fun``. The keys
            ``pretrained_cfg_overlay``, ``cache_dir`` and ``ft`` are
            consumed here and not forwarded.

    Returns:
        The initialized backbone module.
    """
    # timm's factory machinery may inject these keys; backbone_fun does not
    # accept them. Pop with a default so a missing key is not a KeyError.
    kwargs.pop("pretrained_cfg_overlay", None)
    kwargs.pop("cache_dir", None)
    # FIXME: This is a temporary solution to load the ViT models for fine-tuning
    ft = kwargs.pop("ft", None)

    embed_dim = 768  # ViT-Base width; the qkv split below relies on it
    model = backbone_fun(
        patch_size=16,
        embed_dim=embed_dim,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )

    # Download the reference checkpoint (headless) and take its raw weights.
    checkpoint_model = timm.create_model(
        pretrained_model, pretrained=True, num_classes=0
    )
    state_dict = checkpoint_model.state_dict()

    # Split each fused qkv tensor into three equal chunks along dim 0 and
    # rename to the separate q/k/v projection modules of the backbone.
    for key in list(state_dict.keys()):
        for part in ("weight", "bias"):
            fused_name = f"qkv.{part}"
            if fused_name in key:
                fused = state_dict.pop(key)
                q, k, v = (
                    fused[:embed_dim],
                    fused[embed_dim : embed_dim * 2],
                    fused[embed_dim * 2 :],
                )
                state_dict[key.replace(fused_name, f"q_proj.{part}")] = q
                state_dict[key.replace(fused_name, f"k_proj.{part}")] = k
                state_dict[key.replace(fused_name, f"v_proj.{part}")] = v

    # Drop the "mlp." prefix so keys line up with the backbone's fc layers.
    for key in list(state_dict.keys()):
        if "mlp.fc" in key:
            state_dict[key.replace("mlp.", "")] = state_dict.pop(key)

    # Resize positional embeddings when the backbone runs at 1024px input.
    if model.img_size == 1024:
        state_dict["pos_embed"] = interpolate_positional_embeddings(
            state_dict["pos_embed"]
        )

    # strict=False: adapter parameters are expected to be missing.
    msg = model.load_state_dict(state_dict, strict=False)
    _logger.info("Missing keys: %s", msg.missing_keys)
    _logger.info("Unexpected keys: %s", msg.unexpected_keys)

    # Freeze everything loaded from the checkpoint; only parameters missing
    # from it (the adapter) remain trainable.
    for name, p in model.named_parameters():
        p.requires_grad = name in msg.missing_keys

    if ft:
        # Full fine-tuning requested: unfreeze the whole model.
        for p in model.parameters():
            p.requires_grad = True

    return model


def interpolate_positional_embeddings(
    old_pos_embed, patch_size=16, image_size=1024
):
    """Bilinearly resize ViT positional embeddings to a new image size.

    Args:
        old_pos_embed: tensor of shape ``(1, 1 + N, embed_dim)`` where the
            first token is the [CLS] embedding and ``N`` is a perfect
            square (the old patch grid, flattened).
        patch_size: model patch size; with ``image_size`` it determines the
            new grid side ``image_size // patch_size``.
        image_size: target (square) input resolution in pixels.

    Returns:
        Tensor of shape ``(1, 1 + (image_size // patch_size) ** 2,
        embed_dim)`` with the [CLS] embedding preserved unchanged.
    """
    # Keep the [CLS] embedding aside; only the grid tokens are resized.
    cls_token = old_pos_embed[:, 0, :].unsqueeze(1)
    # BUG FIX: shape[1] counts the [CLS] token too; the original relied on
    # int() truncation of sqrt(N + 1) to recover the grid side. Exclude
    # the [CLS] token explicitly instead.
    old_num_patches = old_pos_embed.shape[1] - 1
    old_grid = int(old_num_patches**0.5)

    # New grid side and patch count at the higher resolution.
    new_grid = image_size // patch_size
    new_num_patches = new_grid**2

    # [1, N, D] -> [1, D, g, g] so F.interpolate can resample spatially.
    grid_embed = old_pos_embed[:, 1:, :]
    grid_embed = grid_embed.reshape(1, old_grid, old_grid, -1)
    grid_embed = grid_embed.permute(0, 3, 1, 2)

    new_pos_embed = F.interpolate(
        grid_embed,
        size=(new_grid, new_grid),
        mode="bilinear",
        align_corners=False,  # explicit; matches the previous implicit default
    )

    # Back to [1, new_N, D].
    new_pos_embed = new_pos_embed.permute(0, 2, 3, 1)
    new_pos_embed = new_pos_embed.reshape(1, new_num_patches, -1)

    # Re-attach the [CLS] embedding at position 0.
    return torch.cat([cls_token, new_pos_embed], dim=1)


# TODO update pos_emb resize
def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
    """Rescale a grid of position embeddings loaded from a state_dict.

    Adapted from
    https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
    """
    _logger.info(
        "Resized position embedding: %s to %s", posemb.shape, posemb_new.shape
    )
    grid_len_new = posemb_new.shape[1]
    if num_tokens:
        # Leading special tokens (e.g. [CLS]) pass through untouched.
        tok_embed = posemb[:, :num_tokens]
        grid_embed = posemb[0, num_tokens:]
        grid_len_new -= num_tokens
    else:
        tok_embed = posemb[:, :0]
        grid_embed = posemb[0]
    gs_old = int(math.sqrt(len(grid_embed)))
    if not len(gs_new):  # backwards compatibility
        gs_new = [int(math.sqrt(grid_len_new))] * 2
    assert len(gs_new) >= 2
    _logger.info(
        "Position embedding grid-size from %s to %s", [gs_old, gs_old], gs_new
    )
    # [N, D] -> [1, D, gs_old, gs_old] for spatial resampling.
    grid_embed = grid_embed.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    grid_embed = F.interpolate(
        grid_embed, size=gs_new, mode="bicubic", align_corners=False
    )
    # Flatten back to [1, gs_new[0] * gs_new[1], D].
    grid_embed = grid_embed.permute(0, 2, 3, 1).reshape(
        1, gs_new[0] * gs_new[1], -1
    )
    return torch.cat([tok_embed, grid_embed], dim=1)
