import os
import os.path as osp

import torch
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
from torch import Tensor
from transformers import FocalNetBackbone, FocalNetConfig


def rename_key(name):
    """Translate a key from the official FocalNet checkpoint to the
    HuggingFace ``transformers`` FocalNet parameter naming.

    Classifier-head keys are mapped to ``classifier.*``; every other key
    is rewritten stage-by-stage and prefixed with ``focalnet.``.
    """
    # Patch-embedding renames.
    for old, new in (
        ("patch_embed.proj", "embeddings.patch_embeddings.projection"),
        ("patch_embed.norm", "embeddings.norm"),
    ):
        if old in name:
            name = name.replace(old, new)

    # Encoder keys gain an "encoder." prefix before further rewriting.
    if "layers" in name:
        name = "encoder." + name

    # Stage/block/downsample renames; order matters ("blocks" must be
    # renamed after "encoder.layers" has become "encoder.stages").
    for old, new in (
        ("encoder.layers", "encoder.stages"),
        ("downsample.proj", "downsample.projection"),
        ("blocks", "layers"),
    ):
        if old in name:
            name = name.replace(old, new)

    # Focal-modulation sub-modules; the .weight/.bias guard prevents
    # accidental matches on longer attribute names.
    for stem, target in (
        ("modulation.f", "modulation.projection_in"),
        ("modulation.h", "modulation.projection_context"),
        ("modulation.proj", "modulation.projection_out"),
    ):
        if f"{stem}.weight" in name or f"{stem}.bias" in name:
            name = name.replace(stem, target)

    # Final model-level layer norm.
    if name == "norm.weight":
        name = "layernorm.weight"
    elif name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        return name.replace("head", "classifier")
    return "focalnet." + name


def get_focalnet_config(scale, out_features):
    """Build a ``FocalNetConfig`` for the given model scale string.

    Args:
        scale: Model identifier such as ``"tiny_srf"``, ``"small_lrf"``,
            ``"base_lrf"``, ``"large_fl3"``, ``"xlarge_fl4"``, ``"huge_fl3"``.
        out_features: detectron2-style feature names (``"res2"``..``"res5"``),
            mapped onto the config's ``"stage1"``..``"stage4"``.

    Returns:
        A ``FocalNetConfig`` with ``out_features`` set.

    Raises:
        ValueError: If ``scale`` does not identify a known size, or a
            large/xlarge/huge scale lacks an ``fl3``/``fl4`` suffix.
            (Previously these cases crashed with ``UnboundLocalError``.)
    """
    stage_dict = {
        "res2": "stage1",
        "res3": "stage2",
        "res4": "stage3",
        "res5": "stage4"
    }
    depths = [2, 2, 6, 2] if "tiny" in scale else [2, 2, 18, 2]

    # Conv patch embedding, post-layernorm and layerscale are only used by
    # the large/huge variants ("xlarge" also matches "large" as a substring).
    is_big = "large" in scale or "huge" in scale
    use_conv_embed = is_big
    use_post_layernorm = is_big
    use_layerscale = is_big

    if is_big:
        if "fl3" in scale:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in scale:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
        else:
            raise ValueError(
                f"large/xlarge/huge scale must include 'fl3' or 'fl4': {scale!r}"
            )
    elif "tiny" in scale or "small" in scale or "base" in scale:
        focal_windows = [3, 3, 3, 3]
        focal_levels = [3, 3, 3, 3] if "lrf" in scale else [2, 2, 2, 2]
    else:
        raise ValueError(f"unrecognized FocalNet scale: {scale!r}")

    # NOTE: "xlarge" must be checked before "large" — the original elif chain
    # matched "large" first and silently gave xlarge models embed_dim=192.
    for key, dim in (
        ("tiny", 96),
        ("small", 96),
        ("base", 128),
        ("xlarge", 256),
        ("large", 192),
        ("huge", 352),
    ):
        if key in scale:
            embed_dim = dim
            break
    else:
        raise ValueError(f"unrecognized FocalNet scale: {scale!r}")

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    config.out_features = [stage_dict[i] for i in out_features]

    return config

def build_focalnet(scale, ckpt_path, out_features):
    """Create a ``FocalNetBackbone``, optionally loading pretrained weights.

    Args:
        scale: FocalNet scale string, passed to :func:`get_focalnet_config`.
        ckpt_path: Path to an official FocalNet checkpoint (a dict with a
            ``"model"`` entry). If the path does not exist, the model is
            randomly initialized instead.
        out_features: detectron2-style feature names ("res2".."res5").

    Returns:
        A ``FocalNetBackbone`` instance.
    """
    config = get_focalnet_config(scale, out_features)

    state_dict = None
    if osp.exists(ckpt_path):
        raw_state = torch.load(ckpt_path, map_location="cpu")["model"]
        # Translate the official checkpoint naming to the HF layout.
        state_dict = {rename_key(key): val for key, val in raw_state.items()}
    else:
        # Fixed message: this is a FocalNet (not ConvNeXt) checkpoint path.
        print(f"focalnet pretrain model path: {ckpt_path} does not exist.")

    model = FocalNetBackbone(config)

    if state_dict is not None:
        # Report key mismatches before the non-strict load; dict membership
        # keeps this O(n) instead of scanning a key list per lookup.
        model_dict = model.state_dict()
        missed_keys = [k for k in model_dict if k not in state_dict]
        unexpected_keys = [k for k in state_dict if k not in model_dict]
        print("missed_keys: ", missed_keys)
        print("unexpected_keys: ", unexpected_keys)
        model.load_state_dict(state_dict, strict=False)
    else:
        model.init_weights()

    return model

    

@BACKBONE_REGISTRY.register()
class D2FocalNet(Backbone):
    """detectron2 ``Backbone`` adapter around a HuggingFace FocalNet.

    Reads ``cfg.MODEL.FocalNet.SCALE`` and ``cfg.MODEL.FocalNet.CKPT`` and
    exposes the FocalNet stages as the detectron2 features
    ``res2``/``res3``/``res4``.
    """

    def __init__(self, cfg, input_shape):
        super().__init__()
        scale = cfg.MODEL.FocalNet.SCALE
        ckpt_path = cfg.MODEL.FocalNet.CKPT

        # Only three stages are exposed; the res5 entries below are kept for
        # completeness but unused while _out_features excludes "res5".
        self._out_features = ["res2", "res3", "res4"]
        self.model = build_focalnet(scale, ckpt_path, self._out_features)
        self._out_feature_strides = {
            "res2": 8,
            "res3": 16,
            "res4": 32,
            "res5": 32,
        }
        hidden_sizes = self.model.config.hidden_sizes
        self._out_feature_channels = {
            "res2": hidden_sizes[0],
            "res3": hidden_sizes[1],
            "res4": hidden_sizes[2],
            "res5": hidden_sizes[3],
        }

    def forward(self, x: Tensor) -> "dict[str, Tensor]":
        """Run the backbone on an NCHW image batch.

        Returns a dict mapping each name in ``self._out_features`` to its
        feature map, in stage order.  (The annotation previously claimed a
        single ``Tensor`` was returned.)
        """
        assert x.dim() == 4

        outs = self.model(x).feature_maps
        assert len(self._out_features) == len(outs)
        return dict(zip(self._out_features, outs))

    def output_shape(self):
        """Return per-feature ``ShapeSpec`` (channels, stride), detectron2-style."""
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }

    @property
    def size_divisibility(self):
        # FocalNet downsamples by 32 overall, so inputs must be divisible by 32.
        return 32




def main(checkpoint_path):
    """Smoke test: build a tiny FocalNet and print its feature-map shapes."""
    backbone = build_focalnet('tiny', checkpoint_path, ["res2", "res3", "res4"])

    dummy_input = torch.randn(1, 3, 320, 2400)
    result = backbone(dummy_input)

    for feature_map in result.feature_maps:
        print(feature_map.shape)



if __name__ == "__main__":
    checkpoint_path = "workspaces/pretrain_weights/focalnet/focalnet_tiny_srf.pth"
    main(checkpoint_path)
