import os
import os.path as osp

import torch
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
from torch import Tensor
from transformers import (AutoBackbone, AutoImageProcessor, ConvNextV2Backbone,
                          ConvNextV2Config, ConvNextV2Model)


def rename_key(name):
    """Translate one original-ConvNeXtV2 state-dict key to HF backbone naming.

    The mapping mirrors the official HuggingFace conversion script: the stem
    becomes ``embeddings.*``, downsampling convs move under their stage, block
    indices gain a ``layers`` segment, and GRN/norm parameter names are
    normalized. ``str.replace`` is a no-op when the pattern is absent, so the
    substitutions can be applied unconditionally.
    """
    substitutions = [
        # stem: patch embedding conv + its norm ("norm" becomes "layernorm" below)
        ("downsample_layers.0.0", "embeddings.patch_embeddings"),
        ("downsample_layers.0.1", "embeddings.norm"),
    ]
    # per-stage downsampling layers (norm + conv) for stages 1..3
    for stage_idx in (1, 2, 3):
        for sub_idx in (0, 1):
            substitutions.append(
                (f"downsample_layers.{stage_idx}.{sub_idx}",
                 f"stages.{stage_idx}.downsampling_layer.{sub_idx}")
            )
    for old, new in substitutions:
        name = name.replace(old, new)

    # block keys: "stages.N.M." -> "stages.N.layers.M." (downsampling keys excluded)
    if "stages" in name and "downsampling_layer" not in name:
        cut = len("stages.0")
        name = name[:cut] + ".layers" + name[cut:]

    # GRN parameters and norm layers use different names in HF
    name = name.replace("gamma", "weight")
    name = name.replace("beta", "bias")
    name = name.replace("stages", "encoder.stages")
    name = name.replace("norm", "layernorm")

    return name

def get_convnextv2_config(scale, out_features):
    """Build a ``ConvNextV2Config`` for a named model scale.

    Args:
        scale: one of "atto", "femto", "pico", "nano", "tiny", "base",
            "large", "huge".
        out_features: iterable of detectron2 feature names ("res2".."res5"),
            translated to the HF stage names ("stage1".."stage4").

    Returns:
        A configured ``ConvNextV2Config``.

    Raises:
        ValueError: if ``scale`` is unknown. (The original code built a
            ``TypeError`` but never raised it, which led to an
            ``UnboundLocalError`` further down instead.)
        KeyError: if an entry of ``out_features`` is not "res2".."res5".
    """
    stage_dict = {
        "res2": "stage1",
        "res3": "stage2",
        "res4": "stage3",
        "res5": "stage4",
    }
    # (depths, hidden_sizes) per published ConvNeXtV2 scale
    scale_specs = {
        "atto": ([2, 2, 6, 2], [40, 80, 160, 320]),
        "femto": ([2, 2, 6, 2], [48, 96, 192, 384]),
        "pico": ([2, 2, 6, 2], [64, 128, 256, 512]),
        "nano": ([2, 2, 8, 2], [80, 160, 320, 640]),
        "tiny": ([3, 3, 9, 3], [96, 192, 384, 768]),
        "base": ([3, 3, 27, 3], [128, 256, 512, 1024]),
        "large": ([3, 3, 27, 3], [192, 384, 768, 1536]),
        "huge": ([3, 3, 27, 3], [352, 704, 1408, 2816]),
    }
    # Validate before constructing the config so bad input fails fast.
    if scale not in scale_specs:
        raise ValueError(
            f"scale {scale!r} does not exist; expected one of {sorted(scale_specs)}"
        )
    depths, hidden_sizes = scale_specs[scale]

    config = ConvNextV2Config()
    config.hidden_sizes = hidden_sizes
    config.depths = depths
    config.out_features = [stage_dict[i] for i in out_features]

    return config

def build_convnextv2(scale, ckpt_path, out_features):
    """Create a ``ConvNextV2Backbone``, optionally loading converted weights.

    Args:
        scale: ConvNeXtV2 model scale name (see ``get_convnextv2_config``).
        ckpt_path: path to an original ConvNeXtV2 checkpoint containing a
            ``"model"`` state dict; if the file is missing, the model is
            randomly initialized instead (best-effort, matching original
            behavior).
        out_features: detectron2 feature names ("res2".."res5") to expose.

    Returns:
        A ``ConvNextV2Backbone`` instance.
    """
    config = get_convnextv2_config(scale, out_features)

    state_dict = None
    if osp.exists(ckpt_path):
        state_dict = torch.load(ckpt_path, map_location="cpu")["model"]
        # Rename keys to HF conventions; drop the classification head, which
        # the backbone does not use. Iterate over a snapshot of the keys since
        # the dict is mutated in the loop.
        for key in list(state_dict.keys()):
            val = state_dict.pop(key)
            if "head" in key:
                continue
            new_key = rename_key(key)
            # The final norm of the last stage lives under
            # hidden_states_norms.stage4 in the HF backbone.
            if new_key in ("layernorm.weight", "layernorm.bias"):
                new_key = new_key.replace("layernorm", "hidden_states_norms.stage4")
            state_dict[new_key] = val
    else:
        print(f"convnext pretrain model path: {ckpt_path} is not exists.")

    model = ConvNextV2Backbone(config)

    if state_dict is None:
        model.init_weights()
        return model

    # Report mismatches via set difference (the original did two O(n^2) scans).
    model_keys = set(model.state_dict().keys())
    ckpt_keys = set(state_dict.keys())
    print("missed_keys: ", sorted(model_keys - ckpt_keys))
    print("unexpected_keys: ", sorted(ckpt_keys - model_keys))
    model.load_state_dict(state_dict, strict=False)

    return model

    

@BACKBONE_REGISTRY.register()
class D2ConvnextV2(Backbone):
    """Detectron2 ``Backbone`` wrapper around a HuggingFace ConvNeXtV2 backbone.

    Exposes four feature maps ("res2".."res5") at strides 4/8/16/32 with the
    per-stage channel counts taken from the wrapped model's config.
    """

    def __init__(self, cfg, input_shape):
        """Build the backbone from cfg.MODEL.CONVNEXTV2.{SCALE, CKPT}.

        ``input_shape`` is part of the detectron2 backbone-builder contract
        and is not used here.
        """
        super().__init__()
        scale = cfg.MODEL.CONVNEXTV2.SCALE
        ckpt_path = cfg.MODEL.CONVNEXTV2.CKPT

        self._out_features = ["res2", "res3", "res4", "res5"]
        self.model = build_convnextv2(scale, ckpt_path, self._out_features)
        # ConvNeXt stem downsamples by 4; each later stage halves resolution.
        self._out_feature_strides = {
            "res2": 4,
            "res3": 8,
            "res4": 16,
            "res5": 32,
        }
        hidden_sizes = self.model.config.hidden_sizes
        assert len(hidden_sizes) == len(self._out_features)
        self._out_feature_channels = dict(zip(self._out_features, hidden_sizes))

    def forward(self, x: Tensor) -> "dict[str, Tensor]":
        """Run the backbone on an NCHW batch.

        Returns a dict mapping "res2".."res5" to feature maps. (The original
        annotation said ``Tensor``, but a dict has always been returned.)
        """
        assert x.dim() == 4

        feature_maps = self.model(x).feature_maps
        assert len(self._out_features) == len(feature_maps)
        return {name: fmap for name, fmap in zip(self._out_features, feature_maps)}

    def output_shape(self):
        """Per-feature ``ShapeSpec`` (channels + stride), as detectron2 expects."""
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }

    @property
    def size_divisibility(self):
        # The deepest stage has stride 32, so inputs must be divisible by 32.
        return 32




def main(checkpoint_path, scale=None, out_features=("res2", "res3", "res4", "res5")):
    """Convert an original ConvNeXtV2 checkpoint to HF naming and smoke-test it.

    Bug fix: the original called ``get_convnextv2_config(checkpoint_path)``
    with a single argument, but that function requires ``(scale,
    out_features)`` — the script crashed with a TypeError. The new defaulted
    parameters keep the one-argument call working.

    Args:
        checkpoint_path: path to the original ``*.pt`` checkpoint.
        scale: model scale name; if None, inferred from the checkpoint
            filename (e.g. "convnextv2_tiny_22k_384_ema.pt" -> "tiny").
        out_features: detectron2 feature names to expose.

    Raises:
        ValueError: if ``scale`` is None and cannot be inferred unambiguously.
    """
    if scale is None:
        known_scales = ("atto", "femto", "pico", "nano", "tiny", "base", "large", "huge")
        stem = osp.basename(checkpoint_path)
        matches = [s for s in known_scales if s in stem]
        if len(matches) != 1:
            raise ValueError(
                f"cannot infer scale from {checkpoint_path!r}; pass scale explicitly"
            )
        scale = matches[0]

    config = get_convnextv2_config(scale, list(out_features))
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    print("Converting convnextv2 pretrain model parameters...")
    # rename keys; drop the classification head
    for key in list(state_dict.keys()):
        val = state_dict.pop(key)
        if "head" in key:
            continue
        new_key = rename_key(key)
        # final stage norm lives under hidden_states_norms in the HF backbone
        if new_key in ("layernorm.weight", "layernorm.bias"):
            new_key = new_key.replace("layernorm", "hidden_states_norms.stage4")
        state_dict[new_key] = val

    # load HuggingFace model
    model = ConvNextV2Backbone(config)

    # report mismatches via set difference
    model_keys = set(model.state_dict().keys())
    ckpt_keys = set(state_dict.keys())
    print("missed_keys: ", sorted(model_keys - ckpt_keys))
    print("unexpected_keys: ", sorted(ckpt_keys - model_keys))

    model.load_state_dict(state_dict, strict=False)

    # quick shape smoke test on a non-square input
    inp = torch.randn(1, 3, 320, 2400)
    out = model(inp)

    for fmap in out.feature_maps:
        print(fmap.shape)



# Script entry point: convert a local ConvNeXtV2-tiny checkpoint and run the
# feature-map shape smoke test in main().
if __name__ == "__main__":
    checkpoint_path = "model/convnextv2_tiny_22k_384_ema.pt"
    main(checkpoint_path)
