import torch

from downstream.model_builder import load_state_with_same_shape
from model import (
    DA,
    Classifier,
    DilationFeatureExtractor,
    DinoVitFeatureExtractor,
    MinkUNet,
    PPKTFeatureExtractor,
    Preprocessing,
    ResNetFCN,
    SegNet,
    VoxelNet,
)


def forgiving_state_restore(net, loaded_dict):
    """
    Load ``loaded_dict`` into ``net``, skipping tensors whose shapes differ.

    Useful when restoring a checkpoint that was trained with a different
    number of classes: shape-matching parameters are copied over, while
    mismatched ones keep the network's current values.
    """
    # Strip DataParallel's "module." wrapper from the checkpoint keys.
    stripped = {key.replace("module.", ""): val for key, val in loaded_dict.items()}
    current = net.state_dict()
    compatible = {}
    for name, tensor in current.items():
        if name in stripped and stripped[name].size() == tensor.size():
            compatible[name] = stripped[name]
        else:
            print("Skipped loading parameter {}".format(name))
    current.update(compatible)
    net.load_state_dict(current)
    return net


def make_model(config, img_config):
    """
    Build the point-cloud and image models described by the config.

    Parameters
    ----------
    config : dict
        Main configuration. Keys read here: "model_points", "model_images",
        "model_n_out", "train_classifier", "pretrained_ckpt_path".
    img_config : dict
        Configuration used to build the original (frozen) classifier.

    Returns
    -------
    tuple
        (model_points, model_images, model_classifier,
         model_ori_classifier, model_da)

    Raises
    ------
    ValueError
        If "model_points" or "model_images" names an unknown architecture
        (previously this surfaced later as an opaque NameError).
    """
    if config["model_points"] == "minkunet":
        model_points = MinkUNet(1, config["model_n_out"], config)
    else:
        raise ValueError(
            "Unknown model_points architecture: {}".format(config["model_points"])
        )

    if config["model_images"] == "segnet":
        model_images = SegNet(config, preprocessing=Preprocessing())
    elif config["model_images"] == "resnetfcn":
        model_images = ResNetFCN(config, preprocessing=Preprocessing())
    else:
        raise ValueError(
            "Unknown model_images architecture: {}".format(config["model_images"])
        )
    # The image backbone is frozen: no gradients flow through it.
    for param in model_images.parameters():
        param.requires_grad = False

    # Frozen "original" classifier built from the image-domain config.
    model_ori_classifier = Classifier(img_config)
    for param in model_ori_classifier.parameters():
        param.requires_grad = False

    # Either reuse the frozen classifier, or train a fresh one built
    # from the main config.
    if config["train_classifier"]:
        model_classifier = Classifier(config)
    else:
        model_classifier = model_ori_classifier

    # Domain-adaptation head.
    model_da = DA(config["model_n_out"], config)

    if config["pretrained_ckpt_path"] is not None:
        checkpoint = torch.load(config["pretrained_ckpt_path"], map_location="cpu")
        state = checkpoint["state_dict"]

        # Restore the frozen image backbone exactly (strict=True).
        img_state = {
            k.replace("model_images.", ""): v
            for k, v in state.items()
            if k.startswith("model_images.")
        }
        model_images.load_state_dict(img_state, strict=True)

        # The classifier head may differ in shape across runs, hence
        # strict=False.
        ori_classifier_state = {
            k.replace("model_classifier.", ""): v
            for k, v in state.items()
            if k.startswith("model_classifier.")
        }
        model_ori_classifier.load_state_dict(ori_classifier_state, strict=False)

    return model_points, model_images, model_classifier, model_ori_classifier, model_da
