import os
import torch
import torch.backends
import torch.backends.cudnn
import random
import numpy as np
import yaml
import types


def mkdirss(dirpath):
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)


def set_seed(seed=0):
    """Seed the python, numpy, and torch RNGs for reproducible runs.

    Also forces cuDNN into deterministic mode and disables its
    auto-tuning benchmark, trading speed for repeatability.

    Args:
        seed (int): seed value applied to every RNG (default 0).
    """
    seeders = (
        np.random.seed,
        random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    )
    for seeder in seeders:
        seeder(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


@torch.no_grad()
def partial_save(
    save_keys,
    model,
    opt,
    scheduler,
    acc,
    ep,
    exp_base_path,
    ckp_name="ckp_best",
):
    """Func: only save the parameters in save_keys

    Args:
        save_keys (list): list of keys to save
        model (nn.Module): model
        ckp name args:
            model_type (str): model type
            task (str): task
            name (str): name of the task, e.g. cifar_mov_001
            acc (float): accuracy
            ep (int): epoch
    """
    model.eval()
    model = model.cpu()
    trainable = {}
    for k, v in model.named_parameters():
        # if save kyes in k, save it
        if any([key in k for key in save_keys]):
            trainable[k] = v.data

    save_dict = {
        "trainable": trainable,
        "opt_st": opt.state_dict(),
        "scheduler_st": scheduler.state_dict(),
        "acc": acc,
        "ep": ep,
    }
    torch.save(
        save_dict,
        f"{exp_base_path}/{ckp_name}.pt",
        # f"./output/{model_type}/{task}/{name}/ckp_{acc}acc_{ep}ep_best.pt"
    )


def partial_load(exp_base_path):
    try:
        save_dict = torch.load(
            os.path.join(exp_base_path, "ckp_best.pt")
        )
        model_st = save_dict["trainable"]
        opt_st = save_dict["opt_st"]
        scheduler_st = save_dict["scheduler_st"]
        acc = save_dict["acc"]
        ep = save_dict["ep"]
        # resume info
        print(f"Resume from {exp_base_path}, acc: {acc}, ep: {ep}")
        return model_st, opt_st, scheduler_st, acc, ep
    except Exception as e:
        return e


@torch.no_grad()
def save(model_type, task, name, model):
    model.eval()
    model = model.cpu()
    trainable = {}
    for n, p in model.named_parameters():
        if (
            "sct_mlp" in n
            or "sct_mlp" in n
            or "head" in n
            or "q_l" in n
            or "k_l" in n
            or "v_l" in n
        ):
            trainable[n] = p.data

    torch.save(
        trainable,
        "./output/%s/%s/%s/ckpt_epoch_best.pt"
        % (model_type, task, name),
    )


def load(model_type, task, name, model):
    model = model.cpu()
    st = torch.load(
        "./output/%s/%s/%s/ckpt_epoch_best.pt"
        % (model_type, task, name)
    )
    model.load_state_dict(st, strict=False)
    return model


def get_config(model_type, task, dataset_name):
    """Load ``./configs/<model_type>/<task>/<dataset_name>.yaml`` as a dict.

    String values "True"/"False" are converted to real booleans.

    Args:
        model_type (str): model type (first path component).
        task (str): task name (second path component).
        dataset_name (str): dataset config file stem.

    Returns:
        dict: parsed configuration.
    """
    cfg_path = "./configs/%s/%s/%s.yaml" % (model_type, task, dataset_name)
    with open(cfg_path, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    # BUG FIX: the original used bool(v), but bool("False") is True
    # because any non-empty string is truthy. Compare the literal instead.
    for k, v in config.items():
        if v in ("True", "False"):
            config[k] = v == "True"
    return config


def merge_config(args, config):
    """Merge argparse ``args`` into ``config`` and return a namespace.

    Values from ``args`` win, except that ``None`` never overwrites an
    existing config entry; keys absent from ``config`` are always added
    (even when their value is ``None``).
    """
    known = set(config)
    for key, value in vars(args).items():
        if key not in known or value is not None:
            config[key] = value
    return types.SimpleNamespace(**config)
