import torch
from os import path

# Public API of this checkpoint-utility module: saving a full training state
# and restoring its individual pieces (model / optimizer / scheduler).
__all__ = ["save_model", "load_model_parameter", "set_optimizer_lr", "load_optimizer_parameter",
           "load_scheduler_parameter"]


def save_model(epoch, model, optimizer, scheduler, save_opt):
    """Save a full training checkpoint (model, optimizer, scheduler, epoch).

    The checkpoint is written to
    ``save_opt["save_path"]/<save_name>_epoch<epoch>.pth`` with the keys
    ``"net"``, ``"optimizer"``, ``"epoch"`` and ``"scheduler"`` (the same
    layout the ``load_*_parameter`` helpers in this module expect).

    Args:
        epoch: current epoch number; embedded in the file name and state.
        model: model to save; unwrapped first if wrapped in DataParallel.
        optimizer: optimizer whose ``state_dict`` is saved.
        scheduler: LR scheduler whose ``state_dict`` is saved.
        save_opt: dict with "save_path" (directory) and "save_name" (prefix).
    """
    # Unwrap DataParallel so the saved parameter keys are not prefixed with
    # "module.", which would break loading into a plain (unwrapped) model.
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
    # NOTE: a previous version also tested the *optimizer* for DataParallel;
    # optimizers are never DataParallel-wrapped, so that check was dead code
    # and has been removed.
    state = {
        "net": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "epoch": epoch,
        "scheduler": scheduler.state_dict(),
    }

    save_name = save_opt["save_name"]
    model_path = path.join(save_opt["save_path"], f"{save_name}_epoch{epoch}.pth")
    torch.save(state, model_path)


def load_model_parameter(model, model_path, key="net", strict=True):
    """Load pretrained weights from a checkpoint file into ``model``.

    Args:
        model: the model that receives the parameters (modified in place).
        model_path: path to a file produced by ``torch.save``.
        key: checkpoint entry holding the state dict (``"net"`` for
            checkpoints written by :func:`save_model`), or ``None`` when the
            file contains a bare state dict.
        strict: when False, only parameters whose name AND shape both match
            the current model are overwritten; every other parameter keeps
            its current value and no error is raised.

    Returns:
        ``(model, set())`` — the empty set is kept for backward
        compatibility with callers that unpack a second value.
    """
    # Load once, then select the sub-dict if a key was given.
    checkpoint = torch.load(model_path)
    state_dict = checkpoint if key is None else checkpoint[key]
    torch.cuda.empty_cache()
    if not strict:
        # Partial loading: keep only entries whose name and tensor shape
        # both match the current model; merge them over the current state.
        model_dict = model.state_dict()
        pretrained_dict = {
            k: v for k, v in state_dict.items()
            if k in model_dict and v.size() == model_dict[k].size()
        }
        model_dict.update(pretrained_dict)
        state_dict = model_dict
    model.load_state_dict(state_dict, strict=strict)
    return model, set()


def load_optimizer_parameter(optimizer, model_path, key="optimizer"):
    """Restore an optimizer's state from a checkpoint file.

    When ``key`` is None the file is assumed to contain a bare optimizer
    state dict; otherwise the entry stored under ``key`` is used (matching
    the layout written by :func:`save_model`).

    Returns the same optimizer, with its state loaded.
    """
    checkpoint = torch.load(model_path)
    state = checkpoint[key] if key is not None else checkpoint
    optimizer.load_state_dict(state)
    return optimizer


def load_scheduler_parameter(scheduler, model_path, key="scheduler"):
    """Restore an LR scheduler's state from a checkpoint file.

    When ``key`` is None the file is assumed to contain a bare scheduler
    state dict; otherwise the entry stored under ``key`` is used (matching
    the layout written by :func:`save_model`).

    Returns the same scheduler, with its state loaded.
    """
    checkpoint = torch.load(model_path)
    state = checkpoint[key] if key is not None else checkpoint
    scheduler.load_state_dict(state)
    return scheduler


def set_optimizer_lr(optimizer, lr):
    """Overwrite the learning rate of every parameter group with ``lr``."""
    for group in optimizer.param_groups:
        group["lr"] = lr
