

def evaluate(model_name, dataset_name):
    """Evaluate a trained model on the test split of *dataset_name*.

    Dynamically imports ``models.<model_name>``, builds its ``Config`` for the
    given dataset, restores the checkpoint from ``config.save_path``, runs
    ``x.eval_step`` over the test DataLoader, and returns the concatenated
    ground-truth and prediction tensors moved to CPU.

    Args:
        model_name: module name under the ``models`` package.
        dataset_name: dataset identifier passed to the model's ``Config``.

    Returns:
        Tuple ``(y_true, y_pred)`` of CPU tensors concatenated along dim 0.
    """
    printbar()
    x = import_module('models.' + model_name)
    config = x.Config(dataset_name)

    # Fix all seeds for reproducible evaluation order and any stochastic ops.
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True

    start_time = time.time()
    print("Loading data...")
    test_dataset = CMNDataset(config, 'test')
    test_dataloader = DataLoader(
        test_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        # BUG FIX: the original passed ``np.random.seed(1)`` — that CALLS the
        # seed function once at construction and hands DataLoader its return
        # value ``None``. worker_init_fn must be a callable invoked per worker.
        worker_init_fn=lambda _worker_id: np.random.seed(1),
    )
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)
    printbar()

    model = x.Model(config).to(config.device)
    start_time = time.time()

    print('Start evaluating...')
    # Checkpoint is a dict; map_location lets a CPU-saved checkpoint load onto
    # the configured device (and vice versa).
    ckpt = torch.load(config.save_path, map_location=config.device)
    model.load_state_dict(ckpt['net'])
    # Switch dropout/batch-norm layers to inference mode (idempotent even if
    # x.eval_step also does this).
    model.eval()

    preds, labels = [], []
    for inps, labs in tqdm(test_dataloader):
        pred, labs = x.eval_step(model, inps, labs)
        preds.append(pred)
        labels.append(labs)

    y_true = torch.cat(labels, dim=0)
    y_pred = torch.cat(preds, dim=0)
    # BUG FIX: the original printed ``endtime - starttime`` where ``starttime``
    # was never defined (NameError); the variable is ``start_time``.
    print('evaluating costs: {:.2f}s'.format(time.time() - start_time))
    return y_true.cpu(), y_pred.cpu()
