
import nni
from torch.optim import Adam
import utils
# from tensorboardX import SummaryWriter
import time
# from utils import cprint
from dataloader import check_dataset, getDataset
from models import check_model, getModel
from loss import getLoss
from Procedure import TRAIN, TEST
from pprint import pprint
import os
import torch

from Config import config
import numpy as np
import torch
import utils
import dataloader
from pprint import pprint
from utils import timer
from time import time
from tqdm import tqdm
import models
import multiprocessing
from sklearn.metrics import roc_auc_score
import metrics
import metrics2
from dataloader import BasicDataset

CORES = multiprocessing.cpu_count() // 2


def minibatch(*tensors, **kwargs):
    """Yield consecutive mini-batches from one or more equal-length sequences.

    Args:
        *tensors: one or more indexable sequences (lists, numpy arrays,
            torch tensors). All are assumed to share the same length;
            with several inputs, only the first one's length drives the
            iteration.
        **kwargs: ``batch_size`` overrides the global ``config['batch_size']``.

    Yields:
        Slices of length ``batch_size`` (the final slice may be shorter).
        With a single input, the slice itself is yielded; with several
        inputs, a tuple of aligned slices is yielded.
    """
    # Bug fix: kwargs.get('batch_size', config['batch_size']) evaluated the
    # default eagerly, so config['batch_size'] was looked up (and had to
    # exist) even when the caller passed batch_size explicitly.  Resolve the
    # global default lazily instead.
    if 'batch_size' in kwargs:
        batch_size = kwargs['batch_size']
    else:
        batch_size = config['batch_size']

    if len(tensors) == 1:
        tensor = tensors[0]
        for i in range(0, len(tensor), batch_size):
            yield tensor[i:i + batch_size]
    else:
        for i in range(0, len(tensors[0]), batch_size):
            yield tuple(x[i:i + batch_size] for x in tensors)

def test_one_batch(X):
    """Compute ranking metrics for one evaluation batch.

    Args:
        X: a pair where X[0] is a torch tensor of top-K recommended item
           ids per user and X[1] is the list of held-out positive items
           per user.

    Returns:
        Dict mapping 'recall' / 'ndcg' / 'precision' / 'mrr' to lists with
        one value per k in config["topks"].
    """
    topk_items = X[0].numpy()
    true_items = X[1]
    # Binary hit matrix: 1 where a recommended item is a held-out positive.
    hit_matrix = utils.getLabel(true_items, topk_items)

    precisions, recalls, ndcgs, mrrs = [], [], [], []
    for k in config["topks"]:
        pr = metrics.RecallPrecision_ATk(true_items, hit_matrix, k)
        precisions.append(pr['precision'])
        recalls.append(pr['recall'])
        ndcgs.append(metrics.NDCGatK_r(true_items, hit_matrix, k))
        mrrs.append(metrics.MRRatK_r(hit_matrix, k))

    return {
        'recall': recalls,
        'ndcg': ndcgs,
        'precision': precisions,
        'mrr': mrrs,
    }


def TEST(dataset, Recmodel, w=None, multicore=0):
    """Evaluate `Recmodel` on the test split of `dataset`.

    Ranks all items per test user (training positives masked out), then
    accumulates recall / precision / ndcg / mrr at every k in
    config["topks"], plus mean AUC over users.

    Args:
        dataset: a BasicDataset exposing `testDict` and `getUserPosItems`.
        Recmodel: model exposing `getUsersRating`; switched to eval mode here.
        w: unused in this function (tensorboard writer in the original code —
           the logging calls are commented out below).
        multicore: when 1, per-batch metrics are computed in a process pool.

    Returns:
        Dict of averaged metrics; also printed to stdout.
    """
    u_batch_size = config['test_u_batch_size']
    dataset: BasicDataset
    testDict: dict = dataset.testDict
    Recmodel: models.LightGCN
    # eval mode with no dropout
    Recmodel = Recmodel.eval()
    max_K = max(config["topks"])
    if multicore == 1:
        pool = multiprocessing.Pool(CORES)

    with torch.no_grad():
        users = list(testDict.keys())
        try:
            # NOTE(review): this is only a warning — the AssertionError is
            # swallowed and evaluation proceeds.  Also stripped under -O.
            assert u_batch_size <= len(users) / 10
        except AssertionError:
            print(f"test_u_batch_size is too big for this dataset, try a small one {len(users) // 10}")
        users_list = []
        rating_list = []
        batch_user_pos_list = []
        auc_record = []
        # ratings = []
        total_batch = len(users) // u_batch_size + 1
        for batch_users in minibatch(users, batch_size=u_batch_size):
            # Training positives per user — excluded from the ranking below
            # so only unseen items can enter the top-K.
            allPos = dataset.getUserPosItems(batch_users)
            # Held-out test positives per user (the ground truth).
            batch_user_pos = [testDict[u] for u in batch_users]
            batch_users_gpu = torch.Tensor(batch_users).long()
            batch_users_gpu = batch_users_gpu.to(config["device"])

            rating = Recmodel.getUsersRating(batch_users_gpu)
            # rating = rating.cpu()
            # Build flat (row, col) index lists for the training positives.
            exclude_index = []
            exclude_items = []
            for range_i, items in enumerate(allPos):
                exclude_index.extend([range_i] * len(items))
                exclude_items.extend(items)
            # Mask training positives with a large negative score so they
            # can never appear in the top-K.
            rating[exclude_index, exclude_items] = -(1 << 10)
            _, rating_K = torch.topk(rating, k=max_K)
            rating = rating.cpu().numpy()
            # AUC needs the full score vector, so compute it before the
            # large `rating` matrix is released.
            aucs = [
                    metrics.AUC(rating[i],
                              dataset,
                              test_data) for i, test_data in enumerate(batch_user_pos)
                ]
            auc_record.extend(aucs)
            del rating
            users_list.append(batch_users)
            rating_list.append(rating_K.cpu())
            batch_user_pos_list.append(batch_user_pos)
        # NOTE(review): total_batch over-counts by one when len(users) is an
        # exact multiple of u_batch_size, which would trip this assert.
        assert total_batch == len(users_list)

        X = zip(rating_list, batch_user_pos_list)
        if multicore == 1:
            pre_results = pool.map(test_one_batch, X)
        else:
            pre_results = []
            for x in X:
                pre_results.append(test_one_batch(x))
        # NOTE(review): `scale` is unused — normalization below divides the
        # per-user sums by len(users) directly.
        scale = float(u_batch_size / len(users))

        results = {
            # 'ap': np.zeros(len(config["topks"])),
            'recall': np.zeros(len(config["topks"])),
            'precision': np.zeros(len(config["topks"])),
            'ndcg': np.zeros(len(config["topks"])),
            # AUC is summed here and averaged with the others below.
            'auc': np.sum(auc_record),
            # 'auc': np.zeros(1),
            'mrr': np.zeros(len(config["topks"])),
        }

        # Sum the per-batch metric sums, then average over all test users.
        for result in pre_results:
            results['recall'] += result['recall']
            results['precision'] += result['precision']
            # results['ap'] += result['ap']
            results['ndcg'] += result['ndcg']
            # results['auc'] += result['auc']
            results['mrr'] += result['mrr']
        results['recall'] /= float(len(users))
        results['precision'] /= float(len(users))
        # results['ap'] /= float(len(users))
        results['ndcg'] /= float(len(users))
        results['auc'] /= float(len(users))
        results['mrr'] /= float(len(users))

        # if world.tensorboard:
        #     w.add_scalars(f'Test/Recall@{world.topks}',
        #                   {str(world.topks[i]): results['recall'][i] for i in range(len(world.topks))}, epoch)
        #     w.add_scalars(f'Test/Precision@{world.topks}',
        #                   {str(world.topks[i]): results['precision'][i] for i in range(len(world.topks))}, epoch)
        #     w.add_scalars(f'Test/NDCG@{world.topks}',
        #                   {str(world.topks[i]): results['ndcg'][i] for i in range(len(world.topks))}, epoch)
        # w.add_scalars(f'Test/auc',
        #               {str(world.topks[i]): results['ndcg'][i] for i in range(len(world.topks))}, epoch)

        if multicore == 1:
            pool.close()
        for name in ["recall", "precision", "ndcg", "auc", "mrr"]:
            print(name, ": ", results[name])
        return results


def get_default_parameters():
    """Return the default hyper-parameter overrides for this run.

    These are the key settings, including the ones exposed for tuning:
    dataset / sampler / model / loss selection plus optimizer knobs.
    """
    # Pin the third GPU when CUDA is available, otherwise fall back to CPU.
    device = torch.device('cuda:2' if torch.cuda.is_available() else "cpu")

    return {
        # decareSample + lightGCN + bpr
        "device": device,
        "checkpoints_path": "/data/tshuang/Projects/lightGCN/Rec_SAUC/checkpoints",
        "dataset": 'amazon-book',       # [lastfm, gowalla, yelp2018, amazon-book]
        "sample_way": "user_decare",    # [all_pos_random, user_decare]
        "model": 'lgn',                 # [mf, lgn, lgn_hash]
        "loss": 'bpr',                  # [bce, bpr, sauc_for_sample, sauc_for_user]
        "weight_decay": 0.0001,
        'lr': 0.001,
        'batch_size': 2**18,            # 2048
        "is_use_early_stop": False,
        "TRAIN_epochs": 1000,
    }

def main(config):
    """Build the model named in `config`, load its pretrained checkpoint,
    and run one full evaluation pass over the test set."""
    print('===========config================')
    pprint(config)
    print('===========end===================')

    utils.set_seed(config["seed"])
    print(">>SEED:", config["seed"])

    # Validate the requested dataset/model names before constructing anything.
    check_dataset(config["dataset"])
    check_model(config["model"])

    eval_dataset = getDataset(config["data_path"], config["dataset"])
    rec_model = getModel(config["model"], config, eval_dataset).to(config["device"])

    # NOTE(review): checkpoint filename is hard-coded; it must match the
    # dataset/model selected in config.
    ckpt_path = os.path.join(config["checkpoints_path"], "lgn-amazon-book-random-bpr-3-64.pth.tar")
    rec_model.load_state_dict(torch.load(ckpt_path))

    results = TEST(eval_dataset, rec_model, None, config['multicore'])
if __name__ == "__main__":
    # `config` (imported at the top) is the very object Config.config refers
    # to, so updating it in place is enough — no re-import needed.
    config.update(get_default_parameters())
    main(config)
