import logging
import time
import sys
import os
from tqdm import tqdm
import numpy as np
import warnings

import torch
import torch.nn as nn

import math
from models.DyGFormer import DyGFormer
from models.modules import MergeLayer
from utils import DataLoader
from utils.utils import set_random_seed, convert_to_gpu, get_parameter_sizes, create_optimizer, NeighborSampler
from utils.utils import get_neighbor_sampler, NegativeEdgeSampler
from utils.DataLoader import get_idx_data_loader, get_grapg_data, Data
from utils.load_configs import parse_args


def evaluate_model(model: nn.Module, neighbor_sampler: NeighborSampler, evaluate_idx_data_loader: DataLoader,
                   evaluate_neg_edge_sampler: NegativeEdgeSampler, evaluate_data: Data,
                   num_nodes: int = 115849, predict_len: int = 5000):
    """
    Evaluate the model on the link prediction task by scoring every node as a
    candidate destination for each interaction and computing MRR@100/@50/@5.

    :param model: nn.Module, sequential (backbone, link predictor) pair to be evaluated
    :param neighbor_sampler: NeighborSampler, neighbor sampler
    :param evaluate_idx_data_loader: DataLoader, evaluate index data loader
    :param evaluate_neg_edge_sampler: NegativeEdgeSampler, evaluate negative edge sampler
    :param evaluate_data: Data, data to be evaluated
    :param num_nodes: int, total number of candidate nodes to rank (default keeps the
        original hard-coded dataset size)
    :param predict_len: int, number of candidates scored per forward pass
        (memory vs. throughput trade-off)
    :return: list of reciprocal ranks (top-100 hits) over all evaluated interactions
    """
    # evaluation negatives must be reproducible across runs
    assert evaluate_neg_edge_sampler.seed is not None
    evaluate_neg_edge_sampler.reset_random_state()

    model[0].set_neighbor_sampler(neighbor_sampler)

    model.eval()
    with torch.no_grad():
        evaluate_idx_data_loader_tqdm = tqdm(evaluate_idx_data_loader, ncols=120)
        all_nodes = np.arange(0, num_nodes)
        iter_num = math.ceil(num_nodes / predict_len)
        total_mrr = []
        for batch_idx, evaluate_data_indices in enumerate(evaluate_idx_data_loader_tqdm):
            batch_src_node_ids, batch_dst_node_ids, batch_node_interact_times, batch_edge_ids = \
                evaluate_data.src_node_ids[evaluate_data_indices], evaluate_data.dst_node_ids[evaluate_data_indices], \
                evaluate_data.node_interact_times[evaluate_data_indices], evaluate_data.edge_ids[evaluate_data_indices]

            mrr, mrr50, mrr5 = [], [], []
            for src, dst, ts in zip(batch_src_node_ids, batch_dst_node_ids, batch_node_interact_times):

                preds = np.array([], dtype=np.float32)

                for idx in range(iter_num):
                    # Fix: compute the chunk offset unconditionally. The original only
                    # assigned start_idx in the non-final branch, so the final chunk
                    # raised NameError when iter_num == 1 and otherwise silently reused
                    # the previous iteration's value.
                    start_idx = predict_len * idx
                    # numpy slicing clamps at the end, so the final (possibly shorter)
                    # chunk needs no special case
                    test_items = all_nodes[start_idx:start_idx + predict_len]
                    chunk_len = len(test_items)
                    test_src_list = np.full(chunk_len, src)
                    test_ts = np.full(chunk_len, ts)

                    batch_src_node_embeddings, batch_dst_node_embeddings = \
                        model[0].temporal_embeddings(src_node_ids=test_src_list,
                                                     dst_node_ids=test_items,
                                                     node_interact_times=test_ts)

                    # link probability for each candidate, shape (chunk_len,)
                    predicts = model[1](input_1=batch_src_node_embeddings,
                                        input_2=batch_dst_node_embeddings).squeeze(dim=-1).sigmoid()
                    preds = np.concatenate((preds, predicts.cpu().numpy()), axis=0)

                # reciprocal rank of the true destination inside each top-k cutoff
                ranked = np.argsort(-preds)
                for top_k, bucket in ((100, mrr), (50, mrr50), (5, mrr5)):
                    top = ranked[:top_k]
                    if dst in top:
                        bucket.append(1 / (np.where(top == dst)[0][0] + 1))

            total_mrr += mrr
            # Fix: the metric labels read "mss" in the original although the values are MRRs
            print(f'mrr@100 = {sum(mrr)/len(batch_src_node_ids):.6f} | mrr@50 = {sum(mrr50)/len(batch_src_node_ids):.6f} | mrr@5 = {sum(mrr5)/len(batch_src_node_ids):.6f}')
            evaluate_idx_data_loader_tqdm.set_description(f'mrr = {sum(mrr)/len(batch_src_node_ids):.6f}')
    # Fix: the original printed sum(evaluate_data.num_interactions)/evaluate_data.num_interactions,
    # which is a TypeError for an int count; the intended quantity is the mean reciprocal rank
    # over all evaluated interactions.
    print(f" total mrr : {sum(total_mrr)/evaluate_data.num_interactions}")
    return total_mrr


if __name__ == "__main__":

    warnings.filterwarnings('ignore')

    # get arguments
    # NOTE(review): is_evaluation=False looks odd for an evaluation-only script;
    # confirm the intended parse_args mode before changing it.
    args = parse_args(is_evaluation=False)

    # get data for training and validation
    node_raw_features, edge_raw_features, full_data, train_data, val_data = \
        get_grapg_data(dataset_name=args.dataset_name, val_ratio=args.val_ratio, test_ratio=args.test_ratio)

    # initialize training neighbor sampler to retrieve temporal graph
    train_neighbor_sampler = get_neighbor_sampler(data=train_data,
                                                  sample_neighbor_strategy=args.sample_neighbor_strategy,
                                                  time_scaling_factor=args.time_scaling_factor, seed=0)

    # initialize validation/test neighbor sampler over the full temporal graph
    full_neighbor_sampler = get_neighbor_sampler(data=full_data, sample_neighbor_strategy=args.sample_neighbor_strategy,
                                                 time_scaling_factor=args.time_scaling_factor, seed=1)

    # initialize negative samplers; evaluation samplers are seeded so the sampled
    # negatives are identical across different runs
    train_neg_edge_sampler = NegativeEdgeSampler(src_node_ids=train_data.src_node_ids,
                                                 dst_node_ids=train_data.dst_node_ids)
    val_neg_edge_sampler = NegativeEdgeSampler(src_node_ids=full_data.src_node_ids, dst_node_ids=full_data.dst_node_ids,
                                               seed=0)

    # get index data loaders (shuffle=False preserves chronological order for evaluation)
    train_idx_data_loader = get_idx_data_loader(indices_list=list(range(len(train_data.src_node_ids))),
                                                batch_size=args.batch_size, shuffle=False)
    val_idx_data_loader = get_idx_data_loader(indices_list=list(range(len(val_data.src_node_ids))),
                                              batch_size=args.batch_size, shuffle=False)

    val_metric_all_runs = []

    for run in range(args.num_runs):
        set_random_seed(seed=run)

        args.seed = run
        args.save_model_name = f'{args.model_name}_seed{args.seed}'

        # set up per-run logging: one file handler (DEBUG) plus one console handler (WARNING)
        logging.basicConfig(level=logging.INFO)
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        os.makedirs(f"./logs/{args.model_name}/{args.dataset_name}/{args.save_model_name}/", exist_ok=True)
        # create file handler that logs debug and higher level messages
        fh = logging.FileHandler(
            f"./logs/{args.model_name}/{args.dataset_name}/{args.save_model_name}/{str(time.time())}.log")
        fh.setLevel(logging.DEBUG)
        # create console handler with a higher log level
        ch = logging.StreamHandler()
        ch.setLevel(logging.WARNING)
        # create formatter and add it to the handlers
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to logger
        logger.addHandler(fh)
        logger.addHandler(ch)

        run_start_time = time.time()
        logger.info(f"********** Run {run + 1} starts. **********")

        logger.info(f'configuration is {args}')

        # create model: DyGFormer backbone + MLP link predictor over embedding pairs
        dynamic_backbone = DyGFormer(node_raw_features=node_raw_features, edge_raw_features=edge_raw_features,
                                     neighbor_sampler=train_neighbor_sampler,
                                     time_feat_dim=args.time_feat_dim, channel_embedding_dim=args.channel_embedding_dim,
                                     patch_size=args.patch_size,
                                     num_layers=args.num_layers, num_heads=args.num_heads, dropout=args.dropout,
                                     max_input_sequence_length=args.max_input_sequence_length, device=args.device)
        link_predictor = MergeLayer(input_dim1=node_raw_features.shape[1], input_dim2=node_raw_features.shape[1],
                                    hidden_dim=node_raw_features.shape[1], output_dim=1)
        model = nn.Sequential(dynamic_backbone, link_predictor)
        logger.info(f'model -> {model}')
        logger.info(f'model name: {args.model_name}, #parameters: {get_parameter_sizes(model) * 4} B, '
                    f'{get_parameter_sizes(model) * 4 / 1024} KB, {get_parameter_sizes(model) * 4 / 1024 / 1024} MB.')

        model = convert_to_gpu(model, device=args.device)

        model_file = f"./checkpoint/ckpt.pth"

        # Fix: map_location keeps loading working when the checkpoint was saved on a
        # different device than the one selected for this run.
        checkpoint = torch.load(model_file, map_location=args.device)
        model.load_state_dict(checkpoint['model'])  # load the model's learnable parameters

        # evaluation uses the full-graph sampler internally (evaluate_model overrides
        # the sampler set here); the training sampler is restored only for parity with
        # the original flow
        model[0].set_neighbor_sampler(train_neighbor_sampler)

        mrr = evaluate_model(model=model,
                             neighbor_sampler=full_neighbor_sampler,
                             evaluate_idx_data_loader=val_idx_data_loader,
                             evaluate_neg_edge_sampler=val_neg_edge_sampler,
                             evaluate_data=val_data)
        print(mrr)

        # Fix: detach and close the per-run handlers. The original added fresh handlers
        # to the root logger on every iteration without removing the old ones, so from
        # the second run onward every log line was emitted multiple times (and log
        # files from earlier runs stayed open).
        logger.removeHandler(fh)
        logger.removeHandler(ch)
        fh.close()
        ch.close()
    sys.exit()
