from Model import Light_GCN
from DataLoader import DataLoader
from parser1 import args
import numpy as np
import torch
import seaborn as sns
from hyper_MODEL import Hyper_only
from Distill_Model import MLP_Diss
from Diss_add_selfA import MLP_Att
from iner_gan import Gan_Att
from Sp_Hyper import Sp_Hyper
from Dragon import Dragon
import random
from time import time
import torch.optim as optim
from tqdm import tqdm
import torch.nn as nn
import multiprocessing
import heapq
import os
from data.logging import Logger
from datetime import datetime
import torch.nn.functional as F
import metrics
import matplotlib.pyplot as plt

# Module-level state shared by every function below: the dataset loader,
# the list of evaluation cutoffs, and the per-user interaction cap.
dataloader = DataLoader(path=args.data_path + args.dataset, batch_size=args.batch_size)
# NOTE(review): eval() on a CLI string (e.g. "[10, 20, 50]") — acceptable for a
# trusted command line, but ast.literal_eval would be safer; verify args.Ks
# cannot come from untrusted input.
Ks = eval(args.Ks)
# presumably the maximum number of interactions per user — TODO confirm in DataLoader
max_inter = dataloader.max_inter


def bpr_loss_calculate(users, pos_items, neg_items):
    """Compute the BPR pairwise ranking loss plus an L2 embedding penalty.

    Args:
        users: (B, d) embeddings of the sampled batch users.
        pos_items: (B, d) embeddings of positively interacted items.
        neg_items: (B, d) embeddings of sampled negative items.

    Returns:
        (mf_loss, emb_loss, reg_loss): the ranking loss, the decay-weighted
        L2 penalty, and a constant 0.0 kept for interface compatibility.
    """
    # Row-wise dot products give one score per (user, item) pair.
    pos_scores = (users * pos_items).sum(dim=1)
    neg_scores = (users * neg_items).sum(dim=1)

    # BPR: maximize the log-sigmoid of the positive-negative score margin.
    mf_loss = -F.logsigmoid(pos_scores - neg_scores).mean()

    # L2 penalty over all three embedding batches, normalized by the
    # configured batch size (not the actual batch length).
    l2_term = 0.5 * (users.pow(2).sum() + pos_items.pow(2).sum() + neg_items.pow(2).sum())
    emb_loss = args.decay * (l2_term / args.batch_size)

    reg_loss = 0.0
    return mf_loss, emb_loss, reg_loss


def ranklist_by_heapq(user_pos_test, test_items, rating, Ks):
    """Rank candidate items by predicted score and flag ground-truth hits.

    Args:
        user_pos_test: collection of the user's held-out positive item ids.
        test_items: candidate item ids to rank (training items excluded).
        rating: score container indexable by item id.
        Ks: list of cutoffs; only the top max(Ks) items are ranked.

    Returns:
        (r, auc): binary hit list of length max(Ks) in rank order, and a
        placeholder AUC of 0. (AUC is not computed by this ranker.)
    """
    scores = {item: rating[item] for item in test_items}
    top_items = heapq.nlargest(max(Ks), scores, key=scores.get)
    # 1 where the ranked item is a true positive, else 0.
    r = [1 if item in user_pos_test else 0 for item in top_items]
    return r, 0.

def get_performance(user_pos_test, r, auc, Ks):
    """Evaluate a ranked binary hit list at every cutoff in Ks.

    Args:
        user_pos_test: ground-truth positives (its length feeds recall).
        r: binary hit list in rank order (from ranklist_by_heapq).
        auc: pass-through AUC value.
        Ks: cutoffs to evaluate at.

    Returns:
        dict mapping metric name -> per-K numpy array, plus the scalar auc.
    """
    n_pos = len(user_pos_test)
    return {
        'recall': np.array([metrics.recall_at_k(r, K, n_pos) for K in Ks]),
        'precision': np.array([metrics.precision_at_k(r, K) for K in Ks]),
        'ndcg': np.array([metrics.ndcg_at_k(r, K) for K in Ks]),
        'hit_ratio': np.array([metrics.hit_at_k(r, K) for K in Ks]),
        'auc': auc,
    }

def test_one_user(x):
    """Score one user's full-item ranking against their held-out positives.

    Args:
        x: tuple of (rating_row, user_id, is_val): the predicted score for
           every item, the user id, and whether to score the validation split.

    Returns:
        The per-K metrics dict produced by get_performance.
    """
    rating, u, is_val = x[0], x[1], x[-1]

    # Items already seen in training are excluded from the candidate pool;
    # users absent from train_items contribute an empty exclusion set.
    try:
        training_items = dataloader.train_items[u]
    except Exception:
        training_items = []

    # Ground-truth positives come from validation or test, per the flag.
    split = dataloader.val_set if is_val else dataloader.test_set
    user_pos_test = split[u]

    candidates = list(set(range(dataloader.n_items)) - set(training_items))
    r, auc = ranklist_by_heapq(user_pos_test, candidates, rating, Ks)
    return get_performance(user_pos_test, r, auc, Ks)


def test(test_user, is_val):
    """Evaluate the current model for a list of users.

    Args:
        test_user: list of user ids to evaluate.
        is_val: True to score against the validation split, False for test.

    Returns:
        dict of metric arrays ('precision', 'recall', 'ndcg', 'hit_ratio')
        averaged over all users, plus scalar 'auc'.

    Fixes over the original:
      * the worker pool is now closed AND joined in a finally block, so it is
        not leaked if scoring raises mid-loop;
      * batch counts use ceiling division, avoiding the empty trailing batch
        the old `n // b + 1` form produced when n is a multiple of b.
    """
    Light_GCN.eval()
    with torch.no_grad():
        user_emb, item_emb = Light_GCN(True)
    result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)),
              'hit_ratio': np.zeros(len(Ks)), 'auc': 0.}

    n_user = len(test_user)
    n_item = len(item_emb)
    n_batch = (n_user + batch_size - 1) // batch_size
    item_bs = batch_size * 2  # item chunk size used to bound peak memory
    n_item_batch = (n_item + item_bs - 1) // item_bs

    count = 0
    pool = multiprocessing.Pool(cores)
    try:
        for u_batch in range(n_batch):
            start = u_batch * batch_size
            end = min(start + batch_size, n_user)
            user_batch = test_user[start:end]
            u_g_embeddings = user_emb[user_batch]

            # Score items chunk by chunk, then stitch the full rating row.
            rat = []
            for i_batch in range(n_item_batch):
                i_start = i_batch * item_bs
                i_end = min(i_start + item_bs, n_item)
                i_g_embeddings = item_emb[i_start:i_end]
                rat.append(torch.matmul(u_g_embeddings, torch.transpose(i_g_embeddings, 0, 1)))
            rat = torch.cat(rat, 1).detach().cpu().numpy()

            # Fan per-user scoring out to the worker pool.
            batch_result = pool.map(test_one_user,
                                    zip(rat, user_batch, [is_val] * len(user_batch)))
            count += len(batch_result)
            for re in batch_result:
                result['precision'] += re['precision'] / n_user
                result['recall'] += re['recall'] / n_user
                result['ndcg'] += re['ndcg'] / n_user
                result['hit_ratio'] += re['hit_ratio'] / n_user
                result['auc'] += re['auc'] / n_user
        assert count == n_user
    finally:
        # Always release worker processes, even when evaluation fails.
        pool.close()
        pool.join()
    return result


def train():
    """Run the full training loop.

    Each epoch: optimize the BPR loss over n_batch sampled batches, evaluate
    on the validation split, re-test whenever validation recall@Ks[1]
    improves, and stop early after args.early_stop epochs without
    improvement. Reads the module-level Light_GCN model, optim optimizer,
    dataloader, and loggers.

    Fixes over the original:
      * `test_ret`/`epoch` could be unbound at the final result_logger call
        (NameError) if validation recall never improved or args.epoch == 0 —
        both are now initialized and the final logging is guarded;
      * removed dead locals (training_time_list, line_bpr_loss, line_reg_loss,
        and the four never-read metric logger lists).
    """
    line_var_recall, line_var_precision, line_var_ndcg = [], [], []
    bpr_loss_loger, reg_loss_logger = [], []
    best_recall = 0
    test_ret = None   # set the first time validation recall improves
    epoch = -1        # keeps the final logging valid even if args.epoch == 0
    n_batch = dataloader.n_train // args.batch_size + 1
    stopping_step = 0
    for epoch in range(args.epoch):
        t1 = time()
        batch_bpr_loss, batch_bpr_reg = 0., 0.
        Light_GCN.train()
        Light_GCN.pre_epoch_processing()
        for batch in tqdm(range(n_batch)):
            optim.zero_grad()
            train_user, pos_item, neg_item = dataloader.sample()
            user_emb, item_emb = Light_GCN(False)
            mf_loss, reg_loss, _ = bpr_loss_calculate(
                user_emb[train_user], item_emb[pos_item], item_emb[neg_item])
            loss = mf_loss + reg_loss
            loss.backward()

            # One-off gradient histograms for debugging (epoch 2 only).
            if args.is_grad and epoch == 2:
                for name, param in Light_GCN.named_parameters():
                    sns.histplot(param.grad.cpu().detach().numpy().flatten(), label=str(name))
                    plt.legend()
                    plt.show()
            optim.step()
            batch_bpr_loss += mf_loss.detach().cpu().numpy()
            batch_bpr_reg += reg_loss.detach().cpu().numpy()
        t2 = time()
        test_user = list(dataloader.test_set.keys())
        val_user = list(dataloader.val_set.keys())
        ret = test(val_user, True)
        t3 = time()
        bpr_loss_loger.append(batch_bpr_loss)
        reg_loss_logger.append(batch_bpr_reg)

        # Track metrics at the middle cutoff (Ks[1]) for the final plots.
        line_var_recall.append(ret['recall'][1])
        line_var_precision.append(ret['precision'][1])
        line_var_ndcg.append(ret['ndcg'][1])

        perf_str = 'Epoch %d [%.1fs + %.1fs]: train==[%.5f=%.5f + %.5f + %.5f], recall=[%.5f, %.5f, %.5f], ' \
                   'precision=[%.5f, %.5f, %.5f], hit=[%.5f, %.5f, %.5f], ndcg=[%.5f, %.5f, %.5f]' % \
                   (epoch, t2 - t1, t3 - t2, batch_bpr_loss + batch_bpr_reg, batch_bpr_loss, batch_bpr_reg, 0.,
                    ret['recall'][0], ret['recall'][1], ret['recall'][2],
                    ret['precision'][0], ret['precision'][1], ret['precision'][2],
                    ret['hit_ratio'][0], ret['hit_ratio'][1], ret['hit_ratio'][2],
                    ret['ndcg'][0], ret['ndcg'][1], ret['ndcg'][2])

        logger.logging(perf_str)

        # Early stopping on validation recall@Ks[1]; run the (expensive)
        # test-split evaluation only when validation improves.
        if ret['recall'][1] > best_recall:
            best_recall = ret['recall'][1]
            test_ret = test(test_user, is_val=False)
            logger.logging("Test_Recall@%d: %.5f,  precision=[%.5f], ndcg=[%.5f]" % (
                eval(args.Ks)[1], test_ret['recall'][1], test_ret['precision'][1], test_ret['ndcg'][1]))
            stopping_step = 0
        elif stopping_step < args.early_stop:
            stopping_step += 1
            logger.logging('#####Early stopping steps: %d #####' % stopping_step)
        else:
            logger.logging('#####Early stop! #####')
            break

    # Guard: test_ret stays None when validation recall never improved; the
    # original code raised NameError here in that case.
    if test_ret is not None:
        result_logger.logging("Test_Recall@%d: %.5f,  precision=[%.5f], ndcg=[%.5f], epoch=%d, lr=%f, cat_rate=%f " % (
            eval(args.Ks)[1], test_ret['recall'][1], test_ret['precision'][1], test_ret['ndcg'][1], epoch, args.lr, args.model_cat_rate))

    if args.is_vision:
        plt.figure()
        plt.plot(range(len(bpr_loss_loger)), bpr_loss_loger, color="red", label="bprLoss")
        plt.plot(range(len(reg_loss_logger)), reg_loss_logger, color="blue", label="cl_Loss")
        plt.legend()  # required, otherwise the label box is not drawn

        plt.figure()
        epochs_axis = range(len(line_var_recall))
        plt.plot(epochs_axis, line_var_recall, color="green", label="Recall")
        plt.plot(epochs_axis, line_var_ndcg, color="blue", label="NDCG")
        plt.plot(epochs_axis, line_var_precision, color="red", label="precision")
        plt.scatter(epochs_axis, line_var_recall, color="green")
        plt.legend()  # required, otherwise the label box is not drawn

        plt.show()


def set_seed(seed):
    """Seed every RNG this script touches (numpy, stdlib random, torch CPU
    and all CUDA devices) so runs are reproducible."""
    for seeder in (np.random.seed, random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)

def initialize_weights(layer):
    """Xavier-uniform init for Linear weights, zeros for their bias.

    Intended to be passed to Module.apply(); non-Linear modules are ignored.
    """
    if not isinstance(layer, nn.Linear):
        return
    nn.init.xavier_uniform_(layer.weight.data)
    if layer.bias is not None:
        nn.init.zeros_(layer.bias.data)

if __name__ == '__main__':
    batch_size = args.batch_size
    # Fix: cpu_count() // 5 is 0 on machines with fewer than 5 cores, and
    # multiprocessing.Pool(0) raises ValueError — always request >= 1 worker.
    cores = max(1, multiprocessing.cpu_count() // 5)

    # NOTE(review): torch is already imported here, so this only restricts
    # device visibility if CUDA has not been initialized yet — confirm.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
    set_seed(2023)

    # Backbone selection. Alternatives previously tried: Light_GCN,
    # Hyper_only, MLP_Diss, MLP_Att, Gan_Att.
    # NOTE: this global name (read by train()/test()) shadows the imported
    # Light_GCN class.
    Light_GCN = Sp_Hyper()
    Light_GCN.apply(initialize_weights)
    # NOTE: rebinding `optim` shadows the torch.optim module; train() uses
    # this global optimizer object.
    optim = optim.Adam(Light_GCN.parameters(), lr=args.lr)
    Light_GCN.cuda()

    # One timestamped log per run, plus a cumulative per-model/dataset log.
    task_name = "%s_%s_%s" % (datetime.now().strftime('%Y-%m-%d-%H-%M-%S'),
                              args.dataset, args.model)
    logger = Logger(filename=task_name)
    logger.logging(str(args))
    result_logger = Logger(filename=args.model + args.dataset)
    train()
