'''
Created on Mar 1, 2020
Pytorch Implementation of LightGCN in
Xiangnan He et al. LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation

@author: Jianbai Ye (gusye@mail.ustc.edu.cn)
'''
import world
import torch
from torch import nn, optim
import numpy as np
from torch import log
import itertools
from dataloader import Loader
from time import time
import torch.nn.functional as F
from model import LightGCN
from model import PairWiseModel
from sklearn.metrics import roc_auc_score
import random
import os
try:
    # Try to load the compiled C++ negative sampler for speed.
    from cppimport import imp_from_filepath
    from os.path import join, dirname
    path = join(dirname(__file__), "sources/sampling.cpp")
    sampling = imp_from_filepath(path)
    sampling.seed(world.seed)
    sample_ext = True
except Exception:
    # Fall back to the (slower) pure-python sampler when the cpp extension
    # cannot be compiled/loaded. `except Exception` (rather than a bare
    # `except:`) avoids swallowing KeyboardInterrupt/SystemExit.
    world.cprint("Cpp extension not loaded")
    sample_ext = False


class BPRLoss:
    """Joint trainer: BPR loss for the recommender, a reconstruction loss for
    the doc-conditioned VAE, and an InfoNCE contrastive loss between real item
    embeddings and VAE-decoded ("virtual") embeddings.
    """
    def __init__(self,
                 recmodel : PairWiseModel,
                 config : dict,
                 doc_vae):
        # recmodel: pairwise model exposing bpr_loss(users, pos, neg, doc_ids)
        # doc_vae: conditional VAE over item embeddings; calling it returns its loss
        self.model = recmodel
        self.vae = doc_vae
        self.weight_decay = config['decay']
        self.lr = config['lr']
        self.opt = optim.Adam(recmodel.parameters(), lr=self.lr)
        # NOTE(review): an explicit weight_decay for the VAE optimizer was
        # tried and disabled here (was `weight_decay=1e-4`)
        self.vaeopt = optim.Adam(doc_vae.parameters(), lr=self.lr)

    def stageOne(self, users, pos, neg, batch_doc):
        """Run one optimization step over a batch.

        Returns a numpy array [total_loss, bpr_loss, cl_loss, vae_loss].
        batch_doc[:, 0] feeds bpr_loss as ids; batch_doc[:, 1:] are treated
        as one-hot category labels — assumed from usage, TODO confirm.
        """
        # BPR loss; also returns the updated item embeddings and the ego
        # (input) embeddings — new embeddings train the VAE, detached copies
        # feed the InfoNCE term
        loss, reg_loss,item_emb,item_emb_ego = self.model.bpr_loss(users, pos, neg, batch_doc[:,0])
        reg_loss = reg_loss*self.weight_decay
        loss = loss + reg_loss

        # Train the VAE on detached embeddings (no grad into the recommender)
        self.vae.train()
        vae_loss = self.vae(item_emb.detach(),batch_doc[:,1:])
        self.vaeopt.zero_grad()
        vae_loss.backward()
        self.vaeopt.step()
        self.vae.eval()

        # Contrastive learning: one real embedding per category (anchor) vs.
        # a VAE-decoded virtual embedding for the same category
        unique_labels = torch.unique(batch_doc[:, 1:], dim=0)
        num_classes = unique_labels.size(0)
        anchor, positive, negatives = [],[],[]
        z = torch.randn(num_classes, world.config['latent_dim_vae']).to(world.device)
        vis_embeddings = self.vae.decode(z*world.config['vaerate'], unique_labels)
        for label in unique_labels:
            # mask of batch rows whose one-hot label matches this category
            current_label_mask = (batch_doc[:, 1:] == label).all(dim=1)
            current_label_embeddings = item_emb.detach()[current_label_mask]
            if current_label_embeddings.size(0) == 0:
                continue
            # sample one real embedding of this category as the anchor
            sample_idx = np.random.choice(current_label_embeddings.size(0), 1, replace=False)
            sample_embedding = current_label_embeddings[sample_idx]
            anchor.append(sample_embedding)
        # InfoNCE between real anchors and generated (virtual) embeddings
        cl_loss = info_nce_loss(torch.cat(anchor,dim=0),vis_embeddings)
        batch_cl_loss = world.config['cl_rate']*cl_loss
        total_loss = loss + batch_cl_loss

        self.opt.zero_grad()
        total_loss.backward()
        self.opt.step()

        return np.array([total_loss.cpu().item(),loss.cpu().item(),batch_cl_loss.cpu().item(),vae_loss.cpu().item()])


def UniformSample_original(dataset, neg_ratio = 1):
    """Draw (user, pos, neg) BPR triples plus a batch of doc label rows.

    Uses the compiled C++ sampler when available; otherwise falls back to
    the pure-python implementation.

    Returns:
        (S, C) where S is an (n, 3) array of [user, pos_item, neg_item]
        and C are rows sampled with replacement from dataset.doc_cat.
    """
    allPos = dataset.allPos
    if sample_ext:
        S = sampling.sample_negative(dataset.n_users, dataset.m_items,
                                     dataset.trainDataSize, allPos, neg_ratio)
        # BUG FIX: this branch previously fell through to `return S,C` with
        # `C` never assigned (NameError). Sample the doc labels here exactly
        # as the python fallback does.
        doc_idx = np.random.randint(0, dataset.vae_datasize, dataset.vae_datasize)
        C = dataset.doc_cat[doc_idx]
    else:
        S, C = UniformSample_original_python(dataset)
    return S, C

def UniformSample_original_python(dataset):
    """The original implementation of BPR sampling in LightGCN.

    For each uniformly-drawn user with at least one interaction, pick one
    positive item and rejection-sample one negative item.

    Returns:
        (S, docs): S is an (n, 3) np.array of [user, pos_item, neg_item]
        (n <= dataset.trainDataSize since users without positives are
        skipped); docs are dataset.vae_datasize rows drawn with replacement
        from dataset.doc_cat.

    Cleanup: removed the dead timing accumulators (sample_time1/2, total,
    start/end) which were computed and discarded; the RNG call sequence is
    unchanged, so sampled output is identical for a fixed seed.
    """
    user_num = dataset.trainDataSize
    users = np.random.randint(0, dataset.n_users, user_num)
    # one epoch's worth of doc label rows, sampled with replacement
    doc_idx = np.random.randint(0, dataset.vae_datasize, dataset.vae_datasize)
    docs = dataset.doc_cat[doc_idx]
    allPos = dataset.allPos
    S = []
    for user in users:
        posForUser = allPos[user]
        if len(posForUser) == 0:
            continue
        posindex = np.random.randint(0, len(posForUser))
        positem = posForUser[posindex]
        # rejection-sample a negative: any item the user has not interacted with
        while True:
            negitem = np.random.randint(0, dataset.m_items)
            if negitem not in posForUser:
                break
        S.append([user, positem, negitem])
    return np.array(S), docs

# ===================end samplers==========================
# =====================utils====================================
def initialize_weights(layer):
    """Xavier-uniform init for nn.Linear weights and zero biases.

    Layers of any other type are left untouched, so this is safe to use
    with `module.apply(initialize_weights)`.
    """
    if not isinstance(layer, nn.Linear):
        return
    nn.init.xavier_uniform_(layer.weight.data)
    if layer.bias is not None:
        nn.init.zeros_(layer.bias.data)

def set_seed(seed):
    """Seed numpy and torch RNGs (including all CUDA devices, when present)
    for reproducible runs."""
    np.random.seed(seed)
    cuda_ok = torch.cuda.is_available()
    if cuda_ok:
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    torch.manual_seed(seed)

def getFileName():
    """Build the checkpoint filename for the current model/dataset config.

    Returns:
        Absolute path under world.FILE_PATH.

    Raises:
        ValueError: for an unrecognized world.model_name (previously this
        surfaced as a confusing UnboundLocalError on `file`).
    """
    if world.model_name == 'mf':
        file = f"mf-{world.dataset}-{world.config['latent_dim_rec']}.pth.tar"
    elif world.model_name == 'lgn':
        file = f"lgn-{world.dataset}-{world.config['lightGCN_n_layers']}-{world.config['latent_dim_rec']}.pth.tar"
    else:
        raise ValueError(f"unknown model name: {world.model_name}")
    return os.path.join(world.FILE_PATH,file)

def minibatch(*tensors, **kwargs):
    """Yield consecutive batches over one or more equal-length indexables.

    With a single input, yields slices of it; with several, yields tuples
    of parallel slices. Batch size comes from kwargs['batch_size'] or the
    global config default.
    """
    batch_size = kwargs.get('batch_size', world.config['bpr_batch_size'])
    total = len(tensors[0])
    single = len(tensors) == 1

    for start in range(0, total, batch_size):
        stop = start + batch_size
        if single:
            yield tensors[0][start:stop]
        else:
            yield tuple(t[start:stop] for t in tensors)


def shuffle(*arrays, **kwargs):
    """Apply one shared random permutation to every input array.

    Pass indices=True to also get the permutation back.

    Raises:
        ValueError: if the inputs do not all have the same length.
    """
    want_indices = kwargs.get('indices', False)

    if len({len(a) for a in arrays}) != 1:
        raise ValueError('All inputs to shuffle must have '
                         'the same length.')

    perm = np.arange(len(arrays[0]))
    np.random.shuffle(perm)

    if len(arrays) == 1:
        shuffled = arrays[0][perm]
    else:
        shuffled = tuple(a[perm] for a in arrays)

    return (shuffled, perm) if want_indices else shuffled

def info_nce_loss(anchor, vis_embeddings, temperature=world.config['t']):
    """InfoNCE over matched rows: anchor[i]'s positive is vis_embeddings[i],
    every other row serves as a negative.

    anchor: (n, dim); vis_embeddings: (n, dim) — presumably one generated
    embedding per category, matching anchor order (verify against caller).
    """
    # temperature-scaled similarity between every anchor and every virtual embedding
    logits = torch.mm(anchor, vis_embeddings.T) / temperature
    targets = torch.arange(len(anchor), dtype=torch.long).to(world.device)
    ce = F.cross_entropy(logits, targets)
    # cross_entropy already averages over the batch; mean() kept for parity
    return torch.mean(ce)

class timer:
    """
    Time context manager for code block
        with timer():
            do something
        timer.get()
    """
    from time import time
    TAPE = [-1]  # anonymous duration records; -1 is the empty sentinel
    NAMED_TAPE = {}  # accumulated durations keyed by name

    @staticmethod
    def get():
        # Pop the newest anonymous record; -1 when only the sentinel remains.
        return timer.TAPE.pop() if len(timer.TAPE) > 1 else -1

    @staticmethod
    def dict(select_keys=None):
        """Render named timings as a '|name:secs|' string."""
        keys = timer.NAMED_TAPE.keys() if select_keys is None else select_keys
        return "|" + "".join(f"{key}:{timer.NAMED_TAPE[key]:.2f}|" for key in keys)

    @staticmethod
    def zero(select_keys=None):
        """Reset the chosen (or all) named accumulators to zero."""
        keys = list(timer.NAMED_TAPE) if select_keys is None else select_keys
        for key in keys:
            timer.NAMED_TAPE[key] = 0

    def __init__(self, tape=None, **kwargs):
        name = kwargs.get('name')
        if name:
            # ensure an accumulator exists for this name
            if not timer.NAMED_TAPE.get(name):
                timer.NAMED_TAPE[name] = 0.
            self.named = name
            if kwargs.get("group"):
                # TODO: add group function
                pass
        else:
            self.named = False
            self.tape = tape or timer.TAPE

    def __enter__(self):
        self.start = timer.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = timer.time() - self.start
        if self.named:
            timer.NAMED_TAPE[self.named] += elapsed
        else:
            self.tape.append(elapsed)


# ====================Metrics==============================
# =========================================================
def calculate_long_tail_metrics(Rating_list, long_tail_doctors_set,k):
    """Coverage metrics over the union of all top-k recommendations.

    Returns:
        (long_tail_coverage, coverage): fraction of long-tail doctors that
        appear in any top-k list, and fraction of the whole doctor catalog
        (world.n_doc) recommended at least once.
    """
    recommended = set()
    for batch in Rating_list:
        topk = batch[:, :k]
        for row in topk:
            recommended.update(row.tolist())
    tail_hits = recommended.intersection(long_tail_doctors_set)
    long_tail_coverage = len(tail_hits) / len(long_tail_doctors_set)
    coverage = len(recommended) / world.n_doc
    return long_tail_coverage, coverage

def calculate_hmd_average(Rating_list,list_length):
    """Average pairwise Hamming-style distance between users' top lists.

    For every pair of users within a batch the distance is
    1 - |intersection| / list_length; returns the mean over all pairs,
    or 0.0 when there are no pairs.
    """
    distances = []
    for batch in Rating_list:
        n_users = batch.size(0)
        topk = batch[:, :list_length]
        rec_sets = [set(topk[u].tolist()) for u in range(n_users)]
        # every unordered pair of users in this batch
        for left, right in itertools.combinations(rec_sets, 2):
            overlap = len(left.intersection(right))
            distances.append(1 - overlap / list_length)
    return sum(distances) / len(distances) if distances else 0.0

def RecallPrecision_ATk(test_data, r, k):
    """Summed (not averaged) recall and precision at k over a batch.

    test_data: list of per-user ground-truth item lists.
    r: (test_batch, >=k) binary hit matrix, pre-sorted by rank.
    k: cutoff.
    """
    hits = r[:, :k].sum(1)
    truth_sizes = np.array([len(gt) for gt in test_data])
    recall = np.sum(hits / truth_sizes)
    precision = np.sum(hits) / k
    return {'recall': recall, 'precision': precision}

def RecallPrecision_LTk(test_data, r, p, k):
    """Recall from relevance hits r, precision from long-tail hits p.

    Users with empty ground truth contribute a denominator of 1 so the
    recall sum stays finite.
    """
    hits = r[:, :k].sum(1)
    tail_hits = p[:, :k].sum(1)
    denom = np.array([len(gt) if gt else 1 for gt in test_data])
    return {'recall': np.sum(hits / denom), 'precision': np.sum(tail_hits) / k}

def MRRatK_r(r, k):
    """
    Mean Reciprocal Rank (summed over users; caller divides by user count).

    r: (batch, >=k) binary hit matrix, pre-sorted by rank.

    Bug fix: the original divided by `np.log2(1./np.arange(1, k+1))`, whose
    first entry is log2(1) == 0, so any rank-1 hit produced inf/nan. This
    computes the standard reciprocal rank of each user's FIRST hit within
    the top-k, or 0 when there is no hit.
    """
    pred_data = r[:, :k]
    # index of the first hit per row (argmax of a boolean row; 0 when no hit)
    first_hit = np.argmax(pred_data > 0, axis=1)
    has_hit = pred_data.sum(1) > 0
    reciprocal = np.where(has_hit, 1.0 / (first_hit + 1), 0.0)
    return np.sum(reciprocal)

def NDCGatK_r(test_data,r,k):
    """
    Normalized Discounted Cumulative Gain, summed over users.

    Binary relevance, so 2^rel - 1 is simply 0 or 1; the ideal ranking puts
    min(k, |ground truth|) hits at the top.
    """
    assert len(r) == len(test_data)
    hits = r[:, :k]
    discounts = 1. / np.log2(np.arange(2, k + 2))

    # ideal relevance matrix: best-case placement of each user's positives
    ideal = np.zeros((len(hits), k))
    for row, items in enumerate(test_data):
        ideal[row, :min(k, len(items))] = 1

    idcg = (ideal * discounts).sum(axis=1)
    dcg = (hits * discounts).sum(axis=1)
    idcg[idcg == 0.] = 1.  # avoid 0/0 for users with no ground truth
    ndcg = dcg / idcg
    ndcg[np.isnan(ndcg)] = 0.
    return np.sum(ndcg)

def AUC(all_item_scores, dataset, test_data):
    """
    AUC for a single user: positives are the user's test items; items with
    a score below 0 are treated as excluded from ranking.
    """
    labels = np.zeros((dataset.m_items, ))
    labels[test_data] = 1
    valid = all_item_scores >= 0
    return roc_auc_score(labels[valid], all_item_scores[valid])

def getLabel(test_data, pred_data):
    """Binary hit matrix: entry (u, j) is 1.0 iff pred_data[u][j] occurs in
    test_data[u]."""
    rows = []
    for truth, preds in zip(test_data, pred_data):
        rows.append([float(item in truth) for item in preds])
    return np.array(rows).astype('float')
def longtailLabel(test_data, pred_data):
    """Like getLabel, but additionally flags which predicted items belong to
    the global long-tail doctor set (world.long_tail_doc).

    Returns:
        (hits, tail_flags): two float matrices of the same shape.
    """
    hit_rows, tail_rows = [], []
    for truth, preds in zip(test_data, pred_data):
        hit_rows.append([float(item in truth) for item in preds])
        tail_rows.append([float(item in world.long_tail_doc) for item in preds])
    return np.array(hit_rows).astype('float'), np.array(tail_rows).astype('float')

# ====================end Metrics=============================
# =========================================================
