import torch
import torch.nn as nn
import torch.nn.functional as F


class BCE(nn.Module):
    """Pointwise binary cross-entropy loss (with logits) plus L2 regularization.

    Scores a (user, item) pair as the dot product of their embeddings and
    compares it against a 0/1 interaction label.

    NOTE(review): assumes ``model.get_user_item_embedding`` returns the final
    and the initial (layer-0 / ego) embeddings — confirm against the model.
    """

    def __init__(self, model, config):
        super().__init__()
        self.model = model
        self.config = config  # expects keys: "device", "weight_decay"
        self.bceloss = torch.nn.BCEWithLogitsLoss()

    def forward(self, batch_data):
        """Return BCE loss + weight_decay * L2 penalty for one batch."""
        users, items, labels = batch_data
        device = self.config["device"]
        users = users.to(device)
        items = items.to(device)
        labels = labels.to(device)
        user_emb, item_emb, user_ego, item_ego = self.model.get_user_item_embedding(
            users.long(), items.long()
        )
        # L2 penalty on the ego (pre-propagation) embeddings, batch-averaged.
        reg = 0.5 * (user_ego.norm(2).pow(2) + item_ego.norm(2).pow(2)) / float(len(users))
        # Per-pair logit = dot product of user and item embeddings.
        logits = (user_emb * item_emb).sum(dim=1)
        return self.bceloss(logits, labels) + reg * self.config["weight_decay"]


class SmoothAUCLoss(torch.nn.Module):
    """Sigmoid-smoothed approximation of ``1 - AUC``.

    Each call receives every prediction for one user together with its 0/1
    label; all positive-negative pairs are formed and scored. A larger
    ``sigma`` makes the sigmoid a sharper step-function surrogate.
    ``reduction`` is accepted but currently unused.
    """

    def __init__(self):
        super().__init__()
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, y_pred, y_true, sigma=500, reduction="sum"):
        assert y_pred.shape == y_true.shape, "smooth auc loss 输入的两个label集合大小不等！！！"
        mask = y_true.bool()
        positives = y_pred[mask]
        negatives = y_pred[~mask]
        # Pairwise differences pos_i - neg_j, in float64 for numerical stability
        # (sigma * diff can be large).
        diffs = (positives.unsqueeze(1) - negatives.unsqueeze(0)).double()
        n_pairs = positives.shape[0] * negatives.shape[0]
        return 1 - self.sigmoid(sigma * diffs).sum() / n_pairs


class BPR(nn.Module):
    """Bayesian Personalized Ranking loss with L2 regularization.

    Encourages each user's positive item to score higher than a sampled
    negative item via ``softplus(neg_score - pos_score)``.

    NOTE(review): assumes ``model.getEmbedding`` returns final and ego
    (layer-0) embeddings for user / positive / negative — confirm.
    """

    def __init__(self, model, config):
        super().__init__()
        self.model = model
        self.config = config  # expects keys: "device", "weight_decay"

    def forward(self, batch_data):
        """Return BPR loss + weight_decay * L2 penalty for one batch of triples."""
        device = self.config["device"]
        users, pos_items, neg_items = (t.to(device) for t in batch_data)
        (user_emb, pos_emb, neg_emb,
         user_ego, pos_ego, neg_ego) = self.model.getEmbedding(
            users.long(), pos_items.long(), neg_items.long()
        )
        # L2 penalty on the ego embeddings, batch-averaged.
        reg = 0.5 * (user_ego.norm(2).pow(2)
                     + pos_ego.norm(2).pow(2)
                     + neg_ego.norm(2).pow(2)) / float(len(users))
        pos_scores = (user_emb * pos_emb).sum(dim=1)
        neg_scores = (user_emb * neg_emb).sum(dim=1)
        # softplus(neg - pos) == -log sigmoid(pos - neg): the BPR objective.
        loss = F.softplus(neg_scores - pos_scores).mean()
        return loss + reg * self.config["weight_decay"]


class SAUC_for_sample(nn.Module):
    """Smoothed-AUC loss over sampled (user, positive, negative) triples.

    Each pair contributes ``sigmoid((pos - neg) / tau)``, down-weighted by the
    square of the user's positive-interaction count; the sum is negated so that
    minimizing the loss maximizes the smoothed AUC.
    """

    def __init__(self, model, config, dataset):
        super().__init__()
        self.model = model
        self.config = config  # expects keys: "device", "weight_decay", "tau"
        # Per-user positive-interaction counts, indexed by user id.
        self.numAllPos = torch.Tensor(dataset._numAllPos).long().to(config["device"])

    def forward(self, batch_data):  # batch_data: (batch_user, batch_pos, batch_neg)
        """Return the (negated) smoothed-AUC loss + weight_decay * L2 penalty."""
        device = self.config["device"]
        users, pos_items, neg_items = (t.to(device) for t in batch_data)
        tau = self.config["tau"]
        interactions = self.numAllPos[users.long()]
        (user_emb, pos_emb, neg_emb,
         user_ego, pos_ego, neg_ego) = self.model.getEmbedding(
            users.long(), pos_items.long(), neg_items.long()
        )
        # L2 penalty on the ego embeddings, batch-averaged.
        reg = 0.5 * (user_ego.norm(2).pow(2)
                     + pos_ego.norm(2).pow(2)
                     + neg_ego.norm(2).pow(2)) / float(len(users))
        pos_scores = (user_emb * pos_emb).sum(dim=1)
        neg_scores = (user_emb * neg_emb).sum(dim=1)
        # Negated sum: larger pos-vs-neg margins drive the loss lower.
        loss = -(torch.sigmoid((pos_scores - neg_scores) / tau) / interactions ** 2).sum()
        return loss + reg * self.config["weight_decay"]


class SAUC_for_user(nn.Module):
    """Per-user smoothed-AUC loss.

    Returns the loss and the L2 regularization term separately so the caller
    can apply its own weight decay.
    """

    def __init__(self, model, config):
        super().__init__()
        self.model = model
        # BUG FIX: `config` was never stored, so forward() always crashed with
        # AttributeError when reading self.config["tau"].
        self.config = config  # expects key: "tau"

    def forward(self, users, pos, neg, numAllPos):
        """Compute (loss, reg_loss) for one user's sampled pos/neg items.

        Args:
            users: user id tensor (one id per pos/neg pair).
            pos, neg: positive / negative item id tensors.
            numAllPos: per-user positive-interaction counts, indexed by user id.
        """
        tau = self.config["tau"]
        num_user_iteraction = numAllPos[users]
        (users_emb, pos_emb, neg_emb,
         userEmb0, posEmb0, negEmb0) = self.model.getEmbedding(users.long(), pos.long(), neg.long())
        # L2 penalty on the ego (layer-0) embeddings, batch-averaged.
        reg_loss = (1 / 2) * (userEmb0.norm(2).pow(2) +
                              posEmb0.norm(2).pow(2) +
                              negEmb0.norm(2).pow(2)) / float(len(users))
        pos_scores = torch.mul(users_emb, pos_emb)
        pos_scores = torch.sum(pos_scores, dim=1)
        neg_scores = torch.mul(users_emb, neg_emb)
        neg_scores = torch.sum(neg_scores, dim=1)
        # 1 - mean smoothed AUC, each pair down-weighted by interactions^2.
        loss = 1 - torch.mean(torch.div(torch.sigmoid((pos_scores - neg_scores) / tau),
                                        num_user_iteraction ** 2))
        return loss, reg_loss


def getLoss(lossName, model, config, dataset):
    """Construct the loss module registered under ``lossName``.

    Raises:
        NotImplementedError: if ``lossName`` is not a known loss.
    """
    factories = {
        "bce": lambda: BCE(model, config),
        "bpr": lambda: BPR(model, config),
        "sauc_for_sample": lambda: SAUC_for_sample(model, config, dataset),
        "sauc_for_user": lambda: SAUC_for_user(model, config),
    }
    builder = factories.get(lossName)
    if builder is None:
        raise NotImplementedError(f"loss {lossName} haven't been implemented!!!")
    return builder()
