import torch
import torch.nn as nn


# class MaskedKLDivLoss(nn.Module):
#     def __init__(self):
#         super(MaskedKLDivLoss, self).__init__()
#         self.loss = nn.KLDivLoss(reduction='sum')
#         # self.loss = nn.CrossEntropyLoss(reduction='sum')
#
#     def forward(self, log_pred, target, mask):
#         # Reshape the mask into a column vector so it broadcasts over classes
#         mask_ = mask.view(-1, 1)
#         # Compute the KL-divergence loss, normalized by the unmasked count
#         loss = self.loss(log_pred * mask_, target * mask_) / torch.sum(mask)
#         return loss



class MaskedKLDivLoss(nn.Module):
    """Kullback-Leibler divergence loss averaged over unmasked positions.

    Fixes two defects in the previous implementation: the ``mask`` argument
    was silently ignored, and ``BCEWithLogitsLoss`` was applied on top of a
    log-softmax (stacking a sigmoid onto log-probabilities), which does not
    compute a KL divergence at all. This restores the masked-KL computation
    (see the commented-out original at the top of this file) with the same
    ``forward(log_pred, target, mask)`` interface.
    """

    def __init__(self):
        super(MaskedKLDivLoss, self).__init__()
        # Sum over all elements; normalization by the mask count is done
        # explicitly in forward().
        self.loss = nn.KLDivLoss(reduction='sum')

    def forward(self, log_pred, target, mask):
        """Compute the masked KL divergence.

        Args:
            log_pred: log-probabilities, shape (N, C).
            target: target probability distributions, shape (N, C).
            mask: 1/0 (or float) mask over the N rows; masked-out rows
                contribute nothing to the loss.

        Returns:
            Scalar tensor: summed KL terms over unmasked rows divided by
            the number of unmasked rows.
        """
        # Column vector so the mask broadcasts across the class dimension.
        mask_ = mask.view(-1, 1)
        # Zeroing a target row makes its KL contribution vanish
        # (KLDivLoss treats zero-valued targets as contributing zero),
        # so masked rows drop out of the sum before normalization.
        loss = self.loss(log_pred * mask_, target * mask_) / torch.sum(mask)
        return loss


class MaskedNLLLoss(nn.Module):
    """Negative log-likelihood loss averaged over unmasked positions.

    Expects ``pred`` to contain log-probabilities (e.g. the output of
    ``log_softmax``), like ``nn.NLLLoss`` itself.
    """

    def __init__(self, weight=None):
        """
        Args:
            weight: optional 1-D tensor of per-class weights, passed through
                to ``nn.NLLLoss``; also used to normalize the loss in
                ``forward`` when given.
        """
        super(MaskedNLLLoss, self).__init__()
        self.weight = weight
        # Sum reduction; normalization by the mask is done in forward().
        self.loss = nn.NLLLoss(weight=weight, reduction='sum')

    def forward(self, pred, target, mask):
        """Compute the masked NLL loss.

        Args:
            pred: log-probabilities, shape (N, C).
            target: class indices, shape (N,).
            mask: 1/0 (or float) mask over the N rows.

        Returns:
            Scalar tensor: summed NLL over unmasked rows, divided by the
            number of unmasked rows (unweighted) or by the total weight of
            the unmasked targets (weighted).
        """
        # Column vector so the mask broadcasts across the class dimension;
        # zeroed log-prob rows contribute -0 to the summed loss.
        mask_ = mask.view(-1, 1)
        if self.weight is None:  # idiomatic None check (was type(...) == type(None))
            loss = self.loss(pred * mask_, target) / torch.sum(mask)
        else:
            # Normalize by the total class weight of the unmasked targets
            # instead of the raw row count.
            loss = self.loss(pred * mask_, target) \
                   / torch.sum(self.weight[target] * mask_.squeeze())
        return loss