#!/usr/bin/python
# -*- encoding: utf-8 -*-


import torch
import torch.nn as nn
import torch.nn.functional as F

class OhemCELoss(nn.Module):
    """Online Hard Example Mining (OHEM) loss built on per-pixel BCE.

    Only the "hard" pixels — those whose per-pixel BCE loss exceeds
    ``-log(thresh)`` — contribute to the final mean.  At least ``n_min``
    pixels (1/16 of the valid pixels) always participate, so the gradient
    never collapses once the model becomes confident.
    """

    def __init__(self, thresh, ignore_lb=255):
        """
        Args:
            thresh: probability threshold in (0, 1); a pixel counts as hard
                when its per-pixel loss exceeds ``-log(thresh)``.
            ignore_lb: label value marking pixels excluded from the loss.
        """
        super().__init__()
        # Registered as a buffer so it moves with the module on
        # .to()/.cuda() instead of hard-coding a CUDA device, which broke
        # CPU-only execution.
        self.register_buffer(
            'thresh', -torch.log(torch.tensor(thresh, dtype=torch.float)))
        self.ignore_lb = ignore_lb
        # reduction='none' keeps one loss value per element so they can be
        # ranked for hard-example mining.
        # NOTE(review): nn.BCELoss expects `logits` to already be
        # probabilities in [0, 1] (i.e. after a sigmoid) — confirm that the
        # caller applies sigmoid before this loss.
        self.criteria = nn.BCELoss(reduction='none')

    def forward(self, logits, labels):
        """Return the mean loss over the hard pixels.

        Args:
            logits: predicted probabilities, same shape as ``labels``.
            labels: binary ground truth; entries equal to ``ignore_lb`` are
                excluded from the loss.

        Returns:
            Scalar tensor: mean loss over the mined hard pixels.
        """
        # nn.BCELoss has no ignore_index, so mask the ignored pixels out up
        # front; otherwise BCE would be evaluated on invalid targets (255).
        valid = labels != self.ignore_lb
        # At least 1/16 of the valid pixels back-propagate a gradient.
        n_min = valid.sum().item() // 16
        loss = self.criteria(logits[valid].float(), labels[valid].float()).view(-1)
        # Pixels whose loss exceeds -log(thresh) are the hard examples.
        loss_hard = loss[loss > self.thresh]
        if loss_hard.numel() < n_min:
            # Too few hard pixels: fall back to the n_min largest losses.
            loss_hard, _ = loss.topk(n_min)
        return torch.mean(loss_hard)


import torch.nn as nn
import torch.nn.functional as F


class SoftDiceLoss(nn.Module):
    def __init__(self, weight=None, size_average=True):
        super(SoftDiceLoss, self).__init__()

    def forward(self, logits, targets):
        num = targets.size(0)
        smooth = 1

        probs = F.sigmoid(logits)
        m1 = probs.view(num, -1)
        m2 = targets.view(num, -1)
        intersection = (m1 * m2)

        score = 2. * (intersection.sum(1) + smooth) / (m1.sum(1) + m2.sum(1) + smooth)
        score = 1 - score.sum() / num
        return score


if __name__ == '__main__':
    pass  # no standalone demo or self-test yet; this module is import-only