import torch


class DiceCoeff():
    """Dice coefficient for individual examples.

    Measures the overlap between ``input`` and ``target`` after binarizing
    both at 0.5.  The dot product in the numerator counts positions where
    both masks are 1 (the intersection): the larger it is relative to the
    total foreground count, the better the prediction; smaller means worse.
    """

    def forward(self, input, target):
        # Smoothing term: keeps the division well-defined and makes two
        # empty masks score 1.0 (perfect agreement) instead of 0.
        eps = 0.0001
        # `input` is assumed to be a post-sigmoid probability map in [0, 1]
        # — TODO confirm against the caller.  Binarize both tensors so the
        # dot product below counts exact overlaps.
        input = (input > 0.5).float()
        target = (target > 0.5).float()
        # Intersection: number of positions where both masks are 1.
        self.inter = torch.dot(input.view(-1), target.view(-1))
        # "Union" here is the sum of foreground counts (not a set union),
        # as in the standard dice formulation.
        self.union = torch.sum(input) + torch.sum(target) + eps

        # Smoothed dice: eps in the numerator as well, so that when both
        # masks are empty (inter == 0, union == eps) the score is 1, not 0.
        t = (2 * self.inter.float() + eps) / self.union.float()
        return t

def dice_coeff(input, target):
    """Dice coefficient for a single prediction/target pair.

    Thin wrapper around :class:`DiceCoeff`.  The previous explicit
    CUDA/CPU accumulator initialization was dead code — ``s`` was
    unconditionally overwritten by the ``forward`` call — and the
    computation itself is device-agnostic, so it is removed.

    Args:
        input: predicted mask tensor (post-sigmoid probabilities,
            presumably — confirm against the caller).
        target: ground-truth mask tensor, same shape as ``input``.

    Returns:
        Scalar tensor holding the dice coefficient.
    """
    return DiceCoeff().forward(input, target)
