import torch.optim as optimizer
import torch
from torch import Tensor
import torch.nn.functional as F


def dice_coeff(input: Tensor, target: Tensor, reduce_batch_first: bool = False, epsilon=1e-6):
    """Dice coefficient, averaged over the batch (or computed for a single mask).

    Args:
        input: predicted mask(s); must have the same shape as ``target``.
        target: ground-truth mask(s).
        reduce_batch_first: if True, flatten the batch and spatial dimensions
            together into one Dice computation instead of averaging the
            per-sample Dice scores.
        epsilon: smoothing term that avoids division by zero.

    Returns:
        Scalar tensor holding the (mean) Dice coefficient.

    Raises:
        ValueError: if ``reduce_batch_first`` is requested on a 2-D tensor,
            which has no batch dimension to reduce.
    """
    assert input.size() == target.size()
    if input.dim() == 2 and reduce_batch_first:
        raise ValueError(f'Dice: asked to reduce batch but got tensor without batch dimension (shape {input.shape})')

    if input.dim() == 2 or reduce_batch_first:
        inter = torch.dot(input.reshape(-1), target.reshape(-1))
        sets_sum = torch.sum(input) + torch.sum(target)
        # Both masks empty: make numerator == denominator so Dice is 1.
        if sets_sum.item() == 0:
            sets_sum = 2 * inter

        return (2 * inter + epsilon) / (sets_sum + epsilon)
    else:
        # Compute and average the metric for each batch element.
        dice = 0
        for i in range(input.shape[0]):
            # BUG FIX: forward epsilon to the per-sample computation — the
            # original recursion silently fell back to the default 1e-6.
            dice += dice_coeff(input[i, ...], target[i, ...], epsilon=epsilon)
        return dice / input.shape[0]


def multiclass_dice_coeff(input: Tensor, target: Tensor, reduce_batch_first: bool = False, epsilon=1e-6):
    """Mean Dice coefficient taken over every class channel."""
    assert input.size() == target.size()
    n_classes = input.shape[1]
    per_class = (
        dice_coeff(input[:, c, ...], target[:, c, ...], reduce_batch_first, epsilon)
        for c in range(n_classes)
    )
    return sum(per_class) / n_classes


def dice_loss(input: Tensor, target: Tensor, multiclass: bool = False):
    """Dice loss in [0, 1]: minimizing it maximizes the Dice coefficient."""
    assert input.size() == target.size()
    if multiclass:
        coeff = multiclass_dice_coeff(input, target, reduce_batch_first=True)
    else:
        coeff = dice_coeff(input, target, reduce_batch_first=True)
    return 1 - coeff


def focal_loss(input: Tensor, target: Tensor, multiclass: bool = True):
    """Focal loss (Lin et al., 2017), mean-reduced over all elements.

    Args:
        input: raw logits — ``(N, C, ...)`` for the multiclass case; same
            shape as ``target`` for the binary case.
        target: class-index tensor (multiclass) or 0/1 float tensor (binary).
        multiclass: selects cross-entropy (True) or BCE-with-logits (False)
            as the underlying per-element loss.

    Returns:
        Scalar tensor with the mean focal loss.
    """
    alpha = 0.3
    gamma = 2
    if multiclass:
        ce_loss = torch.nn.functional.cross_entropy(input, target, reduction='none')
    else:
        # BUG FIX: the original left ``floss`` unbound when multiclass=False,
        # raising UnboundLocalError; use the binary analogue of CE instead.
        ce_loss = torch.nn.functional.binary_cross_entropy_with_logits(input, target, reduction='none')
    pt = torch.exp(-ce_loss)  # model's estimated probability of the true class
    # Down-weight easy examples by (1 - pt)^gamma, then mean over the batch.
    return (alpha * (1 - pt) ** gamma * ce_loss).mean()


def MIOU(input: Tensor, target: Tensor, multiclass: bool = True):
    """Mean intersection-over-union across the batch.

    Both tensors are reduced over dims 1-3, i.e. they are assumed to be
    4-D ``(N, C, H, W)`` — TODO confirm against callers.
    NOTE(review): ``multiclass`` is accepted but unused; kept for API parity.
    NOTE(review): a sample with an empty union produces NaN (0/0), as in the
    original implementation.
    """
    reduce_dims = [1, 2, 3]
    overlap = (input * target).sum(dim=reduce_dims)
    combined = input.sum(dim=reduce_dims) + target.sum(dim=reduce_dims) - overlap
    return (overlap / combined).mean()

class MIOU_CAL(object):
    """Confusion-matrix-based mean-IoU and pixel-accuracy calculator.

    Holds a batch of predicted and ground-truth label maps and derives the
    metrics from per-sample confusion matrices.
    """
    def __init__(self,num_class,label_pred, label_true):
        super(MIOU_CAL,self).__init__()
        # num_class: number of classes; label_pred/label_true: integer label
        # tensors of matching shape — presumably (N, H, W); TODO confirm.
        self.num_class=num_class
        self.label_pred=label_pred
        self.label_true=label_true
    @property
    def confuse_matrix(self):
        """Per-sample confusion matrices, shape (-1, num_class, num_class).

        NOTE(review): only ``label_true`` is range-masked; if ``label_pred``
        contains values >= num_class, bincount grows past num_class**2 and the
        reshape below fails — verify predictions are already clamped upstream.
        """
        # Keep only positions whose ground-truth label is a valid class id.
        mask = (self.label_true >= 0) & (self.label_true < self.num_class)
        # bincount over (true * num_class + pred) counts each (true, pred)
        # pair; reshaping the num_class**2-long histogram yields the matrix.
        print(mask.shape)
        hist = torch.bincount(
            self.num_class * self.label_true[mask].int() +
            self.label_pred[mask], minlength=self.num_class ** 2).reshape(-1,self.num_class, self.num_class)
        return hist

    @property
    def Pixel_Acc(self):
        """Mean per-sample pixel accuracy: diagonal mass over total mass."""
        hist=self.confuse_matrix
        return (hist.diag().sum()/hist.sum(dim=[1,2])).mean()

    @property
    def miou(self):
        """Mean IoU averaged over samples, skipping class 0 (background)."""
        total_iou=0.0
        hist=self.confuse_matrix
        for sample in hist:
            # IoU per class: diag / (row sum + col sum - diag).
            MIOU= torch.diag(sample) / (
                    torch.sum(sample, dim=0) +
                    torch.sum(sample, dim=1) -
                    torch.diag(sample))
            total_iou += torch.nanmean(MIOU[1:])  # skip class 0; nanmean drops absent classes
        return total_iou / hist.shape[0]



def rec_criterion(input: Tensor, target: Tensor, num_classes: int = 3):
    """Neighbourhood-weighted cross-entropy criterion.

    Each pixel's log-softmax score for its true class is scaled by
    ``exp(1 / local_mean)``, where ``local_mean`` is the 3x3 average of the
    one-hot target plane — pixels whose class is rare in their neighbourhood
    (e.g. boundaries) receive a larger weight.

    Args:
        input: logits, presumably ``(N, C, H, W)`` — TODO confirm.
        target: integer class map, presumably ``(N, H, W)``.
        num_classes: number of classes C (generalized from the hard-coded 3;
            the default preserves the original behavior).

    Returns:
        Scalar loss tensor.
    """
    target_one_hot = F.one_hot(target, num_classes).permute(0, 3, 1, 2).float()
    # 3x3 local mean of each one-hot plane = fraction of same-class neighbours.
    kernel_mean = F.avg_pool2d(target_one_hot, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # Avoid division by zero; these positions are zeroed by target_one_hot
    # in the product below, so the substituted value never contributes.
    kernel_mean[kernel_mean==0]=1
    coff = torch.exp(1 / kernel_mean)
    sf = F.log_softmax(input, dim=1)
    loss =-(sf * target_one_hot * coff).sum(dim=1).mean()
    return loss
