# -*- coding: utf-8 -*-
# @Time : 2023/1/4 17:56
# @Author : Zdh
import torch
import torch.nn as nn
from ..utils import get_class
import torch.nn.functional as F


class MixLoss(nn.Module):
    """Weighted combination of several loss terms.

    ``opt`` is an iterable of config dicts; each entry names a loss class
    (resolved through ``get_class``), the index of the model output it
    consumes, the label-dict key it compares against, a scalar weight and
    a display name for logging.
    """

    def __init__(self, opt):
        super(MixLoss, self).__init__()
        self.opt = opt
        # One record per configured loss term.
        self.loss_list = [
            {
                "loss_fn": get_class(cfg),
                "model_out_idx": cfg["model_out_idx"],
                "label_key": cfg["label_key"],
                "weight": cfg["weight"],
                "name": cfg["name"],
            }
            for cfg in opt
        ]

    def forward(self, model_out, label):
        """Return ``(total_loss, {name: weighted_term})`` over all terms.

        ``model_out`` may be a single tensor or a sequence of tensors;
        ``label`` is a dict of tensors, moved to each output's device.
        """
        if isinstance(model_out, torch.Tensor):
            model_out = [model_out]
        total = None
        recode = {}
        for entry in self.loss_list:
            out = model_out[entry["model_out_idx"]]
            target = label[entry["label_key"]].to(out.device)
            term = entry["weight"] * entry["loss_fn"](out, target)
            total = term if total is None else total + term
            recode[entry["name"]] = term
        return total, recode


class FocalLoss(nn.Module):
    """Binary focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    Expects ``pre`` to already be probabilities in [0, 1] (post-sigmoid):
    element-wise BCE is modulated by ``(1 - p_t) ** gamma`` and, when
    ``alpha >= 0``, balanced by ``alpha`` / ``1 - alpha`` per class.
    ``reduction`` is "none", "mean" or "sum".
    """

    def __init__(self, alpha: float = 0.25, gamma: float = 2, reduction: str = "none"):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.reduction = reduction
        self.alpha = alpha

    def forward(self, pre, label):
        # Probability the model assigned to the true class, per element.
        p_t = label * pre + (1 - label) * (1 - pre)
        bce = F.binary_cross_entropy(pre, label, reduction="none")
        focal = bce * (1 - p_t) ** self.gamma

        # A negative alpha disables class balancing entirely.
        if self.alpha >= 0:
            balance = self.alpha * label + (1 - self.alpha) * (1 - label)
            focal = balance * focal

        if self.reduction == "mean":
            return focal.mean()
        if self.reduction == "sum":
            return focal.sum()
        return focal


class CharbonnierLoss(nn.Module):
    """Charbonnier loss (a smooth L1 variant) with optional masking.

    The masked variant multiplies the residual by ``mask`` before the
    Charbonnier penalty and normalizes by the mask sum, so the loss is an
    average over the active region only — this keeps the supervision
    focused and avoids over-smoothed reconstructions.
    """

    def __init__(self, eps=1e-6):
        super(CharbonnierLoss, self).__init__()
        self.eps = eps  # smoothing term inside the sqrt

    def forward(self, x, y, mask=None):
        residual = x - y
        if mask is None:
            return torch.mean(torch.sqrt(residual * residual + self.eps))
        residual = residual * mask
        per_elem = torch.sqrt(residual * residual + self.eps)
        # Normalize by the mask sum (average over the active region);
        # the 1e-8 guards against an all-zero mask.
        return torch.sum(per_elem) / (torch.sum(mask) + 1e-8)


class BinaryDiceLoss(nn.Module):
    """Soft Dice loss for binary segmentation maps.

    Predictions and labels are flattened per sample, the Dice coefficient
    is computed per sample with +1 smoothing (so empty masks still yield a
    finite score), and the loss is ``1 - mean dice`` over the batch.
    """

    def __init__(self):
        super(BinaryDiceLoss, self).__init__()

    def forward(self, pre, label):
        smooth = 1  # keeps the ratio finite when both maps are empty
        batch = label.size(0)
        # Flatten spatial dimensions: one row per sample.
        pred_flat = pre.view(batch, -1)
        target_flat = label.view(batch, -1)
        overlap = (pred_flat * target_flat).sum(1)
        dice = (2 * overlap + smooth) / (pred_flat.sum(1) + target_flat.sum(1) + smooth)
        # Average Dice over the batch, turned into a loss.
        return 1 - dice.sum() / batch