import tensorflow as tf
from tensorflow.keras.losses import Loss

class FocalLoss(Loss):
    """Binary focal loss (Lin et al., 2017) for imbalanced binary / multi-label targets.

    FL(pt) = -alpha_t * (1 - pt)^gamma * log(pt)

    Parameters
    ----------
    alpha : float
        Balancing weight for the positive class; negatives receive ``1 - alpha``.
    gamma : float
        Focusing parameter; larger values down-weight easy examples more strongly.
    use_activation : bool
        If True, ``y_pred`` is raw logits and a sigmoid is applied internally
        (numerically stable fused path). If False, ``y_pred`` is already
        probabilities in [0, 1].
    name : str
        Name of the loss instance.
    """

    def __init__(self, alpha=0.65, gamma=1.0, use_activation=False, name='focal_loss'):
        super().__init__(name=name)
        self.alpha = alpha
        self.gamma = gamma
        self.use_activation = use_activation

    def call(self, y_true, y_pred):
        # Cast BOTH tensors so dtypes always match. The original only cast
        # y_true, which crashes when y_pred arrives as float16 (mixed
        # precision) or float64; this also mirrors AsymmetricLoss below.
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32)

        # Decide how to obtain the cross-entropy term from the input.
        if self.use_activation:
            # Input is logits: use the numerically stable built-in op.
            bce_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=y_true, logits=y_pred
            )
            # Probabilities are still needed for the modulating factor.
            probs = tf.sigmoid(y_pred)
        else:
            # Input is already probabilities: clamp away from 0/1 so that
            # log() stays finite, then compute BCE manually.
            eps = 1e-7
            y_pred = tf.clip_by_value(y_pred, eps, 1.0 - eps)
            bce_loss = -y_true * tf.math.log(y_pred) - (1 - y_true) * tf.math.log(1 - y_pred)
            probs = y_pred

        # pt = p if y == 1 else 1 - p (probability of the true class).
        pt = tf.where(
            tf.equal(y_true, 1.0),
            probs,
            1.0 - probs
        )

        # (1 - pt)^gamma down-weights well-classified examples;
        # alpha_t re-balances positives against negatives.
        modulating_factor = tf.pow(1.0 - pt, self.gamma)
        alpha_factor = tf.where(
            tf.equal(y_true, 1.0),
            self.alpha,
            1.0 - self.alpha
        )

        # Combine the factors and reduce to a scalar.
        focal_loss = alpha_factor * modulating_factor * bce_loss
        return tf.reduce_mean(focal_loss)

    def get_config(self):
        # Expose hyperparameters so the loss survives Keras model
        # save/load round-trips.
        config = super().get_config()
        config.update({
            'alpha': self.alpha,
            'gamma': self.gamma,
            'use_activation': self.use_activation,
        })
        return config


class AsymmetricLoss(tf.keras.losses.Loss):
    """Asymmetric Loss (ASL) for multi-label classification (Ben-Baruch et al., 2021).

    Per label:
        L+ = (1 - p)^gamma_pos * log(p)                  for positives
        L- = (1 - p_m)^gamma_neg * log(p_m)              for negatives,
    where ``p_m = min((1 - p) + clip, 1)`` is the asymmetrically *shifted*
    negative probability. The shift zeroes out the loss of very confident
    (possibly mislabeled) negatives.

    Parameters
    ----------
    gamma_neg : float
        Focusing exponent for negative labels (usually > gamma_pos).
    gamma_pos : float
        Focusing exponent for positive labels.
    clip : float
        Probability-shifting margin for negatives; 0 disables shifting.
    eps : float
        Numerical floor inside log().
    use_activation : bool
        If True, ``y_pred`` is logits and sigmoid is applied internally;
        otherwise ``y_pred`` is already probabilities.
    use_grad_decouple : bool
        If True, stop gradients through the focusing weights so they act as
        constants w.r.t. backprop (as in the reference implementation).
    name : str
        Name of the loss instance.
    """

    def __init__(self, gamma_neg=2, gamma_pos=1, clip=0.05, eps=1e-8,
                 use_activation=False, use_grad_decouple=True, name="asymmetric_loss"):
        super().__init__(name=name)
        self.gamma_neg = gamma_neg
        self.gamma_pos = gamma_pos
        self.clip = clip
        self.eps = eps
        self.use_activation = use_activation
        self.use_grad_decouple = use_grad_decouple

    def call(self, y_true, y_pred):
        """
        Parameters
        ----------
        y_true : binarized multi-label ground-truth vector
        y_pred : model output, logits or probabilities (see ``use_activation``)
        """
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32)

        # Convert logits to probabilities when requested.
        if self.use_activation:
            p = tf.sigmoid(y_pred)
        else:
            p = y_pred

        # Asymmetric probability shifting. BUGFIX: the previous code used
        # tf.clip_by_value(p_neg, clip, 1.0), which only FLOORS p_neg, so a
        # highly confident negative (p -> 1) still paid ~|log(clip)| loss.
        # The ASL formulation shifts instead: p_m = min(p_neg + clip, 1),
        # driving the loss of such negatives to log(1) = 0.
        p_neg = 1.0 - p
        if self.clip > 0:
            p_neg = tf.minimum(p_neg + self.clip, 1.0)

        # Base (log-likelihood) terms, floored by eps for stability.
        los_pos = y_true * tf.math.log(tf.maximum(p, self.eps))
        los_neg = (1 - y_true) * tf.math.log(tf.maximum(p_neg, self.eps))

        # Asymmetric focusing.
        if self.gamma_neg > 0 or self.gamma_pos > 0:
            if self.use_grad_decouple:
                # Treat the focusing weights as constants during backprop.
                p_w = tf.stop_gradient(p)
                p_neg_w = tf.stop_gradient(p_neg)
            else:
                p_w = p
                p_neg_w = p_neg
            pos_weight = tf.pow(1.0 - p_w, self.gamma_pos) * y_true
            # The negative weight uses the SHIFTED probability (1 - p_m),
            # matching the official ASL implementation; when clip == 0 this
            # reduces to p^gamma_neg as before.
            neg_weight = tf.pow(1.0 - p_neg_w, self.gamma_neg) * (1 - y_true)

            los_pos = los_pos * pos_weight
            los_neg = los_neg * neg_weight

        # Negative mean log-likelihood as the final scalar loss.
        loss = los_pos + los_neg
        return -tf.reduce_mean(loss)

    def get_config(self):
        # Expose hyperparameters so the loss survives Keras model
        # save/load round-trips.
        config = super().get_config()
        config.update({
            'gamma_neg': self.gamma_neg,
            'gamma_pos': self.gamma_pos,
            'clip': self.clip,
            'eps': self.eps,
            'use_activation': self.use_activation,
            'use_grad_decouple': self.use_grad_decouple,
        })
        return config
