import torch
import torch.nn as nn


class MeanSquaredLogScaledErrorLoss(nn.Module):
    """Combined MSE + mean-squared-log-error (MSLE) loss.

    total = clamp(MSE(y_true, y_pred)) + alpha * clamp(MSLE(y_true, y_pred))

    Each term is clamped to ``[-clamp, clamp]`` before summing; since both
    terms are means of squares (non-negative), this effectively caps each
    term at ``clamp``.
    """

    def __init__(self, alpha=1.0, epsilon=1e-8, clamp=1, debug=False):
        """
        :param alpha: weight controlling the influence of the MSLE term
        :param epsilon: lower bound applied before the log to avoid log(0)
        :param clamp: symmetric bound applied to each loss term
        :param debug: if True, print both terms and the total each forward pass
        """
        super().__init__()
        self.alpha = alpha
        self.epsilon = epsilon
        self.debug = debug
        self.clamp = clamp

    def forward(self, y_pred, y_true):
        """
        :param y_pred: predicted values (any shape; flattened internally)
        :param y_true: target values (same number of elements as ``y_pred``)
        :return: scalar tensor holding the combined loss
        """
        # Flatten so predictions and targets align element-wise regardless
        # of the incoming shape.
        y_pred = y_pred.flatten()
        y_true = y_true.flatten()

        # Floor at epsilon so the logs below are defined even for zero or
        # negative entries (targets are presumably positive-valued — the
        # clipped copies are used only for the log term).
        y_pred_clipped = torch.clamp(y_pred, min=self.epsilon)
        y_true_clipped = torch.clamp(y_true, min=self.epsilon)

        # Plain MSE on the raw (unclipped) values.
        mse_loss = torch.mean((y_true - y_pred) ** 2)

        # Log-space error (inputs guaranteed > 0 by the clamp above).
        log_y_true = torch.log(y_true_clipped)
        log_y_pred = torch.log(y_pred_clipped)

        # Mean squared log error (MSLE) term.
        msle_loss = torch.mean((log_y_true - log_y_pred) ** 2)

        # Bound each term so a single extreme batch cannot dominate.
        mse_loss = torch.clamp(mse_loss, -self.clamp, self.clamp)
        msle_loss = torch.clamp(msle_loss, -self.clamp, self.clamp)

        # Weighted combination of the two terms.
        total_loss = mse_loss + self.alpha * msle_loss

        if self.debug:
            print(f"~~~ MSE Loss: {mse_loss.item()}, MSLE Loss: {msle_loss.item()}, total Loss: {total_loss.item()}")

        return total_loss


# Example usage
if __name__ == "__main__":
    # Demo: evaluate the combined loss on a small hand-made example.
    targets = torch.tensor([1.5, 2.0, 3.0], dtype=torch.float32)
    predictions = torch.tensor([1.4, 2.1, 2.8], dtype=torch.float32)

    # Build the criterion with the MSLE term weighted at 0.5.
    criterion = MeanSquaredLogScaledErrorLoss(alpha=0.5)

    # Compute and report the loss.
    loss = criterion(predictions, targets)
    print(f"Loss: {loss.item()}")
