
import torch
import torch.nn as nn


class SiLogLoss(nn.Module):
    """Scale-invariant logarithmic (SILog) depth-estimation loss.

    Computes ``10 * sqrt(mean(d^2) - variance_focus * mean(d)^2)`` where
    ``d = log(target) - log(pred)``, evaluated only on pixels with
    ``target > 0``.

    Args:
        args: optional namespace providing ``variance_focus``. When omitted,
            0.85 is used as the default focus value.
    """

    def __init__(self, args=None):
        super().__init__()
        # Fix: the declared default ``args=None`` previously crashed with an
        # AttributeError; fall back to the conventional 0.85 instead.
        self.variance_focus = args.variance_focus if args is not None else 0.85
        self.args = args

    def forward(self, pred, target):
        """Return the SILog loss between predicted and target depth maps.

        Only pixels where ``target > 0`` contribute. ``pred`` is assumed to
        be strictly positive at those pixels (log is taken directly).
        """
        valid_mask = (target > 0).detach()
        diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask])
        loss = torch.sqrt(torch.pow(diff_log, 2).mean() -
                          self.variance_focus * torch.pow(diff_log.mean(), 2)) * 10

        return loss


# import torch
# import torch.nn as nn


class SiLogEdgeLoss(nn.Module):
    """
    结合 SILog 损失和 Sobel 边缘 L1 损失的深度估计损失函数，适配 train 和 eval 状态的输入格式。
    支持的输入格式：
        - Train: pred (B, 1, H, W), target (B, H, W)
        - Eval: pred (H, W), target (H, W)
    参数:
        args: 包含 variance_focus 和 edge_weight 的参数
            - variance_focus: SILog 损失的方差聚焦参数
            - edge_weight: 边缘损失的权重
    """

    def __init__(self, args=None):
        super(SiLogEdgeLoss, self).__init__()
        self.variance_focus = args.variance_focus if args else 0.85  # 默认值
        self.edge_weight = args.edge_weight if args else 0.1  # 默认边缘损失权重
        self.args = args

        # Sobel 算子（水平和垂直方向）
        self.sobel_x = torch.tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=torch.float32).view(1, 1, 3, 3)
        self.sobel_y = torch.tensor([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=torch.float32).view(1, 1, 3, 3)

    def _normalize_input(self, img):
        """
        规范化输入张量为 (B, 1, H, W) 格式。
        参数:
            img: 输入张量，形状为 (B, 1, H, W), (B, H, W) 或 (H, W)
        返回:
            规范化后的张量，形状为 (B, 1, H, W)
        """
        if img.dim() == 2:  # (H, W) -> (1, 1, H, W)
            img = img.unsqueeze(0).unsqueeze(0)
        elif img.dim() == 3:  # (B, H, W) -> (B, 1, H, W)
            img = img.unsqueeze(1)
        elif img.dim() == 4:  # (B, 1, H, W)
            if img.size(1) != 1:
                raise ValueError(f"Expected single-channel input, got {img.size(1)} channels")
        else:
            raise ValueError(f"Unsupported input shape: {img.shape}")
        return img

    def _compute_sobel_gradients(self, img):
        """
        使用 Sobel 算子计算深度图的梯度。
        参数:
            img: 输入深度图，形状 (B, 1, H, W)
        返回:
            梯度幅度，形状 (B, 1, H, W)
        """
        if img.is_cuda:
            self.sobel_x = self.sobel_x.cuda()
            self.sobel_y = self.sobel_y.cuda()

        # 计算水平和垂直梯度
        grad_x = torch.nn.functional.conv2d(
            img, self.sobel_x, stride=1, padding=1
        )
        grad_y = torch.nn.functional.conv2d(
            img, self.sobel_y, stride=1, padding=1
        )

        # 计算梯度幅度
        grad_magnitude = torch.sqrt(grad_x ** 2 + grad_y ** 2 + 1e-10)
        return grad_magnitude

    def forward(self, pred, target):
        """
        前向传播，计算 SILog 损失和 Sobel 边缘 L1 损失。
        参数:
            pred: 预测深度图，形状 (B, 1, H, W) 或 (H, W)
            target: 目标深度图，形状 (B, H, W) 或 (H, W)
        返回:
            总损失（SILog 损失 + 边缘损失）
        """
        # 规范化输入为 (B, 1, H, W)
        pred_norm = self._normalize_input(pred)  # (B, 1, H, W)
        target_norm = self._normalize_input(target)  # (B, 1, H, W)

        # 1. SILog 损失
        valid_mask = (target_norm > 0).detach()
        diff_log = torch.log(target_norm[valid_mask]) - torch.log(pred_norm[valid_mask])
        silog_loss = torch.sqrt(
            torch.pow(diff_log, 2).mean() -
            self.variance_focus * torch.pow(diff_log.mean(), 2)
        ) * 10

        # 2. Sobel 边缘损失
        pred_grad = self._compute_sobel_gradients(pred_norm)
        target_grad = self._compute_sobel_gradients(target_norm)
        edge_loss = torch.nn.functional.l1_loss(pred_grad, target_grad)

        # 3. 总损失
        total_loss = silog_loss + self.edge_weight * edge_loss
        return total_loss

class filterSiLogLoss(nn.Module):
    """SILog depth loss plus a masked log-gradient matching term.

    The gradient term penalizes the L1 difference between neighbouring
    log-depth differences of prediction and target, restricted to pixel
    pairs whose target neighbours are both valid (> 0).

    Args:
        args: optional namespace providing ``variance_focus``; overrides
            the ``variance_focus`` parameter when present.
        variance_focus: SILog focus parameter used when ``args`` is None.
        lambda_grad: weight of the gradient term.
    """

    def __init__(self, args=None, variance_focus=0.85, lambda_grad=0.1):
        super().__init__()
        # Fix: ``variance_focus`` was previously ignored and ``args=None``
        # crashed with an AttributeError; honor the parameter as fallback.
        self.variance_focus = args.variance_focus if args is not None else variance_focus
        self.args = args
        self.lambda_grad = lambda_grad

    @staticmethod
    def _masked_grad_l1(log_pred, log_target, target, dim):
        """Mean |Δlog_pred - Δlog_target| of adjacent pixels along ``dim``,
        over pairs where both target neighbours are > 0; 0 if none valid."""
        n = log_pred.size(dim) - 1
        pred_grad = log_pred.narrow(dim, 1, n) - log_pred.narrow(dim, 0, n)
        target_grad = log_target.narrow(dim, 1, n) - log_target.narrow(dim, 0, n)
        valid = (target.narrow(dim, 1, n) > 0) & (target.narrow(dim, 0, n) > 0)
        if not torch.any(valid):
            return torch.tensor(0.0, device=log_pred.device)
        return torch.mean(torch.abs(pred_grad[valid] - target_grad[valid]))

    def forward(self, pred, target):
        """Return silog_loss + lambda_grad * (horizontal + vertical grad loss)."""
        # Training mode delivers (B, 1, H, W); drop the channel dimension.
        if pred.dim() == 4 and pred.shape[1] == 1:
            pred = pred.squeeze(1)
        assert pred.shape == target.shape, "pred 和 target 必须具有相同的形状"

        # Clamp to keep log() finite even on zero-valued pixels.
        pred_clamped = torch.clamp(pred, min=1e-6)
        target_clamped = torch.clamp(target, min=1e-6)

        # SILog term over valid (target > 0) pixels.
        valid_mask = (target > 0).detach()
        diff_log = torch.log(target_clamped[valid_mask]) - torch.log(pred_clamped[valid_mask])
        silog_loss = torch.sqrt(
            torch.pow(diff_log, 2).mean()
            - self.variance_focus * torch.pow(diff_log.mean(), 2)
        ) * 10

        log_pred = torch.log(pred_clamped)
        log_target = torch.log(target_clamped)

        # Normalize ranks: a 2D (H, W) eval input behaves exactly like a
        # batch of one, so the two original duplicated branches collapse.
        if pred.dim() == 2:  # eval mode: (H, W)
            log_pred_b = log_pred.unsqueeze(0)
            log_target_b = log_target.unsqueeze(0)
            target_b = target.unsqueeze(0)
        elif pred.dim() == 3:  # train mode: (B, H, W)
            log_pred_b, log_target_b, target_b = log_pred, log_target, target
        else:
            raise ValueError("pred 必须是 2D 或 3D 张量")

        gx_loss = self._masked_grad_l1(log_pred_b, log_target_b, target_b, dim=2)
        gy_loss = self._masked_grad_l1(log_pred_b, log_target_b, target_b, dim=1)
        total_gradient_loss = gx_loss + gy_loss

        # Weighted total.
        total_loss = silog_loss + self.lambda_grad * total_gradient_loss
        return total_loss