import torch
import torch.nn as nn
from typing import Optional
# WARNING(review): debugging aid enabled globally at import time; anomaly
# detection makes every backward pass substantially slower. Looks like a
# leftover — TODO confirm and remove for production/training runs.
torch.autograd.set_detect_anomaly(True)
from dust3r.utils.camera import (
    matrix_to_quaternion,
    relative_pose_absT_quatR,
    quaternion_conjugate,
    quaternion_multiply,
)
from train_utils.general import check_and_fix_inf_nan


# Small margin keeping acos() inputs strictly inside (-1, 1), where the
# gradient of acos is finite (used by the geodesic-distance helpers below).
eps = 1e-6
class CameraPoseLoss(nn.Module):
    """
    Computes camera pose loss in extrinsic space (3x4 [R|t] matrices),
    including an optional relative pose component that encourages
    geometric consistency among cameras in a scene.
    """

    # Clamp margin keeping acos() arguments strictly inside (-1, 1), where
    # the gradient of acos is finite.
    _EPS = 1e-6

    def __init__(self, alpha: float = 1.0, compute_relative: bool = True, compute_absolute: bool = False):
        """
        Args:
            alpha (float): Loss weight. NOTE(review): currently unused by
                forward(); kept for interface compatibility — confirm intent.
            compute_relative (bool): Default for the relative-pose term, used
                when forward() is called with compute_relative=None.
            compute_absolute (bool): Default for the absolute-pose term, used
                when forward() is called with compute_absolute=None.
        """
        super().__init__()
        self.alpha = alpha
        self.compute_relative = compute_relative
        self.compute_absolute = compute_absolute

    def forward(self, pred_poses, gt_poses, compute_relative=None, compute_absolute=True):
        """
        Calculates the pose errors.

        Args:
            pred_poses (torch.Tensor): Predicted poses of shape (B, S, 3, 4) in extrinsic space.
            gt_poses (torch.Tensor): Ground truth poses of shape (B, S, 3, 4) in extrinsic space.
            compute_relative (bool, optional): Per-call override for the relative
                term; falls back to the constructor setting when None.
            compute_absolute (bool, optional): Per-call override for the absolute
                term. Defaults to True for backward compatibility; pass None to
                fall back to the constructor setting.

        Returns:
            A 4-tuple (abs_trans_err, abs_rot_err, rel_trans_err, rel_rot_err).
            Disabled (or non-computable) terms are returned as zero.
        """
        if pred_poses.numel() == 0:
            # Bug fix: this path previously returned FIVE zeros while the
            # non-empty path returns four values, breaking 4-way unpacking.
            zero = torch.tensor(0.0, device=pred_poses.device)
            return zero, zero, zero, zero

        # Split each 3x4 extrinsic into rotation (3x3) and translation (3,).
        pred_trans = pred_poses[..., :3, 3]
        gt_trans = gt_poses[..., :3, 3]
        pred_rot = pred_poses[..., :3, :3]
        gt_rot = gt_poses[..., :3, :3]

        # Resolve per-call overrides against the constructor defaults
        # (consistency fix: compute_absolute previously ignored the
        # constructor's setting entirely).
        if compute_absolute is None:
            compute_absolute = self.compute_absolute
        if compute_relative is None:
            compute_relative = self.compute_relative

        abs_trans_err = 0
        abs_rot_err = 0
        # Absolute pose error
        if compute_absolute:
            abs_trans_err = torch.norm(pred_trans - gt_trans, dim=-1).mean()
            rot_err_rad = self._geodesic_distance_from_matrices(pred_rot, gt_rot)
            abs_rot_err = rot_err_rad.mean()

        # Initialize relative errors to zero
        rel_trans_err = 0.0
        rel_rot_err = 0.0

        # Relative pose error needs at least two cameras per scene.
        if compute_relative and pred_poses.shape[1] > 1:
            rel_trans_err, rel_rot_err = self.compute_relative_pose_loss(
                pred_trans, pred_rot, gt_trans, gt_rot
            )

        return abs_trans_err, abs_rot_err, rel_trans_err, rel_rot_err

    @staticmethod
    def _geodesic_distance_from_matrices(R1, R2):
        """Computes the geodesic angle (radians) between batches of rotation matrices."""
        R_rel = torch.matmul(R1, R2.transpose(-1, -2))
        trace = R_rel.diagonal(offset=0, dim1=-2, dim2=-1).sum(-1)
        # tr(R) = 1 + 2*cos(theta); the clamp keeps acos differentiable.
        cos_theta = torch.clamp((trace - 1) / 2.0, -1.0 + CameraPoseLoss._EPS, 1.0 - CameraPoseLoss._EPS)
        return torch.acos(cos_theta)

    @staticmethod
    def _geodesic_distance_from_quaternions(q1, q2):
        """Computes the geodesic angle (radians) between two batches of quaternions.

        Assumes (w, x, y, z) ordering, given the use of component 0 as the
        scalar part — TODO(review) confirm against dust3r's convention.
        """
        q_diff = quaternion_multiply(q1, quaternion_conjugate(q2))
        # |w| = |cos(theta/2)|; abs() folds the double cover (q == -q).
        w = q_diff[..., 0].abs()
        cos_half_theta = torch.clamp(w, -1.0 + CameraPoseLoss._EPS, 1.0 - CameraPoseLoss._EPS)
        return 2 * torch.acos(cos_half_theta)

    @staticmethod
    def compute_relative_pose_loss(pred_trans, pred_rot, gt_trans, gt_rot):
        """
        Computes the mean relative pose errors over all unordered camera pairs.

        Args:
            pred_trans, gt_trans (torch.Tensor): (B, S, 3) translations.
            pred_rot, gt_rot (torch.Tensor): (B, S, 3, 3) rotation matrices.

        Returns:
            A tuple (rel_trans_err_mean, rel_rot_err_mean) of scalar tensors.
        """
        B, S, _ = pred_trans.shape

        # Convert rotations to quaternions
        pred_q = matrix_to_quaternion(pred_rot)
        gt_q = matrix_to_quaternion(gt_rot)

        # Broadcast every camera against every other: (B, S, S, ...).
        gt_trans1 = gt_trans.unsqueeze(2).expand(-1, -1, S, -1)
        gt_trans2 = gt_trans.unsqueeze(1).expand(-1, S, -1, -1)
        pred_trans1 = pred_trans.unsqueeze(2).expand(-1, -1, S, -1)
        pred_trans2 = pred_trans.unsqueeze(1).expand(-1, S, -1, -1)

        gt_q1 = gt_q.unsqueeze(2).expand(-1, -1, S, -1)
        gt_q2 = gt_q.unsqueeze(1).expand(-1, S, -1, -1)
        pred_q1 = pred_q.unsqueeze(2).expand(-1, -1, S, -1)
        pred_q2 = pred_q.unsqueeze(1).expand(-1, S, -1, -1)

        # Compute relative poses for all ordered pairs.
        gt_t_rel, gt_q_rel = relative_pose_absT_quatR(gt_trans1, gt_q1, gt_trans2, gt_q2)
        pred_t_rel, pred_q_rel = relative_pose_absT_quatR(pred_trans1, pred_q1, pred_trans2, pred_q2)

        # Per-pair errors.
        rel_trans_err = torch.norm(pred_t_rel - gt_t_rel, dim=-1)
        rel_rot_err_rad = CameraPoseLoss._geodesic_distance_from_quaternions(pred_q_rel, gt_q_rel)

        # We only need the upper triangle of the S x S matrix, excluding the
        # diagonal, to avoid double counting and self-comparison.
        mask = torch.triu(torch.ones(S, S, device=pred_trans.device), diagonal=1).bool()

        rel_trans_err_mean = rel_trans_err[:, mask].mean()
        rel_rot_err_mean = rel_rot_err_rad[:, mask].mean()

        return rel_trans_err_mean, rel_rot_err_mean

class PoseEncodingLoss(nn.Module):
    """Penalizes deviation between predicted and ground-truth pose encodings.

    Intended for the intrinsics portion of a pose encoding (e.g. focal
    length), operating directly on the raw encoding vectors.
    """

    def __init__(self, loss_type: str = "l1"):
        """
        Args:
            loss_type (str): One of "l1" (mean absolute error),
                "l2" (Euclidean norm over the last dimension),
                or "smooth_l1".

        Raises:
            ValueError: If `loss_type` is not a supported name.
        """
        super().__init__()
        self.loss_type = loss_type

        if loss_type == "smooth_l1":
            # Keep per-element values; the final averaging happens in forward().
            self.loss_fn = nn.SmoothL1Loss(reduction='none')
        elif loss_type not in ["l1", "l2"]:
            raise ValueError(f"Unknown loss type: {loss_type}")

    def forward(self, pred_pose_enc: torch.Tensor, gt_pose_enc: torch.Tensor, max_val: Optional[float] = None) -> torch.Tensor:
        """Compute the scalar encoding loss.

        Args:
            pred_pose_enc (torch.Tensor): (..., D) predicted pose encoding.
            gt_pose_enc (torch.Tensor): (..., D) ground-truth pose encoding.
            max_val (float, optional): If given, clamp the (pre-reduction)
                loss values to this ceiling. Defaults to None.

        Returns:
            torch.Tensor: The mean scalar loss; a graph-connected zero for
            empty input.
        """
        # Empty batch: return a zero that still participates in autograd.
        if pred_pose_enc.numel() == 0:
            return pred_pose_enc.sum() * 0

        if self.loss_type == "smooth_l1":
            # Per-element smooth L1, shape (..., D).
            values = self.loss_fn(pred_pose_enc, gt_pose_enc)
        else:
            residual = pred_pose_enc - gt_pose_enc
            if self.loss_type == "l1":
                # Per-element absolute error, shape (..., D).
                values = residual.abs()
            else:  # "l2" — guaranteed by the __init__ validation
                # Per-vector Euclidean distance, shape (...,).
                values = residual.norm(p=2, dim=-1)

        # Sanitize inf/NaN entries before reduction (project utility).
        values = check_and_fix_inf_nan(values, "loss_FL")
        if max_val is not None:
            values = values.clamp(max=max_val)

        return values.mean()
