import torch
from typing import Optional, Tuple

def normalize_pointcloud_vggt(pts3d, valid_mask, eps=1e-3, return_pts=True):
    """
    Normalize a point cloud by the mean distance of its valid points from the origin.

    Args:
        pts3d (torch.Tensor): 3D points of shape (B, S, H, W, 3).
        valid_mask (torch.Tensor): Bool/float mask of shape (B, S, H, W); nonzero
                                   entries mark points that contribute to the scale.
        eps (float): Small constant guarding against division by zero and used as
                     the lower clamp on the resulting scale.
        return_pts (bool): When True, also return the rescaled points.

    Returns:
        If return_pts is True:
            torch.Tensor: pts3d divided by its per-batch average scale.
            torch.Tensor: Per-batch average scale, shape (B,).
        If return_pts is False:
            torch.Tensor: Per-batch average scale, shape (B,).
    """
    # Per-point Euclidean distance from the origin.
    point_norms = pts3d.norm(dim=-1)

    # Accumulate distances over valid points only, and count them.
    masked_sum = (point_norms * valid_mask).sum(dim=(1, 2, 3))
    num_valid = valid_mask.sum(dim=(1, 2, 3))

    # Mean distance per batch item, kept within [eps, 1e5] for numerical safety.
    avg_scale = torch.clamp(masked_sum / (num_valid + eps), eps, 1e5)

    if not return_pts:
        return avg_scale

    # Broadcast (B,) -> (B, 1, 1, 1, 1) so division applies per batch item.
    scale = avg_scale.reshape(-1, 1, 1, 1, 1)
    return pts3d / scale, avg_scale

def normalize_pointclouds(all_pr_pts, all_gt_pts, mask_gt, not_metric_mask=None):
    """
    Normalizes predicted and ground truth point clouds to the same scale.

    The scale is determined by the ground truth points unless otherwise specified
    by not_metric_mask. This version returns tensors directly.

    Args:
        all_pr_pts (torch.Tensor): Predicted 3D points tensor of shape (B, S, H, W, 3).
        all_gt_pts (torch.Tensor): Ground truth 3D points tensor of shape (B, S, H, W, 3).
        mask_gt (torch.Tensor): The validity mask for the ground truth points.
        not_metric_mask (torch.Tensor, optional): A boolean tensor of shape (B,) indicating
                                                  which samples should be scaled by their own
                                                  prediction's metric instead of the GT metric.

    Returns:
        torch.Tensor: Normalized predicted points tensor of shape (B, S, H, W, 3).
        torch.Tensor: Normalized ground truth points tensor of shape (B, S, H, W, 3).
        torch.Tensor: The normalization factors used for the predicted points.
        torch.Tensor: The normalization factors derived from the ground truth points.
    """
    # 1. Normalize the ground truth points and get the GT scale factor.
    all_gt_pts_normalized, norm_factor_gt = normalize_pointcloud_vggt(all_gt_pts, mask_gt)

    # 2. Normalize the predictions. Delegating to normalize_pr_pointcloud keeps the
    #    GT-fallback / not_metric_mask handling in a single place instead of
    #    duplicating it here.
    all_pr_pts_normalized, norm_factor_pr = normalize_pr_pointcloud(
        all_pr_pts, norm_factor_gt, mask_gt, not_metric_mask
    )

    return all_pr_pts_normalized, all_gt_pts_normalized, norm_factor_pr, norm_factor_gt

def normalize_pr_pointcloud(all_pr_pts, norm_factor_gt, mask_gt, not_metric_mask=None):
    """
    Normalizes a predicted point cloud using precomputed ground-truth scale factors.

    Each batch sample's prediction is divided by the GT-derived factor, except for
    samples flagged by not_metric_mask, which are rescaled by a factor computed from
    their own predicted points instead.

    Args:
        all_pr_pts (torch.Tensor): Predicted 3D points tensor of shape (B, S, H, W, 3).
        norm_factor_gt (torch.Tensor): The normalization factors derived from the ground truth points.
        mask_gt (torch.Tensor): The validity mask for the ground truth points.
        not_metric_mask (torch.Tensor, optional): A boolean tensor of shape (B,) indicating
                                                  which samples should be scaled by their own
                                                  prediction's metric instead of the GT metric.

    Returns:
        torch.Tensor: Normalized predicted points tensor of shape (B, S, H, W, 3).
        torch.Tensor: The normalization factors used for the predicted points.
    """
    # Start from the GT scale factors; clone so the caller's tensor is untouched.
    norm_factor_pr = norm_factor_gt.clone()

    # Samples without a usable GT metric get a scale computed from their own prediction.
    needs_self_scale = not_metric_mask is not None and not_metric_mask.sum() > 0
    if needs_self_scale:
        self_scale = normalize_pointcloud_vggt(
            all_pr_pts[not_metric_mask], mask_gt[not_metric_mask], return_pts=False
        )
        norm_factor_pr[not_metric_mask] = self_scale

    # Broadcast (B,) -> (B, 1, 1, 1, 1) and rescale the whole prediction tensor.
    return all_pr_pts / norm_factor_pr.view(-1, 1, 1, 1, 1), norm_factor_pr

def normalize_depth_cam_extrinsics(
    norm_factor: torch.Tensor,
    depths: Optional[torch.Tensor] = None,
    cam_points: Optional[torch.Tensor] = None,
    extrinsics: Optional[torch.Tensor] = None,
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
    """
    Normalizes depths, camera points, and camera extrinsics by a given scale factor.

    This function scales down metric values by dividing them by `norm_factor`.
    For extrinsics, only the translation component is scaled, as rotation is
    scale-invariant.

    Args:
        norm_factor (torch.Tensor): A batch-wise normalization factor of shape `(B,)`.
        depths (Optional[torch.Tensor]): Depth maps of shape `(B, S, H, W)`.
        cam_points (Optional[torch.Tensor]): 3D points in camera coordinates of shape `(B, S, H, W, 3)`.
        extrinsics (Optional[torch.Tensor]): Camera extrinsic matrices of shape `(B, S, 4, 4)`
            (a `(B, S, 3, 4)` [R|t] layout also works, since only rows `:3` are touched).

    Returns:
        Tuple containing:
        - `normalized_depths` (torch.Tensor | None): Scaled depths, or None if input was None.
        - `normalized_cam_points` (torch.Tensor | None): Scaled camera points, or None if input was None.
        - `normalized_extrinsics` (torch.Tensor | None): Extrinsics with a scaled translation
          component, or None if input was None.
    """
    # Initialize return values
    normalized_depths = None
    normalized_cam_points = None
    normalized_extrinsics = None

    # 1. Normalize depths 📏
    # Reshapes the norm_factor from (B,) to (B, 1, 1, 1) to enable broadcasting.
    if depths is not None:
        normalized_depths = depths / norm_factor.view([norm_factor.shape[0]] + [1] * (depths.ndim - 1))

    # 2. Normalize camera points
    # Reshapes the norm_factor from (B,) to (B, 1, 1, 1, 1) for broadcasting.
    if cam_points is not None:
        normalized_cam_points = cam_points / norm_factor.view([norm_factor.shape[0]] + [1] * (cam_points.ndim - 1))

    if extrinsics is not None:
        # 3. Normalize camera extrinsics 📷
        # It is crucial to clone to avoid modifying the original tensor, which can cause side effects.
        normalized_extrinsics = extrinsics.clone()

        # Reshapes norm_factor from (B,) to (B, 1, 1) to broadcast to the translation vector.
        scale_view_3d = norm_factor.view(-1, 1, 1)

        # Apply scaling ONLY to the translation part `t` of the [R|t] matrix.
        # The translation vector is in the last column of the top 3 rows.
        normalized_extrinsics[:, :, :3, 3] = normalized_extrinsics[:, :, :3, 3] / scale_view_3d

    return normalized_depths, normalized_cam_points, normalized_extrinsics



