import warnings
from typing import Dict, Optional, Tuple, Union

import roma
import torch

def align_pred_to_gt_torch_batch_roma(
    pred_points: torch.Tensor,
    gt_points: torch.Tensor,
    valid_mask: Optional[torch.Tensor] = None,
    pred_conf: Optional[torch.Tensor] = None,
    conf_threshold: Optional[float] = None,
    conf_percentage: Optional[float] = None,
    with_scale: bool = True,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
    """
    Processes a batch of predicted 3D points and aligns each to its corresponding
    ground truth points using roma.rigid_points_registration.

    This function iterates over the batch dimension, filters points based on
    validity and confidence, computes the optimal transformation using roma,
    and then aggregates the results.

    Args:
        pred_points (torch.Tensor): Batched predicted 3D points. Shape (B, S, W, H, 3).
        gt_points (torch.Tensor): Batched ground truth 3D points. Shape (B, S, W, H, 3).
        valid_mask (Optional[torch.Tensor]): Batched boolean mask for valid points.
                                            Shape (B, S, W, H). Defaults to None.
        pred_conf (Optional[torch.Tensor]): Batched confidence scores.
                                            Shape (B, S, W, H). Defaults to None.
        conf_threshold (Optional[float]): A confidence score below which points are ignored.
        conf_percentage (Optional[float]): The top percentage of confident points to use for alignment.
                                           For example, 0.9 means use the top 90% of points.
                                           (Note: a 0-1 fraction here, unlike the 0-100 range
                                           used by `align_pred_to_gt_torch`.)
        with_scale (bool): If True, compute and apply scaling. If False,
                           only compute rotation and translation (scale=1.0)
                           and return None for 'scale' in the output dict.

    Returns:
        Tuple[torch.Tensor, Dict[str, torch.Tensor]]: A tuple containing:
            - batch_aligned_points (torch.Tensor): The batch of transformed predicted
                                                   points, in the input dtype.
            - batch_transform_params (Dict): A dictionary with aggregated
                                             'rotation' (tensor), 'translation' (tensor),
                                             and 'scale' (tensor or None).
    """
    if pred_points.dim() != 5:
        raise ValueError(f"Expected pred_points to be a 5D tensor (B, S, W, H, 3), but got {pred_points.dim()}D.")

    batch_size = pred_points.shape[0]
    original_shape = pred_points.shape[1:]  # Keep original (S, W, H, 3) shape for output
    original_dtype = pred_points.dtype
    device = pred_points.device

    # Per-sample results, aggregated into batch tensors at the end.
    aligned_points_list = []
    scales, rotations, translations = [], [], []

    for i in range(batch_size):
        # --- 1. Slice the batch and flatten points from (S,W,H,3) to (N,3) ---
        pred_sample = pred_points[i].reshape(-1, 3)
        gt_sample = gt_points[i].reshape(-1, 3)

        # --- 2. Build a mask to select points for registration ---
        if valid_mask is not None:
            mask = valid_mask[i].reshape(-1)
        else:
            mask = torch.ones(pred_sample.shape[0], dtype=torch.bool, device=device)

        # Refine mask and get weights from confidence scores.
        weights = None
        if pred_conf is not None:
            conf_sample = pred_conf[i].reshape(-1)

            if conf_threshold is not None:
                # Hard confidence threshold takes precedence over the percentage.
                mask = mask & (conf_sample > conf_threshold)
            elif conf_percentage is not None:
                valid_confs = conf_sample[mask]
                if valid_confs.numel() > 0:
                    # Quantile below which points are dropped; e.g. 0.9 keeps the
                    # top 90%. float32 cast: torch.quantile rejects half precision.
                    k = max(0.0, 1.0 - conf_percentage)
                    percentile_val = torch.quantile(valid_confs.to(torch.float32), k)
                    mask = mask & (conf_sample >= percentile_val)

            # Final weights are the confidence scores of the selected points.
            weights = conf_sample[mask]

        # Points actually used to compute the transformation.
        pts1_for_reg = pred_sample[mask]
        pts2_for_reg = gt_sample[mask]

        # --- 3. Compute transformation using roma ---
        # Need at least 3 points to determine a rigid transformation.
        if pts1_for_reg.shape[0] < 3:
            # Under-determined: fall back to the identity transformation.
            s = torch.tensor(1.0, device=device, dtype=original_dtype)
            R = torch.eye(3, device=device, dtype=original_dtype)
            T = torch.zeros(3, device=device, dtype=original_dtype)
            aligned_sample = pred_sample
        else:
            # roma expects batched float32 input; disable autocast so the
            # registration runs in full precision.
            with torch.amp.autocast("cuda", enabled=False):
                transform_result = roma.rigid_points_registration(
                    pts1_for_reg.unsqueeze(0).to(torch.float32),
                    pts2_for_reg.unsqueeze(0).to(torch.float32),
                    weights=weights.unsqueeze(0).to(torch.float32) if (weights is not None and weights.numel() > 0) else None,
                    compute_scaling=with_scale,
                )

            # Unpack results; when scaling is disabled, use s = 1 so the same
            # formula applies (multiplying by exactly 1.0 is a no-op).
            if with_scale:
                R, T, s = transform_result
                s = s.squeeze(0)
            else:
                R, T = transform_result
                s = torch.ones((), device=device, dtype=torch.float32)
            R, T = R.squeeze(0), T.squeeze(0)

            # Apply the transform in float32, then restore the caller's dtype.
            # BUGFIX: previously the roma branch produced float32 results while
            # the <3-point fallback kept the input dtype, so torch.stack below
            # failed on mixed dtypes when the input was e.g. float16.
            aligned_sample = (s * (pred_sample.to(torch.float32) @ R.T) + T).to(original_dtype)
            s = s.to(original_dtype)
            R = R.to(original_dtype)
            T = T.to(original_dtype)

        # Append per-sample results.
        aligned_points_list.append(aligned_sample.reshape(original_shape))
        if with_scale:
            # Only store scale if it was computed.
            scales.append(s)
        rotations.append(R)
        translations.append(T)

    # --- 4. Aggregate results into batch tensors ---
    batch_aligned_points = torch.stack(aligned_points_list, dim=0)
    batch_transform_params = {
        'scale': torch.stack(scales, dim=0) if with_scale else None,
        'rotation': torch.stack(rotations, dim=0),
        'translation': torch.stack(translations, dim=0),
    }

    return batch_aligned_points, batch_transform_params

def umeyama_alignment_torch(x: torch.Tensor, y: torch.Tensor, with_scale: bool = True) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Computes the optimal similarity transformation between two sets of corresponding points using PyTorch.

    Finds scale s, rotation R, and translation t minimizing the RMS error
    between s * R @ x_i + t and y_i (Umeyama's method).

    Args:
        x (torch.Tensor): The first set of points, shape (N, 3).
        y (torch.Tensor): The second set of corresponding points, shape (N, 3).
        with_scale (bool): Whether to estimate the scale factor.

    Returns:
        Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: A tuple containing:
            - s (torch.Tensor): The scale factor (0-dim tensor).
            - R (torch.Tensor): The 3x3 rotation matrix.
            - t (torch.Tensor): The 3D translation vector.
    """
    if x.device != y.device:
        raise ValueError("Input tensors must be on the same device.")
    device = x.device

    # All computation is done in float32 regardless of input dtype.
    src = x.to(torch.float32)
    dst = y.to(torch.float32)

    # Centroids and centered point sets.
    centroid_src = src.mean(dim=0)
    centroid_dst = dst.mean(dim=0)
    src_centered = src - centroid_src
    dst_centered = dst - centroid_dst

    # Cross-covariance between the two centered point sets.
    cov = dst_centered.T @ src_centered
    # Small diagonal jitter keeps the SVD backward pass numerically stable.
    cov = cov + torch.eye(cov.shape[-1], device=cov.device, dtype=cov.dtype) * 1e-9

    # SVD of the covariance (float32 for compatibility).
    U, D, Vt = torch.linalg.svd(cov.to(torch.float32))

    # Correction matrix guaranteeing a proper rotation (det(R) = +1).
    correction = torch.eye(src.shape[1], device=device, dtype=src.dtype)
    det_product = torch.linalg.det(U.to(torch.float32)) * torch.linalg.det(Vt.to(torch.float32))
    if det_product < 0:
        correction[-1, -1] = -1

    R = U @ correction @ Vt

    # Scale factor: trace(D @ correction) / variance of the source points.
    if with_scale:
        var_src = torch.sum(src_centered ** 2)
        if var_src > 1e-8:
            s = torch.sum(D * torch.diag(correction)) / var_src
        else:
            # Degenerate source cloud: avoid division by ~0.
            s = torch.tensor(1.0, device=device)
    else:
        s = torch.tensor(1.0, device=device)

    # Translation maps the scaled, rotated source centroid onto the target centroid.
    t = centroid_dst - s * (R @ centroid_src)

    return s, R, t

def align_pred_to_gt_torch(
    pred_points: torch.Tensor,
    gt_points: torch.Tensor,
    valid_mask: Optional[torch.Tensor] = None,
    pred_conf: Optional[torch.Tensor] = None,
    conf_threshold: Optional[float] = None,
    conf_percentage: Optional[float] = None,
    with_scale: bool = True,
) -> Tuple[torch.Tensor, Dict[str, Union[float, torch.Tensor]]]:
    """
    Aligns predicted 3D points to ground truth points using the Umeyama algorithm (PyTorch version).

    The function filters points based on optional masks/thresholds, calculates the
    optimal similarity transformation on this subset, and applies it to the
    *original, unfiltered* predicted points. All computations are done on the tensor's device (CPU or GPU).

    Args:
        pred_points (torch.Tensor): Predicted 3D points. Shape (S, H, W, 3).
        gt_points (torch.Tensor): Ground truth 3D points. Shape (S, H, W, 3).
        valid_mask (Optional[torch.Tensor]): Boolean mask for valid points. If None, all
                                              points are considered valid. Shape (S, H, W).
        pred_conf (Optional[torch.Tensor]): Confidence scores for predicted points.
                                             Shape (S, H, W). Defaults to None.
        conf_threshold (Optional[float]): Minimum absolute confidence to consider a point.
                                          Defaults to None.
        conf_percentage (Optional[float]): Top percentage of confident points to use.
                                           Value between 0-100. Defaults to None.
        with_scale (bool): If True, estimate and apply a scale factor; otherwise
                           only rotation and translation are applied (scale stays 1.0).

    Returns:
        Tuple[torch.Tensor, Dict[str, Union[float, torch.Tensor]]]: A tuple containing:
            - aligned_pred_points (torch.Tensor): Transformed predicted points, on the same device.
            - transform_params (Dict): A dictionary with 'scale', 'rotation', and 'translation'.

    Raises:
        ValueError: If conf_percentage is given but not in (0, 100].
    """
    device = pred_points.device
    S, H, W, _ = pred_points.shape

    # --- 1. Reshape data for processing ---
    pred_points_flat = pred_points.reshape(-1, 3)
    gt_points_flat = gt_points.reshape(-1, 3)

    # --- 2. Create a combined filter mask ---
    if valid_mask is None:
        combined_mask = torch.full((gt_points_flat.shape[0],), True, dtype=torch.bool, device=device)
    else:
        combined_mask = valid_mask.flatten().bool()

    if pred_conf is not None:
        flat_conf = pred_conf.flatten()
        if conf_threshold is not None:
            # Absolute threshold takes precedence over the percentage criterion.
            combined_mask = combined_mask & (flat_conf >= conf_threshold)
        elif conf_percentage is not None:
            if not (0 < conf_percentage <= 100):
                raise ValueError("conf_percentage must be between 0 and 100.")
            current_valid_conf_scores = flat_conf[combined_mask]
            if current_valid_conf_scores.numel() > 0:
                # Cast to float32: torch.quantile does not support half precision
                # (matches the cast done in align_pred_to_gt_torch_batch_roma).
                percentile_value = torch.quantile(
                    current_valid_conf_scores.to(torch.float32),
                    (100 - conf_percentage) / 100.0,
                )
                combined_mask = combined_mask & (flat_conf >= percentile_value)

    # --- 3. Filter points for finding the alignment ---
    pred_for_align = pred_points_flat[combined_mask]
    gt_for_align = gt_points_flat[combined_mask]

    # A similarity transform needs at least 3 non-degenerate correspondences.
    if pred_for_align.shape[0] < 3:
        warnings.warn("Fewer than 3 valid points. Returning identity transformation.")
        identity_transform = {
            'scale': torch.tensor(1.0, device=device),
            'rotation': torch.eye(3, device=device),
            'translation': torch.zeros(3, device=device)
        }
        return pred_points, identity_transform

    # --- 4. Use Umeyama algorithm to find the transformation parameters ---
    scale, rotation, translation = umeyama_alignment_torch(x=pred_for_align, y=gt_for_align, with_scale=with_scale)

    # --- 5. Apply transformation to the *original, unfiltered* predicted points ---
    if with_scale:
        aligned_pred_points_flat = (scale * pred_points_flat @ rotation.T) + translation
    else:
        aligned_pred_points_flat = (pred_points_flat @ rotation.T) + translation

    # --- 6. Reshape the aligned points back to the original format ---
    aligned_pred_points = aligned_pred_points_flat.reshape(S, H, W, 3)

    transform_params = {'scale': scale, 'rotation': rotation, 'translation': translation}
    return aligned_pred_points, transform_params


def align_pred_to_gt_torch_batch(
    pred_points: torch.Tensor,
    gt_points: torch.Tensor,
    valid_mask: Optional[torch.Tensor] = None,
    pred_conf: Optional[torch.Tensor] = None,
    conf_threshold: Optional[float] = None,
    conf_percentage: Optional[float] = None,
    with_scale: bool = True,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
    """
    Processes a batch of predicted 3D points and aligns each to its corresponding
    ground truth points using the Umeyama algorithm.

    This function iterates over the batch dimension and calls `align_pred_to_gt_torch`
    for each sample, then aggregates the results.

    Args:
        pred_points (torch.Tensor): Batched predicted 3D points. Shape (B, S, W, H, 3).
        gt_points (torch.Tensor): Batched ground truth 3D points. Shape (B, S, W, H, 3).
        valid_mask (Optional[torch.Tensor]): Batched boolean mask for valid points.
                                              Shape (B, S, W, H). Defaults to None.
        pred_conf (Optional[torch.Tensor]): Batched confidence scores.
                                             Shape (B, S, W, H). Defaults to None.
        conf_threshold (Optional[float]): See `align_pred_to_gt_torch`.
        conf_percentage (Optional[float]): See `align_pred_to_gt_torch`.
        with_scale (bool): If True, estimate and apply a scale factor per sample.
                           See `align_pred_to_gt_torch`.

    Returns:
        Tuple[torch.Tensor, Dict[str, torch.Tensor]]: A tuple containing:
            - batch_aligned_points (torch.Tensor): The batch of transformed predicted points.
            - batch_transform_params (Dict): A dictionary with aggregated 'scale' (tensor),
                                             'rotation' (tensor), and 'translation' (tensor).

    Raises:
        ValueError: If `pred_points` is not a 5D tensor.
    """
    if pred_points.dim() != 5:
        raise ValueError(f"Expected pred_points to be a 5D tensor (B, S, W, H, 3), but got {pred_points.dim()}D.")

    batch_size = pred_points.shape[0]

    # Per-sample results, aggregated after the loop.
    aligned_points_list = []
    transform_params_list = []

    for i in range(batch_size):
        # Slice the batch dimension for all inputs; optional inputs stay None.
        valid_mask_sample = valid_mask[i] if valid_mask is not None else None
        pred_conf_sample = pred_conf[i] if pred_conf is not None else None

        # Call the single-sample alignment function.
        aligned_points, transform_params = align_pred_to_gt_torch(
            pred_points=pred_points[i],
            gt_points=gt_points[i],
            valid_mask=valid_mask_sample,
            pred_conf=pred_conf_sample,
            conf_threshold=conf_threshold,
            conf_percentage=conf_percentage,
            with_scale=with_scale
        )

        aligned_points_list.append(aligned_points)
        transform_params_list.append(transform_params)

    # Aggregate the aligned points and transform parameters into batch tensors.
    batch_aligned_points = torch.stack(aligned_points_list, dim=0)
    batch_transform_params = {
        'scale': torch.stack([p['scale'] for p in transform_params_list], dim=0),
        'rotation': torch.stack([p['rotation'] for p in transform_params_list], dim=0),
        'translation': torch.stack([p['translation'] for p in transform_params_list], dim=0)
    }

    return batch_aligned_points, batch_transform_params

def align_extrinsics_torch(
    extrinsics: torch.Tensor,
    transform_params: Dict[str, torch.Tensor],
    with_scale: bool = True
) -> torch.Tensor:
    """
    Applies a batch of similarity transformations to a batch of camera extrinsics.

    Updates the camera poses so that they remain consistent with point clouds
    transformed by the similarity parameters in `transform_params`.

    Args:
        extrinsics (torch.Tensor): The original camera extrinsics.
                                   Shape can be (B, S, 3, 4) or (B, S, 4, 4).
        transform_params (Dict[str, torch.Tensor]): A dictionary from `align_pred_to_gt_torch_batch`
                                                    containing 'scale', 'rotation', and 'translation'.
                                                    - 'scale': (B,)
                                                    - 'rotation': (B, 3, 3)
                                                    - 'translation': (B, 3)
        with_scale (bool): If True, applies the scaling factor to the translation component.
                           This should match the `with_scale` argument used to generate
                           the `transform_params`.

    Returns:
        torch.Tensor: The aligned camera extrinsics, with the same shape as the input.
    """
    B, S, _, _ = extrinsics.shape

    # Broadcastable views of the per-batch similarity parameters across the
    # sequence (S) dimension.
    sim_R = transform_params['rotation'].view(B, 1, 3, 3)     # (B, 1, 3, 3)
    sim_t = transform_params['translation'].view(B, 1, 3, 1)  # (B, 1, 3, 1)

    # Split each extrinsic into its rotation and translation components.
    cam_R = extrinsics[..., :3, :3]  # (B, S, 3, 3)
    cam_t = extrinsics[..., :3, 3]   # (B, S, 3)

    # New rotation: R'_e = R_e @ R^T, where R is the similarity rotation.
    aligned_R = torch.matmul(cam_R, sim_R.transpose(-2, -1))

    # Offset contributed by the similarity translation: R'_e @ t, squeezed
    # from (B, S, 3, 1) down to (B, S, 3) for the subtraction below.
    translation_offset = torch.matmul(aligned_R, sim_t).squeeze(-1)

    # New translation: t'_e = s * t_e - R'_e @ t (s omitted when with_scale=False).
    if with_scale:
        sim_s = transform_params['scale'].view(B, 1, 1)  # (B, 1, 1) broadcast
        aligned_t = sim_s * cam_t - translation_offset
    else:
        aligned_t = cam_t - translation_offset

    # Write the updated rotation/translation back into a copy of the input.
    aligned_extrinsics = extrinsics.clone()
    aligned_extrinsics[..., :3, :3] = aligned_R
    aligned_extrinsics[..., :3, 3] = aligned_t
    return aligned_extrinsics