
from concurrent.futures import ThreadPoolExecutor
import gc
import roma
import torch

from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from torch.distributed import all_gather_object, barrier

import open3d as o3d
from fast3r.eval.recon_metric import accuracy, completion, calculate_corresponding_points_error

from scipy.spatial import cKDTree

from fast3r.dust3r.heads.camera import pose_encoding_to_camera, camera_from_preds, get_local_pts3d_from_depth
from fast3r.dust3r.utils.geometry import inv, geotrf
from fast3r.eval.cam_pose_metric import camera_to_rel_deg, calculate_auc
from fast3r.eval.metrics_nopo import pose_auc_wrap
from fast3r.eval.metrics_nopo_batch import compute_pose_error_batch
from fast3r.dust3r.post_process import estimate_focal_knowing_depth_and_confidence_mask
from fast3r.dust3r.cloud_opt.init_im_poses import fast_pnp
from fast3r.dust3r.loss.vggt_loss import normalize_pointcloud_flat
from torchmetrics.aggregation import BaseAggregator
from fast3r.utils import pylogger
log = pylogger.RankedLogger(__name__, rank_zero_only=True)
class AccumulatedSum(BaseAggregator):
    """Torchmetrics aggregator that maintains a running integer sum across updates."""

    def __init__(self, **kwargs: Any) -> None:
        # Let BaseAggregator manage the state: a single "sum_value" tensor,
        # initialized to zero and reduced with "sum" across processes.
        super().__init__(
            fn="sum",
            default_value=torch.tensor(0.0, dtype=torch.long),
            nan_strategy='warn',
            state_name="sum_value",
            **kwargs,
        )

    def update(self, value: int) -> None:
        """Add ``value`` to the running total."""
        self.sum_value += value

    def compute(self) -> torch.LongTensor:
        """Return the accumulated sum."""
        return self.sum_value
def calculate_intra_view_proximity(view, distance_threshold):
    """
    Compute the percentage of unique point pairs within one view whose mutual
    distance is below a threshold.

    Args:
        view (torch.Tensor): Point map of shape [H, W, 3].
        distance_threshold (float): Radius used for the neighbor search.

    Returns:
        float: Percentage of pairs closer than the threshold.
    """
    print(f"Analyzing a single view with a distance threshold of {distance_threshold}...")

    # Flatten [H, W, 3] -> [N, 3] and hand the points to scipy.
    pts = view.reshape(-1, 3).numpy()
    n = pts.shape[0]
    if n == 0:
        return 0.0

    print(f"Building k-d tree for {n} points...")
    kdtree = cKDTree(pts)

    # Radius search: neighbor_lists[i] holds indices of points within the
    # threshold of pts[i] (including i itself).
    print("Performing radius search (query_ball_point)...")
    neighbor_lists = kdtree.query_ball_point(pts, r=distance_threshold)

    # Every list contains a self-hit (distance 0) and each pair is counted
    # twice (i appears in j's list and vice versa): subtract n, then halve.
    connection_count = sum(map(len, neighbor_lists))
    close_pairs = (connection_count - n) / 2

    # All unique pairs over n points: n choose 2.
    possible_pairs = n * (n - 1) / 2
    if possible_pairs == 0:
        return 0.0

    pct = (close_pairs / possible_pairs) * 100

    print("\n--- Analysis Results ---")
    print(f"Total points: {n:,}")
    print(f"Total possible unique pairs: {int(possible_pairs):,}")
    print(f"Number of pairs below threshold: {int(close_pairs):,}")

    return pct

def compare_views_by_points(sample, distance_threshold):
    """
    Build the full pairwise proximity matrix for a list of views.

    Entry (i, j) is the percentage of points of view i that lie within
    ``distance_threshold`` of their nearest neighbor in view j; the diagonal
    is fixed at 100. One k-d tree is built per view up front so each tree is
    constructed exactly once.
    """
    n_views = len(sample)

    print("Step 1: Pre-computing point clouds and k-d trees for all views...")
    clouds = [view.reshape(-1, 3).numpy() for view in sample]
    trees = [cKDTree(cloud) for cloud in clouds]
    print("All k-d trees built.")

    matrix = np.zeros((n_views, n_views))

    print("\nStep 2: Running pairwise queries using pre-built trees...")
    for src_idx, src_cloud in enumerate(clouds):
        src_count = len(src_cloud)
        for tgt_idx, tgt_tree in enumerate(trees):
            if src_idx == tgt_idx:
                # A view trivially matches itself.
                matrix[src_idx, tgt_idx] = 100.0
                continue
            # Nearest-neighbor distance of every source point in the target view.
            nn_dist, _ = tgt_tree.query(src_cloud, k=1)
            close = np.sum(nn_dist < distance_threshold)
            matrix[src_idx, tgt_idx] = (close / src_count) * 100

    print("All queries complete.")
    return matrix

def calculate_close_points_percentage(view_a, view_b, distance_threshold=0.01):
    """
    Percentage of points of ``view_a`` lying within ``distance_threshold`` of
    at least one point of ``view_b``.

    Args:
        view_a (torch.Tensor): First view, shape [H, W, 3].
        view_b (torch.Tensor): Second view, shape [H, W, 3].
        distance_threshold (float): Maximum distance for points to count as "close".

    Returns:
        float: The percentage of close points.
    """
    # Flatten both point maps to [N, 3] NumPy arrays.
    pts_a = view_a.reshape(-1, 3).numpy()
    pts_b = view_b.reshape(-1, 3).numpy()
    n_a = pts_a.shape[0]

    # Index view_b once; each nearest-neighbor lookup is then logarithmic.
    tree = cKDTree(pts_b)
    nn_dist, _ = tree.query(pts_a, k=1)

    # Fraction of source points whose nearest neighbor is within range.
    close_count = np.sum(nn_dist < distance_threshold)
    pct = (close_count / n_a) * 100

    print(f"Total points in View A: {n_a}")
    print(f"Number of points in View A close to View B: {close_count}")
    print(f"Distance Threshold: {distance_threshold}")

    return pct

def gather_deduplicated_scene_metrics(reconstruction_metrics_per_epoch):
    """Gathers and deduplicates scene-specific metrics across all ranks by dataset."""
    world_size = torch.distributed.get_world_size()
    per_rank = [None] * world_size
    all_gather_object(per_rank, reconstruction_metrics_per_epoch)

    # Merge the per-rank {dataset: {scene: metrics}} dicts. dict.update keeps
    # the last value seen for a scene key, so scenes evaluated on several
    # ranks collapse to a single entry.
    merged = {}
    for rank_metrics in per_rank:
        for dataset_name, scenes in rank_metrics.items():
            merged.setdefault(dataset_name, {}).update(scenes)

    return merged

def align_global_pts3d_to_local(preds: List[Dict], views: List[Dict], min_conf_thr_percentile: float = 0, conf_keys: Optional[Dict[str, str]] = None, result_keys: Optional[Dict[str, str]] = None) -> None:
    """
    Aligns the global point clouds to the local (self) coordinate frame for each view.

    Args:
        preds (List[Dict]): A list of dictionaries containing predictions for each view.
            Each dictionary is expected to have keys specified in `result_keys`
            (for 3D points) and `conf_keys` (for confidences).
        views (List[Dict]): A list of dictionaries containing ground truth data for each view,
            potentially including a 'valid_mask'.
        min_conf_thr_percentile (float): Minimum confidence percentile threshold (default is 0).
            This percentile is applied to the confidences of the local (target) points.
        conf_keys (Dict[str, str]): Dictionary specifying the keys for confidence scores.
            Expected keys: 'local' (for local/self view confidences) and
            'global' (for global/other view confidences).
        result_keys (Dict[str, str]): Dictionary specifying the keys for 3D point data.
            Expected keys: 'local' (for local/self view 3D points) and
            'global' (for global/other view 3D points).

    Modifies:
        preds: Each pred dictionary in the list will have a new key
               'pts3d_in_global_view_aligned_to_self', which contains the
               global points aligned to the local (self) view's coordinate frame.
    """
    if result_keys is None:
        # Provide default keys if None, matching the original function's implicit expectation
        result_keys = {'local': 'pts3d_in_self_view', 'global': 'pts3d_in_other_view'}
    if conf_keys is None:
        conf_keys = {'local': 'conf_self', 'global': 'conf'}

    # Check if required keys are present in preds
    for i, pred in enumerate(preds):
        if result_keys['local'] not in pred:
            raise ValueError(f"Key '{result_keys['local']}' (local points) not found in preds[{i}].")
        if conf_keys['local'] not in pred:
            raise ValueError(f"Key '{conf_keys['local']}' (local confidence) not found in preds[{i}].")
        if result_keys['global'] not in pred:
            raise ValueError(f"Key '{result_keys['global']}' (global points) not found in preds[{i}].")
        # conf_keys['global'] is fetched but not used for thresholding in this func, still good to check
        if conf_keys['global'] not in pred:
            raise ValueError(f"Key '{conf_keys['global']}' (global confidence) not found in preds[{i}].")


    num_views = len(preds)

    # Get batch size and dimensions from the first valid prediction and key
    # Assuming all preds have the same batch structure if preds is not empty
    # Using local points key for shape determination, global could also be used.
    B, H, W, _ = preds[0][result_keys["local"]].shape  # Get batch size and dimensions

    # Function to process a single (view_index, batch_index) pair
    def process_view_batch(view_index: int, batch_index: int):
        # Estimates a similarity transform (R, t, s) from global-head points to
        # local-head points on confident+valid pixels, then applies it to ALL
        # global points of this (view, batch) item.
        pred = preds[view_index]
        view = views[view_index]

        # Points to be transformed (source) are the global points.
        # Target coordinate system is defined by local points.
        # Confidence from local points (target frame) is used for filtering.

        pts3d_global_source = pred[result_keys["global"]][batch_index]  # Shape: (H, W, 3) - These are what we align
        # conf_global_source = pred[conf_keys["global"]][batch_index] # Confidence of global points

        pts3d_local_target = pred[result_keys["local"]][batch_index]   # Shape: (H, W, 3) - This is the target frame
        conf_local_target = pred[conf_keys["local"]][batch_index]    # Shape: (H, W)   - Confidence for filtering

        H_cur, W_cur, _ = pts3d_global_source.shape # Current H, W for this item

        # Get valid_mask if it exists
        if 'valid_mask' in view:
            valid_mask = view['valid_mask'][batch_index]           # Shape: (H, W)
        else:
            # Create a default mask of all ones if not provided
            valid_mask = torch.ones_like(conf_local_target, dtype=torch.bool)

        # Flatten the confidences of the local (target) frame to compute the threshold
        conf_local_target_flat = conf_local_target.reshape(-1)      # Shape: (N_points,)

        # Compute the confidence threshold
        # NOTE(review): torch.quantile caps input size at ~16M elements — confirm H*W stays below that.
        conf_threshold_value = torch.quantile(conf_local_target_flat, min_conf_thr_percentile / 100.0)

        # Create a mask for high-confidence points in the local (target) frame
        conf_mask = conf_local_target >= conf_threshold_value

        # Combine masks (confidence and validity)
        final_mask = conf_mask & valid_mask                         # Shape: (H, W)

        # Flatten the points and masks
        pts_global_source_flat = pts3d_global_source.view(-1, 3)    # Shape: (N_points, 3)
        pts_local_target_flat = pts3d_local_target.view(-1, 3)      # Shape: (N_points, 3)
        final_mask_flat = final_mask.view(-1)                       # Shape: (N_points,)

        # Select points for registration based on the final_mask
        # x: source points (from global predictions) to be transformed
        # y: target points (from local predictions) defining the target frame
        x = pts_global_source_flat[final_mask_flat]    # (M, 3)
        y = pts_local_target_flat[final_mask_flat]     # (M, 3)

        # Check if we have enough points after applying confidence threshold
        if x.shape[0] < 3:
            # Not enough points after applying confidence threshold, try with valid_mask only
            final_mask = valid_mask # Revert to using only the valid_mask
            final_mask_flat = final_mask.view(-1)

            x = pts_global_source_flat[final_mask_flat]
            y = pts_local_target_flat[final_mask_flat]

        # Check again if we have enough points
        if x.shape[0] < 3:
            # Not enough points even after using valid_mask only.
            # Use identity transformation.
            R = torch.eye(3, device=pts_global_source_flat.device, dtype=pts_global_source_flat.dtype)
            t = torch.zeros(3, device=pts_global_source_flat.device, dtype=pts_global_source_flat.dtype)
            s = 1.0
        else:
            # Compute the rigid transformation (R, t, s) to align x (global src) to y (local tgt)
            R, t, s = roma.rigid_points_registration(
                x, y, compute_scaling=True
            )

        # Apply the transformation to all original global source points
        # pts_global_aligned = s * (R @ pts_global_source_flat.T).T + t -> This is for column vectors
        # For row vectors (N,3): s * (pts_global_source_flat @ R.T) + t
        pts_global_source_aligned_flat = s * (pts_global_source_flat @ R.T) + t  # Shape: (N_points, 3)

        # Reshape back to (H, W, 3)
        pts_global_aligned_to_local_frame = pts_global_source_aligned_flat.view(H_cur, W_cur, 3)

        return (view_index, batch_index, pts_global_aligned_to_local_frame)

    # Create a list of all tasks (view_index, batch_index) pairs
    tasks = [(view_idx, batch_idx) for view_idx in range(num_views) for batch_idx in range(B)]

    # Use ThreadPoolExecutor to parallelize across tasks
    # Initialize aligned_pts_dict to store results before assigning to preds
    # This ensures thread safety if 'preds' list itself is not modified concurrently by threads
    # (though list append/dict setitem on different indices from different threads is generally safe for CPython GIL)
    aligned_pts_per_view = {view_idx: [None] * B for view_idx in range(num_views)}

    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(process_view_batch, view_idx, batch_idx) for view_idx, batch_idx in tasks]

        # Iterating futures in submission order keeps result collection deterministic.
        for future in futures:
            view_index, batch_index, pts_aligned_result = future.result()
            aligned_pts_per_view[view_index][batch_index] = pts_aligned_result

    # Update preds with the aligned points
    output_key_name = 'pts3d_in_global_view_aligned_to_self'
    for view_index in range(num_views):
        # Stack the aligned points for the current view back into a tensor of shape (B, H, W, 3)
        # This assumes all items in the batch have the same H, W, which was determined by H_cur, W_cur
        # and used for reshaping.
        preds[view_index][output_key_name] = torch.stack(aligned_pts_per_view[view_index], dim=0)

def get_local_pts3d_from_depth_views(pred_views, result_key='local_from_depth', fuv_key=None, depth_key=None, fuv_scaler=1.0, gt_views=None):
    """
    Back-project each view's predicted depth map into camera-frame 3D points.

    For every prediction dict in `pred_views`, reads the intrinsics encoding
    under `fuv_key` and the depth map under `depth_key`, unprojects them with
    `get_local_pts3d_from_depth`, and stores the resulting (B, H, W, 3) point
    map in-place under `result_key`. No-op if the first view already carries
    `result_key`.

    Args:
        pred_views (List[Dict]): Per-view prediction dicts; modified in place.
        result_key (str): Key under which the camera-frame points are stored.
        fuv_key (str): Key of the intrinsics encoding in each pred dict.
        depth_key (str): Key of the depth map in each pred dict.
        fuv_scaler (float): Scale factor forwarded to the unprojection helper.
        gt_views (Optional[List[Dict]]): Unused; kept for interface compatibility.
    """
    # Idempotency guard: if the first view has the result, assume all do.
    if result_key in pred_views[0]:
        return
    for pred in pred_views:
        fuv = pred[fuv_key]      # (B, 2) intrinsics encoding — presumably focal terms; confirm against get_local_pts3d_from_depth
        depth = pred[depth_key]  # (B, H, W) predicted depth map
        pred[result_key] = get_local_pts3d_from_depth(depth, fuv, fuv_scaler)  # (B, H, W, 3)

def align_local_pts3d_to_global_with_cam(preds, pose_key='camera_pose', conf_keys=None, pred_key=None, result_key='pts3d_in_self_view_aligned_to_global', gt_views=None):
    """
    Lift local (camera-frame) points into the global frame using predicted poses.

    Decodes per-view camera matrices from the predictions via
    `camera_from_preds` (with `align=True`) and applies each pose to the
    points stored under `pred_key`, writing the transformed points in-place
    under `result_key`. No-op if the first view already carries `result_key`.

    Args:
        preds (List[Dict]): Per-view prediction dicts; modified in place.
        pose_key (str): Key of the pose encoding inside each pred dict.
        conf_keys (Optional[Dict]): Unused; kept for interface compatibility.
        pred_key (str): Key of the camera-frame points to transform.
        result_key (str): Key under which the transformed points are stored.
        gt_views (Optional[List[Dict]]): Unused; kept for interface compatibility.
    """
    # Idempotency guard: skip if the result was already computed.
    if result_key in preds[0]:
        return
    poses_pred = camera_from_preds(preds, pose_key, align=True)
    for pose, pred in zip(poses_pred, preds):
        # geotrf applies the (float) pose matrix to the per-pixel points.
        pred[result_key] = geotrf(pose.float(), pred[pred_key])

def align_local_pts3d_to_global(preds, views, min_conf_thr_percentile=0, conf_keys=None, result_keys=None):
    """
    Aligns the local point clouds to the global coordinate frame.

    For every (view, batch item), a similarity transform (R, t, s) is
    estimated between the local-head and global-head points on confident &
    valid pixels, then applied to all local points of that item.

    Args:
        preds (List[Dict]): A list of dictionaries containing predictions for each view.
        views (List[Dict]): A list of dictionaries containing ground truth data for each view,
            potentially including a 'valid_mask'.
        min_conf_thr_percentile (float): Minimum confidence percentile threshold (default is 0),
            applied to the global-head confidences.
        conf_keys (Dict[str, str]): Keys for confidences; defaults to
            {'local': 'conf_self', 'global': 'conf'}.
        result_keys (Dict[str, str]): Keys for 3D points; defaults to
            {'local': 'pts3d_in_self_view', 'global': 'pts3d_in_other_view'}.

    Modifies:
        preds: Each pred dictionary in the list will have a new key 'pts3d_in_self_view_aligned_to_global',
            which contains the aligned local points, shape (B, H, W, 3).
    """
    # Bug fix: previously the default None keys were subscripted directly,
    # raising TypeError. Supply the same defaults as align_global_pts3d_to_local.
    if result_keys is None:
        result_keys = {'local': 'pts3d_in_self_view', 'global': 'pts3d_in_other_view'}
    if conf_keys is None:
        conf_keys = {'local': 'conf_self', 'global': 'conf'}

    # Validate that every pred carries the required tensors; report the
    # configured key names (they may differ from the defaults).
    for i, pred in enumerate(preds):
        if result_keys['local'] not in pred:
            raise ValueError(f"Key '{result_keys['local']}' (local points) not found in preds[{i}].")
        if conf_keys['local'] not in pred:
            raise ValueError(f"Key '{conf_keys['local']}' (local confidence) not found in preds[{i}].")
        if result_keys['global'] not in pred:
            raise ValueError(f"Key '{result_keys['global']}' (global points) not found in preds[{i}].")
        if conf_keys['global'] not in pred:
            raise ValueError(f"Key '{conf_keys['global']}' (global confidence) not found in preds[{i}].")

    num_views = len(preds)
    B, H, W, _ = preds[0][result_keys["local"]].shape  # Get batch size and dimensions

    # Process a single (view_index, batch_index) pair.
    def process_view_batch(view_index, batch_index):
        pred = preds[view_index]
        view = views[view_index]

        # Local-head and global-head outputs for this batch item.
        pts3d_in_self_view = pred[result_keys["local"]][batch_index]   # (H, W, 3)
        conf_self = pred[conf_keys["local"]][batch_index]              # (H, W)
        pts3d_global = pred[result_keys["global"]][batch_index]        # (H, W, 3)
        conf_global = pred[conf_keys["global"]][batch_index]           # (H, W)

        H_cur, W_cur, _ = pts3d_in_self_view.shape

        # Ground-truth validity mask, or all-True if absent.
        if 'valid_mask' in view:
            valid_mask = view['valid_mask'][batch_index]               # (H, W)
        else:
            valid_mask = torch.ones_like(conf_global, dtype=torch.bool)

        # Confidence cut-off at the requested percentile of global confidences.
        conf_global_flat = conf_global.reshape(-1)
        conf_threshold_value = torch.quantile(conf_global_flat, min_conf_thr_percentile / 100.0)
        conf_mask = conf_global >= conf_threshold_value

        # Registration uses pixels that are both confident and valid.
        final_mask = conf_mask & valid_mask                            # (H, W)

        pts_local_flat = pts3d_in_self_view.view(-1, 3)                # (N, 3)
        pts_global_flat = pts3d_global.view(-1, 3)                     # (N, 3)
        final_mask_flat = final_mask.view(-1)                          # (N,)

        x = pts_local_flat[final_mask_flat]    # Local points (M, 3)
        y = pts_global_flat[final_mask_flat]   # Global points (M, 3)

        # Fallback 1: too few confident points -> drop the confidence filter.
        if x.shape[0] < 3:
            final_mask = valid_mask
            final_mask_flat = final_mask.view(-1)
            x = pts_local_flat[final_mask_flat]
            y = pts_global_flat[final_mask_flat]

        # Fallback 2: still too few points -> identity transform.
        if x.shape[0] < 3:
            R = torch.eye(3, device=pts_local_flat.device, dtype=pts_local_flat.dtype)
            t = torch.zeros(3, device=pts_local_flat.device, dtype=pts_local_flat.dtype)
            s = 1.0
        else:
            # Similarity registration (rotation, translation, scale): local -> global.
            R, t, s = roma.rigid_points_registration(
                x, y, compute_scaling=True
            )

        # Row-vector convention: apply the transform to ALL local points
        # (including the ones masked out during estimation).
        pts_local_aligned_flat = s * (pts_local_flat @ R.T) + t        # (N, 3)
        pts_local_aligned = pts_local_aligned_flat.view(H_cur, W_cur, 3)

        return (view_index, batch_index, pts_local_aligned)

    # Every (view, batch) pair is independent; fan out across a thread pool.
    tasks = [(view_idx, batch_idx) for view_idx in range(num_views) for batch_idx in range(B)]
    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(process_view_batch, view_idx, batch_idx) for view_idx, batch_idx in tasks]
        results = [future.result() for future in futures]

    # Re-assemble per-view (B, H, W, 3) tensors from the per-item results.
    aligned_pts_dict = {view_idx: [None] * B for view_idx in range(num_views)}
    for view_index, batch_index, pts_local_aligned in results:
        aligned_pts_dict[view_index][batch_index] = pts_local_aligned

    for view_index in range(num_views):
        preds[view_index]['pts3d_in_self_view_aligned_to_global'] = torch.stack(aligned_pts_dict[view_index], dim=0)


def estimate_cam_pose_one_sample(sample_preds, device='cpu', niter_PnP=10, min_conf_thr_percentile=0, conf_keys=None, result_keys=None, focal_length_estimation_method=''):
    """
    Estimate a camera-to-world pose and focal length per view with fast PnP.

    Views are solved in parallel threads; a view whose PnP fails falls back
    to the identity pose (its focal may be None).

    Returns:
        Tuple[list, list]: per-view 4x4 c2w poses (np.ndarray) and focals.
    """
    # Which head's points/confidences to feed PnP.
    head = 'local' if focal_length_estimation_method == 'first_view_from_local_head' else 'global'

    def process_view(view_idx):
        pred = sample_preds[view_idx]
        pts3d = pred[result_keys[head]].cpu().numpy().squeeze()  # (H, W, 3)
        # Only pixels whose confidence exceeds 1.0 take part in PnP.
        valid_mask = pred[conf_keys[head]].cpu().numpy().squeeze() > 1.0
        focal_guess = float(pred["focal_length"]) if "focal_length" in pred else None

        focal_length, pose_c2w = fast_pnp(
            torch.tensor(pts3d),
            focal_guess,
            torch.tensor(valid_mask, dtype=torch.bool),
            "cpu",
            pp=None,  # default principal point: image center
            niter_PnP=niter_PnP,
        )

        if pose_c2w is None or focal_length is None:
            log.warning(f"Failed to estimate pose for view {view_idx}")
            # Identity pose on failure; focal may be None.
            return np.eye(4), focal_length
        return pose_c2w.cpu().numpy(), focal_length

    with ThreadPoolExecutor() as executor:
        results = list(executor.map(process_view, range(len(sample_preds))))

    poses_c2w = [pose for pose, _ in results]
    estimated_focals = [focal for _, focal in results]
    return poses_c2w, estimated_focals


def estimate_focal(pts3d_i, conf_i, pp=None, min_conf_thr_percentile=10):
    """
    Estimate the focal length of one sample from its point map and confidences.

    Args:
        pts3d_i (torch.Tensor): Point map, shape (1, H, W, 3); batch must be 1.
        conf_i (torch.Tensor): Confidence map used to mask low-confidence pixels.
        pp (Optional[torch.Tensor]): Principal point (1, 2); defaults to the image center.
        min_conf_thr_percentile (float): Percentile below which pixels are masked out.

    Returns:
        float: Estimated focal length (Weiszfeld mode).
    """
    B, H, W, THREE = pts3d_i.shape
    assert B == 1  # one sample at a time

    if pp is None:
        # Default principal point: image center, shape (1, 2).
        pp = torch.tensor((W / 2, H / 2), device=pts3d_i.device).view(1, 2)

    # Percentile-based confidence cut-off (reshape avoids non-contiguous view issues).
    conf_threshold = torch.quantile(conf_i.reshape(-1), min_conf_thr_percentile / 100.0)
    conf_mask = (conf_i >= conf_threshold).view(B, H, W)

    if conf_mask.sum() < 10:
        # Too few confident pixels: warn but still attempt the estimate.
        print("Not enough high-confidence points for focal estimation.")

    focal = estimate_focal_knowing_depth_and_confidence_mask(
        pts3d_i, pp.unsqueeze(0), conf_mask, focal_mode="weiszfeld"
    ).ravel()
    return float(focal)

def process_poses(pred_cameras, gt_cameras, RRA_thresholds, RTA_thresholds):
    """
    Compute per-sample relative pose accuracy metrics for a batch.

    Returns one dict per sample with RRA/RTA at the requested thresholds,
    mAA(30), the translation-scale error, and AUC(30).
    """

    def process_sample(sample_idx):
        pred_sample = pred_cameras[sample_idx]  # (num_views, 4, 4)
        gt_sample = gt_cameras[sample_idx]      # (num_views, 4, 4)

        # Relative rotation / translation angular errors in degrees.
        rel_rangle_deg, rel_tangle_deg = camera_to_rel_deg(pred_sample, gt_sample, gt_sample.device, len(pred_sample))

        # Accuracy at each rotation / translation threshold.
        metrics = {
            f"RRA_{tau}": (rel_rangle_deg < tau).float().mean().item()
            for tau in RRA_thresholds
        }
        metrics.update(
            (f"RTA_{tau}", (rel_tangle_deg < tau).float().mean().item())
            for tau in RTA_thresholds
        )

        # Mean AUC over both angular errors up to 30 degrees.
        metrics['mAA_30'] = calculate_auc(rel_rangle_deg.float(), rel_tangle_deg.float(), max_threshold=30).item()

        error_t, error_t_scale, error_R = compute_pose_error_batch(gt_sample, pred_sample)
        metrics['error_t_scale'] = error_t_scale.mean().item()

        thresholds = [30]
        for t, auc in zip(thresholds, pose_auc_wrap(error_t.float(), error_R.float(), thresholds)):
            metrics[f'AUC_{t}'] = auc

        return metrics

    # Samples are independent; evaluate the batch in parallel threads.
    with ThreadPoolExecutor() as executor:
        return list(executor.map(process_sample, range(pred_cameras.shape[0])))


def estimate_camera_poses(preds, views=None, niter_PnP=10, focal_length_estimation_method='individual', conf_keys=None, result_keys=None):
    """Estimate camera poses and focal lengths using fast_pnp in parallel."""
    from functools import partial

    # Batch size from whichever head's output is available.
    if result_keys['global'] not in preds[0]:
        batch_size = len(preds[0][result_keys["local"]])
    else:
        batch_size = len(preds[0][result_keys["global"]])

    # Re-group the per-view batched predictions into per-sample lists of
    # per-view dicts, moved to CPU for the PnP solver.
    data_for_processing = [
        [{key: value[i].cpu() for key, value in view.items()} for view in preds]
        for i in range(batch_size)
    ]

    def estimate_focal_for_sample(sample_preds):
        # Select the source for a shared focal estimate, or skip entirely.
        if focal_length_estimation_method == 'first_view_from_global_head':
            pts3d_i = sample_preds[0][result_keys["global"]].unsqueeze(0)  # (1, H, W, 3)
            conf_i = sample_preds[0][conf_keys["global"]].unsqueeze(0)     # (1, H, W)
        elif focal_length_estimation_method == 'first_view_from_local_head':
            pts3d_i = sample_preds[0]["pts3d_in_self_view_aligned_to_global"].unsqueeze(0)  # (1, H, W, 3)
            conf_i = sample_preds[0][conf_keys["local"]].unsqueeze(0)                       # (1, H, W)
        elif focal_length_estimation_method == 'individual':
            # Focal length gets estimated per view later, inside fast_pnp.
            return sample_preds
        else:
            raise ValueError(f"Unknown focal_length_estimation_method: {focal_length_estimation_method}")

        # Estimate once from the first view and share it across all views.
        shared_focal = estimate_focal(pts3d_i, conf_i, min_conf_thr_percentile=10)
        for view_pred in sample_preds:
            view_pred["focal_length"] = shared_focal
        return sample_preds

    with ThreadPoolExecutor() as executor:
        data_for_processing = list(executor.map(estimate_focal_for_sample, data_for_processing))

    # Solve poses per sample in parallel, with a high confidence percentile.
    pose_fn = partial(
        estimate_cam_pose_one_sample,
        niter_PnP=niter_PnP,
        min_conf_thr_percentile=85,
        conf_keys=conf_keys,
        result_keys=result_keys,
        focal_length_estimation_method=focal_length_estimation_method,
    )
    with ThreadPoolExecutor() as executor:
        results = list(executor.map(pose_fn, data_for_processing))

    poses_c2w_all = [poses for poses, _ in results]
    estimated_focals_all = [focals for _, focals in results]
    return poses_c2w_all, estimated_focals_all

def correct_preds_orientation(preds, views, conf_keys=None, result_keys=None, focal_length_estimation_method='first_view_from_local_head'):
    """*In-place* correction of the orientation of predicted point/confidence maps.

    The data loader transposes portrait inputs to landscape (see
    datasets/base/base_stereo_view_dataset.py), so every sample whose
    ``true_shape`` is taller than wide must be transposed back before
    downstream use. After this call the corrected entries in each pred dict
    are *lists* of per-sample tensors (shapes may differ across samples).

    Args:
        preds: list of per-view prediction dicts; mutated in place.
        views: list of per-view input dicts carrying ``true_shape`` of shape
            (B, 2) holding the original (H, W) per sample. If None, no-op.
        conf_keys: mapping with 'local'/'global' confidence-map key names.
        result_keys: mapping with 'local'/'global' point-map key names.
        focal_length_estimation_method: when 'first_view_from_local_head',
            the local-head outputs are corrected; otherwise the global-head
            outputs are.
    """
    head = 'local' if focal_length_estimation_method == 'first_view_from_local_head' else 'global'
    if views is None:
        return

    conf_name = conf_keys[head]
    pts_name = result_keys[head]

    for pred, view in zip(preds, views):
        fixed_conf = []
        fixed_pts = []
        # Iterate samples in the batch; zipping along dim 0 pairs each
        # sample's recorded original shape with its prediction maps.
        for shape, conf_map, pts_map in zip(view["true_shape"], pred[conf_name], pred[pts_name]):
            h, w = shape
            if h > w:
                # Portrait sample: undo the loader's landscape transpose.
                conf_map = conf_map.transpose(0, 1)
                pts_map = pts_map.transpose(0, 1)
            fixed_conf.append(conf_map)
            fixed_pts.append(pts_map)

        pred[conf_name] = fixed_conf
        pred[pts_name] = fixed_pts


def process_recon(views, preds, 
                  min_conf_thr_percentile_for_local_alignment_and_icp, 
                  min_conf_thr_percentile_for_metric_cacluation, 
                  use_pts3d_from_local_head, conf_keys=None, result_keys=None):
    """Compute per-sample reconstruction metrics for a batch.

    For each sample, gathers predicted and ground-truth points across all
    views, rigidly aligns the predictions to GT (rotation + translation +
    scale, confidence-weighted) via roma, estimates normals with Open3D,
    and computes accuracy / completion / normal-consistency metrics plus
    corresponding-point errors.

    Args:
        views: list of per-view input dicts with 'pts3d' (B, H, W, 3),
            'valid_mask' (B, H, W), 'img' (B, 3, H, W) and optionally 'label'.
        preds: list of per-view prediction dicts holding point maps and
            confidence maps under the names in result_keys / conf_keys.
        min_conf_thr_percentile_for_local_alignment_and_icp: per-view
            confidence percentile used to select the points that weight the
            rigid alignment.
        min_conf_thr_percentile_for_metric_cacluation: per-view confidence
            percentile below which predicted points are dropped from metrics.
        use_pts3d_from_local_head: if True, evaluate the local-head points
            aligned to the global frame ('pts3d_in_self_view_aligned_to_global')
            with the local confidence; otherwise the global-head outputs.
        conf_keys: mapping with 'local'/'global' confidence key names.
        result_keys: mapping with 'local'/'global' point-map key names.

    Returns:
        list of length batch_size; normally (scene_name, metrics_dict) per
        sample — but see the NOTE(review) on the degenerate return below.
    """
    # NOTE(review): local_key is assigned but never used in this function —
    # process_sample branches on use_pts3d_from_local_head directly.
    if use_pts3d_from_local_head:
        local_key = 'local'
    else:
        local_key = 'global'
    
    def process_sample(i):
        """Compute reconstruction metrics for batch sample i."""
        # scene_name: drop the last path component of the label (frame id).
        scene_name = "/".join(views[0]['label'][i].split('/')[:-1]) if "label" in views[0] else "unknown"
        pred_pts_list = []
        gt_pts_list_icp = []
        gt_pts_list_metrics = []
        colors_pred_list = []
        colors_gt_list = []
        conf_list = []
        weights_list = []

        for j, (view, pred) in enumerate(zip(views, preds)):
            # Extract predicted points and confidence
            pts_pred = pred['pts3d_in_self_view_aligned_to_global'][i] if use_pts3d_from_local_head else pred[result_keys["global"]][i]  # Shape: (H, W, 3)
            conf = pred[conf_keys["local"]][i] if use_pts3d_from_local_head else pred[conf_keys["global"]][i]  # Shape: (H, W)

            # Extract GT points
            pts_gt = view['pts3d'][i]  # Shape: (H, W, 3)
            valid_mask = view['valid_mask'][i]  # Shape: (H, W)

            # mask out low confidence points
            # Thresholds are per-view quantiles of this view's confidence map.
            conf_flat = conf.view(-1)
            conf_threshold_value_metric_calc = torch.quantile(conf_flat, min_conf_thr_percentile_for_metric_cacluation / 100.0)  # Metrics should use all valid points
            conf_mask_metric_calc = conf >= conf_threshold_value_metric_calc

            # Create masks
            final_mask_pred = valid_mask & conf_mask_metric_calc         # Predicted points: valid and high-conf points
            final_mask_gt_icp = valid_mask & conf_mask_metric_calc       # GT points for ICP: all valid and high-conf points
            final_mask_gt_metrics = valid_mask                           # GT points for metrics: all valid points

            # Apply masks to predicted points and conf
            pts_pred_masked = pts_pred[final_mask_pred]      # High-confidence predicted points
            conf_masked = conf[final_mask_pred]              # Corresponding confidence values

            # Apply mask to GT points for ICP
            # Same mask as the predictions, so pred/GT ICP points correspond 1:1.
            pts_gt_masked_icp = pts_gt[final_mask_gt_icp]    # GT points corresponding to high-confidence predicted points

            # Apply mask to GT points for metrics
            pts_gt_masked_metrics = pts_gt[final_mask_gt_metrics]  # All valid GT points in this view

            # Get image for colors
            img = view['img'][i]  # Shape: (3, H, W)
            img = img.permute(1, 2, 0)  # Shape: (H, W, 3)
            img = (img + 1.0) / 2.0  # Convert from [-1, 1] to [0, 1]
            colors_pred_masked = img[final_mask_pred]  # Colors at high-confidence predicted points
            colors_gt_masked = img[final_mask_gt_metrics]  # Colors at all valid GT points

            # Weights for ICP alignment (all ones since we've already filtered low-confidence points)
            # Compute the confidence threshold for this view
            conf_threshold_value_for_icp = torch.quantile(conf_flat, min_conf_thr_percentile_for_local_alignment_and_icp / 100.0)  # ICP should use high-confidence points

            # NOTE(review): these are boolean 0/1 weights (shape (N_pred,),
            # not (H, W)) — only points above the ICP percentile influence
            # the rigid registration below.
            weights_masked = conf_masked >= conf_threshold_value_for_icp  # Shape: (H, W)

            # Append to lists
            pred_pts_list.append(pts_pred_masked)              # shape: points above metric calc conf (N_pred)
            gt_pts_list_icp.append(pts_gt_masked_icp)          # shape: points above metric calc conf (N_pred)
            conf_list.append(conf_masked)                      # shape: points above metric calc conf (N_pred)
            colors_pred_list.append(colors_pred_masked)        # shape: points above metric calc conf (N_pred)
            colors_gt_list.append(colors_gt_masked)            # shape: all valid points (N_gt)
            weights_list.append(weights_masked)                # shape: points above metric calc conf (N_pred)
            gt_pts_list_metrics.append(pts_gt_masked_metrics)  # shape: all valid points (N_gt)

        # Concatenate points, colors, confidences, and weights
        if len(pred_pts_list) == 0 or len(gt_pts_list_metrics) == 0:
            # If no valid points, return default metrics
            # NOTE(review): this degenerate return is an 8-tuple of Nones,
            # which does not match the (scene_name, metrics_dict) success
            # shape — callers unpacking 2-tuples would fail here. Also, with
            # non-empty `views` these lists are never empty (they may contain
            # zero-length tensors instead). Confirm intended contract.
            print(f"Sample {i}: No valid points found.")
            return None, None, None, None, None, None, None, None

        pred_pts_all = torch.cat(pred_pts_list, dim=0)           # Shape: (N_pred, 3)
        gt_pts_all_icp = torch.cat(gt_pts_list_icp, dim=0)       # Shape: (N_pred, 3)
        gt_pts_all_metrics = torch.cat(gt_pts_list_metrics, dim=0)  # Shape: (N_gt, 3)
        colors_pred_all = torch.cat(colors_pred_list, dim=0)               # Shape: (N_pred, 3)
        colors_gt_all = torch.cat(colors_gt_list, dim=0)               # Shape: (N_gt, 3)
        conf_all = torch.cat(conf_list, dim=0)                   # Shape: (N_pred,)
        weights_all = torch.cat(weights_list, dim=0)             # Shape: (N_pred,)

        # Ensure that the data is on CPU for Open3D and numpy operations
        pred_pts_tensor = pred_pts_all.cpu()          # Shape: (N_pred, 3)
        gt_pts_tensor_icp = gt_pts_all_icp.cpu()      # Shape: (N_pred, 3)
        gt_pts_tensor_metrics = gt_pts_all_metrics.cpu()  # Shape: (N_gt, 3)
        colors_pred_tensor = colors_pred_all.cpu()              # Shape: (N_pred, 3)
        colors_gt_tensor = colors_gt_all.cpu()              # Shape: (N_gt, 3)
        # NOTE(review): conf_tensor is computed but never used below.
        conf_tensor = conf_all.cpu()                  # Shape: (N_pred,)
        weights = weights_all.cpu()                   # Shape: (N_pred,)

        # print(f"Sample {i}: Number of high-confidence predicted points: {pred_pts_tensor.shape[0]}")
        # print(f"Sample {i}: Number of GT points for ICP: {gt_pts_tensor_icp.shape[0]}")
        # print(f"Sample {i}: Number of GT points for metrics: {gt_pts_tensor_metrics.shape[0]}")

        # Align predicted points to GT using roma.rigid_points_registration with weights
        # start_time = time.time()

        # Input tensors for ICP alignment (must have the same shape)
        x = pred_pts_tensor          # High-confidence predicted points (N_pred, 3)
        y = gt_pts_tensor_icp        # Corresponding GT points (N_pred, 3)

        # Compute the rigid transformation with scaling and weights
        # (weights are the boolean ICP mask, i.e. 0/1 per-point weights).
        R, t, s = roma.rigid_points_registration(x, y, weights=weights, compute_scaling=True)

        # alignment_time = time.time() - start_time
        # print(f"Alignment time using roma with weights for sample {i}: {alignment_time:.4f} seconds")

        # Apply the transformation to the predicted points
        pred_aligned = s * (x @ R.T) + t  # Shape: (N_pred, 3)

        # Estimate normals
        # start_time = time.time()
        # Create point clouds in Open3D for normal estimation

        # Predicted point cloud (high-confidence points)
        pred_pcd = o3d.geometry.PointCloud()
        pred_pcd.points = o3d.utility.Vector3dVector(pred_aligned.numpy())
        pred_pcd.colors = o3d.utility.Vector3dVector(colors_pred_tensor.numpy())
        pred_pcd.estimate_normals()

        # Ground truth point cloud for metrics (all valid points)
        gt_pcd = o3d.geometry.PointCloud()
        gt_pcd.points = o3d.utility.Vector3dVector(gt_pts_tensor_metrics.numpy())
        gt_pcd.colors = o3d.utility.Vector3dVector(colors_gt_tensor.numpy())
        gt_pcd.estimate_normals()
        # normals_time = time.time() - start_time
        # print(f"Normal estimation time for sample {i}: {normals_time:.4f} seconds")

        # Get normals
        pred_normals = np.asarray(pred_pcd.normals)
        gt_normals = np.asarray(gt_pcd.normals)

        # Convert point clouds to numpy arrays for evaluation
        pred_points_np = np.asarray(pred_pcd.points)
        gt_points_np = np.asarray(gt_pcd.points)

        # Save the GT and predicted point clouds (separately) for visualization
        # Define file paths
        # gt_pcd_path = f"gt_pcd_sample_{i}.ply"
        # pred_pcd_path = f"pred_pcd_sample_{i}.ply"
        # # Save the GT point cloud
        # o3d.io.write_point_cloud(gt_pcd_path, gt_pcd)
        # # Save the predicted point cloud
        # o3d.io.write_point_cloud(pred_pcd_path, pred_pcd)

        # Compute metrics
        # start_time = time.time()
        acc, acc_med, nc1, nc1_med = accuracy(
            gt_points_np, pred_points_np, gt_normals, pred_normals, device=views[0]['pts3d'].device
        )
        comp, comp_med, nc2, nc2_med = completion(
            gt_points_np, pred_points_np, gt_normals, pred_normals, device=views[0]['pts3d'].device
        )
        absolute_pts_error, relative_pts_error = calculate_corresponding_points_error(gt_points_np, pred_points_np)
        # metrics_time = time.time() - start_time
        # print(f"Metrics computation time for sample {i}: {metrics_time:.4f} seconds. scene_name: {scene_name}")
        # print(f"Accuracy: {acc:.4f}, Accuracy median: {acc_med:.4f}. scene_name: {scene_name}")
        # print(f"Completion: {comp:.4f}, Completion median: {comp_med:.4f}. scene_name: {scene_name}")
        # print(f"Normal consistency 1: {nc1:.4f}, Normal consistency 1 median: {nc1_med:.4f}. scene_name: {scene_name}")
        # print(f"Normal consistency 2: {nc2:.4f}, Normal consistency 2 median: {nc2_med:.4f}. scene_name: {scene_name}")

        # Collect metrics for the scene and return as a dictionary
        # with_median = True
        # if with_median:
        #     return (scene_name, {
        #         "accuracy_median": acc_med,
        #         "completion_median": comp_med,
        #         "nc1_median": nc1_med,
        #         "nc2_median": nc2_med,
        #     })
        # Explicitly drop the large intermediates and collect, to keep peak
        # memory down while multiple samples run concurrently in threads.
        del gt_points_np, pred_points_np, gt_normals, pred_normals, pred_aligned
        del pred_pts_all, gt_pts_all_icp, gt_pts_all_metrics, colors_pred_all, colors_gt_all, conf_all, weights_all 
        del x, y, gt_pts_tensor_metrics, colors_gt_tensor, weights
        gc.collect()
        return (scene_name, {
            "accuracy": acc, "accuracy_median": acc_med,
            "completion": comp, "completion_median": comp_med,
            "nc1": nc1, "nc1_median": nc1_med,
            "nc2": nc2, "nc2_median": nc2_med, 
            "pts_err": absolute_pts_error,
            "pts_rel": relative_pts_error,
        })

    batch_size = len(views[0]['img'])  # Assuming batch_size is consistent
    # Use ThreadPoolExecutor to parallelize across samples and gather results
    with ThreadPoolExecutor() as executor:
        results = [future.result() for future in [executor.submit(process_sample, i) for i in range(batch_size)]]

    return results

def process_recon_vggt(views, preds, 
                  min_conf_thr_percentile_for_local_alignment_and_icp, 
                  min_conf_thr_percentile_for_metric_cacluation, 
                  use_pts3d_from_local_head, conf_keys=None, result_keys=None):
    """VGGT variant of process_recon: per-sample reconstruction metrics.

    Same pipeline as process_recon (confidence-weighted rigid alignment via
    roma, Open3D normal estimation, accuracy / completion / normal-consistency
    metrics), but `preds` here is a single dict with stacked tensors indexed
    as preds['world_points'][i, j] — presumably laid out (batch, view, H, W, ...);
    TODO confirm against the VGGT model output.

    Args:
        views: list of per-view input dicts with 'pts3d', 'valid_mask', 'img'
            and optionally 'label', batched along dim 0.
        preds: dict with 'world_points' and 'world_points_conf' stacked
            across views (see layout note above).
        min_conf_thr_percentile_for_local_alignment_and_icp: per-view
            confidence percentile selecting points that weight the alignment.
        min_conf_thr_percentile_for_metric_cacluation: per-view confidence
            percentile below which predicted points are dropped from metrics.
        use_pts3d_from_local_head: unused here except to set local_key (see
            NOTE below); the VGGT world points are always used.
        conf_keys, result_keys: accepted for signature parity with
            process_recon; not used in this function.

    Returns:
        list of length batch_size; normally (scene_name, metrics_dict) per
        sample — see the NOTE(review) on the degenerate return below. Unlike
        process_recon, the metrics dict has no "pts_err"/"pts_rel" entries.
    """
    # NOTE(review): local_key is assigned but never used in this function.
    if use_pts3d_from_local_head:
        local_key = 'local'
    else:
        local_key = 'global'
    
    def process_sample(i):
        """Compute reconstruction metrics for batch sample i."""
        # scene_name: drop the last path component of the label (frame id).
        scene_name = "/".join(views[0]['label'][i].split('/')[:-1]) if "label" in views[0] else "unknown"
        pred_pts_list = []
        gt_pts_list_icp = []
        gt_pts_list_metrics = []
        colors_pred_list = []
        colors_gt_list = []
        conf_list = []
        weights_list = []

       
        for j, view in enumerate(views):
            # Extract predicted points and confidence
            pts_pred = preds['world_points'][i, j] # Shape: (H, W, 3)
            conf = preds["world_points_conf"][i, j] # Shape: (H, W)

            # Extract GT points
            pts_gt = view['pts3d'][i]  # Shape: (H, W, 3)
            valid_mask = view['valid_mask'][i]  # Shape: (H, W)

            # mask out low confidence points
            # Thresholds are per-view quantiles of this view's confidence map.
            conf_flat = conf.view(-1)
            conf_threshold_value_metric_calc = torch.quantile(conf_flat, min_conf_thr_percentile_for_metric_cacluation / 100.0)  # Metrics should use all valid points
            conf_mask_metric_calc = conf >= conf_threshold_value_metric_calc

            # Create masks
            final_mask_pred = valid_mask & conf_mask_metric_calc         # Predicted points: valid and high-conf points
            final_mask_gt_icp = valid_mask & conf_mask_metric_calc       # GT points for ICP: all valid and high-conf points
            final_mask_gt_metrics = valid_mask                           # GT points for metrics: all valid points

            # Apply masks to predicted points and conf
            pts_pred_masked = pts_pred[final_mask_pred]      # High-confidence predicted points
            conf_masked = conf[final_mask_pred]              # Corresponding confidence values

            # Apply mask to GT points for ICP
            # Same mask as the predictions, so pred/GT ICP points correspond 1:1.
            pts_gt_masked_icp = pts_gt[final_mask_gt_icp]    # GT points corresponding to high-confidence predicted points

            # Apply mask to GT points for metrics
            pts_gt_masked_metrics = pts_gt[final_mask_gt_metrics]  # All valid GT points in this view

            # Get image for colors
            img = view['img'][i]  # Shape: (3, H, W)
            img = img.permute(1, 2, 0)  # Shape: (H, W, 3)
            img = (img + 1.0) / 2.0  # Convert from [-1, 1] to [0, 1]
            colors_pred_masked = img[final_mask_pred]  # Colors at high-confidence predicted points
            colors_gt_masked = img[final_mask_gt_metrics]  # Colors at all valid GT points

            # Weights for ICP alignment (all ones since we've already filtered low-confidence points)
            # Compute the confidence threshold for this view
            conf_threshold_value_for_icp = torch.quantile(conf_flat, min_conf_thr_percentile_for_local_alignment_and_icp / 100.0)  # ICP should use high-confidence points

            # NOTE(review): these are boolean 0/1 weights (shape (N_pred,),
            # not (H, W)) — only points above the ICP percentile influence
            # the rigid registration below.
            weights_masked = conf_masked >= conf_threshold_value_for_icp  # Shape: (H, W)

            # Append to lists
            pred_pts_list.append(pts_pred_masked)              # shape: points above metric calc conf (N_pred)
            gt_pts_list_icp.append(pts_gt_masked_icp)          # shape: points above metric calc conf (N_pred)
            conf_list.append(conf_masked)                      # shape: points above metric calc conf (N_pred)
            colors_pred_list.append(colors_pred_masked)        # shape: points above metric calc conf (N_pred)
            colors_gt_list.append(colors_gt_masked)            # shape: all valid points (N_gt)
            weights_list.append(weights_masked)                # shape: points above metric calc conf (N_pred)
            gt_pts_list_metrics.append(pts_gt_masked_metrics)  # shape: all valid points (N_gt)

        # Concatenate points, colors, confidences, and weights
        if len(pred_pts_list) == 0 or len(gt_pts_list_metrics) == 0:
            # If no valid points, return default metrics
            # NOTE(review): this degenerate return is an 8-tuple of Nones,
            # which does not match the (scene_name, metrics_dict) success
            # shape — callers unpacking 2-tuples would fail here. Also, with
            # non-empty `views` these lists are never empty (they may contain
            # zero-length tensors instead). Confirm intended contract.
            print(f"Sample {i}: No valid points found.")
            return None, None, None, None, None, None, None, None

        pred_pts_all = torch.cat(pred_pts_list, dim=0)           # Shape: (N_pred, 3)
        gt_pts_all_icp = torch.cat(gt_pts_list_icp, dim=0)       # Shape: (N_pred, 3)
        gt_pts_all_metrics = torch.cat(gt_pts_list_metrics, dim=0)  # Shape: (N_gt, 3)
        colors_pred_all = torch.cat(colors_pred_list, dim=0)               # Shape: (N_pred, 3)
        colors_gt_all = torch.cat(colors_gt_list, dim=0)               # Shape: (N_gt, 3)
        conf_all = torch.cat(conf_list, dim=0)                   # Shape: (N_pred,)
        weights_all = torch.cat(weights_list, dim=0)             # Shape: (N_pred,)

        # Ensure that the data is on CPU for Open3D and numpy operations
        pred_pts_tensor = pred_pts_all.cpu()          # Shape: (N_pred, 3)
        gt_pts_tensor_icp = gt_pts_all_icp.cpu()      # Shape: (N_pred, 3)
        gt_pts_tensor_metrics = gt_pts_all_metrics.cpu()  # Shape: (N_gt, 3)
        colors_pred_tensor = colors_pred_all.cpu()              # Shape: (N_pred, 3)
        colors_gt_tensor = colors_gt_all.cpu()              # Shape: (N_gt, 3)
        # NOTE(review): conf_tensor is computed but never used below.
        conf_tensor = conf_all.cpu()                  # Shape: (N_pred,)
        weights = weights_all.cpu()                   # Shape: (N_pred,)

        # print(f"Sample {i}: Number of high-confidence predicted points: {pred_pts_tensor.shape[0]}")
        # print(f"Sample {i}: Number of GT points for ICP: {gt_pts_tensor_icp.shape[0]}")
        # print(f"Sample {i}: Number of GT points for metrics: {gt_pts_tensor_metrics.shape[0]}")

        # Align predicted points to GT using roma.rigid_points_registration with weights
        # start_time = time.time()

        # Input tensors for ICP alignment (must have the same shape)
        x = pred_pts_tensor          # High-confidence predicted points (N_pred, 3)
        y = gt_pts_tensor_icp        # Corresponding GT points (N_pred, 3)

        # Compute the rigid transformation with scaling and weights
        # (weights are the boolean ICP mask, i.e. 0/1 per-point weights).
        R, t, s = roma.rigid_points_registration(x, y, weights=weights, compute_scaling=True)

        # alignment_time = time.time() - start_time
        # print(f"Alignment time using roma with weights for sample {i}: {alignment_time:.4f} seconds")

        # Apply the transformation to the predicted points
        pred_aligned = s * (x @ R.T) + t  # Shape: (N_pred, 3)

        # Estimate normals
        # start_time = time.time()
        # Create point clouds in Open3D for normal estimation

        # Predicted point cloud (high-confidence points)
        pred_pcd = o3d.geometry.PointCloud()
        pred_pcd.points = o3d.utility.Vector3dVector(pred_aligned.numpy())
        pred_pcd.colors = o3d.utility.Vector3dVector(colors_pred_tensor.numpy())
        pred_pcd.estimate_normals()

        # Ground truth point cloud for metrics (all valid points)
        gt_pcd = o3d.geometry.PointCloud()
        gt_pcd.points = o3d.utility.Vector3dVector(gt_pts_tensor_metrics.numpy())
        gt_pcd.colors = o3d.utility.Vector3dVector(colors_gt_tensor.numpy())
        gt_pcd.estimate_normals()
        # normals_time = time.time() - start_time
        # print(f"Normal estimation time for sample {i}: {normals_time:.4f} seconds")

        # Get normals
        pred_normals = np.asarray(pred_pcd.normals)
        gt_normals = np.asarray(gt_pcd.normals)

        # Convert point clouds to numpy arrays for evaluation
        pred_points_np = np.asarray(pred_pcd.points)
        gt_points_np = np.asarray(gt_pcd.points)

        # Save the GT and predicted point clouds (separately) for visualization
        # Define file paths
        # gt_pcd_path = f"gt_pcd_sample_{i}.ply"
        # pred_pcd_path = f"pred_pcd_sample_{i}.ply"
        # # Save the GT point cloud
        # o3d.io.write_point_cloud(gt_pcd_path, gt_pcd)
        # # Save the predicted point cloud
        # o3d.io.write_point_cloud(pred_pcd_path, pred_pcd)

        # Compute metrics
        # start_time = time.time()
        acc, acc_med, nc1, nc1_med = accuracy(
            gt_points_np, pred_points_np, gt_normals, pred_normals, device=views[0]['pts3d'].device
        )
        comp, comp_med, nc2, nc2_med = completion(
            gt_points_np, pred_points_np, gt_normals, pred_normals, device=views[0]['pts3d'].device
        )
        # metrics_time = time.time() - start_time
        # print(f"Metrics computation time for sample {i}: {metrics_time:.4f} seconds. scene_name: {scene_name}")
        # print(f"Accuracy: {acc:.4f}, Accuracy median: {acc_med:.4f}. scene_name: {scene_name}")
        # print(f"Completion: {comp:.4f}, Completion median: {comp_med:.4f}. scene_name: {scene_name}")
        # print(f"Normal consistency 1: {nc1:.4f}, Normal consistency 1 median: {nc1_med:.4f}. scene_name: {scene_name}")
        # print(f"Normal consistency 2: {nc2:.4f}, Normal consistency 2 median: {nc2_med:.4f}. scene_name: {scene_name}")

        # Collect metrics for the scene and return as a dictionary
        # with_median = True
        # if with_median:
        #     return (scene_name, {
        #         "accuracy_median": acc_med,
        #         "completion_median": comp_med,
        #         "nc1_median": nc1_med,
        #         "nc2_median": nc2_med,
        #     })
        # NOTE(review): unlike process_recon, this variant skips the
        # corresponding-point-error metrics and the explicit del/gc cleanup.
        return (scene_name, {
            "accuracy": acc, "accuracy_median": acc_med,
            "completion": comp, "completion_median": comp_med,
            "nc1": nc1, "nc1_median": nc1_med,
            "nc2": nc2, "nc2_median": nc2_med,
        })

    batch_size = len(views[0]['img'])  # Assuming batch_size is consistent
    # Use ThreadPoolExecutor to parallelize across samples and gather results
    with ThreadPoolExecutor() as executor:
        results = [future.result() for future in [executor.submit(process_sample, i) for i in range(batch_size)]]

    return results