#!/usr/bin/env python3
"""
Farthest Point Sampling (FPS) for Point Cloud Data using PyTorch3D

This script performs Farthest Point Sampling on point cloud data to downsample
while maintaining good spatial distribution using PyTorch3D's optimized implementation.

Usage:
    This module provides fpsPcd function for applying FPS to single frame point cloud data
    within the unified data format.
"""
import numpy as np
import torch
import logging
from pytorch3d.ops import sample_farthest_points
import warnings
import random
warnings.filterwarnings('ignore')
from time_it import timeit
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

@timeit()
def fpsPcd(unifiedData: dict[str, np.ndarray], num_points: int, use_random_sampling: bool = True) -> dict[str, np.ndarray]:
    """
    Apply Farthest Point Sampling or random sampling to all point clouds in a
    unified single-frame data dict.

    Every key whose name contains the substring "pointcloud" is treated as a
    point cloud of shape (Np, 6); all other keys (e.g. "rgb", "depth",
    "observer", "endpose", "qpos") are passed through untouched.

    Args:
        unifiedData: Single-frame data dict. Point-cloud entries are expected
            to be np.ndarray of shape (Np, 6) (xyz + 3 extra channels —
            presumably color; confirm layout with callers). Entries that are
            None or have an unexpected shape are skipped with a warning.
        num_points: Number of points to sample per cloud. Also used to seed
            numpy/random/torch so sampling is deterministic for a given
            target size.
        use_random_sampling: If True, use uniform random sampling instead of
            FPS. Random sampling is much faster but doesn't guarantee good
            spatial distribution.

    Returns:
        The same dict (mutated in place) with each processed point cloud
        replaced by a float32 array of the sampled points.
    """
    # Seed with num_points so repeated calls with the same target size select
    # identical indices (reproducible sampling across runs).
    np.random.seed(num_points)
    random.seed(num_points)
    torch.manual_seed(num_points)

    pointcloud_keys = [key for key in unifiedData if "pointcloud" in key]
    if not pointcloud_keys:
        logger.warning("No pointcloud keys found in unifiedData")
        return unifiedData

    input_shapes: dict[str, tuple] = {}
    output_shapes: dict[str, tuple] = {}
    # Cache sampled indices keyed by cloud length so that aligned clouds of
    # the same size (e.g. "pointcloud" / "segpointcloud") receive identical
    # indices and stay point-for-point correspondent after sampling.
    indices_cache: dict[int, np.ndarray] = {}

    for pcd_key in pointcloud_keys:
        valid_points = unifiedData[pcd_key]
        if valid_points is None:
            continue
        input_shapes[pcd_key] = valid_points.shape
        if valid_points.ndim != 2 or valid_points.shape[1] != 6:
            logger.warning(f"Expected point cloud shape (Np, 6) for {pcd_key}, got {valid_points.shape}. Skipping.")
            continue

        n_available = len(valid_points)
        # Never request more points than exist in the cloud.
        actual_num_points = min(num_points, n_available)
        if actual_num_points < num_points:
            logger.debug(f"Requested {num_points} points for {pcd_key} but only {n_available} available. Using {actual_num_points}.")

        if use_random_sampling:
            if n_available not in indices_cache:
                indices_cache[n_available] = np.random.choice(n_available, size=actual_num_points, replace=False)
            sampled_indices = indices_cache[n_available]
            logger.debug(f"Applied random sampling to {pcd_key}")
        else:
            if n_available not in indices_cache:
                # FPS only needs xyz; shape (1, Np, 3) for the batched API.
                points_tensor = torch.from_numpy(valid_points[:, :3]).float().unsqueeze(0)
                _, sampled_indices_tensor = sample_farthest_points(
                    points_tensor,
                    K=actual_num_points,
                    random_start_point=False,
                )
                # Store as a flat numpy index array so cache hits skip the
                # tensor->numpy conversion.
                indices_cache[n_available] = sampled_indices_tensor.squeeze(0).numpy()
            sampled_indices = indices_cache[n_available]
            logger.debug(f"Applied FPS to {pcd_key}")

        sampled_points = valid_points[sampled_indices]
        unifiedData[pcd_key] = sampled_points.astype(np.float32)
        output_shapes[pcd_key] = unifiedData[pcd_key].shape
        logger.debug(f"Processed {pcd_key}: {n_available} -> {len(sampled_points)} points")

    # One-line summary of what was sampled (user-facing progress output).
    if input_shapes:
        icon = "🎲 RAND" if use_random_sampling else "🎯 FPS"
        # Use the compact single-pair form only when exactly one cloud was
        # recorded AND it was actually processed (output_shapes non-empty);
        # otherwise fall back to printing the full dicts.
        if len(input_shapes) == 1 and output_shapes:
            in_shape = next(iter(input_shapes.values()))
            out_shape = next(iter(output_shapes.values()))
            print(f"    {icon}({num_points}) | {in_shape} → {out_shape}")
        else:
            print(f"    {icon}({num_points}) | {input_shapes} → {output_shapes}")
    return unifiedData

