#!/usr/bin/env python3
"""
Reusable depth unprojection utilities for point cloud generation.

This module provides shared functions for unprojecting depth images to 3D points
with associated colors (either RGB or segmentation colors).
"""
import numpy as np
import cv2
import logging
from typing import Dict, Any, Optional, List, Tuple
from pathlib import Path
from scalePcd import scalePcd
from time_it import timeit
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Target cameras for point cloud generation
TARGET_CAMERAS = ['midLeft_camera', 'midBack_camera', 'midFront_camera', 'midHead_camera', 'midRight_camera']


def sample_cam_data(unifiedData: dict[str, np.ndarray], camera_name: str, depth_scale: float = 0.001) -> None:
    """
    Print depth camera values in a sparse 25x25 grid with one decimal place.

    Args:
        unifiedData: Unified data dict; the depth image is read from
                     unifiedData['cameras'][camera_name]['depth'], shape (H, W)
        camera_name: Name of the camera to sample and display
        depth_scale: Scale factor for depth values (default 0.001 for mm to meters)
    """
    depth_image = unifiedData['cameras'][camera_name]['depth']
    if depth_image is None or depth_image.size == 0:
        print(f"\n{camera_name} - No depth data available")
        return
    height, width = depth_image.shape
    # Step sizes so that 25 samples span the full image; clamp to 1 for small images.
    step_y = max(1, height // 25)
    step_x = max(1, width // 25)
    print(f"\n{camera_name} Depth Values (25x25 sparse sampling, meters):")
    print("=" * 100)
    for i in range(min(25, height)):
        y = min(i * step_y, height - 1)
        row_values = []
        for j in range(min(25, width)):
            x = min(j * step_x, width - 1)
            depth_val = depth_image[y, x] * depth_scale
            if depth_val == 0:
                formatted_val = " 0.0"
            elif depth_val >= 100:
                formatted_val = "99.9"  # Cap at 99.9 for display
            else:
                formatted_val = f"{depth_val:4.1f}"
            row_values.append(formatted_val)
        print(" ".join(row_values))
    print("=" * 100)
    print(f"Grid size: {height}x{width} -> 25x25 (step_y={step_y}, step_x={step_x})")
    print(f"Min: {np.min(depth_image) * depth_scale:.1f}m, Max: {np.max(depth_image) * depth_scale:.1f}m, Mean: {np.mean(depth_image) * depth_scale:.1f}m\n")


@timeit()
def unproject_depth_to_3d_with_colors(depth_image: np.ndarray, 
                                      color_image: np.ndarray, 
                                      intrinsic_matrix: np.ndarray, 
                                      extrinsic_matrix: np.ndarray, 
                                      depth_scale: float = 0.001,
                                      mask: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Unproject depth image to 3D points in world coordinates with corresponding colors.
    
    Args:
        depth_image: Depth image, shape (H, W) - depth values in millimeters
        color_image: Color image, shape (H, W, 3) - RGB or segmentation colors
        intrinsic_matrix: Camera intrinsic matrix, shape (3, 3)
        extrinsic_matrix: Camera-to-world transform, shape (3, 4) or (4, 4)
        depth_scale: Scale factor for depth values (default 0.001 for mm to meters)
        mask: Optional per-pixel mask, same shape as depth, selecting pixels to keep
              (default None = keep all pixels with depth > 0)
    Returns:
        tuple: (points_3d, colors)
               points_3d: 3D points in world coordinates, shape (N, 3)
               colors: Corresponding colors, shape (N, 3)
    """
    depth_height, depth_width = depth_image.shape
    color_height, color_width = color_image.shape[:2]
    debug_enabled = logger.isEnabledFor(logging.DEBUG)
    if debug_enabled:
        # Stats require full-image passes; only compute them when DEBUG is active.
        logger.debug("Depth image stats: min=%.4f, max=%.4f, mean=%.4f, nonzero_pixels=%d/%d",
                     np.min(depth_image), np.max(depth_image), np.mean(depth_image),
                     np.count_nonzero(depth_image), depth_height * depth_width)
        logger.debug("Depth image shape: %s, Color image shape: %s",
                     depth_image.shape, color_image.shape)
    if (depth_height, depth_width) != (color_height, color_width):
        # Resize depth to the color resolution so one (u, v) grid indexes both images.
        # INTER_NEAREST avoids interpolating depth across surface boundaries.
        logger.debug("Resolution mismatch - Depth: %dx%d, Color: %dx%d",
                     depth_height, depth_width, color_height, color_width)
        depth_image = cv2.resize(depth_image, (color_width, color_height), interpolation=cv2.INTER_NEAREST)
        depth_height, depth_width = depth_image.shape
        logger.debug("Resized depth image to: %dx%d", depth_height, depth_width)
    u, v = np.meshgrid(np.arange(depth_width), np.arange(depth_height))
    u = u.flatten()
    v = v.flatten()
    depth_values = depth_image.flatten() * depth_scale
    if debug_enabled:
        logger.debug("Depth values after scaling: min=%.4f, max=%.4f, mean=%.4f, nonzero_count=%d",
                     np.min(depth_values), np.max(depth_values), np.mean(depth_values),
                     np.count_nonzero(depth_values))
    valid_mask = depth_values > 0
    if mask is not None:
        if mask.shape != (depth_height, depth_width):
            # Report through the logger for consistency with the rest of this function.
            logger.warning("Mask shape mismatch with depth image – ignoring mask for this frame")
        else:
            # Cast to bool: a non-bool mask would promote valid_mask to an integer
            # dtype, silently turning the boolean indexing below into (incorrect)
            # integer fancy indexing.
            valid_mask = valid_mask & mask.flatten().astype(bool)
    u_valid = u[valid_mask]
    v_valid = v[valid_mask]
    depth_valid = depth_values[valid_mask]
    # Defensive bounds check before indexing the color image.
    coord_valid_mask = (u_valid >= 0) & (u_valid < color_width) & (v_valid >= 0) & (v_valid < color_height)
    u_valid = u_valid[coord_valid_mask]
    v_valid = v_valid[coord_valid_mask]
    depth_valid = depth_valid[coord_valid_mask]
    logger.debug("Valid depth pixels after filtering and bounds checking: %d", len(depth_valid))
    if len(depth_valid) == 0:
        logger.debug("No valid depth values found - returning empty arrays")
        return np.empty((0, 3), dtype=np.float32), np.empty((0, 3), dtype=np.uint8)
    colors = color_image[v_valid, u_valid]  # Shape: (N, 3)
    pixel_coords = np.stack([u_valid, v_valid, np.ones_like(u_valid)], axis=0)  # Shape: (3, N)
    # K^-1 * [u, v, 1]^T * depth = [X, Y, Z]^T in camera frame
    inv_intrinsic = np.linalg.inv(intrinsic_matrix)
    camera_coords_normalized = inv_intrinsic @ pixel_coords  # Shape: (3, N)
    camera_coords = camera_coords_normalized * depth_valid[np.newaxis, :]  # Shape: (3, N)
    camera_coords_homogeneous = np.vstack([camera_coords, np.ones((1, camera_coords.shape[1]))])  # Shape: (4, N)
    if extrinsic_matrix.shape == (3, 4):
        # Promote a 3x4 [R|t] to a full homogeneous 4x4 transform.
        extrinsic_4x4 = np.eye(4)
        extrinsic_4x4[:3, :] = extrinsic_matrix
    else:
        extrinsic_4x4 = extrinsic_matrix
    # Flip Y and Z of the camera frame before applying the camera-to-world rotation —
    # presumably an OpenCV-pinhole -> OpenGL axis-convention conversion (the caller
    # prefers 'cam2world_gl' transforms); confirm against the camera calibration.
    coord_convert = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
    transform_4x4 = extrinsic_4x4.copy()
    transform_4x4[:3, :3] = extrinsic_4x4[:3, :3] @ coord_convert
    world_coords = transform_4x4 @ camera_coords_homogeneous  # Shape: (4, N)
    return world_coords[:3, :].T, colors  # Shape: (N, 3), (N, 3)

@timeit()
def create_pointcloud_from_depth_and_colors(unifiedData: dict[str, Any], 
                                           color_key: str,
                                           output_key: str,
                                           depth_scale: float = 0.001, 
                                           min_depth: float = 0.1,
                                           mask_pixels: Optional[dict[str, np.ndarray]] = None,
                                           surpress_print: bool = False) -> dict[str, Any]:
    """
    Fuse depth images from all TARGET_CAMERAS into a single colored point cloud.

    For every camera with depth, color, intrinsics and a world transform, the depth
    image is range-filtered, optionally masked, unprojected to world coordinates via
    unproject_depth_to_3d_with_colors, and the per-camera results are concatenated,
    passed through scalePcd, and stored as unifiedData[output_key].

    Args:
        unifiedData: Unified data dict; must contain unifiedData['cameras'][name]
            entries with 'depth', color_key, 'intrinsic_cv' and at least one of
            'cam2world_gl' / 'extrinsic_cv'.
        color_key: Camera sub-key providing per-pixel colors (RGB or segmentation).
        output_key: Key under which the resulting point cloud is stored.
        depth_scale: Scale factor converting raw depth to meters (default 0.001).
        min_depth: Minimum depth in meters; closer pixels are discarded.
        mask_pixels: Optional per-camera boolean masks selecting pixels to keep.
        surpress_print: Suppress progress prints when True (parameter name kept
            as-is for backward compatibility with existing callers).

    Returns:
        unifiedData, with unifiedData[output_key] set to an (N, 6) float32 array of
        [x, y, z, r, g, b] rows, or a single all-zero row when nothing was generated.
    """
    if 'cameras' not in unifiedData or not unifiedData['cameras']:
        print("No camera data available for depth unprojection")
        unifiedData[output_key] = np.zeros((1, 6), dtype=np.float32)
        return unifiedData
    # Pixel filters are currently disabled; the commented lines record the intended
    # per-output-key behavior.
    # filter_black_pixels = output_key == 'segpointcloud'
    # filter_grey_pixels = output_key == 'repointcloud'
    filter_black_pixels = False
    filter_grey_pixels = False
    cameras_data = unifiedData['cameras']
    # Sample each camera's top-right-corner depth; used by the (disabled) grey-pixel
    # filter to recognize "infinite"/background depth values.
    inf_depth_vals = []
    for camera_name in TARGET_CAMERAS:
        if camera_name in cameras_data and cameras_data[camera_name]['depth'] is not None:
            depth = cameras_data[camera_name]['depth']
            inf_depth_vals.append(depth[0, -2])  # Top right corner
        else:
            print(f"{camera_name} camera depth not available")

    # A camera is usable only if depth, color, intrinsics and a world transform exist.
    valid_cameras = []
    for camera_name in TARGET_CAMERAS:
        if camera_name in cameras_data:
            camera_data = cameras_data[camera_name]
            has_transform = (camera_data['cam2world_gl'] is not None or 
                           camera_data['extrinsic_cv'] is not None)
            if (camera_data['depth'] is not None and 
                camera_data[color_key] is not None and 
                camera_data['intrinsic_cv'] is not None and 
                has_transform):
                valid_cameras.append(camera_name)

    if not valid_cameras:
        print(f"No valid cameras found with depth and {color_key} data from {TARGET_CAMERAS}")
        unifiedData[output_key] = np.zeros((1, 6), dtype=np.float32)
        return unifiedData
    if not surpress_print:
        # Both branches of the original filter_black_pixels check printed the same
        # message; collapsed into one statement.
        print(f"    🎨 Creating {output_key} from {len(valid_cameras)} cameras: {valid_cameras}")
    all_points_3d = []
    all_colors = []
    if not surpress_print:
        print("      📷", end="")
    for camera_name in valid_cameras:
        # valid_cameras is built from cameras_data above, so every name resolves.
        camera_data = cameras_data[camera_name]
        depth_image = camera_data['depth']  # Shape: (H, W)
        cam_mask = None
        if mask_pixels is not None and camera_name in mask_pixels:
            cam_mask = mask_pixels[camera_name]
        color_image = camera_data[color_key]  # Shape: (H, W, 3)
        intrinsic_matrix = camera_data['intrinsic_cv']  # Shape: (3, 3)

        # Prefer cam2world_gl over extrinsic_cv
        if camera_data['cam2world_gl'] is not None:
            transform_matrix = camera_data['cam2world_gl']  # Shape: (4, 4)
        else:
            transform_matrix = camera_data['extrinsic_cv']  # Shape: (3, 4) or (4, 4)
        depth_filtered = depth_image.copy()
        original_nonzero = np.count_nonzero(depth_image)
        original_min, original_max = np.min(depth_image), np.max(depth_image)
        depth_meters = depth_filtered * depth_scale
        if not surpress_print:
            print(f"Camera {camera_name} - Original depth: min={original_min*depth_scale:.4f}, max={original_max*depth_scale:.4f}, nonzero={original_nonzero}")
        valid_mask = (depth_meters > min_depth) & (depth_meters > 0)
        if not surpress_print:
            # Guard: np.count_nonzero(None) relies on object-array coercion; count 0
            # explicitly when no mask was supplied (same printed value, robustly).
            cam_mask_count = np.count_nonzero(cam_mask) if cam_mask is not None else 0
            print(f"original valid_mask: {np.count_nonzero(valid_mask)} camera mask: {cam_mask_count}")
        if cam_mask is not None:
            if cam_mask.shape == depth_filtered.shape:
                valid_mask = valid_mask & cam_mask
            else:
                print(f"Mask shape mismatch for camera {camera_name}: {cam_mask.shape} vs {depth_filtered.shape}")
        depth_after = np.count_nonzero(valid_mask)
        if not surpress_print:
            print(f" {camera_name}(DepthRange): {original_nonzero} → {depth_after} ", end="")
        if filter_black_pixels and camera_name in ["midRight_camera", "midLeft_camera", "midBack_camera"]:
            # Drop pixels whose color is (near-)black in every channel.
            black_threshold = 0.001
            is_black_pixel = np.all(color_image <= black_threshold, axis=2)
            black_before = np.count_nonzero(valid_mask)
            valid_mask = valid_mask & (~is_black_pixel)
            black_after = np.count_nonzero(valid_mask)
            if not surpress_print:
                print(f" {camera_name}(Black): {black_before} → {black_after} ", end="")
        if filter_grey_pixels and camera_name in ["midRight_camera", "midLeft_camera", "midBack_camera"]:
            # Drop pixels whose depth matches any camera's sampled background depth.
            grey_before = np.count_nonzero(valid_mask)
            for inf_depth_val in inf_depth_vals:
                is_invalid_depth = np.abs(depth_image - inf_depth_val) < 1e-8
                valid_mask = valid_mask & (~is_invalid_depth)
            grey_after = np.count_nonzero(valid_mask)
            if not surpress_print:
                print(f" {camera_name}(Grey): {grey_before} → {grey_after} ", end="")

        mask_before = np.count_nonzero(valid_mask)
        # Zero out rejected pixels so the unprojection step treats them as invalid.
        depth_filtered[~valid_mask] = 0
        mask_after = np.count_nonzero(depth_filtered)
        if not surpress_print:
            print(f"mask_before: {mask_before},mask_after: {mask_after}")
        if mask_after > 0:
            filtered_min, filtered_max = np.min(depth_filtered[depth_filtered > 0]), np.max(depth_filtered[depth_filtered > 0])
            filtered_min_m, filtered_max_m = filtered_min * depth_scale, filtered_max * depth_scale
            if not surpress_print:
                print(f"Camera {camera_name} - After filtering: min={filtered_min:.4f}({filtered_min_m:.3f}m), max={filtered_max:.4f}({filtered_max_m:.3f}m), kept={mask_after}")
        else:
            print(f"Camera {camera_name} - No valid depth values after filtering")
            continue
        points_3d, colors = unproject_depth_to_3d_with_colors(depth_filtered, color_image, intrinsic_matrix, transform_matrix, depth_scale, cam_mask)
        if len(points_3d) == 0:
            print(f"No valid 3D points generated from camera {camera_name}")
            continue
        all_points_3d.append(points_3d)
        all_colors.append(colors)
    if not surpress_print:
        print()
    if not all_points_3d:
        print("No valid points generated from any camera")
        unifiedData[output_key] = np.zeros((1, 6), dtype=np.float32)
        return unifiedData

    combined_points_3d = np.vstack(all_points_3d)
    combined_colors = np.vstack(all_colors)
    pointcloud = np.hstack([combined_points_3d, combined_colors]).astype(np.float32)
    pointcloud = scalePcd(pointcloud, tag="create_pointcloud_from_depth_and_colors")
    unifiedData[output_key] = pointcloud
    return unifiedData