#!/usr/bin/env python3
"""
VGGT point cloud generation utilities for the data processing pipeline.

This module provides a function to generate point clouds using VGGT:
- Mode 2: Unproject VGGT depth predictions to point clouds with RGB
- Mode 3: Use VGGT point head to directly generate point clouds
"""
import traceback
import numpy as np
import sys
import logging
from typing import Dict, Any, Tuple
import cv2
from PIL import Image
import tempfile
from pathlib import Path
import os

# Add the path to the VGGT interface module (now local to the package)
from vggt.vggtinterface import VGGTmodel

from unproject import unproject_depth_to_3d_with_colors

# Set up logging
logger = logging.getLogger(__name__)

def save_vggt_depth(save_path: str, depth_array: np.ndarray, colormap: str = 'jet'):
    """
    Save VGGT depth data as a colored image.
    Copied and adapted from save_file.py save_depth function.

    Args:
        save_path: Path to save the depth image
        depth_array: Depth data array of shape [H, W] with float values
        colormap: Colormap to apply to depth data ('jet', 'viridis', 'plasma',
                  'hot'); unrecognized names fall back to 'jet'
    """
    # Ensure the output directory exists
    Path(save_path).parent.mkdir(parents=True, exist_ok=True)

    # Replace NaN/+inf/-inf with 0; nan_to_num returns a copy, so the
    # caller's array is left untouched.
    depth_clean = np.nan_to_num(depth_array, nan=0.0, posinf=0.0, neginf=0.0)

    # Normalize depth values to the 0-255 range for visualization.
    # A constant image (max == min) maps to all zeros to avoid divide-by-zero.
    d_min, d_max = depth_clean.min(), depth_clean.max()
    if d_max > d_min:
        depth_normalized = ((depth_clean - d_min) / (d_max - d_min) * 255).astype(np.uint8)
    else:
        depth_normalized = np.zeros_like(depth_clean, dtype=np.uint8)

    # Map colormap names to OpenCV colormap codes; unknown names default to jet.
    colormap_codes = {
        'jet': cv2.COLORMAP_JET,
        'viridis': cv2.COLORMAP_VIRIDIS,
        'plasma': cv2.COLORMAP_PLASMA,
        'hot': cv2.COLORMAP_HOT,
    }
    colored_depth = cv2.applyColorMap(depth_normalized,
                                      colormap_codes.get(colormap, cv2.COLORMAP_JET))

    # OpenCV colormaps are BGR; PIL expects RGB.
    colored_depth_rgb = cv2.cvtColor(colored_depth, cv2.COLOR_BGR2RGB)

    Image.fromarray(colored_depth_rgb).save(save_path)
    logger.info(f"Saved VGGT depth visualization to: {save_path}")


def unproject_vggt_depth_to_3d_with_colors(depth_image: np.ndarray, color_image: np.ndarray, 
                                          intrinsic_matrix: np.ndarray, extrinsic_matrix: np.ndarray, 
                                          depth_scale: float = 1.0,
                                          min_depth: float = 0.01,
                                          max_depth: float = 10.0) -> Tuple[np.ndarray, np.ndarray]:
    """
    Unproject VGGT depth image to 3D points with proper coordinate system conversion.
    This function matches the coordinate transformation used in segPcd.py for consistency.
    
    Args:
        depth_image: Depth image, shape (H, W) - depth values from VGGT
        color_image: Color image, shape (H, W, 3) - RGB colors
        intrinsic_matrix: Camera intrinsic matrix from VGGT, shape (3, 3)
        extrinsic_matrix: Camera extrinsic matrix from VGGT, shape (3, 4) or (4, 4)
        depth_scale: Scale factor for depth values (default 1.0 for VGGT depth in meters)
        min_depth: Minimum valid depth after scaling, exclusive (default 0.01 m)
        max_depth: Maximum valid depth after scaling, exclusive (default 10.0 m)
        
    Returns:
        tuple: (points_3d, colors)
               points_3d: 3D points in world coordinates, shape (N, 3)
               colors: Corresponding colors, shape (N, 3)
    """
    depth_height, depth_width = depth_image.shape
    color_height, color_width = color_image.shape[:2]
    
    # DEBUG: Add depth image statistics
    depth_min, depth_max = np.min(depth_image), np.max(depth_image)
    depth_mean = np.mean(depth_image)
    depth_nonzero_count = np.count_nonzero(depth_image)
    logger.debug(f"VGGT Depth stats: min={depth_min:.4f}, max={depth_max:.4f}, mean={depth_mean:.4f}, nonzero_pixels={depth_nonzero_count}/{depth_height*depth_width}")
    logger.debug(f"Depth image shape: {depth_image.shape}, Color image shape: {color_image.shape}")
    
    # Handle resolution mismatch: resize colors to the depth grid so that each
    # depth pixel has a directly corresponding color.
    if (depth_height, depth_width) != (color_height, color_width):
        logger.debug(f"Resolution mismatch - Depth: {depth_height}x{depth_width}, Color: {color_height}x{color_width}")
        color_image = cv2.resize(color_image, (depth_width, depth_height), interpolation=cv2.INTER_LINEAR)
        logger.debug(f"Resized color image to match depth: {color_image.shape}")
    
    # Create flattened pixel coordinate grids based on depth image dimensions
    u, v = np.meshgrid(np.arange(depth_width), np.arange(depth_height))
    u = u.flatten()
    v = v.flatten()
    depth_values = depth_image.flatten() * depth_scale
    
    # Filter out invalid depth values (zero, negative, or outside the
    # configured [min_depth, max_depth] window).
    valid_mask = (depth_values > min_depth) & (depth_values < max_depth)
    u_valid = u[valid_mask]
    v_valid = v[valid_mask]
    depth_valid = depth_values[valid_mask]
    
    logger.debug(f"Valid VGGT depth pixels after filtering: {len(depth_valid)}")
    
    if len(depth_valid) == 0:
        logger.debug("No valid VGGT depth values found - returning empty arrays")
        return np.empty((0, 3), dtype=np.float32), np.empty((0, 3), dtype=np.uint8)
    
    # Get corresponding colors for valid pixels
    colors = color_image[v_valid, u_valid]  # Shape: (N, 3)
    
    # Convert to homogeneous pixel coordinates
    pixel_coords = np.stack([u_valid, v_valid, np.ones_like(u_valid)], axis=0)  # Shape: (3, N)
    
    # Unproject to camera coordinates:
    # K^-1 * [u, v, 1]^T * depth = [X, Y, Z]^T in camera frame
    inv_intrinsic = np.linalg.inv(intrinsic_matrix)
    camera_coords_normalized = inv_intrinsic @ pixel_coords  # Shape: (3, N)
    camera_coords = camera_coords_normalized * depth_valid[np.newaxis, :]  # Shape: (3, N)
    
    # Convert to homogeneous coordinates for world transformation
    camera_coords_homogeneous = np.vstack([camera_coords, np.ones((1, camera_coords.shape[1]))])  # Shape: (4, N)
    
    # Ensure extrinsic matrix is 4x4: promote a (3, 4) [R|t] matrix by
    # appending the [0, 0, 0, 1] row.
    if extrinsic_matrix.shape == (3, 4):
        extrinsic_4x4 = np.eye(4)
        extrinsic_4x4[:3, :] = extrinsic_matrix
    else:
        extrinsic_4x4 = extrinsic_matrix
    
    # CRITICAL: Apply coordinate system conversion for OpenGL to OpenCV
    # compatibility (flips Y and Z axes). This matches the transformation used
    # in segPcd.py and ensures a consistent coordinate system.
    coord_convert = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
    
    # Fold the axis flip into the rotation part only; translation is unchanged.
    transform_4x4 = extrinsic_4x4.copy()
    transform_4x4[:3, :3] = extrinsic_4x4[:3, :3] @ coord_convert
    
    # Transform to world coordinates using the camera-to-world matrix with
    # the coordinate conversion applied.
    world_coords = transform_4x4 @ camera_coords_homogeneous  # Shape: (4, N)
    
    # Return 3D points (drop homogeneous coordinate) and corresponding colors
    return world_coords[:3, :].T, colors  # Shape: (N, 3), (N, 3)


def _auto_depth_scale(vggt_depth_max: float) -> float:
    """Heuristically choose a depth scale factor from the max predicted depth.

    VGGT is assumed to output depth in meters; very small or very large
    maxima suggest a different unit and trigger a corrective scale factor.
    """
    if 0 < vggt_depth_max < 0.1:
        depth_scale = 100.0  # Scale up if values are too small
        logger.warning(f"VGGT depth values seem too small, applying scale factor: {depth_scale}")
    elif vggt_depth_max > 1000:
        depth_scale = 0.001  # Scale down if values are too large (maybe in mm)
        logger.warning(f"VGGT depth values seem too large, applying scale factor: {depth_scale}")
    else:
        depth_scale = 1.0  # Assume VGGT outputs depth in meters
    return depth_scale


def _collect_mode2_points(vggt_model, selected_cameras, cameras_data):
    """Mode 2: unproject VGGT-predicted depth using VGGT-predicted cameras.

    Returns (all_points_3d, all_colors): parallel lists of (N, 3) arrays,
    one entry per camera that produced valid points.
    """
    # Predict camera parameters and depth using VGGT
    extrinsic, intrinsic = vggt_model.predict_camera()
    depth_map, depth_conf = vggt_model.predict_depth()

    logger.info(f"VGGT depth prediction shape: {depth_map.shape}")
    logger.info(f"VGGT intrinsic shape: {intrinsic.shape}, extrinsic shape: {extrinsic.shape}")

    all_points_3d = []
    all_colors = []

    for i, camera_name in enumerate(selected_cameras):
        rgb_image = cameras_data[camera_name]['rgb']

        # Per-image VGGT predictions (batch index 0, image index i)
        vggt_depth = depth_map[0, i, :, :, 0].detach().cpu().numpy()  # Shape: (H, W)
        vggt_intrinsic = intrinsic[0, i].detach().cpu().numpy()  # Shape: (3, 3)
        vggt_extrinsic = extrinsic[0, i].detach().cpu().numpy()  # Shape: (3, 4)

        # Save VGGT predicted depth as a colored debug image (in CWD)
        save_vggt_depth(f"vggt_depth_{camera_name}_{i}.png", vggt_depth)

        # Validate VGGT depth values and adjust scale if needed
        vggt_depth_min, vggt_depth_max = np.min(vggt_depth), np.max(vggt_depth)
        vggt_depth_mean = np.mean(vggt_depth[vggt_depth > 0]) if np.any(vggt_depth > 0) else 0
        logger.info(f"Camera {camera_name} - Raw VGGT depth range: {vggt_depth_min:.3f} to {vggt_depth_max:.3f}, mean: {vggt_depth_mean:.3f}")

        depth_scale = _auto_depth_scale(vggt_depth_max)

        logger.debug(f"Camera {camera_name} - VGGT predicted intrinsic matrix:\n{vggt_intrinsic}")
        logger.debug(f"Camera {camera_name} - VGGT predicted extrinsic matrix:\n{vggt_extrinsic}")

        # Focal lengths must be positive for unprojection to make sense
        if vggt_intrinsic[0, 0] <= 0 or vggt_intrinsic[1, 1] <= 0:
            logger.error(f"Invalid VGGT predicted intrinsic matrix for camera {camera_name}, skipping")
            continue

        # Unproject using VGGT depth and VGGT predicted camera parameters
        # with proper coordinate system conversion
        points_3d, colors = unproject_vggt_depth_to_3d_with_colors(
            depth_image=vggt_depth,
            color_image=rgb_image,
            intrinsic_matrix=vggt_intrinsic,
            extrinsic_matrix=vggt_extrinsic,
            depth_scale=depth_scale
        )

        if len(points_3d) > 0:
            all_points_3d.append(points_3d)
            all_colors.append(colors)
            logger.info(f"Generated {len(points_3d)} valid points from camera {camera_name}")
        else:
            logger.warning(f"No valid points generated from camera {camera_name}")

    return all_points_3d, all_colors


def _collect_mode3_points(vggt_model, selected_cameras, cameras_data):
    """Mode 3: take 3D points directly from the VGGT point head.

    Returns (all_points_3d, all_colors): parallel lists of (N, 3) arrays,
    one entry per camera that produced valid points.
    """
    point_map, point_conf = vggt_model.predict_points()
    logger.info(f"VGGT point prediction shape: {point_map.shape}")

    all_points_3d = []
    all_colors = []
    conf_threshold = 0.5  # Keep only high-confidence point predictions

    for i, camera_name in enumerate(selected_cameras):
        rgb_image = cameras_data[camera_name]['rgb']

        # Per-image predictions (batch index 0, image index i)
        vggt_points = point_map[0, i].detach().cpu().numpy()  # Shape: (H, W, 3)
        vggt_conf = point_conf[0, i].detach().cpu().numpy()   # Shape: (H, W)
        H, W = vggt_points.shape[:2]

        points_3d = vggt_points.reshape(-1, 3)  # Shape: (H*W, 3)
        confidence = vggt_conf.reshape(-1)      # Shape: (H*W,)

        # Colors come from the RGB frame, resized to the prediction grid if needed
        rgb_resized = cv2.resize(rgb_image, (W, H)) if rgb_image.shape[:2] != (H, W) else rgb_image
        colors = rgb_resized.reshape(-1, 3)     # Shape: (H*W, 3)

        valid_mask = confidence > conf_threshold
        if valid_mask.sum() == 0:
            logger.warning(f"No high-confidence points from camera {camera_name}")
            continue

        valid_points = points_3d[valid_mask]
        valid_colors = colors[valid_mask]

        # Additional filtering: drop non-finite coordinates and points too
        # close to the origin (degenerate predictions)
        valid_coords_mask = (
            np.isfinite(valid_points).all(axis=1) &
            (np.linalg.norm(valid_points, axis=1) > 0.01)
        )
        if valid_coords_mask.sum() == 0:
            logger.warning(f"No valid coordinate points from camera {camera_name}")
            continue

        final_points = valid_points[valid_coords_mask]
        final_colors = valid_colors[valid_coords_mask]
        all_points_3d.append(final_points)
        all_colors.append(final_colors)
        logger.info(f"Generated {len(final_points)} valid points from camera {camera_name}")

    return all_points_3d, all_colors


def vggtPcd(unifiedData: Dict[str, Any], mode: int = 2) -> Dict[str, Any]:
    """
    Generate point cloud using VGGT with specified mode.
    
    Args:
        unifiedData: Dictionary containing unified frame data
        mode: VGGT mode (2 for depth unprojection, 3 for direct point head)
        
    Returns:
        Updated unifiedData with 'vggtpointcloud2' or 'vggtpointcloud3' field
        containing an (N, 6) float32 point cloud [x, y, z, r, g, b]; a
        single all-zeros row is stored on any failure.
    """
    if mode not in [2, 3]:
        logger.error(f"Invalid VGGT mode: {mode}. Must be 2 or 3.")
        return unifiedData

    field_name = f'vggtpointcloud{mode}'
    logger.info(f"Starting VGGT Mode {mode}: {'Depth unprojection to PCD' if mode == 2 else 'Direct point head prediction'}")

    # Check if camera data is available
    if 'cameras' not in unifiedData or not unifiedData['cameras']:
        logger.warning(f"No camera data available for VGGT Mode {mode}")
        unifiedData[field_name] = np.zeros((1, 6), dtype=np.float32)
        return unifiedData

    cameras_data = unifiedData['cameras']

    # Get list of available cameras with RGB data (VGGT only needs RGB images)
    available_cameras = [
        camera_name
        for camera_name, camera_data in cameras_data.items()
        if camera_name in ["midHead_camera"] and camera_data.get('rgb') is not None
    ]

    # Take all available cameras for VGGT
    selected_cameras = available_cameras
    logger.info(f"Using cameras for VGGT: {selected_cameras}")

    # Guard: without any usable RGB camera there is nothing to infer on
    if not selected_cameras:
        logger.warning(f"No usable RGB cameras for VGGT Mode {mode}")
        unifiedData[field_name] = np.zeros((1, 6), dtype=np.float32)
        return unifiedData

    try:
        # Initialize VGGT model
        vggt_model = VGGTmodel()

        # TemporaryDirectory guarantees the temp images are removed even if
        # inference or post-processing raises (the previous manual cleanup
        # leaked files on any exception).
        with tempfile.TemporaryDirectory() as temp_dir:
            # Save input frames as temporary image files for VGGT
            image_paths = []
            for i, camera_name in enumerate(selected_cameras):
                rgb_image = cameras_data[camera_name]['rgb']
                # Convert from RGB to BGR for OpenCV
                bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
                image_path = Path(temp_dir) / f"temp_image_{i}.png"
                cv2.imwrite(str(image_path), bgr_image)
                image_paths.append(str(image_path))

            # Run VGGT inference
            logger.info("Running VGGT inference...")
            vggt_model.infer(image_paths)

            if mode == 2:
                all_points_3d, all_colors = _collect_mode2_points(
                    vggt_model, selected_cameras, cameras_data)
            else:  # mode == 3
                all_points_3d, all_colors = _collect_mode3_points(
                    vggt_model, selected_cameras, cameras_data)

        if not all_points_3d:
            logger.warning("No valid points generated from any camera")
            unifiedData[field_name] = np.zeros((1, 6), dtype=np.float32)
            return unifiedData

        # Combine all points and colors
        combined_points_3d = np.vstack(all_points_3d)
        combined_colors = np.vstack(all_colors)

        # Final validation: ensure combined point cloud is reasonable
        combined_distances = np.linalg.norm(combined_points_3d, axis=1)
        logger.info(f"Combined point cloud distance range: {combined_distances.min():.3f} to {combined_distances.max():.3f} meters")

        # Create point cloud as (N, 6) [x, y, z, r, g, b]
        pointcloud = np.hstack([combined_points_3d, combined_colors]).astype(np.float32)

        unifiedData[field_name] = pointcloud
        logger.info(f"VGGT Mode {mode}: Created point cloud with {len(pointcloud)} points")

    except Exception as e:
        traceback.print_exc()
        logger.error(f"Error in VGGT Mode {mode}: {e}")
        unifiedData[field_name] = np.zeros((1, 6), dtype=np.float32)

    return unifiedData