import os
import os.path as osp
import sys
import logging
import numpy as np
import cv2
import torch
from tqdm import tqdm

# --- 1. Define Success Criteria (Thresholds) ---
# Depth-agreement thresholds (all distances in meters).
MAE_THRESHOLD = 0.01                # meters (1 cm): max mean absolute depth error
RMSE_THRESHOLD = 0.02               # meters (2 cm): max root-mean-square depth error
INLIER_METRIC_ERROR_THRESH = 0.05   # meters (5 cm): per-pixel error below this counts as an inlier
INLIER_METRIC_PERCENT_THRESH = 99.0 # % of pixels that must be inliers for a view to pass
### NEW: Color verification threshold ###
COLOR_MAE_THRESHOLD = 0.05          # MAE on color values normalized to [0, 1]

# --- 2. Helper Functions & Data Loading ---
def imread_cv2(path, flags=cv2.IMREAD_COLOR):
    """Thin wrapper around cv2.imread so call sites share one entry point."""
    image = cv2.imread(path, flags)
    return image

def depthmap_to_camera_coordinates(depthmap, camera_intrinsics):
    """Back-project a depth map into a per-pixel 3D point map in camera coordinates.

    Args:
        depthmap: (H, W) array of planar depths; zero marks invalid pixels.
        camera_intrinsics: (3, 3) pinhole intrinsics matrix.

    Returns:
        X_cam: (H, W, 3) float32 array of camera-space XYZ per pixel.
        valid_mask: (H, W) boolean array, True where depth is strictly positive.
    """
    K = np.float32(camera_intrinsics)
    height, width = depthmap.shape
    focal_u, focal_v = K[0, 0], K[1, 1]
    center_u, center_v = K[0, 2], K[1, 2]

    # Pixel grids: cols holds u (x-axis), rows holds v (y-axis).
    cols, rows = np.meshgrid(np.arange(width), np.arange(height))
    z = depthmap
    X_cam = np.stack(((cols - center_u) * z / focal_u,
                      (rows - center_v) * z / focal_v,
                      z), axis=-1).astype(np.float32)

    return X_cam, depthmap > 0.0

def depthmap_to_absolute_camera_coordinates(depthmap, camera_intrinsics, camera_pose):
    """Back-project a depth map into world coordinates via the cam-to-world pose.

    Args:
        depthmap: (H, W) array of planar depths; zero marks invalid pixels.
        camera_intrinsics: (3, 3) pinhole intrinsics matrix.
        camera_pose: (4, 4) camera-to-world transform.

    Returns:
        (X_world, valid_mask, X_cam): world-space points (H, W, 3), validity
        mask (H, W), and the intermediate camera-space points (H, W, 3).
    """
    X_cam, valid_mask = depthmap_to_camera_coordinates(depthmap, camera_intrinsics)
    rotation = camera_pose[:3, :3]
    translation = camera_pose[:3, 3]
    # Per-pixel R @ p + t, written as a batched right-multiplication.
    X_world = X_cam @ rotation.T + translation
    return X_world, valid_mask, X_cam

# --- 3. GPU-Accelerated Projection and Verification ---

### NEW: Renderer that handles both Color and Depth ###
def render_color_and_depth(points_and_attrs, camera_pose, intrinsics, image_size, device, has_color=False):
    """
    Renders depth (and optionally color) images from a point cloud via z-buffering.

    Args:
        points_and_attrs: (N, 3) numpy array of world-space XYZ, or (N, 6)
            XYZ+RGB when has_color is True. May be None or empty.
        camera_pose: (4, 4) camera-to-world transform (inverted internally).
        intrinsics: (3, 3) pinhole camera matrix.
        image_size: (H, W) output resolution.
        device: torch device used for the projection math.
        has_color: whether columns 3: of the input hold per-point color.

    Returns:
        (depth, color): depth is an (H, W) float32 numpy array; color is an
        (H, W, 3) float32 numpy array, or None when has_color is False.
        Pixels hit by no point stay 0.
    """
    H, W = image_size

    if points_and_attrs is None or points_and_attrs.size == 0:
        return np.zeros((H, W), dtype=np.float32), \
               np.zeros((H, W, 3), dtype=np.float32) if has_color else None

    world_to_cam = torch.inverse(torch.tensor(camera_pose, dtype=torch.float32, device=device))
    attrs = torch.tensor(points_and_attrs, dtype=torch.float32, device=device)

    xyz = attrs[:, :3]
    xyz_h = torch.cat([xyz, torch.ones(xyz.shape[0], 1, device=device)], dim=1)
    xyz_cam = (world_to_cam @ xyz_h.T).T[:, :3]

    K = torch.tensor(intrinsics, dtype=torch.float32, device=device)
    proj = (K @ xyz_cam.T).T

    # Z-divide to get pixel coordinates.
    d = proj[:, 2]
    u = proj[:, 0] / d
    v = proj[:, 1] / d

    # Keep only points that land inside the image and in front of the camera.
    in_frame = (u >= 0) & (u < W) & (v >= 0) & (v < H) & (d > 0)
    u_px = u[in_frame].long()
    v_px = v[in_frame].long()
    d_px = d[in_frame]

    rendered_depth = torch.zeros((H, W), dtype=torch.float32, device=device)
    rendered_color = torch.zeros((H, W, 3), dtype=torch.float32, device=device) if has_color else None

    if u_px.numel() > 0:
        pixel_indices = v_px * W + u_px

        # BUG FIX: the original sorted by depth and relied on scatter_ writing
        # the last-sorted value for duplicate indices ("painter's algorithm").
        # PyTorch documents scatter_ as nondeterministic when indices repeat,
        # so the closest point was not guaranteed to win. Instead, sort
        # lexicographically by (pixel, depth) with two stable sorts and keep
        # only the first (nearest) point of each pixel group, making every
        # scattered index unique and the result deterministic.
        _, by_depth = torch.sort(d_px, stable=True)
        _, by_pixel = torch.sort(pixel_indices[by_depth], stable=True)
        order = by_depth[by_pixel]

        pix_sorted = pixel_indices[order]
        first_in_group = torch.ones_like(pix_sorted, dtype=torch.bool)
        first_in_group[1:] = pix_sorted[1:] != pix_sorted[:-1]
        winners = order[first_in_group]

        win_pix = pixel_indices[winners]
        rendered_depth.view(-1).scatter_(0, win_pix, d_px[winners])

        if has_color:
            colors = attrs[:, 3:][in_frame]
            rendered_color.view(-1, 3).scatter_(
                0, win_pix.unsqueeze(1).expand(-1, 3), colors[winners])

    rendered_depth_np = rendered_depth.cpu().numpy()
    rendered_color_np = rendered_color.cpu().numpy() if has_color else None

    return rendered_depth_np, rendered_color_np

### MODIFIED: Voxel downsampling now supports arbitrary point attributes (like color) ###
def voxel_downsample_pytorch(points_tensor, voxel_size=0.01):
    """
    Voxel-downsamples a point cloud with attributes by averaging per voxel.

    Args:
        points_tensor: (N, D) tensor, D >= 3, with XYZ in columns :3 and any
            extra attributes (e.g. RGB) in columns 3:.
        voxel_size: edge length of the cubic voxel grid, in XYZ units.

    Returns:
        (M, D) tensor with one averaged point per occupied voxel (M <= N).
        An empty input is returned unchanged.
    """
    if points_tensor.numel() == 0:
        return points_tensor

    # Use only XYZ for voxel grid calculation.
    xyz = points_tensor[:, :3]
    voxel_indices = torch.floor(xyz / voxel_size).long()

    # BUG FIX: the original hashed each 3D voxel index into a single int64 with
    # the multipliers (1, 41, 16777619); hash collisions could silently merge
    # far-apart voxels into one averaged point. torch.unique over rows
    # (dim=0) identifies occupied voxels exactly, with no collisions.
    unique_voxels, inverse_indices = torch.unique(voxel_indices, dim=0, return_inverse=True)
    num_voxels = unique_voxels.shape[0]
    num_attrs = points_tensor.shape[1]

    sums = torch.zeros((num_voxels, num_attrs), dtype=points_tensor.dtype, device=points_tensor.device)
    counts = torch.zeros(num_voxels, dtype=torch.int64, device=points_tensor.device)

    # scatter_add_ with repeated indices is a sum reduction (deterministic up
    # to float addition order), which is exactly what averaging needs.
    sums.scatter_add_(0, inverse_indices.unsqueeze(1).expand(-1, num_attrs), points_tensor)
    counts.scatter_add_(0, inverse_indices, torch.ones_like(inverse_indices))

    return sums / counts.unsqueeze(1).float()

# Assign the active functions
# (indirection kept so an alternative downsampler can be swapped in at one place)
downsample_f = voxel_downsample_pytorch

def calculate_metrics(rendered_depth, original_depth, valid_mask):
    """Compute MAE, RMSE and inlier percentage between two depth maps.

    Only pixels that are valid per *valid_mask* AND strictly positive in both
    maps are compared. Returns (inf, inf, 0.0) when no pixel qualifies.
    """
    usable = valid_mask & (original_depth > 0) & (rendered_depth > 0)
    n_usable = np.sum(usable)
    if n_usable == 0:
        return float('inf'), float('inf'), 0.0

    abs_err = np.abs(rendered_depth[usable] - original_depth[usable])
    mae = np.mean(abs_err)
    rmse = np.sqrt(np.mean(abs_err ** 2))
    inlier_percent = (np.sum(abs_err < INLIER_METRIC_ERROR_THRESH) / n_usable) * 100.0
    return mae, rmse, inlier_percent

### NEW: Metric calculation function for color images ###
def calculate_color_metrics(rendered_color, original_color, valid_mask):
    """Calculates Mean Absolute Error (MAE) for color images."""
    if rendered_color is None or original_color is None or np.sum(valid_mask) == 0:
        return float('inf')
    
    # Ensure images are float and mask is boolean
    rendered_color = rendered_color.astype(np.float32)
    original_color = original_color.astype(np.float32)
    valid_mask = valid_mask.astype(bool)

    # Calculate absolute difference on valid pixels
    diff = np.abs(rendered_color[valid_mask] - original_color[valid_mask])
    
    # Compute MAE across all valid pixels and channels
    mae = np.mean(diff)
    return mae

def setup_logging(log_file='verification.log'):
    """Create the 'VerificationLogger' logger, writing to *log_file* and the console.

    Any handlers left over from a previous call are removed first so repeated
    setup does not duplicate log records. Returns the configured logger.
    """
    logger = logging.getLogger('VerificationLogger')
    logger.setLevel(logging.INFO)
    if logger.hasHandlers():
        logger.handlers.clear()

    # File output: timestamped records, truncating any previous log file.
    to_file = logging.FileHandler(log_file, mode='w')
    to_file.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    logger.addHandler(to_file)

    # Console output: bare message only.
    to_console = logging.StreamHandler()
    to_console.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(to_console)

    return logger

### MODIFIED: This check is now depth-only, as color is handled in the global checks ###
def verify_depth_and_world_positions(subscene_path, all_views_data, logger):
    """
    Verifies the consistency between depth_meters.hdf5 and position.hdf5
    for each view, using the provided camera pose.

    Args:
        subscene_path: path of the subscene being checked (logging only).
        all_views_data: list of per-view dicts; each is expected to hold
            "depth" (H, W), "pose" (4x4 cam-to-world), "path", and optionally
            "world_positions" (H, W, 3 per-pixel world XYZ) — assumed schema,
            TODO confirm against the loader.
        logger: logger used for the banner message.

    Returns:
        (True, "SKIPPED_WP") when there are no views to check; otherwise
        (is_consistent, failed_views) where failed_views is a list of dicts
        describing each view that missed a depth threshold.
    """
    logger.info(f"\nVerifying subscene (Per-View Depth vs. World Position): {subscene_path}")
    is_consistent_wp = True
    failed_views_wp = []

    if not all_views_data:
        return True, "SKIPPED_WP"

    for view_data in tqdm(all_views_data, file=sys.stdout):
        # Views without a per-pixel world-position map cannot be checked; skip.
        if "world_positions" not in view_data or view_data["world_positions"] is None:
            continue

        original_depth = view_data["depth"]
        world_points_frame = view_data["world_positions"]
        pose = view_data["pose"]
        H, W = original_depth.shape

        # Pixels whose world position is exactly (0, 0, 0) are treated as
        # invalid/unfilled — presumably the zero vector is the sentinel used
        # by position.hdf5; verify against the data producer.
        valid_world_points_mask = np.any(world_points_frame != 0, axis=-1)
        valid_world_points_flat = world_points_frame[valid_world_points_mask]

        if valid_world_points_flat.size == 0:
            continue

        # Map world points back into the camera frame with the inverse pose
        # (homogeneous coordinates).
        world_to_cam = np.linalg.inv(pose)
        points_h = np.hstack([valid_world_points_flat, np.ones((len(valid_world_points_flat), 1))])
        points_cam = (world_to_cam @ points_h.T).T[:, :3]

        # The Z-component of points_cam is the planar depth
        projected_z_depth_flat = points_cam[:, 2]

        # Rebuild a depth image by writing each point's camera-space Z back
        # into the pixel it came from.
        rendered_depth_from_world_pos = np.zeros_like(original_depth, dtype=np.float32)
        rows_valid, cols_valid = np.where(valid_world_points_mask)

        # Only consider points that are in front of the camera
        positive_depth_mask = projected_z_depth_flat > 0
        rows_valid = rows_valid[positive_depth_mask]
        cols_valid = cols_valid[positive_depth_mask]
        projected_z_depth_flat = projected_z_depth_flat[positive_depth_mask]

        rendered_depth_from_world_pos[rows_valid, cols_valid] = projected_z_depth_flat

        # Compare only where both maps have depth; calculate_metrics re-applies
        # the same positivity checks, so this mask mostly documents intent.
        comparison_mask = (original_depth > 0) & (rendered_depth_from_world_pos > 0)
        mae_wp, rmse_wp, inlier_percent_wp = calculate_metrics(
            rendered_depth_from_world_pos, original_depth, comparison_mask
        )

        # A view passes only when all three thresholds are met.
        pass_mae_wp = mae_wp <= MAE_THRESHOLD
        pass_rmse_wp = rmse_wp <= RMSE_THRESHOLD
        pass_inliers_wp = inlier_percent_wp >= INLIER_METRIC_PERCENT_THRESH

        if not (pass_mae_wp and pass_rmse_wp and pass_inliers_wp):
            is_consistent_wp = False
            failed_views_wp.append({
                "path": osp.basename(view_data["path"]),
                "mae": mae_wp, "rmse": rmse_wp, "inliers": inlier_percent_wp,
                "pass_mae": pass_mae_wp, "pass_rmse": pass_rmse_wp, "pass_inliers": pass_inliers_wp,
                "type": "depth_vs_world_pos"
            })

    return is_consistent_wp, failed_views_wp