import os
import os.path as osp
import sys
import glob
import logging
import argparse # Import argparse
import numpy as np

import pandas as pd
import h5py
from scipy.spatial.transform import Slerp, Rotation

os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"

import cv2
import torch
from tqdm import tqdm

# --- 1. Define Success Criteria (Thresholds) ---
# A view passes verification only when ALL three metrics meet these bounds.
MAE_THRESHOLD = 0.01          # meters (1 cm) -- mean absolute depth error
RMSE_THRESHOLD = 0.02         # meters (2 cm) -- root-mean-square depth error
INLIER_METRIC_ERROR_THRESH = 0.05 # meters (5 cm) -- per-pixel error bound defining an "inlier"
INLIER_METRIC_PERCENT_THRESH = 99.0 # % of pixels that must be inliers
from verify_utils import imread_cv2, setup_logging, verify_subscene, verify_depth_and_world_positions, depthmap_to_absolute_camera_coordinates

# It's efficient to cache metadata once per scene/camera.
# Keys are scene_dir strings (scene metadata, may map to None for skipped
# scenes) or (scene_dir, cam_id) tuples (per-camera keyframe trajectories).
# BUGFIX: this dict was previously initialized twice; the duplicate
# assignment has been removed.
SCENE_METADATA_CACHE = {}

# ======================================================================================
# --- 2. REVISED HYPERSIM DATA LOADING (Based on preprocess_hypersim.py analysis) ---
# ======================================================================================

def opengl_to_intrinsics(proj_matrix, width_pixels, height_pixels):
    """Convert an OpenGL 4x4 projection matrix into a 3x3 pinhole intrinsic matrix.

    The focal lengths and principal point are recovered by scaling the
    normalized-device-coordinate terms by half the image extent.
    """
    half_w = width_pixels / 2.0
    half_h = height_pixels / 2.0

    fx = proj_matrix[0, 0] * half_w
    skew = -proj_matrix[0, 1] * half_w
    cx = (1.0 - proj_matrix[0, 2]) * half_w
    fy = proj_matrix[1, 1] * half_h
    cy = (1.0 + proj_matrix[1, 2]) * half_h

    return np.array([
        [fx, skew, cx],
        [0.0, fy, cy],
        [0.0, 0.0, 1.0],
    ])

def get_scene_metadata(root_dir, scene_name, logger):
    """Loads and caches scene-level metadata from original Hypersim files.

    Returns a dict with keys "scale" (meters per asset unit), "intrinsics"
    (3x3 float32), "width", "height" and "focal", or None when the metadata
    cannot be read or the camera has non-zero skew. A None result for skewed
    scenes is cached so the warning is emitted only once per scene.
    """
    scene_dir = osp.join(root_dir, scene_name)
    # Cache hit may legitimately be None (skewed scene), so use membership test.
    if scene_dir in SCENE_METADATA_CACHE:
        return SCENE_METADATA_CACHE[scene_dir]

    # --- Global camera parameters: one row per scene in a shared CSV ---
    all_metafile = osp.join(root_dir, "metadata_camera_parameters.csv")
    try:
        row = pd.read_csv(all_metafile, index_col="scene_name").loc[scene_name]
    except (FileNotFoundError, KeyError) as e:
        logger.error(f"Could not load camera metadata for scene '{scene_name}'. Error: {e}")
        return None

    width = int(row["settings_output_img_width"])
    height = int(row["settings_output_img_height"])
    # Reassemble the 4x4 OpenGL projection matrix from its flattened columns.
    M_proj = np.array(
        [[row[f"M_proj_{r}{c}"] for c in range(4)] for r in range(4)]
    )
    intrinsics = opengl_to_intrinsics(M_proj, width, height).astype(np.float32)

    if intrinsics[0, 1] != 0:
        logger.warning(f"Skipping scene '{scene_name}' due to non-zero camera skew, which is unsupported.")
        SCENE_METADATA_CACHE[scene_dir] = None
        return None

    # --- Scene-specific world scale, stored as a key/value CSV ---
    metadata_scene_path = osp.join(scene_dir, '_detail', 'metadata_scene.csv')
    try:
        scale_table = pd.read_csv(metadata_scene_path, index_col='parameter_name')
        asset_to_meter_scale = scale_table.loc['meters_per_asset_unit', 'parameter_value']
    except (FileNotFoundError, KeyError) as e:
        logger.error(f"Could not load or parse world scale from '{metadata_scene_path}'. Check for key 'meters_per_asset_unit' and column 'parameter_value'. Error: {e}")
        return None

    metadata = {
        # Direct asset-units -> meters factor; no inversion needed.
        "scale": float(asset_to_meter_scale),
        "intrinsics": intrinsics,
        "width": width,
        "height": height,
        "focal": (intrinsics[0, 0] + intrinsics[1, 1]) / 2.0,
    }
    SCENE_METADATA_CACHE[scene_dir] = metadata
    return metadata

def get_camera_trajectory(scene_dir, cam_id):
    """Loads and caches the raw keyframe trajectory for a specific camera.

    Returns a dict with "positions" and "orientations" arrays read from the
    camera's keyframe HDF5 files; results are cached in SCENE_METADATA_CACHE
    under the (scene_dir, cam_id) tuple key.
    """
    cam_key = (scene_dir, cam_id)
    cached = SCENE_METADATA_CACHE.get(cam_key)
    if cached is not None:
        return cached

    detail_dir = osp.join(scene_dir, '_detail', f'cam_{cam_id}')

    def _read_dataset(filename):
        # Each keyframe file stores a single array under the 'dataset' key.
        with h5py.File(osp.join(detail_dir, filename), 'r') as f:
            return f['dataset'][:]

    trajectory = {
        "positions": _read_dataset('camera_keyframe_positions.hdf5'),
        "orientations": _read_dataset('camera_keyframe_orientations.hdf5'),
    }
    SCENE_METADATA_CACHE[cam_key] = trajectory
    return trajectory

def load_original_hypersim_view_data(frame_depth_path, scene_meta, trajectory_data, logger):
    """
    Loads and transforms all necessary data for a single frame,
    including Euclidean depth, camera pose, intrinsics, and world-space positions.

    Args:
        frame_depth_path: Path to the frame's '*.depth_meters.hdf5' file. The
            frame id is parsed from the second dot-separated token of the file
            name (e.g. 'frame.0012.depth_meters.hdf5' -> 12).
        scene_meta: Dict from get_scene_metadata() with 'scale', 'intrinsics',
            'width', 'height' and 'focal'.
        trajectory_data: Dict from get_camera_trajectory() with 'positions'
            and 'orientations' arrays, indexed by frame id.
        logger: Logger for warnings and errors.

    Returns:
        Dict with keys 'depth' (planar Z-depth map), 'intrinsics', 'pose'
        (4x4 cam-to-world, float32), 'path', 'world_positions' (HxWx3 in
        meters, or None if the position file is absent) and 'image_size'
        (H, W); or None if any step fails.
    """
    try:
        frame_id = int(osp.basename(frame_depth_path).split('.')[1])

        # Determine the base path for other frame-specific files
        frame_base_path = frame_depth_path.replace('.depth_meters.hdf5', '')

        # --- 1. Load Euclidean distance map ---
        # Non-finite entries (e.g. pixels with no geometry) are zeroed so they
        # can later be masked out as invalid depth.
        with h5py.File(frame_depth_path, 'r') as f:
            distance = f['dataset'][:].astype(np.float32)
            distance[~np.isfinite(distance)] = 0

        # --- 2. Load World Positions (ADDED for verification) ---
        world_pos_path = f"{frame_base_path}.position.hdf5"
        world_positions_meters = None
        try:
            with h5py.File(world_pos_path, 'r') as f:
                # world_positions are already (H, W, 3) in asset units
                world_positions_asset = f['dataset'][:].astype(np.float32)
            # Apply scene scale to world positions to convert to meters
            world_positions_meters = world_positions_asset * scene_meta['scale']
        except FileNotFoundError:
            logger.warning(f"'{world_pos_path}' not found. Skipping world position loading for this view.")
        except Exception as e:
            logger.error(f"Error loading '{world_pos_path}': {e}. Skipping world position loading for this view.", exc_info=True)


        # --- 3. Calculate Pose (with correct transforms) ---
        R_cam2world_raw = trajectory_data["orientations"][frame_id]
        t_cam2world_asset = trajectory_data["positions"][frame_id]

        # Apply coordinate system transform to match the preprocessor:
        # right-multiplying by diag(1, -1, -1) flips the camera Y and Z axes.
        R_cam2world = R_cam2world_raw @ np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])

        # Apply scale factor to convert position to meters
        t_cam2world = t_cam2world_asset * scene_meta['scale']

        pose = np.eye(4, dtype=np.float32)
        pose[:3, :3] = R_cam2world
        pose[:3, 3] = t_cam2world

        # --- 4. Convert Euclidean distance to Planar Z-depth ---
        # The stored values are per-pixel ray lengths; multiplying by
        # focal / ||(x, y, focal)|| projects them onto the optical axis.
        # NOTE(review): the image-plane grid assumes the principal point is at
        # the image center (intrinsics cx/cy are not used here) -- confirm.
        H, W = scene_meta['height'], scene_meta['width']
        focal = scene_meta['focal']

        ImageplaneX = np.linspace((-0.5 * W) + 0.5, (0.5 * W) - 0.5, W).reshape(1, W).repeat(H, 0)[:, :, None]
        ImageplaneY = np.linspace((-0.5 * H) + 0.5, (0.5 * H) - 0.5, H).reshape(H, 1).repeat(W, 1)[:, :, None]
        ImageplaneZ = np.full([H, W, 1], focal, np.float32)
        Imageplane = np.concatenate([ImageplaneX, ImageplaneY, ImageplaneZ], axis=2)

        # This is the crucial conversion
        planar_depth = distance * focal / np.linalg.norm(Imageplane, axis=2)

        return {
            "depth": planar_depth,
            "intrinsics": scene_meta['intrinsics'],
            "pose": pose,
            "path": frame_depth_path,
            "world_positions": world_positions_meters, # Include world_positions
            "image_size": (H, W) # Add image size for convenience
        }

    except Exception as e:
        # Broad on purpose: a single unreadable frame should not abort the
        # whole subscene; the caller treats None as "skip this view".
        logger.error(f"Could not process view for {frame_depth_path}. Skipping. Details: {e}", exc_info=True)
        return None

from verify_utils import downsample_f, render_f, calculate_metrics
def verify_subscene_hypersim(subscene_path, device, logger, all_views_data_preloaded, ext='.depth_meters.hdf5', voxel_size=0.0):
    """
    Runs the full verification protocol on a single subscene directory,
    using pre-loaded view data.

    Builds a global world-space point cloud from every view's depth + pose,
    then re-projects the cloud into each view and compares the rendered depth
    against the view's own depth map.

    NOTE(review): this is nearly identical to verify_global_consistency_from_depth
    below (which the main block actually calls); consider consolidating the two.

    Args:
        subscene_path: Subscene directory (used only for logging).
        device: torch device for optional voxel downsampling and rendering.
        logger: Logger instance.
        all_views_data_preloaded: List of view dicts with at least "depth",
            "intrinsics", "pose" and "path". Each dict's "valid_mask" entry
            is (re)written as a side effect.
        ext: Unused in this function; kept for interface compatibility.
        voxel_size: If > 0, voxel-downsample the global cloud before projecting.

    Returns:
        (True, "SKIPPED_GLOBAL") when no views are supplied, otherwise
        (is_consistent: bool, failed_views: list of per-view metric dicts).
    """
    logger.info(f"\nVerifying subscene (Global Consistency): {subscene_path}")
    if not all_views_data_preloaded:
        logger.warning("No view data available. Skipping global consistency check.")
        return True, "SKIPPED_GLOBAL" # Return status for this type of check

    all_world_points = []
    logger.info("Step 1/2 (Global): Building global point cloud from pre-loaded views...")
    for view_data in tqdm(all_views_data_preloaded, file=sys.stdout):
        # We assume valid_mask is already computed and stored in view_data by the caller
        world_points, valid_mask, _ = depthmap_to_absolute_camera_coordinates(
            view_data["depth"], view_data["intrinsics"], view_data["pose"] # Ensure pose is passed
        )
        # Re-assert valid_mask for safety, though it should be already correct
        view_data["valid_mask"] = valid_mask # Make sure this is updated for the upcoming loop if needed
        all_world_points.append(world_points[valid_mask])

    if not all_world_points:
        logger.error("No valid data found in subscene for global consistency check after pre-loading.")
        return False, "ERROR_GLOBAL"

    global_points = np.concatenate(all_world_points, axis=0)

    if voxel_size > 0:
        logger.info(f"Original {len(global_points)} points for global consistency check.")
        points_tensor_raw = torch.tensor(global_points, dtype=torch.float32, device=device)
        global_points = downsample_f(points_tensor_raw, voxel_size)

    logger.info(f"Global model constructed with {len(global_points)} points for global consistency check.")
    logger.info("Step 2/2 (Global): Projecting global model to each view for verification...")
    is_consistent_global = True
    failed_views_global = []

    for view_data in tqdm(all_views_data_preloaded, file=sys.stdout):
        image_size = view_data["depth"].shape
        rendered_depth = render_f(
            global_points, view_data["pose"], view_data["intrinsics"], image_size, device
        )
        original_depth = view_data["depth"]
        mask = view_data["valid_mask"] # Use the valid mask from pre-loaded data
        # A view passes only if ALL three thresholds are met.
        mae, rmse, inlier_percent = calculate_metrics(rendered_depth, original_depth, mask)
        pass_mae, pass_rmse, pass_inliers = mae <= MAE_THRESHOLD, rmse <= RMSE_THRESHOLD, inlier_percent >= INLIER_METRIC_PERCENT_THRESH
        if not (pass_mae and pass_rmse and pass_inliers):
            is_consistent_global = False
            failed_views_global.append({
                "path": osp.basename(view_data["path"]), "mae": mae, "rmse": rmse, "inliers": inlier_percent,
                "pass_mae": pass_mae, "pass_rmse": pass_rmse, "pass_inliers": pass_inliers
            })

    return is_consistent_global, failed_views_global
# NEW SEPARATE VALIDATION FUNCTION: Global consistency from World Positions
def verify_global_consistency_from_world_positions(subscene_path, device, logger, all_views_data, voxel_size=0.0):
    """
    Verifies global consistency by building a point cloud from all world_positions.hdf5 files,
    then projecting this global cloud to each view.

    Args:
        subscene_path: Subscene directory (used only for logging).
        device: torch device for optional voxel downsampling and rendering.
        logger: Logger instance.
        all_views_data: List of view dicts; each needs "world_positions",
            "depth", "valid_mask", "pose", "intrinsics", "image_size" and
            "path". Views without world positions are skipped with a warning.
        voxel_size: If > 0, voxel-downsample the combined cloud first.

    Returns:
        (True, "SKIPPED_WP_GLOBAL") when no views are supplied,
        (False, "ERROR_WP_GLOBAL") when no view yields valid world positions,
        otherwise (is_consistent: bool, failed_views: list of metric dicts).
    """
    logger.info(f"\nVerifying subscene (Global Consistency from World Positions): {subscene_path}")
    is_consistent_wp_global = True
    failed_views_wp_global = []

    if not all_views_data:
        logger.warning("No view data available. Skipping global consistency from world positions check.")
        return True, "SKIPPED_WP_GLOBAL"

    all_world_positions_combined = []
    logger.info("Step 1/2 (Global from World Positions): Building global point cloud...")
    for view_data in tqdm(all_views_data, file=sys.stdout):
        if "world_positions" not in view_data or view_data["world_positions"] is None:
            logger.warning(f"Skipping view {osp.basename(view_data['path'])} due to missing world_positions.")
            continue

        world_points_frame = view_data["world_positions"] # (H, W, 3) in meters

        # A pixel contributes only if its world position is non-zero on some
        # axis AND its depth is positive (the loader zeroes invalid depth).
        valid_mask_wp = np.any(view_data["world_positions"] != 0, axis=2) & (view_data["depth"] > 0.0)

        if np.sum(valid_mask_wp) > 0:
            all_world_positions_combined.append(world_points_frame[valid_mask_wp].reshape(-1, 3))
        else:
            logger.warning(f"No valid world positions found in {osp.basename(view_data['path'])}.")

    if not all_world_positions_combined:
        logger.error("No valid world position data found across all views to build global model.")
        return False, "ERROR_WP_GLOBAL"

    global_points_from_wp = np.concatenate(all_world_positions_combined, axis=0)

    if voxel_size > 0:
        logger.info(f"Original {len(global_points_from_wp)} points for global world positions check.")
        points_tensor_raw = torch.tensor(global_points_from_wp, dtype=torch.float32, device=device)
        global_points_from_wp = downsample_f(points_tensor_raw, voxel_size)

    logger.info(f"Global world position model constructed with {len(global_points_from_wp)} points.")
    logger.info("Step 2/2 (Global from World Positions): Projecting global model to each view for verification...")

    for view_data in tqdm(all_views_data, file=sys.stdout):
        image_size = view_data["image_size"]
        original_depth = view_data["depth"]
        mask_original = view_data["valid_mask"] # Mask from original depth

        rendered_depth = render_f(
            global_points_from_wp, view_data["pose"], view_data["intrinsics"], image_size, device
        )

        # A view passes only if ALL three thresholds are met.
        mae, rmse, inlier_percent = calculate_metrics(rendered_depth, original_depth, mask_original)
        pass_mae, pass_rmse, pass_inliers = mae <= MAE_THRESHOLD, rmse <= RMSE_THRESHOLD, inlier_percent >= INLIER_METRIC_PERCENT_THRESH

        if not (pass_mae and pass_rmse and pass_inliers):
            is_consistent_wp_global = False
            failed_views_wp_global.append({
                "path": osp.basename(view_data["path"]),
                "mae": mae, "rmse": rmse, "inliers": inlier_percent,
                "pass_mae": pass_mae, "pass_rmse": pass_rmse, "pass_inliers": pass_inliers,
                "type": "global_from_world_positions"
            })

    return is_consistent_wp_global, failed_views_wp_global
def verify_global_consistency_from_depth(subscene_path, device, logger, all_views_data_preloaded, voxel_size=0.0):
    """
    Runs global consistency check by building point cloud from depth + pose.

    Unprojects every view into world space, optionally voxel-downsamples the
    combined cloud, then renders it back into each view and compares against
    that view's depth map. Each view dict's "valid_mask" entry is refreshed
    as a side effect.

    Returns (True, "SKIPPED_GLOBAL_DEPTH") when no views are supplied,
    (False, "ERROR_GLOBAL_DEPTH") when nothing valid could be unprojected,
    otherwise (is_consistent: bool, failed_views: list of metric dicts).
    """
    logger.info(f"\nVerifying subscene (Global Consistency from Depth): {subscene_path}")
    if not all_views_data_preloaded:
        logger.warning("No view data available. Skipping global consistency from depth check.")
        return True, "SKIPPED_GLOBAL_DEPTH"

    logger.info("Step 1/2 (Global from Depth): Building global point cloud...")
    per_view_points = []
    for view in tqdm(all_views_data_preloaded, file=sys.stdout):
        pts_world, mask, _ = depthmap_to_absolute_camera_coordinates(
            view["depth"], view["intrinsics"], view["pose"]
        )
        view["valid_mask"] = mask  # refresh mask for the projection pass below
        per_view_points.append(pts_world[mask])

    if not per_view_points:
        logger.error("No valid data found in subscene for global consistency (from depth) check.")
        return False, "ERROR_GLOBAL_DEPTH"

    global_points = np.concatenate(per_view_points, axis=0)

    if voxel_size > 0:
        logger.info(f"Original {len(global_points)} points for global consistency check.")
        raw_tensor = torch.tensor(global_points, dtype=torch.float32, device=device)
        global_points = downsample_f(raw_tensor, voxel_size)

    logger.info(f"Global model (from depth) constructed with {len(global_points)} points.")
    logger.info("Step 2/2 (Global from Depth): Projecting global model to each view...")

    failures = []
    for view in tqdm(all_views_data_preloaded, file=sys.stdout):
        rendered = render_f(
            global_points, view["pose"], view["intrinsics"], view["image_size"], device
        )
        mae, rmse, inliers = calculate_metrics(rendered, view["depth"], view["valid_mask"])
        ok_mae = mae <= MAE_THRESHOLD
        ok_rmse = rmse <= RMSE_THRESHOLD
        ok_inliers = inliers >= INLIER_METRIC_PERCENT_THRESH
        if not (ok_mae and ok_rmse and ok_inliers):
            failures.append({
                "path": osp.basename(view["path"]), "mae": mae, "rmse": rmse, "inliers": inliers,
                "pass_mae": ok_mae, "pass_rmse": ok_rmse, "pass_inliers": ok_inliers,
                "type": "global_from_depth"
            })

    return not failures, failures

def log_failed_views(logger, subscene_key, check_name, failed_views):
    """Log detailed failure information for a verification check.

    Args:
        logger: Logger to write the report to.
        subscene_key: Human-readable subscene identifier for the header.
        check_name: Name of the check that failed.
        failed_views: Either a list of per-view failure dicts (with metric
            fields, or a "reason" field), or a status string such as
            "ERROR_GLOBAL" returned by the verifiers. Falsy values log nothing.
    """
    if not failed_views:
        return

    # BUGFIX: the verifiers return a status string (e.g. "ERROR_GLOBAL",
    # "ERROR_WP_GLOBAL") instead of a list when they abort early; iterating
    # that string as dicts previously raised TypeError on fail['path'].
    if isinstance(failed_views, str):
        logger.info(
            f"\n--- VERDICT: FAIL ({check_name}) ---\n"
            f"Subscene '{subscene_key}' failed {check_name} check with status: {failed_views}"
        )
        return

    log_message = [f"\n--- VERDICT: FAIL ({check_name}) ---"]
    log_message.append(f"Subscene '{subscene_key}' failed {check_name} check.")
    log_message.append("The following views failed to meet the thresholds:")
    for fail in failed_views:
        if "reason" in fail:
            log_message.append(f"  - View: {fail['path']} (Reason: {fail['reason']})")
        else:
            log_message.append(f"  - View: {fail['path']}")
            log_message.append(f"    MAE: {fail['mae']:.4f}m (Threshold: {MAE_THRESHOLD}, Pass: {fail['pass_mae']})")
            log_message.append(f"    RMSE: {fail['rmse']:.4f}m (Threshold: {RMSE_THRESHOLD}, Pass: {fail['pass_rmse']})")
            log_message.append(f"    Inliers (< {INLIER_METRIC_ERROR_THRESH}m): {fail['inliers']:.2f}% (Threshold: {INLIER_METRIC_PERCENT_THRESH}%, Pass: {fail['pass_inliers']})")
    logger.info('\n'.join(log_message))

# ======================================================================================
# --- 3. MAIN EXECUTION BLOCK ---
# ======================================================================================
# ======================================================================================
# --- 3. MAIN EXECUTION BLOCK ---
# ======================================================================================

if __name__ == '__main__':
    # --- CLI setup ---
    parser = argparse.ArgumentParser(
        description="Verify the geometric consistency of an original Hypersim dataset."
    )
    parser.add_argument(
        "--data_root", type=str, default='/lc/data/3D/hypersim_test',
        help="Path to the root directory of the original Hypersim dataset."
    )
    parser.add_argument(
        "--log_file", type=str, default="verification.log",
        help="Path to the output log file. Defaults to 'verification.log'."
    )
    parser.add_argument(
        "--scene", type=str, default=None,
        help="Optional: Specify a single scene name (e.g., 'ai_001_001') to verify only that scene."
    )
    # --- NEW: Add flags to control verification types ---
    parser.add_argument(
        '--verify_global_depth', action='store_true',
        help='Run global consistency check using depth maps and poses.'
    )
    parser.add_argument(
        '--verify_global_world_pos', action='store_true',
        help='Run global consistency check using position.hdf5 files.'
    )
    parser.add_argument(
        '--verify_per_view_world_pos', action='store_true',
        help='Run per-view consistency check between depth and position files.'
    )
    args = parser.parse_args()

    logger = setup_logging(args.log_file)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    logger.info(f"Using device: {device}")

    # Require at least one verification flag: running none would silently do nothing.
    if not any([args.verify_global_depth, args.verify_global_world_pos, args.verify_per_view_world_pos]):
        logger.error("No verification task specified. Please enable at least one verification flag:")
        logger.error("  --verify_global_depth")
        logger.error("  --verify_global_world_pos")
        logger.error("  --verify_per_view_world_pos")
        sys.exit(1)

    # --- Modified scene finding logic ---
    # Hypersim scene directories follow the 'ai_XXX_XXX' naming convention.
    if args.scene:
        scene_names = [args.scene]
    else:
        scene_names = sorted([d for d in os.listdir(args.data_root) if d.startswith('ai_') and osp.isdir(osp.join(args.data_root, d))])

    logger.info(f"Found {len(scene_names)} scenes to verify in '{args.data_root}'.")

    all_results = {}
    for scene_name in scene_names:
        scene_dir = osp.join(args.data_root, scene_name)

        scene_meta = get_scene_metadata(args.data_root, scene_name, logger)
        if scene_meta is None:
            all_results[scene_dir] = "SKIPPED (Unsupported)"
            continue

        # One subscene per camera trajectory (scene_cam_XX_geometry_hdf5).
        subscene_dirs = sorted([d for d in glob.glob(osp.join(scene_dir, 'images', 'scene_cam_*_geometry_hdf5')) if osp.isdir(d)])
        for subscene_dir in subscene_dirs:
            subscene_key = f"{scene_name}/{osp.basename(subscene_dir)}"
            all_results[subscene_key] = {} # Initialize results for this subscene

            try:
                # Camera id is the third underscore token ('scene_cam_00_...' -> '00').
                cam_id = osp.basename(subscene_dir).split('_')[2]
                trajectory_data = get_camera_trajectory(scene_dir, cam_id)

                loader_func = lambda path, log: load_original_hypersim_view_data(
                    path, scene_meta, trajectory_data, log
                )

                # Pre-load all view data for the subscene once
                # NOTE(review): 'rgb_paths' is misnamed -- these are depth-map paths.
                rgb_paths = sorted(glob.glob(osp.join(subscene_dir, f"*.depth_meters.hdf5")))
                current_subscene_views_data = []
                logger.info(f"Pre-loading data for subscene: {subscene_key}")
                for path in tqdm(rgb_paths, file=sys.stdout):
                    view_data = loader_func(path, logger)
                    if view_data:
                        # Pre-calculate the valid mask needed by all checks
                        _, valid_mask, _ = depthmap_to_absolute_camera_coordinates(
                            view_data["depth"], view_data["intrinsics"], view_data["pose"]
                        )
                        view_data["valid_mask"] = valid_mask
                        current_subscene_views_data.append(view_data)

                if not current_subscene_views_data:
                    logger.warning(f"No valid view data found for {subscene_dir}. Skipping all verifications for this subscene.")
                    all_results[subscene_key] = {
                        "GLOBAL_DEPTH": "SKIPPED (No data)",
                        "GLOBAL_WORLD_POS": "SKIPPED (No data)",
                        "PER_VIEW_WORLD_POS": "SKIPPED (No data)"
                    }
                    continue

                # --- Run Verifications Based on Flags ---

                if args.verify_global_depth:
                    is_consistent, failed_views = verify_global_consistency_from_depth(
                        subscene_path=subscene_dir, device=device, logger=logger,
                        all_views_data_preloaded=current_subscene_views_data,
                        voxel_size=0.00
                    )
                    all_results[subscene_key]["GLOBAL_DEPTH"] = "PASS" if is_consistent else "FAIL"
                    if not is_consistent:
                        log_failed_views(logger, subscene_key, "Global Consistency (from Depth)", failed_views)
                else:
                    all_results[subscene_key]["GLOBAL_DEPTH"] = "SKIPPED"

                if args.verify_global_world_pos:
                    is_consistent, failed_views = verify_global_consistency_from_world_positions(
                        subscene_path=subscene_dir, device=device, logger=logger,
                        all_views_data=current_subscene_views_data,
                        voxel_size=0.0
                    )
                    all_results[subscene_key]["GLOBAL_WORLD_POS"] = "PASS" if is_consistent else "FAIL"
                    if not is_consistent:
                        log_failed_views(logger, subscene_key, "Global Consistency (from World Pos)", failed_views)
                else:
                    all_results[subscene_key]["GLOBAL_WORLD_POS"] = "SKIPPED"

                if args.verify_per_view_world_pos:
                    is_consistent, failed_views = verify_depth_and_world_positions(
                        subscene_path=subscene_dir, device=device, logger=logger,
                        all_views_data=current_subscene_views_data
                    )
                    all_results[subscene_key]["PER_VIEW_WORLD_POS"] = "PASS" if is_consistent else "FAIL"
                    if not is_consistent:
                        log_failed_views(logger, subscene_key, "Per-View Depth vs World Pos", failed_views)
                else:
                    all_results[subscene_key]["PER_VIEW_WORLD_POS"] = "SKIPPED"

            except Exception as e:
                # Keep going on unexpected per-subscene failures; mark all checks errored.
                logger.error(f"An unexpected error occurred processing {subscene_dir}: {e}", exc_info=True)
                all_results[subscene_key] = {
                    "GLOBAL_DEPTH": "ERROR",
                    "GLOBAL_WORLD_POS": "ERROR",
                    "PER_VIEW_WORLD_POS": "ERROR"
                }

    # --- Final Summary ---
    summary = ["\n\n--- FINAL SUMMARY ---"]
    # Per-check counters. Statuses with a suffix (e.g. "SKIPPED (No data)")
    # are bucketed by their leading token, falling back to "N/A".
    counters = {
        "GLOBAL_DEPTH": {"PASS": 0, "FAIL": 0, "ERROR": 0, "SKIPPED": 0, "N/A": 0},
        "GLOBAL_WORLD_POS": {"PASS": 0, "FAIL": 0, "ERROR": 0, "SKIPPED": 0, "N/A": 0},
        "PER_VIEW_WORLD_POS": {"PASS": 0, "FAIL": 0, "ERROR": 0, "SKIPPED": 0, "N/A": 0}
    }
    check_keys = {
        "GLOBAL_DEPTH": "Global Consistency (from Depth)",
        "GLOBAL_WORLD_POS": "Global Consistency (from World Pos)",
        "PER_VIEW_WORLD_POS": "Per-View Depth vs World Pos"
    }

    for subscene, results_dict in all_results.items():
        summary.append(f"\n{subscene}:")
        if isinstance(results_dict, str): # Handles cases like "SKIPPED (Unsupported)"
            summary.append(f"  - Status: {results_dict}")
            continue

        for key, name in check_keys.items():
            status = results_dict.get(key, "N/A")
            summary.append(f"  - {name}: {status}")
            # BUGFIX: statuses such as "SKIPPED (No data)" previously raised
            # KeyError here and crashed the whole summary.
            bucket = status.split(' ')[0]
            counters[key][bucket if bucket in counters[key] else "N/A"] += 1

    summary.append(f"\n--- VERIFICATION OVERVIEW ---")
    for key, name in check_keys.items():
        # Only report overview sections for checks that were actually requested.
        if args.verify_global_depth and key == "GLOBAL_DEPTH" or \
           args.verify_global_world_pos and key == "GLOBAL_WORLD_POS" or \
           args.verify_per_view_world_pos and key == "PER_VIEW_WORLD_POS":

            summary.append(f"\n{name}:")
            summary.append(f"  - Passed:  {counters[key]['PASS']}")
            summary.append(f"  - Failed:  {counters[key]['FAIL']}")
            summary.append(f"  - Errored: {counters[key]['ERROR']}")
            summary.append(f"  - Skipped: {counters[key]['SKIPPED'] + counters[key]['N/A']}")

    logger.info('\n'.join(summary))