import os
import os.path as osp
import sys
import glob
import logging
import argparse
import numpy as np
import pandas as pd
import h5py
from scipy.spatial.transform import Slerp, Rotation

os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
import cv2
import torch
from tqdm import tqdm

# --- 1. Define Success Criteria (Thresholds) ---
MAE_THRESHOLD = 0.01                # meters (1 cm)
RMSE_THRESHOLD = 0.1               # meters (10 cm)
INLIER_METRIC_ERROR_THRESH = 0.07   # meters (7 cm) — per-pixel error bound for the inlier metric
INLIER_METRIC_PERCENT_THRESH = 99.0 # % of pixels that must be inliers
### NEW: Color verification threshold ###
COLOR_MAE_THRESHOLD = 0.1          # MAE on color values normalized to [0, 1] (10% avg error)

### MODIFIED: Import new utility functions ###
from verify_color_util import (
    imread_cv2, setup_logging, verify_depth_and_world_positions,
    depthmap_to_absolute_camera_coordinates, downsample_f,
    render_color_and_depth, calculate_metrics, calculate_color_metrics
)

SCENE_METADATA_CACHE = {}

# ======================================================================================
# --- 2. REVISED HYPERSIM DATA LOADING (Based on preprocess_hypersim.py analysis) ---
# ======================================================================================

def opengl_to_intrinsics(proj_matrix, width_pixels, height_pixels):
    """Convert a 4x4 OpenGL projection matrix into a 3x3 pinhole intrinsic matrix.

    The focal lengths and principal point are recovered by mapping normalized
    device coordinates back to pixel coordinates via the half image extents.
    """
    half_w = width_pixels / 2.0
    half_h = height_pixels / 2.0
    fx = proj_matrix[0, 0] * half_w
    skew = -proj_matrix[0, 1] * half_w
    cx = (1.0 - proj_matrix[0, 2]) * half_w
    fy = proj_matrix[1, 1] * half_h
    cy = (1.0 + proj_matrix[1, 2]) * half_h
    return np.array([
        [fx, skew, cx],
        [0.0, fy, cy],
        [0.0, 0.0, 1.0],
    ])

def get_scene_metadata(root_dir, scene_name, logger):
    """Load per-scene metadata (world scale, intrinsics, image size), caching the result.

    Returns a metadata dict, or None when the scene's camera parameters cannot
    be read or the camera has unsupported non-zero skew. An unsupported scene
    is cached as None so it is not re-parsed on subsequent calls.
    """
    scene_dir = osp.join(root_dir, scene_name)
    if scene_dir in SCENE_METADATA_CACHE:
        return SCENE_METADATA_CACHE[scene_dir]

    all_metafile = osp.join(root_dir, "metadata_camera_parameters.csv")
    try:
        camera_params = pd.read_csv(all_metafile, index_col="scene_name")
        row = camera_params.loc[scene_name]
    except (FileNotFoundError, KeyError) as e:
        logger.error(f"Could not load camera metadata for scene '{scene_name}'. Error: {e}")
        return None

    width = int(row["settings_output_img_width"])
    height = int(row["settings_output_img_height"])
    # Reassemble the 4x4 OpenGL projection matrix from its flattened CSV columns.
    M_proj = np.array([[row[f"M_proj_{r}{c}"] for c in range(4)] for r in range(4)])
    intrinsics = opengl_to_intrinsics(M_proj, width, height).astype(np.float32)

    if intrinsics[0, 1] != 0:
        logger.warning(f"Skipping scene '{scene_name}' due to non-zero camera skew, which is unsupported.")
        SCENE_METADATA_CACHE[scene_dir] = None
        return None

    metadata_scene_path = osp.join(scene_dir, '_detail', 'metadata_scene.csv')
    try:
        df_scene = pd.read_csv(metadata_scene_path, index_col='parameter_name')
        asset_to_meter_scale = df_scene.loc['meters_per_asset_unit', 'parameter_value']
    except (FileNotFoundError, KeyError) as e:
        logger.error(f"Could not load or parse world scale from '{metadata_scene_path}'. Error: {e}")
        return None

    metadata = {
        "scale": float(asset_to_meter_scale),
        "intrinsics": intrinsics,
        "width": width,
        "height": height,
        # Average of fx and fy; used for the distance -> planar-depth conversion.
        "focal": 0.5 * (intrinsics[0, 0] + intrinsics[1, 1]),
    }
    SCENE_METADATA_CACHE[scene_dir] = metadata
    return metadata

def get_camera_trajectory(scene_dir, cam_id):
    """Load (and cache) the raw keyframe positions/orientations for one camera."""
    cache_key = (scene_dir, cam_id)
    if cache_key in SCENE_METADATA_CACHE:
        return SCENE_METADATA_CACHE[cache_key]

    detail_dir = osp.join(scene_dir, '_detail', f'cam_{cam_id}')

    def _read_dataset(filename):
        # Each keyframe file stores a single array under the 'dataset' key.
        with h5py.File(osp.join(detail_dir, filename), 'r') as f:
            return f['dataset'][:]

    trajectory = {
        "positions": _read_dataset('camera_keyframe_positions.hdf5'),
        "orientations": _read_dataset('camera_keyframe_orientations.hdf5'),
    }
    SCENE_METADATA_CACHE[cache_key] = trajectory
    return trajectory

# Tone-mapping parameters for Hypersim HDR color images (used in
# load_original_hypersim_view_data when load_color=True).
gamma = 1.0 / 2.2  # Standard gamma correction exponent.
inv_gamma = 1.0 / gamma  # Inverse exponent (2.2): maps the target brightness back into linear space.
percentile = 90  # Desired percentile brightness in the unmodified image.
brightness_nth_percentile_desired = 0.8  # Desired brightness after scaling.
### MODIFIED: This function now loads color images as well ###
def load_original_hypersim_view_data(frame_depth_path, scene_meta, trajectory_data, logger, load_color=False):
    """
    Load and transform all data needed to verify a single Hypersim frame.

    Args:
        frame_depth_path: Path to the frame's '*.depth_meters.hdf5' file.
        scene_meta: Dict from get_scene_metadata() with 'scale', 'intrinsics',
            'width', 'height' and 'focal'.
        trajectory_data: Dict from get_camera_trajectory() with raw keyframe
            'positions' (asset units) and 'orientations'.
        logger: Logger for diagnostics.
        load_color: When True, also load and tone-map the frame's color image.

    Returns:
        Dict with planar z-depth ('depth'), 'intrinsics', cam2world 'pose',
        'world_positions' in meters (or None), 'image_size', tone-mapped
        'rgb' in [0, 1] (or None), and the 'valid_mask' of rendered pixels —
        or None if the frame could not be processed.
    """
    try:
        # Frame index is the second dot-separated token, e.g. 'frame.0042.depth_meters.hdf5'.
        frame_id = int(osp.basename(frame_depth_path).split('.')[1])
        frame_base_path = frame_depth_path.replace('.depth_meters.hdf5', '')

        # --- 1. Load Euclidean distance map (already in meters) ---
        with h5py.File(frame_depth_path, 'r') as f:
            distance = f['dataset'][:].astype(np.float32)
            distance[~np.isfinite(distance)] = 0  # NaN/inf pixels become 0 (invalid).

        # --- 2. Load World Positions (asset units -> meters) ---
        world_pos_path = f"{frame_base_path}.position.hdf5"
        world_positions_meters = None
        try:
            with h5py.File(world_pos_path, 'r') as f:
                world_positions_asset = f['dataset'][:].astype(np.float32)
            world_positions_meters = world_positions_asset * scene_meta['scale']
        except FileNotFoundError:
            logger.debug(f"'{world_pos_path}' not found.")
        except Exception as e:
            logger.error(f"Error loading '{world_pos_path}': {e}.", exc_info=True)

        # Entity id -1 marks pixels with no rendered geometry; 0 is never expected.
        render_entity = frame_depth_path.replace("depth_meters.hdf5", "render_entity_id.hdf5")
        with h5py.File(render_entity, "r") as f:
            render_entity_id = f["dataset"][:].astype(np.int32)
        assert (render_entity_id != 0).all()
        valid_mask = render_entity_id != -1

        # --- 3. Load Color Image if requested ---
        color = None
        if load_color:
            # Hypersim paths are parallel: _geometry_hdf5 -> _final_hdf5.
            color_path = frame_depth_path.replace('_geometry_hdf5', '_final_hdf5').replace('.depth_meters.hdf5', '.color.hdf5')

            # BUGFIX: the original tested `if color_path:`, which is always true for
            # a non-empty string, leaving the missing-file branch dead. Check that
            # the file actually exists instead.
            if osp.exists(color_path):
                with h5py.File(color_path, "r") as f:
                    color = f["dataset"][:]
            else:
                logger.warning(f"Could not find color image for {osp.basename(frame_depth_path)}")
                sys.exit(0)

            # Tone-map the HDR color: choose an exposure scale so that the Nth
            # percentile luminance of valid pixels reaches the desired brightness
            # after gamma correction.
            if np.sum(valid_mask) == 0:
                scale = 1.0  # If there are no valid pixels, set scale to 1.0.
            else:
                # Approximate Rec. 601 luma weights.
                brightness = (
                    0.3 * color[:, :, 0] + 0.59 * color[:, :, 1] + 0.11 * color[:, :, 2]
                )
                brightness_valid = brightness[valid_mask]
                eps = 0.0001  # Avoid division by zero.
                brightness_nth_percentile_current = np.percentile(
                    brightness_valid.astype(np.float32), percentile
                )
                if brightness_nth_percentile_current < eps:
                    scale = 0.0
                else:
                    scale = (
                        np.power(brightness_nth_percentile_desired, inv_gamma)
                        / brightness_nth_percentile_current
                    )

            color = np.power(np.maximum(scale * color, 0), gamma)
            color = np.clip(color, 0.0, 1.0)

        # --- 4. Calculate Pose (with correct transforms) ---
        # NOTE(review): assumes the raw orientations are OpenGL-style (camera
        # looking down -Z); the Y/Z flip converts them to the OpenCV convention
        # expected downstream — confirm against preprocess_hypersim.py.
        R_cam2world_raw = trajectory_data["orientations"][frame_id]
        t_cam2world_asset = trajectory_data["positions"][frame_id]
        R_cam2world = R_cam2world_raw @ np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
        t_cam2world = t_cam2world_asset * scene_meta['scale']
        pose = np.eye(4, dtype=np.float32)
        pose[:3, :3] = R_cam2world
        pose[:3, 3] = t_cam2world

        # --- 5. Convert Euclidean distance to Planar Z-depth ---
        # z = distance * focal / ||ray||, where each ray passes through the pixel
        # center on an image plane placed at z = focal.
        H, W = scene_meta['height'], scene_meta['width']
        focal = scene_meta['focal']
        ImageplaneX = np.linspace((-0.5 * W) + 0.5, (0.5 * W) - 0.5, W).reshape(1, W).repeat(H, 0)[:, :, None]
        ImageplaneY = np.linspace((-0.5 * H) + 0.5, (0.5 * H) - 0.5, H).reshape(H, 1).repeat(W, 1)[:, :, None]
        ImageplaneZ = np.full([H, W, 1], focal, np.float32)
        Imageplane = np.concatenate([ImageplaneX, ImageplaneY, ImageplaneZ], axis=2)
        planar_depth = distance * focal / np.linalg.norm(Imageplane, axis=2) * valid_mask

        return {
            "depth": planar_depth, "intrinsics": scene_meta['intrinsics'],
            "pose": pose, "path": frame_depth_path,
            "world_positions": world_positions_meters, "image_size": (H, W),
            "rgb": color,  # Tone-mapped RGB in [0, 1], or None when load_color=False.
            "valid_mask": valid_mask,
        }
    except Exception as e:
        logger.error(f"Could not process view for {frame_depth_path}. Skipping. Details: {e}", exc_info=True)
        return None

### NEW: Generic function to run global consistency checks ###
def run_global_consistency_check(
    subscene_path, device, logger, all_views_data, point_cloud_source,
    voxel_size=0.0, verify_color=False
):
    """
    Verify multi-view consistency by fusing all views into one global point
    cloud and re-projecting it back into each view.

    Args:
        subscene_path: Subscene path (used for logging only).
        device: torch device used for voxel downsampling and rendering.
        logger: Logger for progress and diagnostics.
        all_views_data: Per-view dicts from load_original_hypersim_view_data().
        point_cloud_source: 'depth' to back-project the depth maps with the
            camera poses, or 'world_pos' to use position.hdf5 world coordinates.
        voxel_size: If > 0, voxel-downsample the fused cloud with this size.
        verify_color: When True, carry RGB through the fusion and also compare
            the re-rendered color against each view's image.

    Returns:
        (True, "SKIPPED") when there is nothing to verify,
        (False, "ERROR") when no valid points could be collected, otherwise
        (is_consistent, failed_views) where failed_views lists a dict of
        metrics for every view that missed a threshold.
    """
    check_name = f'global consistency check from {point_cloud_source}'
    logger.info(f"\nVerifying subscene ({check_name}): {subscene_path}")

    if not all_views_data:
        logger.warning(f"No view data available. Skipping {check_name} check.")
        return True, "SKIPPED"

    # --- Step 1: Build Global Point Cloud (with optional color) ---
    logger.info(f"Step 1/2 ({check_name}): Building global point cloud...")
    all_points_and_attrs = []

    for view_data in tqdm(all_views_data, file=sys.stdout):
        valid_mask = view_data.get("valid_mask")

        points_to_add = None
        if point_cloud_source == 'depth':
            world_points, valid_mask2, _ = depthmap_to_absolute_camera_coordinates(
                view_data["depth"], view_data["intrinsics"], view_data["pose"]
            )
            # Combine the render-entity mask with the back-projection validity mask.
            valid_mask = valid_mask & valid_mask2
            points_to_add = world_points[valid_mask]
        elif point_cloud_source == 'world_pos':
            if view_data.get("world_positions") is not None:
                # Use the valid_mask from depth to ensure a 1-to-1 correspondence;
                # all-zero world positions are additionally treated as invalid.
                valid_mask = np.any(view_data["world_positions"] != 0, axis=2) & valid_mask
                points_to_add = view_data["world_positions"][valid_mask]
            else:
                continue

        # BUGFIX: an unrecognized point_cloud_source would previously leave
        # points_to_add as None and crash in hstack/concatenate below.
        if points_to_add is None:
            continue

        if verify_color:
            if view_data.get('rgb') is not None:
                colors = view_data['rgb'][valid_mask]
                points_and_colors = np.hstack([points_to_add, colors])
                all_points_and_attrs.append(points_and_colors)
            # If color is required but not available, we don't add the points.
        else:
            all_points_and_attrs.append(points_to_add)

    if not all_points_and_attrs:
        logger.error(f"No valid data found in subscene for {check_name} check.")
        return False, "ERROR"

    global_points_and_attrs = np.concatenate(all_points_and_attrs, axis=0)

    if voxel_size > 0:
        logger.info(f"Original {len(global_points_and_attrs)} points.")
        points_tensor_raw = torch.tensor(global_points_and_attrs, dtype=torch.float32, device=device)
        global_points_and_attrs = downsample_f(points_tensor_raw, voxel_size)
        # downsample_f returns a tensor, convert back to numpy for the renderer
        if isinstance(global_points_and_attrs, torch.Tensor):
             global_points_and_attrs = global_points_and_attrs.cpu().numpy()

    logger.info(f"Global model constructed with {len(global_points_and_attrs)} points.")

    # --- Step 2: Project global model to each view for verification ---
    logger.info(f"Step 2/2 ({check_name}): Projecting global model to each view...")
    is_consistent = True
    failed_views = []

    for view_data in tqdm(all_views_data, file=sys.stdout):
        rendered_depth, rendered_color = render_color_and_depth(
            global_points_and_attrs, view_data["pose"], view_data["intrinsics"],
            view_data["image_size"], device, has_color=verify_color
        )

        # --- Depth Metrics ---
        mae, rmse, inlier_percent = calculate_metrics(rendered_depth, view_data["depth"], view_data["valid_mask"])
        pass_mae = mae <= MAE_THRESHOLD
        pass_rmse = rmse <= RMSE_THRESHOLD
        pass_inliers = inlier_percent >= INLIER_METRIC_PERCENT_THRESH

        # --- Color Metrics ---
        pass_color = True # Default to pass if color isn't checked
        color_mae = 0.0
        if verify_color and view_data.get('rgb') is not None:
            color_mae = calculate_color_metrics(rendered_color, view_data['rgb'], view_data['valid_mask'])
            pass_color = color_mae <= COLOR_MAE_THRESHOLD

        if not (pass_mae and pass_rmse and pass_inliers and pass_color):
            is_consistent = False
            fail_details = {
                "path": osp.basename(view_data["path"]),
                "mae": mae, "rmse": rmse, "inliers": inlier_percent,
                "pass_mae": pass_mae, "pass_rmse": pass_rmse, "pass_inliers": pass_inliers,
                "type": check_name
            }
            if verify_color:
                fail_details.update({"color_mae": color_mae, "pass_color": pass_color})
            failed_views.append(fail_details)

    return is_consistent, failed_views

def log_failed_views(logger, subscene_key, check_name, failed_views, verify_color=False):
    """Log a detailed failure report for every view that missed a threshold.

    Args:
        logger: Destination logger.
        subscene_key: Human-readable subscene identifier for the report header.
        check_name: Name of the check that failed.
        failed_views: Failure dicts produced by the consistency checks; each
            entry either carries a 'reason' string or per-metric values/flags.
        verify_color: When True, also report the color MAE line.
    """
    # BUGFIX: removed a leftover debug `print(fail)` that bypassed the logger
    # and polluted stdout.
    if failed_views:
        log_message = [f"\n--- VERDICT: FAIL ({check_name}) ---"]
        log_message.append(f"Subscene '{subscene_key}' failed {check_name} check.")
        log_message.append("The following views failed to meet the thresholds:")
        for fail in failed_views:
            if "reason" in fail:
                log_message.append(f"  - View: {fail['path']} (Reason: {fail['reason']})")
            else:
                log_message.append(f"  - View: {fail['path']}")
                log_message.append(f"    MAE: {fail['mae']:.4f}m (Threshold: {MAE_THRESHOLD}, Pass: {fail['pass_mae']})")
                log_message.append(f"    RMSE: {fail['rmse']:.4f}m (Threshold: {RMSE_THRESHOLD}, Pass: {fail['pass_rmse']})")
                log_message.append(f"    Inliers (< {INLIER_METRIC_ERROR_THRESH}m): {fail['inliers']:.2f}% (Threshold: {INLIER_METRIC_PERCENT_THRESH}%, Pass: {fail['pass_inliers']})")
                if verify_color:
                    log_message.append(f"    Color MAE: {fail['color_mae']:.4f} (Threshold: {COLOR_MAE_THRESHOLD}, Pass: {fail['pass_color']})")
        logger.info('\n'.join(log_message))

# ======================================================================================
# --- 3. MAIN EXECUTION BLOCK ---
# ======================================================================================

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Verify the geometric and color consistency of an original Hypersim dataset.")
    parser.add_argument("--data_root", type=str, default='/lc/data/3D/hypersim_test', help="Path to the root directory of the original Hypersim dataset.")
    parser.add_argument("--log_file", type=str, default="verification.log", help="Path to the output log file.")
    parser.add_argument("--scene", type=str, default=None, help="Optional: Specify a single scene name (e.g., 'ai_001_001') to verify only that scene.")

    # --- Verification type flags ---
    # BUGFIX: these flags previously used action='store_true' together with
    # default=True, which made them impossible to disable from the CLI and left
    # the "no task selected" guard below unreachable. BooleanOptionalAction keeps
    # them on by default but adds --no-verify_* switches to turn each check off.
    parser.add_argument('--verify_global_depth', action=argparse.BooleanOptionalAction, default=True, help='Run global consistency check using depth maps and poses.')
    parser.add_argument('--verify_global_world_pos', action=argparse.BooleanOptionalAction, default=True, help='Run global consistency check using position.hdf5 files.')
    parser.add_argument('--verify_per_view_world_pos', action=argparse.BooleanOptionalAction, default=True, help='Run per-view consistency check between depth and position files.')
    ### NEW: Color verification flag ###
    parser.add_argument('--verify_color', action=argparse.BooleanOptionalAction, default=True, help='Run color consistency check alongside depth checks. Requires a global check to be enabled.')

    args = parser.parse_args()
    logger = setup_logging(args.log_file)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    logger.info(f"Using device: {device}")

    verify_tasks = [args.verify_global_depth, args.verify_global_world_pos, args.verify_per_view_world_pos]
    if not any(verify_tasks):
        logger.error("No verification task specified. Please enable at least one verification flag (e.g., --verify_global_depth).")
        sys.exit(1)

    if args.verify_color and not (args.verify_global_depth or args.verify_global_world_pos):
        logger.error("Color verification (--verify_color) requires a global consistency check (--verify_global_depth or --verify_global_world_pos) to be enabled.")
        sys.exit(1)

    if args.scene:
        scene_names = [args.scene]
    else:
        scene_names = sorted([d for d in os.listdir(args.data_root) if d.startswith('ai_') and osp.isdir(osp.join(args.data_root, d))])

    logger.info(f"Found {len(scene_names)} scenes to verify in '{args.data_root}'.")

    all_results = {}
    for scene_name in scene_names:
        scene_dir = osp.join(args.data_root, scene_name)
        scene_meta = get_scene_metadata(args.data_root, scene_name, logger)
        if scene_meta is None:
            all_results[scene_dir] = "SKIPPED (Unsupported)"
            continue

        subscene_dirs = sorted([d for d in glob.glob(osp.join(scene_dir, 'images', 'scene_cam_*_geometry_hdf5')) if osp.isdir(d)])
        for subscene_dir in subscene_dirs:
            subscene_key = f"{scene_name}/{osp.basename(subscene_dir)}"
            all_results[subscene_key] = {}

            try:
                # Directory name is 'scene_cam_<id>_geometry_hdf5'; token 2 is the camera id.
                cam_id = osp.basename(subscene_dir).split('_')[2]
                trajectory_data = get_camera_trajectory(scene_dir, cam_id)

                # --- Pre-load all view data for the subscene once ---
                depth_paths = sorted(glob.glob(osp.join(subscene_dir, "*.depth_meters.hdf5")))
                current_subscene_views_data = []
                logger.info(f"Pre-loading data for subscene: {subscene_key}")
                for path in tqdm(depth_paths, file=sys.stdout):
                    view_data = load_original_hypersim_view_data(
                        path, scene_meta, trajectory_data, logger, load_color=args.verify_color
                    )
                    # BUGFIX: the loader returns None for frames it cannot process;
                    # appending None would crash the consistency checks downstream
                    # on view_data.get(...). Keep only successfully loaded views.
                    if view_data is not None:
                        current_subscene_views_data.append(view_data)

                if not current_subscene_views_data:
                    logger.warning(f"No valid view data found for {subscene_dir}. Skipping.")
                    continue

                # --- Run Verifications Based on Flags ---
                if args.verify_global_depth:
                    is_consistent, failed_views = run_global_consistency_check(
                        subscene_dir, device, logger, current_subscene_views_data,
                        point_cloud_source='depth', verify_color=args.verify_color
                    )
                    all_results[subscene_key]["GLOBAL_DEPTH"] = "PASS" if is_consistent else "FAIL"
                    if not is_consistent:
                        log_failed_views(logger, subscene_key, "Global Consistency (from Depth)", failed_views, args.verify_color)

                if args.verify_global_world_pos:
                    is_consistent, failed_views = run_global_consistency_check(
                        subscene_dir, device, logger, current_subscene_views_data,
                        point_cloud_source='world_pos', verify_color=args.verify_color
                    )
                    all_results[subscene_key]["GLOBAL_WORLD_POS"] = "PASS" if is_consistent else "FAIL"
                    if not is_consistent:
                        log_failed_views(logger, subscene_key, "Global Consistency (from World Pos)", failed_views, args.verify_color)

                if args.verify_per_view_world_pos:
                    # Note: This check does not include color verification as it's a direct depth comparison.
                    is_consistent, failed_views = verify_depth_and_world_positions(
                        subscene_path=subscene_dir,
                        all_views_data=current_subscene_views_data,
                        logger=logger
                    )
                    all_results[subscene_key]["PER_VIEW_WORLD_POS"] = "PASS" if is_consistent else "FAIL"
                    if not is_consistent:
                        log_failed_views(logger, subscene_key, "Per-View Depth vs World Pos", failed_views)

            except Exception as e:
                logger.error(f"An unexpected error occurred processing {subscene_dir}: {e}", exc_info=True)
                all_results[subscene_key] = {"status": "ERROR"}

    # --- Final Summary ---
    summary = ["\n\n--- FINAL SUMMARY ---"]
    counters = {
        "GLOBAL_DEPTH": {"PASS": 0, "FAIL": 0, "ERROR": 0, "SKIPPED": 0},
        "GLOBAL_WORLD_POS": {"PASS": 0, "FAIL": 0, "ERROR": 0, "SKIPPED": 0},
        "PER_VIEW_WORLD_POS": {"PASS": 0, "FAIL": 0, "ERROR": 0, "SKIPPED": 0}
    }

    for subscene, results_dict in all_results.items():
        summary.append(f"\n{subscene}:")
        # Scene-level skips are stored as strings; errored subscenes carry a 'status' key.
        if not isinstance(results_dict, dict) or "status" in results_dict:
             summary.append(f"  - Status: {results_dict}")
             continue

        for key, name in [
            ("GLOBAL_DEPTH", "Global Consistency (from Depth)"),
            ("GLOBAL_WORLD_POS", "Global Consistency (from World Pos)"),
            ("PER_VIEW_WORLD_POS", "Per-View Depth vs World Pos")
        ]:
            status = results_dict.get(key, "SKIPPED")
            summary.append(f"  - {name}: {status}")
            counters[key][status] += 1

    summary.append(f"\n--- VERIFICATION OVERVIEW ---")
    for key, name in [
            ("GLOBAL_DEPTH", "Global Consistency (from Depth)"),
            ("GLOBAL_WORLD_POS", "Global Consistency (from World Pos)"),
            ("PER_VIEW_WORLD_POS", "Per-View Depth vs World Pos")
    ]:
        # Only summarize checks that actually ran on at least one subscene.
        if counters[key]["PASS"] + counters[key]["FAIL"] + counters[key]["ERROR"] > 0:
            summary.append(f"\n{name}:")
            summary.append(f"  - Passed:  {counters[key]['PASS']}")
            summary.append(f"  - Failed:  {counters[key]['FAIL']}")
            summary.append(f"  - Errored: {counters[key]['ERROR']}")
            summary.append(f"  - Skipped: {counters[key]['SKIPPED']}")

    logger.info('\n'.join(summary))