# visual_verify_hypersim.py

import os
import os.path as osp
import sys
import glob
import logging
import argparse
import numpy as np
import pandas as pd
import h5py

os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
import cv2
from tqdm import tqdm

# --- Import from our new visual utility file ---
from visual_util import launch_visualizer

# --- Import from the original utility file for data loading helpers ---
# Note: Assumes verify_utils.py is in the same directory or accessible in the python path.
try:
    from verify_utils import imread_cv2, setup_logging, depthmap_to_absolute_camera_coordinates
except ImportError:
    print("FATAL ERROR: Could not import from 'verify_utils.py'.")
    print("Please ensure 'verify_utils.py' from the original verification script is in the same directory.")
    sys.exit(1)


# Process-wide cache shared by two loaders with distinct key types:
# get_scene_metadata stores dicts keyed by the scene directory (str), while
# get_camera_trajectory stores trajectories keyed by (scene_dir, cam_id) tuples.
SCENE_METADATA_CACHE = {}

# --- Data Loading Functions (Adapted from verify_original_hypersim.py) ---

def opengl_to_intrinsics(proj_matrix, width_pixels, height_pixels):
    """Converts an OpenGL projection matrix to a 3x3 intrinsic matrix."""
    half_w = width_pixels / 2.0
    half_h = height_pixels / 2.0
    # Map NDC-space projection terms back to pixel units.
    fx = proj_matrix[0, 0] * half_w
    skew = -proj_matrix[0, 1] * half_w
    cx = (1.0 - proj_matrix[0, 2]) * half_w
    fy = proj_matrix[1, 1] * half_h
    cy = (1.0 + proj_matrix[1, 2]) * half_h
    return np.array([[fx, skew, cx],
                     [0.0, fy, cy],
                     [0.0, 0.0, 1.0]])

def get_scene_metadata(root_dir, scene_name, logger):
    """Loads and caches scene-level metadata from original Hypersim files.

    Returns a dict with 'scale' (meters per asset unit), 'intrinsics' (3x3
    float32), 'width', 'height', and 'focal', or None on failure.
    """
    scene_dir = osp.join(root_dir, scene_name)
    cached = SCENE_METADATA_CACHE.get(scene_dir)
    if cached is not None:
        return cached

    all_metafile = osp.join(root_dir, "metadata_camera_parameters.csv")
    try:
        camera_df = pd.read_csv(all_metafile, index_col="scene_name")
        row = camera_df.loc[scene_name]
    except (FileNotFoundError, KeyError) as e:
        logger.error(f"Could not load camera metadata for scene '{scene_name}'. Error: {e}")
        return None

    width = int(row["settings_output_img_width"])
    height = int(row["settings_output_img_height"])
    # Reassemble the 4x4 OpenGL projection matrix from its flattened
    # M_proj_RC columns in the CSV.
    M_proj = np.array(
        [[row[f"M_proj_{r}{c}"] for c in range(4)] for r in range(4)]
    )
    intrinsics = opengl_to_intrinsics(M_proj, width, height).astype(np.float32)

    metadata_scene_path = osp.join(scene_dir, '_detail', 'metadata_scene.csv')
    try:
        scene_df = pd.read_csv(metadata_scene_path, index_col='parameter_name')
        asset_to_meter_scale = scene_df.loc['meters_per_asset_unit', 'parameter_value']
    except (FileNotFoundError, KeyError) as e:
        logger.error(f"Could not load world scale from '{metadata_scene_path}'. Error: {e}")
        return None

    metadata = {
        "scale": float(asset_to_meter_scale),
        "intrinsics": intrinsics,
        "width": width,
        "height": height,
        # Single scalar focal length: average of fx and fy.
        "focal": (intrinsics[0, 0] + intrinsics[1, 1]) / 2.0,
    }
    SCENE_METADATA_CACHE[scene_dir] = metadata
    return metadata

def get_camera_trajectory(scene_dir, cam_id):
    """Loads and caches the raw keyframe trajectory for a specific camera.

    Returns a dict with 'positions' and 'orientations' arrays read from the
    camera's keyframe HDF5 files under '<scene_dir>/_detail/cam_<cam_id>/'.
    """
    cache_key = (scene_dir, cam_id)
    cached = SCENE_METADATA_CACHE.get(cache_key)
    if cached is not None:
        return cached

    detail_dir = osp.join(scene_dir, '_detail', f'cam_{cam_id}')

    def _read_dataset(filename):
        # Each keyframe file stores a single array under the 'dataset' key.
        with h5py.File(osp.join(detail_dir, filename), 'r') as f:
            return f['dataset'][:]

    trajectory = {
        "positions": _read_dataset('camera_keyframe_positions.hdf5'),
        "orientations": _read_dataset('camera_keyframe_orientations.hdf5'),
    }
    SCENE_METADATA_CACHE[cache_key] = trajectory
    return trajectory

# Constants for Hypersim's reference tone-mapping (percentile-based exposure
# scaling followed by gamma correction), used in the color-loading path below.
gamma = 1.0 / 2.2  # Standard gamma correction exponent.
inv_gamma = 1.0 / gamma  # = 2.2; undoes gamma when computing the exposure scale.
percentile = 90  # Desired percentile brightness in the unmodified image.
brightness_nth_percentile_desired = 0.8  # Desired brightness after scaling.
def _tonemap_hypersim_color(color, valid_mask):
    """Apply Hypersim's percentile-based exposure scaling plus gamma correction.

    Scales the image so that the `percentile`-th brightness over valid pixels
    maps to `brightness_nth_percentile_desired` after gamma, then clips to [0, 1].
    """
    if np.sum(valid_mask) == 0:
        scale = 1.0  # If there are no valid pixels, set scale to 1.0.
    else:
        # Rec. 601 luma weights.
        brightness = (
            0.3 * color[:, :, 0] + 0.59 * color[:, :, 1] + 0.11 * color[:, :, 2]
        )
        brightness_valid = brightness[valid_mask]
        eps = 0.0001  # Avoid division by zero.
        brightness_nth_percentile_current = np.percentile(
            brightness_valid.astype(np.float32), percentile
        )
        if brightness_nth_percentile_current < eps:
            scale = 0.0
        else:
            # Pre-distort the target by inv_gamma so the value is reached
            # after the gamma curve is applied below.
            scale = (
                np.power(brightness_nth_percentile_desired, inv_gamma)
                / brightness_nth_percentile_current
            )
    color = np.power(np.maximum(scale * color, 0), gamma)
    return np.clip(color, 0.0, 1.0)

def load_hypersim_view_for_visualizer(frame_depth_path, scene_meta, trajectory_data, logger, load_color=True):
    """
    Loads and transforms all necessary data for a single frame,
    including color, Euclidean depth, camera pose, intrinsics, and world-space positions.

    Args:
        frame_depth_path: Path to the frame's '*.depth_meters.hdf5' file.
        scene_meta: Dict from get_scene_metadata ('scale', 'intrinsics',
            'width', 'height', 'focal').
        trajectory_data: Dict from get_camera_trajectory ('positions',
            'orientations'), indexed by frame id.
        logger: Logger for diagnostics.
        load_color: When True, also load and tone-map the RGB image.

    Returns:
        Dict with 'depth' (planar Z-depth), 'intrinsics', 'pose' (4x4
        cam-to-world), 'path', 'world_positions' (meters, or None),
        'image_size', 'rgb' (tone-mapped, or None), and 'valid_mask' —
        or None if the frame could not be processed.
    """
    try:
        # Frame files are named like 'frame.0000.depth_meters.hdf5'.
        frame_id = int(osp.basename(frame_depth_path).split('.')[1])
        frame_base_path = frame_depth_path.replace('.depth_meters.hdf5', '')

        # --- 1. Load Euclidean distance map (meters along the ray) ---
        with h5py.File(frame_depth_path, 'r') as f:
            distance = f['dataset'][:].astype(np.float32)
        distance[~np.isfinite(distance)] = 0  # NaN/inf pixels become 0 (invalid).

        # --- 2. Load World Positions (asset units -> meters), best-effort ---
        world_pos_path = f"{frame_base_path}.position.hdf5"
        world_positions_meters = None
        try:
            with h5py.File(world_pos_path, 'r') as f:
                world_positions_asset = f['dataset'][:].astype(np.float32)
            world_positions_meters = world_positions_asset * scene_meta['scale']
        except FileNotFoundError:
            logger.debug(f"'{world_pos_path}' not found.")
        except Exception as e:
            logger.error(f"Error loading '{world_pos_path}': {e}.", exc_info=True)

        # --- 3. Validity mask: entity id -1 marks empty (background) pixels ---
        render_entity = frame_depth_path.replace("depth_meters.hdf5", "render_entity_id.hdf5")
        with h5py.File(render_entity, "r") as f:
            render_entity_id = f["dataset"][:].astype(np.int32)
        # Explicit raise instead of `assert` (asserts are stripped under -O);
        # the outer except logs it and skips the frame.
        if (render_entity_id == 0).any():
            raise ValueError(f"Unexpected zero render_entity_id in '{render_entity}'")
        valid_mask = render_entity_id != -1

        # --- 4. Load Color Image if requested ---
        # BUGFIX: the condition used to be `if load_color or True:`, which made
        # the load_color parameter a no-op.
        color = None
        if load_color:
            # Hypersim paths are often parallel, e.g., _geometry_hdf5 -> _final_hdf5
            color_path = frame_depth_path.replace('_geometry_hdf5', '_final_hdf5').replace('.depth_meters.hdf5', '.color.hdf5')

            # BUGFIX: the original tested `if color_path:` (always truthy for a
            # non-empty string), leaving the missing-file branch dead. Check the
            # filesystem instead; a missing color image now logs a warning and
            # skips this view (matching the previous de-facto behavior, where
            # the h5py open error fell through to the outer except -> None).
            if not osp.exists(color_path):
                logger.warning(f"Could not find color image for {osp.basename(frame_depth_path)}")
                return None

            with h5py.File(color_path, "r") as f:
                color = f["dataset"][:]
            color = _tonemap_hypersim_color(color, valid_mask)

        # --- 5. Calculate Pose (cam-to-world, translation in meters) ---
        R_cam2world_raw = trajectory_data["orientations"][frame_id]
        t_cam2world_asset = trajectory_data["positions"][frame_id]
        # Flip Y and Z axes (OpenGL-style camera -> computer-vision convention).
        R_cam2world = R_cam2world_raw @ np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
        t_cam2world = t_cam2world_asset * scene_meta['scale']
        pose = np.eye(4, dtype=np.float32)
        pose[:3, :3] = R_cam2world
        pose[:3, 3] = t_cam2world

        # --- 6. Convert Euclidean distance to Planar Z-depth ---
        # depth_z = distance * focal / |ray|, with rays through pixel centers.
        H, W = scene_meta['height'], scene_meta['width']
        focal = scene_meta['focal']
        plane_x = np.linspace((-0.5 * W) + 0.5, (0.5 * W) - 0.5, W)
        plane_y = np.linspace((-0.5 * H) + 0.5, (0.5 * H) - 0.5, H)
        # Per-pixel ray length via broadcasting (avoids building an HxWx3 array).
        ray_norm = np.sqrt(plane_x[None, :] ** 2 + plane_y[:, None] ** 2 + focal ** 2)
        planar_depth = distance * focal / ray_norm * valid_mask

        return {
            "depth": planar_depth, "intrinsics": scene_meta['intrinsics'],
            "pose": pose, "path": frame_depth_path,
            "world_positions": world_positions_meters, "image_size": (H, W),
            "rgb": color,
            "valid_mask": valid_mask,
        }
    except Exception as e:
        logger.error(f"Could not process view for {frame_depth_path}. Skipping. Details: {e}", exc_info=True)
        return None

# ======================================================================================
# --- Main Execution Block ---
# ======================================================================================

if __name__ == '__main__':
    # Command-line interface: point at a Hypersim root, optionally narrow to a
    # single scene/subscene, and serve an interactive visualization per subscene.
    parser = argparse.ArgumentParser(description="Visually verify the consistency of an original Hypersim dataset.")
    parser.add_argument("--data_root", type=str, default='/lc/data/3D/hypersim_test', help="Path to the root directory of the original Hypersim dataset.")
    parser.add_argument("--log_file", type=str, default="visual_verification.log", help="Path to the output log file.")
    parser.add_argument("--scene", type=str, default='ai_001_001', help="Optional: Specify a single scene name (e.g., 'ai_001_001') to visualize.")
    parser.add_argument("--subscene", type=str, default='scene_cam_00_geometry_hdf5', help="Optional: Specify a single sub scene name (e.g., 'scene_cam_00_geometry_hdf5') to visualize.")
    parser.add_argument("--port", type=int, default=8080, help="Port to run the Viser web server on.")

    args = parser.parse_args()
    logger = setup_logging(args.log_file)

    # NOTE(review): --scene and --subscene both default to non-empty values, so
    # the "discover all" branches below only run when they are explicitly
    # passed as empty strings.
    if args.scene:
        scene_names = [args.scene]
    else:
        # Hypersim scene directories follow the 'ai_XXX_YYY' naming pattern.
        scene_names = sorted([d for d in os.listdir(args.data_root) if d.startswith('ai_') and osp.isdir(osp.join(args.data_root, d))])

    logger.info(f"Found {len(scene_names)} scenes to process in '{args.data_root}'.")

    for scene_name in scene_names:
        scene_dir = osp.join(args.data_root, scene_name)
        scene_meta = get_scene_metadata(args.data_root, scene_name, logger)
        if scene_meta is None:
            continue  # Metadata error already logged; skip this scene.

        if args.subscene:
            subscene_dirs = [osp.join(scene_dir,'images', args.subscene)]
        else:
            subscene_dirs = sorted([d for d in glob.glob(osp.join(scene_dir, 'images', 'scene_cam_*_geometry_hdf5')) if osp.isdir(d)])
        for subscene_dir in subscene_dirs:
            subscene_key = f"{scene_name}/{osp.basename(subscene_dir)}"
            logger.info(f"\n--- Processing subscene: {subscene_key} ---")

            try:
                # Directory names look like 'scene_cam_00_geometry_hdf5';
                # splitting on '_' makes index 2 the camera id ('00').
                cam_id = osp.basename(subscene_dir).split('_')[2]
                trajectory_data = get_camera_trajectory(scene_dir, cam_id)

                depth_paths = sorted(glob.glob(osp.join(subscene_dir, "*.depth_meters.hdf5")))
                if not depth_paths:
                    logger.warning(f"No depth files found in {subscene_dir}. Skipping.")
                    continue

                # --- Prepare data for the visualizer ---
                # Per-view accumulators: world-space point clouds, RGB colors,
                # per-pixel confidences, and camera parameters.
                pc_list, color_list, conf_list = [], [], []
                focals, pps, Rs, ts = [], [], [], []

                logger.info("Loading views and generating point clouds...")
                for path in tqdm(depth_paths, file=sys.stdout):
                    view_data = load_hypersim_view_for_visualizer(path, scene_meta, trajectory_data, logger)
                    if view_data:
                        # Generate world-space point cloud for this single view
                        world_points, valid_mask, _ = depthmap_to_absolute_camera_coordinates(
                            view_data["depth"], view_data["intrinsics"], view_data["pose"]
                        )
                        pc_list.append(world_points)
                        color_list.append(view_data['rgb'])
                        conf_list.append(valid_mask.astype(np.float32))

                        # Aggregate camera parameters
                        intrinsics = view_data['intrinsics']
                        pose = view_data['pose']
                        focals.append(intrinsics[0, 0])
                        pps.append((intrinsics[0, 2], intrinsics[1, 2]))
                        Rs.append(pose[:3, :3])
                        ts.append(pose[:3, 3])

                if not pc_list:
                    logger.warning(f"No valid data could be loaded for {subscene_key}. Skipping visualization.")
                    continue

                cam_dict = {'focal': focals, 'pp': pps, 'R': Rs, 't': ts}

                # --- Launch the visualizer (blocks until closed/interrupted) ---
                try:
                    logger.info(f"Launching interactive visualizer for {subscene_key} at http://localhost:{args.port}")
                    logger.info("--> Close the browser tab or press Ctrl+C in the console to continue to the next subscene.")
                    launch_visualizer(
                        pc_list=pc_list,
                        color_list=color_list,
                        conf_list=conf_list,
                        cam_dict=cam_dict,
                        port=args.port
                    )
                except KeyboardInterrupt:
                    logger.info(f"Visualizer for {subscene_key} closed, proceeding...")
                except Exception as e:
                    logger.error(f"Failed to launch visualizer for {subscene_key}: {e}", exc_info=True)
                # NOTE(review): this unconditional break exits the subscene loop
                # after the first visualized subscene, so at most one subscene
                # per scene is ever shown — confirm this is intentional, since
                # the log message above implies iteration continues.
                break

            except Exception as e:
                logger.error(f"An unexpected error occurred processing {subscene_dir}: {e}", exc_info=True)

    logger.info("\n--- Visual verification complete. ---")