import os
import os.path as osp
import sys
import glob
import logging
import argparse # Import argparse
import numpy as np

os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"

import cv2
import torch
from tqdm import tqdm



# Project-local helpers (kept with the other imports per PEP 8 grouping).
from verify_utils import imread_cv2, setup_logging, verify_subscene

# --- 1. Define Success Criteria (Thresholds) ---
# NOTE(review): these constants are not referenced anywhere in this file —
# presumably consumed by verify_subscene via globals or kept for reference;
# TODO confirm and remove if dead.
MAE_THRESHOLD = 0.01          # meters (1 cm)
RMSE_THRESHOLD = 0.02         # meters (2 cm)
INLIER_METRIC_ERROR_THRESH = 0.05 # meters (5 cm)
INLIER_METRIC_PERCENT_THRESH = 99.0 # % of pixels

def load_view_data(rgb_path, logger, actual_mask_bg=True):
    """Load one view (RGB, depth, camera, optional object mask) from disk.

    Sibling files are derived by swapping the "rgb.png" suffix of *rgb_path*,
    so "xxx_rgb.png" is expected to sit next to "xxx_depth.png",
    "xxx_cam.npz" and "xxx_mask.png".

    Args:
        rgb_path: Path to the "*rgb.png" image of the view.
        logger: Logger used to report missing files.
        actual_mask_bg: When True, zero out depth on background pixels
            using the object mask.

    Returns:
        dict with keys "rgb", "depth" (float32, meters), "intrinsics",
        "pose" and "path", or None when a required file is missing.
    """
    try:
        depth_path = rgb_path.replace("rgb.png", "depth.png")
        cam_path = rgb_path.replace("rgb.png", "cam.npz")
        mask_path = rgb_path.replace("rgb.png", "mask.png")

        rgb_image = imread_cv2(rgb_path, cv2.IMREAD_COLOR)
        depthmap = imread_cv2(depth_path, cv2.IMREAD_UNCHANGED)

        # np.load on an .npz returns an NpzFile that keeps the file handle
        # open; use it as a context manager so the handle is released.
        with np.load(cam_path) as cam_file:
            intrinsics = cam_file["intrinsics"].astype(np.float32)
            camera_pose = cam_file["pose"].astype(np.float32)

        # 65535 marks invalid depth (presumably a uint16, millimeter-scale
        # depth map — TODO confirm against the preprocessing pipeline).
        depthmap[depthmap == 65535] = 0
        depthmap = depthmap.astype(np.float32) / 1000.0  # mm -> meters

        if actual_mask_bg:
            object_mask = imread_cv2(mask_path, cv2.IMREAD_UNCHANGED)
            # Binarize: pixels brighter than ~10% are foreground.
            object_mask = (object_mask.astype(np.float32) / 255.0) > 0.1
            # Zero out depth on background pixels (bool mask broadcasts).
            depthmap *= object_mask

        return {
            "rgb": rgb_image, "depth": depthmap, "intrinsics": intrinsics,
            "pose": camera_pose, "path": rgb_path
        }
    except FileNotFoundError as e:
        logger.warning(f"Could not find all files for {osp.basename(rgb_path)}. Skipping view. Details: {e}")
        return None

if __name__ == '__main__':
    # --- Command-line interface ---
    parser = argparse.ArgumentParser(
        description="Verify the geometric consistency of a preprocessed Hypersim dataset."
    )

    # NOTE(review): the description/help text says "Hypersim" but the
    # defaults point at a WildRGB-D tree — looks copied from a sibling
    # script; confirm which dataset this verifier targets.
    parser.add_argument(
        "--data_root",
        metavar="DATA_ROOT",
        type=str,
        default='/lc/data/3D/wildrgbd/processed',
        help="Path to the root directory of the resized Hypersim dataset (e.g., /path/to/resized_hypersim/train)."
    )

    parser.add_argument(
        "--log_file",
        type=str,
        default="logs/verification_wildrgbd.log",
        # Fixed: the old help text claimed the default was 'verification.log'.
        help="Path to the output log file. Defaults to 'logs/verification_wildrgbd.log'."
    )

    parser.add_argument(
        "--scene",
        type=str,
        default=None,
        help="Optional: Specify a single scene name (e.g., 'ai_044_007') to verify only that scene."
    )

    args = parser.parse_args()

    logger = setup_logging(args.log_file)

    if not osp.isdir(args.data_root):
        logger.error(f"Provided data root '{args.data_root}' does not exist.")
        sys.exit(1)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    logger.info(f"Using device: {device}")

    if args.scene:
        # Restrict verification to the single requested scene.
        scene_dirs = [osp.join(args.data_root, args.scene)]
        if not osp.isdir(scene_dirs[0]):
            logger.error(f"Specified scene directory '{scene_dirs[0]}' does not exist.")
            sys.exit(1)
        logger.info(f"Running verification for single specified scene: {args.scene}")
    else:
        # Otherwise verify every scene directory under the data root.
        scene_dirs = sorted([d for d in glob.glob(osp.join(args.data_root, '*')) if osp.isdir(d)])
        logger.info(f"Found {len(scene_dirs)} scenes in '{args.data_root}'.")

    # Verify every subscene of every scene, recording PASS/FAIL/ERROR.
    # Fixed: stray debug `break` statements previously limited the run to
    # the first subscene of the first scene, while the final summary still
    # claimed dataset-wide verification.
    all_results = {}
    for scene_dir in scene_dirs:
        subscene_dirs = sorted([d for d in glob.glob(osp.join(scene_dir, '*')) if osp.isdir(d)])
        for subscene_dir in subscene_dirs:
            try:
                result = verify_subscene(subscene_dir, device=device, logger=logger, load_view_data=load_view_data, ext='_rgb.png')
                all_results[subscene_dir] = "PASS" if result else "FAIL"
            except Exception as e:
                # Keep going on unexpected errors; they are tallied as ERROR.
                logger.error(f"An unexpected error occurred while processing {subscene_dir}: {e}", exc_info=True)
                all_results[subscene_dir] = "ERROR"

    # --- Final Summary ---
    summary = ["\n\n--- FINAL SUMMARY ---"]
    for subscene, result in all_results.items():
        summary.append(f"{subscene}: {result}")

    num_failed = list(all_results.values()).count("FAIL")
    num_error = list(all_results.values()).count("ERROR")

    if num_failed > 0 or num_error > 0:
        summary.append(f"\nVerification complete. {num_failed} subscene(s) failed the consistency check and {num_error} subscene(s) encountered an error.")
    else:
        summary.append("\nVerification complete. All subscenes passed the consistency check.")

    logger.info('\n'.join(summary))