#!/usr/bin/env python3
# Copyright (C) 2024-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
#
# --------------------------------------------------------
# Script to pre-process the scannet++ dataset using pre-undistorted assets.
# This version assumes undistorted images and NeRF Studio's transform file are available.
# Usage:
# python3 datasets_preprocess/preprocess_scannetpp_undistorted.py --scannetpp_dir /path/to/scannetpp
# --------------------------------------------------------
import os
import sys
# Force PyOpenGL onto the EGL platform so offscreen rendering works headless
# (no X display). NOTE: this must be set BEFORE importing OpenGL, which reads
# the variable at import time.
os.environ["PYOPENGL_PLATFORM"] = "egl" 
import OpenGL
# Diagnostics: confirm which platform/implementation was actually picked up.
print("PyOpenGL platform:", os.environ.get("PYOPENGL_PLATFORM"))
print("Using OpenGL implementation at:", OpenGL.__file__)

import argparse
import os.path as osp
import re
from tqdm import tqdm
import json
import numpy as np
import cv2
import PIL.Image as Image
import logging
import rootutils
# Locate the project root (marked by a ".project-root" file) and put it on
# sys.path so the "utils" and "src" imports below resolve from any cwd.
rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)

# Configure the root logger once: INFO level, mirrored to the console and to
# an append-mode log file. Guarded so re-imports do not stack duplicate handlers.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
if not logger.handlers:
    for handler in (
        logging.StreamHandler(),
        logging.FileHandler('preprocess_undistorted.log', mode='a'),
    ):
        handler.setFormatter(formatter)
        logger.addHandler(handler)

from utils.cropping import rescale_image_depthmap
import src.dust3r.utils.geometry as geometry

# Regex to extract frame ID from filenames like "DSC01234.JPG"
# (anchored at the end, so it also works on full relative paths).
REGEXPR_DSLR = re.compile(r"DSC(?P<frameid>\d+)\.JPG$")

# Right-multiplied onto a camera-to-world pose to flip the camera's local
# Y and Z axes, i.e. convert between the OpenGL and OpenCV camera conventions.
OPENGL_TO_OPENCV = np.float32(
    [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]
)

def get_parser():
    """Build and return the command-line argument parser for this script."""
    parser = argparse.ArgumentParser(
        description="Preprocess ScanNet++ with undistorted assets."
    )
    parser.add_argument(
        "--scannetpp_dir",
        type=str,
        default="/home/liucong/data/3d/scannetpp_unzipped",
        help="Root directory of the ScanNet++ dataset.",
    )
    parser.add_argument(
        "--scannetpp_dir2",
        type=str,
        default=None,
        help="Root directory2 of the ScanNet++ dataset.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="/mnt/sda/scannetpp_processed",
        help="Directory to save the processed data.",
    )
    parser.add_argument(
        "--target_resolution",
        type=int,
        nargs=2,
        default=(576, 384),
        help="Target size for the maximum dimension of processed images. (576, 384)",
    )
    parser.add_argument(
        "--render_depth",
        action="store_true",
        help="Whether to render depth maps from the mesh.",
    )
    return parser

def get_frame_number(name):
    """Extract the frame-number string from a DSLR filename like "DSC01234.JPG".

    Raises ValueError when the basename does not match the expected pattern.
    """
    match = REGEXPR_DSLR.search(osp.basename(name))
    if match is None:
        raise ValueError(f"Could not extract frame number from {name}")
    return match["frameid"]
# Convention change applied to every NeRF Studio camera-to-world matrix:
# first flip the camera's local axes, then re-express the pose in a
# permuted/reflected world frame.

# STEP 1 (right-multiplication): flips the camera's local Y and Z axes.
T_camera = np.diag([1., -1., -1., 1.])

# STEP 2 (left-multiplication): swaps world X and Y and inverts world Z.
T_world = np.diag([1., 1., -1., 1.])
T_world[[0, 1]] = T_world[[1, 0]]


def transform_c2w(c2w):
    """Return *c2w* converted to the target camera/world conventions."""
    # Camera-axis flip first (step 1), then the world-frame change (step 2).
    return T_world @ (c2w @ T_camera)

def load_nerfstudio_transforms(transforms_path):
    """Load per-frame camera data from NeRF Studio's transforms_undistorted.json.

    Args:
        transforms_path: Path to the transforms_undistorted.json file.

    Returns:
        Dict mapping image basename -> {'path', 'frame_id', 'cam_to_world',
        'intrinsics'}. All frames share one intrinsics matrix K built from the
        file's top-level fl_x/fl_y/cx/cy keys; poses are converted with
        transform_c2w.

    Raises:
        IOError, json.JSONDecodeError, KeyError: on missing/corrupt files or
        missing required keys (handled by the caller, which skips the scene).
    """
    logger.info(f"Loading transforms from {transforms_path}")
    with open(transforms_path, 'r') as f:
        meta = json.load(f)

    # Shared intrinsics matrix K from the top-level keys (same object is
    # referenced by every frame entry).
    K = np.eye(3)
    K[0, 0] = meta['fl_x']
    K[1, 1] = meta['fl_y']
    K[0, 2] = meta['cx']
    K[1, 2] = meta['cy']

    # Train and test splits are stored under separate keys; process both in a
    # single pass. 'test_frames' may be absent in some exports, so default to
    # an empty list instead of failing the whole scene with a KeyError.
    all_frames = meta['frames'] + meta.get('test_frames', [])

    img_infos = {}
    for frame in tqdm(all_frames, desc="Parsing transforms.json", leave=False):
        filename = osp.basename(frame['file_path'])
        img_infos[filename] = {
            'path': frame['file_path'],
            'frame_id': get_frame_number(filename),
            'cam_to_world': transform_c2w(np.array(frame['transform_matrix'])),
            'intrinsics': K,
        }
    return img_infos

def _resolve_asset_path(path, primary_root, fallback_root):
    """Return an existing version of *path*, retrying with *fallback_root*
    substituted for *primary_root* when given; None if neither location exists."""
    if osp.exists(path):
        return path
    if fallback_root is not None:
        alt_path = path.replace(primary_root, fallback_root)
        if osp.exists(alt_path):
            return alt_path
    return None


def process_scenes(scannetpp_dir, output_dir, target_resolution=(576, 384), render_depth=True, scannetpp_dir2=None):
    """
    Process each scene in the Scannet++ dataset using pre-undistorted DSLR images.

    For every scene: loads the NeRF Studio transforms, rescales each RGB image
    (and its anonymization mask) to `target_resolution`, saves the rescaled RGB,
    COLMAP-convention intrinsics and camera pose, and optionally renders a
    16-bit millimeter depth map from the aligned mesh with pyrender.

    Args:
        scannetpp_dir: Dataset root (must contain a "data" subdirectory).
        output_dir: Destination root; one subdirectory is created per scene.
        target_resolution: (width, height) passed to rescale_image_depthmap.
            (Previous default of 512 was an int and crashed on tuple unpacking.)
        render_depth: If True, render per-frame depth maps from the scene mesh.
        scannetpp_dir2: Optional second dataset root tried when an asset is
            missing under scannetpp_dir.
    """
    data_dir = osp.join(scannetpp_dir, "data")
    if not osp.isdir(data_dir):
        logger.error(f"Data directory not found at {data_dir}")
        return

    # Process every scene directory found under data_dir.
    scenes = sorted(d for d in os.listdir(data_dir) if osp.isdir(osp.join(data_dir, d)))

    # Clipping planes for the pyrender depth camera.
    znear = 0.05
    zfar = 20.0

    # Imported lazily so the script also runs where pyrender/EGL is unavailable.
    if render_depth:
        import pyrender
        import trimesh

    for scene in tqdm(scenes, desc="Processing scenes"):
        scene_dir = osp.join(data_dir, scene)
        dslr_dir = osp.join(scene_dir, "dslr")

        # Resolve the pre-undistorted assets, falling back to scannetpp_dir2.
        transforms_path = _resolve_asset_path(
            osp.join(dslr_dir, "nerfstudio", "transforms_undistorted.json"),
            scannetpp_dir, scannetpp_dir2)
        rgb_dir = _resolve_asset_path(
            osp.join(dslr_dir, "resized_undistorted_images"),
            scannetpp_dir, scannetpp_dir2)
        mask_dir = _resolve_asset_path(
            osp.join(dslr_dir, "resized_undistorted_masks"),
            scannetpp_dir, scannetpp_dir2)

        # Skip (not abort): the original code called sys.exit(0) here, which
        # killed the whole run on the first incomplete scene.
        if transforms_path is None or rgb_dir is None or mask_dir is None:
            logger.warning(f"Skipping scene '{scene}': missing required undistorted assets.")
            continue

        current_render_depth = False
        if render_depth:
            ply_path = _resolve_asset_path(
                osp.join(scene_dir, "scans", "mesh_aligned_0.05.ply"),
                scannetpp_dir, scannetpp_dir2)
            if ply_path is None:
                logger.warning(f"Mesh file not found for scene '{scene}'. Skipping depth rendering.")
            else:
                try:
                    mesh_scene = trimesh.load(ply_path, force='mesh')
                    mesh = pyrender.Mesh.from_trimesh(mesh_scene, smooth=False)
                    pyrender_scene = pyrender.Scene(bg_color=[0, 0, 0, 0], ambient_light=[0.3, 0.3, 0.3])
                    pyrender_scene.add(mesh)
                    # Viewport is resized per image right before each render.
                    renderer = pyrender.OffscreenRenderer(1, 1)
                    current_render_depth = True
                except Exception as e:
                    logger.error(f"Error loading mesh for scene '{scene}': {e}. Skipping depth rendering.")

        try:
            img_infos = load_nerfstudio_transforms(transforms_path)
        except (IOError, json.JSONDecodeError, KeyError) as e:
            logger.error(f"Could not load or parse transforms for scene '{scene}': {e}. Skipping.")
            continue

        scene_outdir = osp.join(output_dir, scene)
        os.makedirs(scene_outdir, exist_ok=True)
        logger.info(f"Scene '{scene}': {len(img_infos)} frames found.")

        for rgb_filename, img_info in tqdm(img_infos.items(), desc=f"Processing images in {scene}", leave=False):
            frame_id = img_info["frame_id"]

            # file_path in transforms.json may be relative ("images/DSC...");
            # only the basename is used to locate the undistorted assets.
            rgb_path = osp.join(rgb_dir, rgb_filename)
            mask_path = osp.join(mask_dir, rgb_filename.replace(".JPG", ".png"))

            if not osp.isfile(rgb_path) or not osp.isfile(mask_path):
                logger.warning(f"RGB or mask file missing for image {rgb_path} {mask_path} in {scene}. Skipping.")
                continue

            out_rgb_path = osp.join(scene_outdir, f"{frame_id}_rgb.jpg")
            out_depth_path = osp.join(scene_outdir, f"{frame_id}_depth.png")
            out_cam_path = osp.join(scene_outdir, f"{frame_id}_cam.npz")

            try:
                rgb_img = Image.open(rgb_path)
                mask_img = Image.open(mask_path)
            except IOError as e:
                logger.warning(f"Could not open image/mask for {rgb_filename}: {e}. Skipping. {rgb_path} {mask_path}")
                continue

            # Images are pre-undistorted, so only rescaling is needed; the
            # intrinsics from transforms.json are treated as OpenCV convention.
            intrinsics_opencv = img_info["intrinsics"]

            new_w, new_h = target_resolution
            rgb_rescaled, mask_rescaled, intrinsics_rescaled_opencv = rescale_image_depthmap(
                np.array(rgb_img), np.array(mask_img), intrinsics_opencv, (new_w, new_h)
            )

            # NOTE(review): assumes rescale_image_depthmap returns a PIL image,
            # whose .size is (width, height) — confirm against utils.cropping.
            W, H = rgb_rescaled.size
            intrinsics_rescaled_colmap = geometry.opencv_to_colmap_intrinsics(intrinsics_rescaled_opencv)

            rgb_rescaled.save(out_rgb_path, quality=95)
            np.savez(out_cam_path, intrinsics=intrinsics_rescaled_colmap, pose=img_info["cam_to_world"])

            if current_render_depth:
                renderer.viewport_width, renderer.viewport_height = W, H

                fx, fy = intrinsics_rescaled_colmap[0, 0], intrinsics_rescaled_colmap[1, 1]
                cx, cy = intrinsics_rescaled_colmap[0, 2], intrinsics_rescaled_colmap[1, 2]
                cam = pyrender.camera.IntrinsicsCamera(fx, fy, cx, cy, znear=znear, zfar=zfar)

                # pyrender expects OpenGL camera convention; convert the pose.
                camera_pose = img_info["cam_to_world"] @ OPENGL_TO_OPENCV
                camera_node = pyrender_scene.add(obj=cam, pose=camera_pose)
                depth = renderer.render(pyrender_scene, flags=pyrender.RenderFlags.DEPTH_ONLY)
                pyrender_scene.remove_node(camera_node)

                # Store depth in millimeters as uint16 PNG; zero out pixels the
                # anonymization mask marks as invalid (mask value below 255).
                depth_mm = (depth * 1000).astype(np.uint16)
                depth_mm[np.array(mask_rescaled) < 255] = 0
                Image.fromarray(depth_mm).save(out_depth_path)

        if current_render_depth:
            renderer.delete()

        logger.info(f"Finished processing scene {scene}")

    print("All scenes processed.")

if __name__ == "__main__":
    # Parse CLI options and run the preprocessing pipeline.
    args = get_parser().parse_args()
    process_scenes(
        args.scannetpp_dir,
        args.output_dir,
        args.target_resolution,
        args.render_depth,
        scannetpp_dir2=args.scannetpp_dir2,
    )