#!/usr/bin/env python3
"""
Preprocess the DTU MVS dataset.

This script processes images from the DTU dataset. It iterates through each
depth map, finds the corresponding camera and all 7 lighting-condition images.
It scales the source RGB images to match the depth map resolution and saves
the processed outputs with a clean naming convention.

Usage:
    python preprocess_dtu.py --dtu_dir /path/to/dtu_training --output_dir /path/to/processed_dtu
"""
import sys
sys.path.append('D:/codes/working/3D/a3R')
sys.path.append('D:/codes/working/3D/a3R/src')
import os
import os.path as osp
import re
import sys
from tqdm import tqdm
import numpy as np
import argparse
from PIL import Image

from src.dust3r.datasets.base.base_multiview_dataset import cropping
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
import cv2


def get_parser():
    """Build and return the command-line argument parser for this script."""
    p = argparse.ArgumentParser(
        description=(
            "Preprocess the DTU dataset by resizing images to match "
            "depth maps, assuming intrinsics are already correct."
        )
    )
    # Input location of the raw dataset (mandatory).
    p.add_argument(
        "--dtu_dir",
        required=True,
        help="Root directory of the DTU dataset (e.g., 'dtu_training').",
    )
    # Destination for the processed outputs (optional, has a sane default).
    p.add_argument(
        "--output_dir",
        default="data/dtu_processed",
        help="Output directory for processed DTU data.",
    )
    return p


def main(dtu_root, output_dir):
    """Enumerate every DTU sequence and preprocess each depth view in it.

    Args:
        dtu_root: Root directory of the raw DTU dataset.
        output_dir: Directory where processed outputs are written.
    """
    print(">> Listing all sequences from the DTU dataset")
    # Sequences are discovered from the Depths/ subdirectory, one folder each.
    scenes_dir = osp.join(dtu_root, "Depths")
    sequences = [
        entry
        for entry in os.listdir(scenes_dir)
        if osp.isdir(osp.join(scenes_dir, entry))
    ]

    if not sequences:
        print(f"!! Did not find any sequences at {scenes_dir}")
        return
    print(f"   (found {len(sequences)} sequences)")

    for seq in tqdm(sequences, desc="Processing Scenes"):
        scene_outdir = osp.join(output_dir, seq)
        os.makedirs(scene_outdir, exist_ok=True)

        # Processing is driven by the depth maps; the matching RGB images
        # are looked up per view inside process_single_depth.
        depth_dir = osp.join(scenes_dir, seq)
        depth_names = sorted(
            name for name in os.listdir(depth_dir) if name.endswith(".pfm")
        )

        for depth_name in tqdm(depth_names, leave=False, desc=f"Views in {seq}"):
            process_single_depth(dtu_root, seq, depth_name, scene_outdir)

    print(f"\n>> Done, saved everything in {output_dir}/")


def scale_and_center_crop_image_only(image, target_dims):
    """Resize an RGB image down to the resolution of its depth map.

    The source image must be the same integer multiple of the target
    resolution on both axes (in DTU the rectified images are 4x the
    depth maps, e.g. 640x512 -> 160x128), so a plain resize aligns the
    two without any center crop.

    Args:
        image: Source image as an (H, W, C) numpy array.
        target_dims: ``(target_h, target_w)`` taken from the depth map shape.

    Returns:
        The image resized to ``(target_h, target_w, C)``.

    Raises:
        ValueError: If the source dimensions are not a single shared
            integer multiple of the target dimensions.
    """
    target_h, target_w = target_dims
    source_h, source_w = image.shape[:2]

    # Generalized from the original hardcoded scale of 4 and hardcoded
    # 160x128 target: derive the factor from the actual dimensions and
    # verify it is one integer shared by both axes. This also fixes the
    # original broken check `assert scaled_w == target_w, scaled_h == target_h`,
    # where the height comparison was the assert *message* and never ran,
    # and uses a raise instead of assert so validation survives `python -O`.
    if (
        source_w % target_w != 0
        or source_h % target_h != 0
        or source_w // target_w != source_h // target_h
    ):
        raise ValueError(
            f"Source size ({source_w}x{source_h}) is not an integer multiple "
            f"of target size ({target_w}x{target_h})"
        )

    # Aspect ratios match exactly, so no crop is needed — just downscale.
    return cv2.resize(image, (target_w, target_h), interpolation=cv2.INTER_LINEAR)


def process_single_depth(dtu_root, scene_id, depth_filename, out_dir):
    """Processes a single depth map and its corresponding 7 light condition images.

    For one view index this writes into ``out_dir``:
      * ``{view:04d}_cam.npz``          - intrinsics + cam2world pose
      * ``{view:04d}_depth.exr``        - depth map as float32 EXR
      * ``{view:04d}_{light}_rgb.png``  - one RGB per lighting condition (0-6),
        resized to the depth-map resolution

    Args:
        dtu_root: Root directory of the raw DTU dataset.
        scene_id: Sequence/scene folder name under Depths/ and Rectified/.
        depth_filename: Depth file name, e.g. "depth_map_0048.pfm".
        out_dir: Output directory for this scene.
    """
    # Extract view index from depth filename, e.g., "depth_map_0048.pfm" -> 48
    view_match = re.search(r"depth_map_(\d{4})\.pfm", depth_filename)
    if not view_match:
        return
    view_idx = int(view_match.group(1))  # This is the 0-based view index

    # Define output paths based on the new naming convention
    out_cam_path = osp.join(out_dir, f"{view_idx:04d}_cam.npz")
    out_depth_path = osp.join(out_dir, f"{view_idx:04d}_depth.exr")

    # If the main files are already processed, assume all 7 images are done and skip
    # NOTE(review): cam/depth are written *before* the RGB loop below, so a
    # crash mid-loop leaves a view whose missing RGBs are never retried on
    # the next run — confirm whether that trade-off is acceptable.
    if osp.isfile(out_cam_path) and osp.isfile(out_depth_path):
        return

    # --- Construct input paths ---
    depth_path = osp.join(dtu_root, "Depths", scene_id, depth_filename)
    # Camera files are shared across scenes, indexed by view (8-digit, 0-based).
    cam_path = osp.join(dtu_root, "Cameras/train", f"{view_idx:08d}_cam.txt")

    if not all(osp.exists(p) for p in [depth_path, cam_path]):
        return

    # --- Load single depth and camera data ---
    try:
        intrinsics, T_cam2world = _load_pose(cam_path, ret_44=True)
        depthmap = load_pfm_file(depth_path)
        # Zero out NaN/Inf depths so they read as "no measurement" downstream.
        depthmap[~np.isfinite(depthmap)] = 0
    except Exception as e:
        print(f"Error loading depth/camera data for {depth_path}: {e}")
        return

    # RGB images are resized to the depth map's (H, W) in the loop below.
    target_dims = depthmap.shape[:2]

    # --- Process and Save Camera and Depth Data ONCE ---
    try:
        # EXR output requires OPENCV_IO_ENABLE_OPENEXR, set at import time above.
        cv2.imwrite(out_depth_path, depthmap.astype(np.float32))
        # NOTE(review): get_center_camera presumably adjusts intrinsics/pose to
        # the depth map's framing — the helper is imported from the project and
        # not visible in this file; verify against its definition.
        intrinsics, T_cam2world = cropping.get_center_camera(intrinsics, T_cam2world, depthmap=depthmap)
        np.savez(
            out_cam_path,
            intrinsics=intrinsics.astype(np.float32),
            pose=T_cam2world.astype(np.float32),
        )
    except Exception as e:
        print(f"Error saving depth/camera for view {view_idx} in {scene_id}: {e}")
        return

    # --- Process and save each of the 7 RGB images ---
    img_dir = osp.join(dtu_root, "Rectified", scene_id)

    for light_idx in range(7):
        # Image filenames in DTU are 1-based (e.g., rect_049_...).
        # We construct the prefix to find the correct file.
        img_filename = f"rect_{view_idx + 1:03d}_{light_idx}_r5000.png"
        rgb_path = osp.join(img_dir, img_filename)

        if not osp.exists(rgb_path):
            print(f'{rgb_path} not exists')
            continue

        try:
            # OpenCV loads BGR; convert to RGB before saving with PIL.
            color_image = cv2.cvtColor(cv2.imread(rgb_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
            image_out = scale_and_center_crop_image_only(color_image, target_dims)
            out_rgb_path = osp.join(out_dir, f"{view_idx:04d}_{light_idx}_rgb.png")
            Image.fromarray(image_out).save(out_rgb_path)
        except Exception as e:
            print(f"Error processing {rgb_path}: {e}")
            continue


def _load_pose(path, ret_44=False):
    with open(path) as f:
        RT = np.loadtxt(f, skiprows=1, max_rows=4, dtype=np.float32)
        K = np.loadtxt(f, skiprows=2, max_rows=3, dtype=np.float32)

    assert RT.shape == (4, 4)
    RT = np.linalg.inv(RT)  # world2cam to cam2world

    assert K.shape == (3, 3)

    if ret_44:
        return K, RT
    return K, RT[:3, :3], RT[:3, 3]


def load_pfm_file(file_path):
    """Load a PFM file and return it as a numpy float array.

    PFM stores pixel rows bottom-to-top, so the data is flipped
    vertically into the conventional top-to-bottom order before
    returning.

    Args:
        file_path: Path to the .pfm file.

    Returns:
        A writable float32 array of shape (H, W, 3) for color ("PF")
        files, or (H, W) for grayscale ("Pf") files.

    Raises:
        ValueError: If the header is malformed or the payload does not
            contain enough pixel data.
    """
    with open(file_path, "rb") as file:
        header = file.readline().decode("UTF-8").strip()

        if header == "PF":
            is_color = True
        elif header == "Pf":
            is_color = False
        else:
            raise ValueError("The provided file is not a valid PFM file.")

        dimensions_line = file.readline().decode("UTF-8").strip()
        dimensions = re.match(r"^(\d+)\s+(\d+)$", dimensions_line)
        if not dimensions:
            raise ValueError("Invalid PFM header format.")
        img_width, img_height = map(int, dimensions.groups())

        # The scale line doubles as an endianness flag: negative means
        # little-endian, positive means big-endian.
        endian_scale = float(file.readline().decode("UTF-8").strip())
        dtype = "<f" if endian_scale < 0 else ">f"

        # Read exactly the expected number of samples; frombuffer raises
        # ValueError on a truncated payload. The original wrapped this in
        # `except Exception: sys.exit(0)`, which silently terminated the
        # whole run with a *success* exit code and bypassed the caller's
        # error handling — errors now propagate instead.
        num_samples = img_width * img_height * (3 if is_color else 1)
        img_data = np.frombuffer(file.read(), dtype=dtype, count=num_samples)

        if is_color:
            img_data = img_data.reshape(img_height, img_width, 3)
        else:
            img_data = img_data.reshape(img_height, img_width)

        # Flip rows (PFM is bottom-up). ascontiguousarray also copies the
        # read-only frombuffer view into a writable array, matching the
        # writable output of the cv2.flip call this replaces.
        img_data = np.ascontiguousarray(img_data[::-1])
    return img_data


if __name__ == "__main__":
    # CLI entry point: parse arguments and run the preprocessing pipeline.
    cli_args = get_parser().parse_args()
    main(cli_args.dtu_dir, cli_args.output_dir)