#!/usr/bin/env python3
"""
Preprocess the DTU MVS dataset.

This script processes images from the DTU dataset. It assumes the camera
parameters from 'Cameras/train' already match the depth map resolution.
It scales and center-crops the source RGB image to match the dimensions
of the corresponding depth map and saves the processed outputs.

Usage:
    python preprocess_dtu.py --dtu_dir /path/to/dtu_training --output_dir /path/to/processed_dtu
"""
import sys
sys.path.append('D:/codes/working/3D/a3R')
sys.path.append('D:/codes/working/3D/a3R/src')
import os
import os.path as osp
import re
import sys
from tqdm import tqdm
import numpy as np
import argparse
from PIL import Image

from src.dust3r.datasets.base.base_multiview_dataset import cropping
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
import cv2


def get_parser():
    """Construct and return the command-line interface for this script."""
    description = (
        "Preprocess the DTU dataset by resizing images to match "
        "depth maps, assuming intrinsics are already correct."
    )
    parser = argparse.ArgumentParser(description=description)
    # Root of the raw dataset is mandatory; the output location has a default.
    parser.add_argument(
        "--dtu_dir",
        required=True,
        help="Root directory of the DTU dataset (e.g., 'dtu_training').",
    )
    parser.add_argument(
        "--output_dir",
        default="data/dtu_processed",
        help="Output directory for processed DTU data.",
    )
    return parser


def main(dtu_root, output_dir):
    """Walk every scene under ``<dtu_root>/Rectified`` and preprocess each view.

    Args:
        dtu_root (str): Root directory of the raw DTU dataset.
        output_dir (str): Directory that receives the processed outputs.
    """
    print(">> Listing all sequences from the DTU dataset")
    rectified_dir = osp.join(dtu_root, "Rectified")
    sequences = [
        entry
        for entry in os.listdir(rectified_dir)
        if osp.isdir(osp.join(rectified_dir, entry))
    ]

    if not sequences:
        print(f"!! Did not find any sequences at {rectified_dir}")
        return
    print(f"   (found {len(sequences)} sequences)")

    for seq in tqdm(sequences, desc="Processing Scenes"):
        scene_outdir = osp.join(output_dir, seq)
        os.makedirs(scene_outdir, exist_ok=True)

        # Only the rectified PNG images are views; sort for deterministic order.
        image_dir = osp.join(rectified_dir, seq)
        image_files = sorted(f for f in os.listdir(image_dir) if f.endswith(".png"))

        for img_file in tqdm(image_files, leave=False, desc=f"Views in {seq}"):
            process_single_view(dtu_root, seq, img_file, scene_outdir)

    print(f"\n>> Done, saved everything in {output_dir}/")

def scale_and_center_crop_image_only(image, target_dims):
    """
    Scale an image to cover the target size, then center-crop the excess.

    The scale factor preserves the aspect ratio and is chosen so the resized
    image covers the target in both dimensions; any overhang is removed by a
    symmetric center crop.  For the standard DTU rectified inputs (640x512
    image, 160x128 depth map) this reduces to a plain 1/4 resize with no
    crop, matching the previous hard-coded behavior.

    Args:
        image (np.ndarray): The source RGB image, shape (H, W, 3).
        target_dims (tuple): The target dimensions as (H, W).

    Returns:
        np.ndarray: The processed image of shape (target_h, target_w, 3).
    """
    target_h, target_w = target_dims
    source_h, source_w, _ = image.shape

    # Cover the target in both dimensions while preserving aspect ratio.
    # (Previously hard-coded to scale = 1/4 with asserts pinning 160x128;
    # the old `assert scaled_w == target_w, scaled_h == target_h` also only
    # checked the width — the height comparison was the assert *message*.)
    scale = max(target_w / source_w, target_h / source_h)

    # Rounding may undershoot by a pixel; clamp so the crop below is valid.
    scaled_w = max(target_w, int(round(source_w * scale)))
    scaled_h = max(target_h, int(round(source_h * scale)))

    image_scaled = cv2.resize(image, (scaled_w, scaled_h), interpolation=cv2.INTER_LINEAR)

    # Symmetric center crop down to the exact target size.
    crop_x = (scaled_w - target_w) // 2
    crop_y = (scaled_h - target_h) // 2
    return image_scaled[crop_y : crop_y + target_h, crop_x : crop_x + target_w]

def process_single_view(dtu_root, scene_id, img_filename, out_dir):
    """Process one DTU view and write the RGB/depth/camera outputs.

    Loads the rectified RGB image, the PFM depth map, and the camera file,
    resizes the RGB image to the depth-map resolution, and saves the three
    results into ``out_dir``. Views that are already processed, unmatched,
    incomplete on disk, or that fail to load are silently skipped.

    Args:
        dtu_root (str): Root directory of the raw DTU dataset.
        scene_id (str): Scene folder name under ``Rectified/``.
        img_filename (str): File name of the rectified RGB image (``.png``).
        out_dir (str): Output directory for this scene.
    """
    img_name_base = img_filename.replace(".png", "")

    out_cam_path = osp.join(out_dir, f"{img_name_base}_cam.npz")
    if osp.isfile(out_cam_path):
        return  # Already processed

    # File names look like "rect_###_..."; the view index there is 1-based.
    view_match = re.search(r"rect_(\d+)_", img_filename)
    if not view_match:
        return
    view_idx = int(view_match.group(1)) - 1

    # Construct paths using the pre-processed camera files
    rgb_path = osp.join(dtu_root, "Rectified", scene_id, img_filename)
    depth_path = osp.join(dtu_root, "Depths", scene_id, f"depth_map_{view_idx:04d}.pfm")
    # --- Use the camera files that are assumed to match the depth map ---
    cam_path = osp.join(dtu_root, "Cameras/train", f"{view_idx:08d}_cam.txt")

    if not all(osp.exists(p) for p in [rgb_path, depth_path, cam_path]):
        return

    # Load all data; on failure skip this view rather than abort the run.
    try:
        intrinsics, T_cam2world = _load_pose(cam_path, ret_44=True)
        color_image = cv2.cvtColor(cv2.imread(rgb_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
        depthmap = load_pfm_file(depth_path)
        depthmap[~np.isfinite(depthmap)] = 0  # zero-out NaN/Inf depth values
    except Exception as e:
        print(f"Error loading data for {rgb_path}: {e}")
        return

    # --- Process the RGB image ONLY to match the depth map size ---
    try:
        target_dims = depthmap.shape[:2]  # Get (H, W) from depth map
        image_out = scale_and_center_crop_image_only(color_image, target_dims)
    except Exception as e:
        # BUGFIX: was `sys.exit(0)` — that aborted the whole run with a
        # *success* exit code on the first bad image. Skip the view instead,
        # consistent with the other failure paths in this function.
        print(f"Error processing {img_filename}: {e}")
        return

    intrinsics, T_cam2world = cropping.get_center_camera(intrinsics, T_cam2world, depthmap=depthmap)

    # --- Save processed data ---
    out_rgb_path = osp.join(out_dir, f"{img_name_base}_rgb.png")
    out_depth_path = osp.join(out_dir, f"{img_name_base}_depth.exr")

    Image.fromarray(image_out).save(out_rgb_path)
    cv2.imwrite(out_depth_path, depthmap.astype(np.float32))

    # Save camera parameters without modification
    np.savez(
        out_cam_path,
        intrinsics=intrinsics.astype(np.float32),
        pose=T_cam2world.astype(np.float32),
    )


def _load_pose(path, ret_44=False):
    f = open(path)
    RT = np.loadtxt(f, skiprows=1, max_rows=4, dtype=np.float32)
    assert RT.shape == (4, 4)
    RT = np.linalg.inv(RT)  # world2cam to cam2world

    K = np.loadtxt(f, skiprows=2, max_rows=3, dtype=np.float32)
    assert K.shape == (3, 3)

    if ret_44:
        return K, RT
    return K, RT[:3, :3], RT[:3, 3]


def load_pfm_file(file_path):
    """Load a PFM image file and return it as a numpy array.

    Args:
        file_path (str): Path to the ``.pfm`` file.

    Returns:
        np.ndarray: ``(H, W, 3)`` float32 array for color PFMs ("PF"),
        ``(H, W)`` for grayscale ("Pf"). Rows are flipped so that index 0
        is the top of the image (PFM stores scanlines bottom-to-top).

    Raises:
        ValueError: If the header is not a valid PFM header.
    """
    with open(file_path, "rb") as file:
        header = file.readline().decode("UTF-8").strip()

        if header == "PF":
            is_color = True
        elif header == "Pf":
            is_color = False
        else:
            raise ValueError("The provided file is not a valid PFM file.")

        dimensions_line = file.readline().decode("UTF-8").strip()
        dimensions = re.match(r"^(\d+)\s+(\d+)$", dimensions_line)

        if dimensions:
            img_width, img_height = map(int, dimensions.groups())
        else:
            raise ValueError("Invalid PFM header format.")

        # The sign of the scale line encodes endianness.
        endian_scale = float(file.readline().decode("UTF-8").strip())
        if endian_scale < 0:
            dtype = "<f"  # little-endian
        else:
            dtype = ">f"  # big-endian

        # BUGFIX: read errors previously hit `except Exception: sys.exit(0)`,
        # terminating the whole run with a *success* exit code. Let errors
        # propagate; callers already catch and report per-view failures.
        img_data = np.frombuffer(file.read(), dtype=dtype)

        if is_color:
            img_data = np.reshape(img_data, (img_height, img_width, 3))
        else:
            img_data = np.reshape(img_data, (img_height, img_width))

        # Flip vertically (equivalent to the previous `cv2.flip(img, 0)`).
        # `.copy()` yields a writable array — `np.frombuffer` views are
        # read-only, and callers mask invalid depth values in place.
        img_data = np.flipud(img_data).copy()

    return img_data


if __name__ == "__main__":
    # Parse the CLI arguments and run the full preprocessing pipeline.
    cli_args = get_parser().parse_args()
    main(cli_args.dtu_dir, cli_args.output_dir)


