#!/usr/bin/env python3
"""
Preprocess the BlendedMVS dataset.

This script processes images from the BlendedMVS dataset, converting camera parameters,
resizing images, and saving processed RGB images, depth maps, and camera metadata into
an output directory structured to match the Hypersim processed format.

Usage:
    python preprocess_blendedmvs.py --blendedmvs_dir /path/to/blendedMVS \
                                    --output_dir /path/to/processed_blendedmvs
"""
import os
import os.path as osp
import re
import shutil
import sys
from tqdm import tqdm
import numpy as np
import argparse

os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
import cv2

from utils.parallel import parallel_threads
from utils import cropping  # noqa
from utils.cropping import crop_resize_if_necessary


def get_parser():
    """Build the command-line argument parser for this preprocessing script."""
    description = (
        "Preprocess the BlendedMVS dataset by converting camera parameters, "
        "resizing images, and saving processed outputs in Hypersim format."
    )
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "--blendedmvs_dir",
        required=True,
        help="Root directory of the BlendedMVS dataset.",
    )
    parser.add_argument(
        "--output_dir",
        default="data/blendedmvs_processed",
        help="Output directory for processed BlendedMVS data.",
    )
    return parser


def main(db_root, output_dir):
    """Process every BlendedMVS sequence under ``db_root`` into ``output_dir``.

    Sequence folders are identified by their 24-character hash names. Each
    view of each sequence is converted by ``process_single_view`` and written
    to ``<output_dir>/<sequence>/``.
    """
    print(">> Listing all sequences")
    # BlendedMVS scene directories are named with 24-character hashes.
    sequences = [f for f in os.listdir(db_root) if len(f) == 24]
    # the full dataset should contain 502 scenes
    assert sequences, f"did not find any sequences at {db_root}"
    print(f"   (found {len(sequences)} sequences)")

    for seq in tqdm(sequences):
        scene_outdir = osp.join(output_dir, seq)
        os.makedirs(scene_outdir, exist_ok=True)

        # Process each sequence
        root = osp.join(db_root, seq)
        cam_dir = osp.join(root, "cams")
        # One "<img_name>_cam.txt" per view; skip the "pair.txt" adjacency file.
        cam_files = [f for f in os.listdir(cam_dir) if not f.startswith("pair")]

        # frame_id is the view's index within this sequence.
        # (Previously this loop reused `i`, shadowing the outer loop variable.)
        for frame_id, cam_file in enumerate(tqdm(cam_files, leave=False)):
            img_name = cam_file[:-8]  # strip the "_cam.txt" suffix
            process_single_view(root, img_name, scene_outdir, frame_id)
        # shutil.rmtree(root)  # optionally delete raw data once processed

    print(f">> Done, saved everything in {output_dir}/")


def process_single_view(root, img_name, out_dir, frame_id):
    """Convert one BlendedMVS view (RGB, depth, camera) to Hypersim format.

    Reads ``blended_images/<img_name>.jpg``, ``rendered_depth_maps/<img_name>.pfm``
    and ``cams/<img_name>_cam.txt`` under ``root``, resizes everything to
    512x384, and writes ``<img_name>_rgb.png``, ``<img_name>_depth.exr`` and
    ``<img_name>_cam.npz`` into ``out_dir``.

    Returns True on success, False if the RGB image is missing or resizing
    fails. ``frame_id`` is accepted for interface compatibility but unused.
    """
    rgb_path = osp.join(root, "blended_images", img_name + ".jpg")
    if not os.path.exists(rgb_path):
        # Some views only ship a camera file; skip them quietly.
        return False

    # Camera intrinsics (3x3) and cam2world pose (4x4).
    intrinsics, T_cam2world = _load_pose(
        osp.join(root, "cams", img_name + "_cam.txt"), ret_44=True
    )

    # OpenCV decodes to BGR; convert to RGB for downstream consumers.
    color_image = cv2.cvtColor(
        cv2.imread(rgb_path, cv2.IMREAD_COLOR),
        cv2.COLOR_BGR2RGB,
    )
    depthmap = load_pfm_file(osp.join(root, "rendered_depth_maps", img_name + ".pfm"))

    # BlendedMVS frames are expected to have a 4:3 aspect ratio.
    H, W = color_image.shape[:2]
    assert H * 4 == W * 3

    try:
        image, depth, intrinsics_out = crop_resize_if_necessary(
            color_image, depthmap, intrinsics, (512, 384), crop=False
        )
    except Exception as e:
        print(f"Error processing {img_name}: {e}")
        return False

    # Output paths follow the Hypersim processed naming convention.
    out_rgb_path = osp.join(out_dir, f"{img_name}_rgb.png")
    out_depth_path = osp.join(out_dir, f"{img_name}_depth.exr")
    out_cam_path = osp.join(out_dir, f"{img_name}_cam.npz")

    image.save(out_rgb_path)  # PIL image returned by crop_resize_if_necessary
    # EXR writing requires OPENCV_IO_ENABLE_OPENEXR (set at module import).
    cv2.imwrite(out_depth_path, depth)
    # Save camera parameters
    np.savez(
        out_cam_path,
        intrinsics=intrinsics_out,
        pose=T_cam2world.astype(np.float32),
    )
    return True


def _load_pose(path, ret_44=False):
    f = open(path)
    RT = np.loadtxt(f, skiprows=1, max_rows=4, dtype=np.float32)
    assert RT.shape == (4, 4)
    RT = np.linalg.inv(RT)  # world2cam to cam2world

    K = np.loadtxt(f, skiprows=2, max_rows=3, dtype=np.float32)
    assert K.shape == (3, 3)

    if ret_44:
        return K, RT
    return K, RT[:3, :3], RT[:3, 3]


def load_pfm_file(file_path):
    """Load a PFM file as a float32 numpy array.

    Supports both the grayscale ("Pf") and color ("PF") variants. PFM stores
    scanlines bottom-to-top, so the data is flipped vertically before being
    returned.

    Returns an (H, W) or (H, W, 3) float32 array.
    Raises ValueError if the header is malformed; I/O errors propagate to the
    caller (previously a read failure called ``sys.exit(0)``, terminating the
    whole run with a SUCCESS exit code).
    """
    with open(file_path, "rb") as file:
        header = file.readline().decode("UTF-8").strip()
        if header == "PF":
            is_color = True
        elif header == "Pf":
            is_color = False
        else:
            raise ValueError("The provided file is not a valid PFM file.")

        dimensions_line = file.readline().decode("UTF-8").strip()
        dimensions = re.match(r"^(\d+)\s+(\d+)$", dimensions_line)
        if not dimensions:
            raise ValueError("Invalid PFM header format.")
        img_width, img_height = map(int, dimensions.groups())

        # The sign of the scale line encodes endianness: negative -> little.
        endian_scale = float(file.readline().decode("UTF-8").strip())
        dtype = "<f" if endian_scale < 0 else ">f"

        img_data = np.frombuffer(file.read(), dtype=dtype)

        shape = (img_height, img_width, 3) if is_color else (img_height, img_width)
        img_data = np.reshape(img_data, shape)

    # PFM rows run bottom-to-top: flip vertically. Copy so the result is a
    # writable array (np.frombuffer yields a read-only view of the buffer).
    return np.flipud(img_data).copy()


if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
    main(args.blendedmvs_dir, args.output_dir)