#!/usr/bin/env python3
"""
Preprocess the WildRGB-D dataset.

This script reads camera parameters from metadata files, processes RGB images,
depth maps, and camera poses, applies cropping and rescaling, and saves the
processed data into an output directory. Processing is done per scene and per
frame.

Usage:
    python preprocess_wildrgbd2.py --wildrgbd_dir /path/to/wildrgbd \
                                  --output_dir /path/to/processed_wildrgbd \
                                  --img_size 512
"""
import argparse
import json
import os
import multiprocessing
import sys

import cv2
import numpy as np
from PIL import Image
from tqdm import tqdm
from utils.cropping import crop_resize_if_necessary
from utils.rotate import rotate

# Ensure OpenCV supports necessary formats (OpenEXR).
# NOTE(review): cv2 reads this environment variable at import time, and
# `import cv2` happens above — so setting it here is likely a no-op.
# Move this assignment above the cv2 import for it to take effect; confirm
# whether EXR I/O is actually needed by this script.
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"

# Shell snippets for subsampling frames on disk (keep only every 20th frame):
#   count frames NOT divisible by 20:
#     find -type f -name "???[0-9]?.png" ! -name "???[0,2,4,6,8]0.png" | wc -l
#   count frames divisible by 20:
#     find -type f -name "???[0,2,4,6,8]0.png" | wc -l
#   delete frames NOT divisible by 20:
#     find -type f -name "???[0-9]?.png" ! -name "???[0,2,4,6,8]0.png" -exec rm -f {} +
def get_parser():
    """Build the command-line argument parser for this preprocessing script.

    Returns:
        argparse.ArgumentParser: parser exposing ``--wildrgbd_dir``,
        ``--output_dir`` and ``--img_size``.
    """
    parser = argparse.ArgumentParser(
        # Fix: the previous description leaked debug shell commands and
        # machine-specific example paths into the --help output.
        description="Preprocess the WildRGB-D dataset: crop/resize RGB images, "
        "depth maps and masks, and export per-frame camera intrinsics and poses."
    )
    parser.add_argument(
        "--wildrgbd_dir",
        default="/lc/data/3D/wildrgbd/original",
        help="Root directory of the WildRGB-D dataset.",
    )
    parser.add_argument(
        "--output_dir",
        default="/lc/data/3D/wildrgbd/scalerotate",
        help="Output directory for processed WildRGB-D data.",
    )
    parser.add_argument(
        "--img_size",
        type=int,
        nargs=2,
        default=(384, 512),
        # Fix: the option takes TWO integers (a full target size), not a
        # single "maximum dimension" as the old help text claimed.
        help="Target size (two integers) for processed images.",
    )
    return parser


def process_scene(args):
    """Preprocess one WildRGB-D scene.

    Reads the scene's metadata, per-frame RGB/depth/mask images and camera
    poses, crops/resizes and rotates each frame, and writes the results
    (``*_rgb.png``, ``*_depth.png``, ``*_mask.png``, ``*_cam.npz``) under
    ``outdir/category/scene_name``.

    Parameters
    ----------
    args : tuple
        ``(rootdir, outdir, img_size, category, scene_name)`` — packed into a
        single tuple so the function is directly usable with
        ``multiprocessing.Pool.map``.
    """
    rootdir, outdir, img_size, category, scene_name = args
    scene_outdir = os.path.join(outdir, category, scene_name)
    os.makedirs(scene_outdir, exist_ok=True)
    scene_dir = os.path.join(rootdir, category, 'scenes', scene_name)

    # Read metadata (a JSON file despite having no extension).
    metadata_file = os.path.join(scene_dir, "metadata")
    if not os.path.exists(metadata_file):
        print(f"Metadata file {metadata_file} does not exist.")
        return
    with open(metadata_file, "r") as f:
        metadata = json.load(f)

    # Extract camera intrinsics. The stored K is transposed after reshape,
    # i.e. the metadata stores it column-major.
    K = np.array(metadata["K"]).reshape(3, 3).T
    fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]
    camera_intrinsics = np.array(
        [[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=np.float32
    )

    # Read camera-to-world poses: one row per frame; column 0 is the frame id,
    # the remaining 16 values are a flattened 4x4 matrix.
    camera_to_world_path = os.path.join(scene_dir, "cam_poses.txt")
    if not os.path.exists(camera_to_world_path):
        print(f"Camera poses file {camera_to_world_path} does not exist.")
        return
    camera_to_world_content = np.genfromtxt(camera_to_world_path)
    camera_to_world = camera_to_world_content[:, 1:].reshape(-1, 4, 4).astype(np.float32)

    # Frame ids come from the RGB files actually present on disk (the dataset
    # may have been subsampled beforehand; see the find snippets at file top).
    frame_ids = sorted(
        int(f.removesuffix('.png'))
        for f in os.listdir(os.path.join(scene_dir, "rgb"))
        if f.endswith(".png")
    )

    # Process each frame.
    for frame_id in tqdm(frame_ids, desc=f"Processing {category}/{scene_name}"):
        rgb_path = os.path.join(scene_dir, "rgb", f"{frame_id:05d}.png")
        depth_path = os.path.join(scene_dir, "depth", f"{frame_id:05d}.png")
        mask_path = os.path.join(scene_dir, "masks", f"{frame_id:05d}.png")

        # Fix: the original called sys.exit(0) here, which killed the whole
        # multi-scene run with a *success* exit code and made the following
        # `continue` unreachable. Skip the incomplete frame instead.
        if not (os.path.exists(rgb_path) and os.path.exists(depth_path) and os.path.exists(mask_path)):
            print(rgb_path, depth_path, mask_path)
            print(f"Missing files for frame {frame_id} in {scene_name}.")
            continue

        # Load RGB, raw depth (16-bit PNG) and mask (normalized to [0, 1]).
        input_rgb_image = Image.open(rgb_path).convert("RGB")
        input_depthmap = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED).astype(np.float64)
        input_mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.0
        # Stack depth and mask so both receive identical crop/resize/rotate.
        depth_mask = np.stack((input_depthmap, input_mask), axis=-1)
        H, W = input_depthmap.shape
        assert depth_mask.shape == (H, W, 2)

        intrinsics_new = camera_intrinsics.copy()
        # NOTE(review): poses are indexed by raw frame id, which assumes
        # cam_poses.txt contains one row per ORIGINAL frame, in order — the
        # commented-out frame-id column read above suggests this; confirm
        # against the dataset layout before relying on it.
        camera_pose = camera_to_world[frame_id]

        try:
            input_rgb_image, depth_mask, intrinsics_new = crop_resize_if_necessary(
                input_rgb_image, depth_mask, intrinsics_new, img_size, info=frame_id, crop=False
            )
        except Exception as e:
            # Fix: a single failed frame previously terminated the entire job
            # via sys.exit(0); log and skip the frame instead.
            print(f"Error processing {frame_id}: {e}")
            continue

        # Rotate image, depth/mask, intrinsics and pose into the target
        # orientation (see utils.rotate for the convention).
        input_rgb_image, depth_mask, intrinsics_new, camera_pose, mask_rot = rotate(
            input_rgb_image, depth_mask, intrinsics_new, camera_pose, mask=None
        )

        input_depthmap = depth_mask[:, :, 0]
        input_mask = depth_mask[:, :, 1]

        # Save processed outputs.
        out_rgb_path = os.path.join(scene_outdir, f"{frame_id:06d}_rgb.png")
        out_depth_path = os.path.join(scene_outdir, f"{frame_id:06d}_depth.png")
        out_mask_path = os.path.join(scene_outdir, f"{frame_id:06d}_mask.png")
        out_cam_path = os.path.join(scene_outdir, f"{frame_id:06d}_cam.npz")

        os.makedirs(os.path.dirname(out_rgb_path), exist_ok=True)
        if not isinstance(input_rgb_image, Image.Image):
            input_rgb_image = Image.fromarray(input_rgb_image)
        input_rgb_image.save(out_rgb_path)
        cv2.imwrite(out_depth_path, input_depthmap.astype(np.uint16))
        cv2.imwrite(out_mask_path, (input_mask * 255).astype(np.uint8))
        np.savez(
            out_cam_path,
            intrinsics=intrinsics_new.astype(np.float32),
            pose=camera_pose.astype(np.float32),
        )


def main():
    """Entry point: walk every ``category/scenes/scene`` directory under
    ``--wildrgbd_dir`` and preprocess each scene sequentially."""
    parser = get_parser()
    args = parser.parse_args()

    rootdir = args.wildrgbd_dir
    outdir = args.output_dir
    img_size = args.img_size
    os.makedirs(outdir, exist_ok=True)

    # Each top-level directory under the dataset root is an object category.
    categories = sorted(
        dirname
        for dirname in os.listdir(rootdir)
        if os.path.isdir(os.path.join(rootdir, dirname))
    )

    for category in categories:
        category_dir = os.path.join(rootdir, category, 'scenes')
        scenes = sorted(
            dirname
            for dirname in os.listdir(category_dir)
            if os.path.isdir(os.path.join(category_dir, dirname))
        )
        # Scenes are processed sequentially. process_scene takes one packed
        # tuple precisely so this loop can be swapped for
        # multiprocessing.Pool.map without changing the worker.
        for scene in tqdm(scenes):
            process_scene((rootdir, outdir, img_size, category, scene))

# Run preprocessing only when executed as a script, not when imported.
if __name__ == "__main__":
    main()