#!/usr/bin/env python3
"""
Process HDF5 files and convert data to zarr format.
"""

import argparse
import os
import numpy as np
import zarr
import h5py
from termcolor import cprint
from os import listdir
from os.path import join
from tqdm import tqdm
import re
import transforms3d.quaternions as t3d_quat
from sklearn.cluster import DBSCAN
from dataclasses import dataclass
import fpsample
from typing import List
from realsense_camera import create_point_cloud_from_depth_image, transform_point_cloud, CameraInfo
import sys
sys.path.append('pcd_visualizer')
from pcd_visualizer.pointcloud import pcd_visualizer
import cv2

# Third Party
import torch

from curobo.types.base import TensorDeviceType
from curobo.types.math import Pose
from curobo.types.robot import RobotConfig
from curobo.util_file import get_robot_configs_path, join_path, load_yaml
from curobo.wrap.reacher.ik_solver import IKSolver, IKSolverConfig

# Seed NumPy's global RNG so the random point subsampling below is reproducible.
np.random.seed(0)

# Robot base pose expressed in the world frame (metres).
ROBOT_POSITION = np.array([0, -0.65, 0.75])
# NOTE(review): the trailing comment claims (qx, qy, qz, qw) order, but this
# array is passed straight to transforms3d.quaternions.quat2mat, which expects
# (qw, qx, qy, qz). With w and z both 0.707 the two readings are different
# rotations (90 deg about x vs about z) -- confirm the intended convention.
ROBOT_QUATERNION = np.array([0.707, 0, 0, 0.707]) # qx, qy, qz, qw

# ################################ Hyperparameters for pcd_process ##################################
@dataclass
class PCDProcConfig:
    """Hyperparameters for point-cloud cropping, clustering and sampling."""

    random_drop_points: int        # number of points kept by the initial random subsample
    outlier_distance: float        # DBSCAN eps (metres) used during outlier removal
    outlier_count: int             # clusters smaller than this are discarded as outliers
    n_points: int                  # final point count after farthest-point sampling
    work_space: List[List[float]]  # [[x_min, x_max], [y_min, y_max], [z_min, z_max]] crop box

# Shared default configuration used by pcd_crop / pcd_cluster.
pcd_config = PCDProcConfig(
    random_drop_points=5000,
    outlier_distance=0.012,
    outlier_count=50,
    n_points=1024,
    work_space=[
        [0.0, 1.0],      # x bounds; x_min is replaced per-frame by the EE x position
        [-0.66, 0.66],   # y bounds
        [-0.02, 0.45]    # z bounds
    ])
# ###############################################################################################

def transform_extrinsic_from_world_to_robot(camera_extrinsic_world):
    """
    Re-express a camera extrinsic matrix in the robot's coordinate frame.

    Args:
        camera_extrinsic_world (np.ndarray): Camera extrinsic in world coordinates (3, 4).

    Returns:
        np.ndarray: Camera extrinsic in robot coordinates (3, 4).
    """
    # Homogeneous world->robot transform built from the fixed base pose.
    base_pose = np.eye(4)
    base_pose[:3, :3] = t3d_quat.quat2mat(ROBOT_QUATERNION)
    base_pose[:3, 3] = ROBOT_POSITION

    # Lift the 3x4 extrinsic to a full 4x4 homogeneous matrix.
    camera_pose = np.eye(4)
    camera_pose[:3, :] = camera_extrinsic_world

    # robot_T_camera = inv(world_T_robot) @ world_T_camera, truncated back to 3x4.
    return (np.linalg.inv(base_pose) @ camera_pose)[:3, :]

def transform_ee_pose_from_world_to_robot(end_pose):
    """
    Batch convert end-effector poses from world to robot coordinate system.

    Args:
        end_pose (np.ndarray): End-effector pose(s) in world coordinates,
            shape (T, 7) or (7,), laid out as [x, y, z] + 4-element quaternion.
            The quaternion is fed to transforms3d, which expects (w, x, y, z)
            order -- the original docstring's "[x, y, z, w, x, y, z]" is
            ambiguous; confirm with the data producer.

    Returns:
        np.ndarray: Pose(s) in robot coordinates, shape (T, 7); a single input
        pose is returned as shape (7,).
    """
    end_pose = np.asarray(end_pose)
    if end_pose.ndim == 1:
        end_pose = end_pose[None, :]
    T = end_pose.shape[0]

    # Fixed world->robot transform from the robot base pose; invert once.
    world_T_robot = np.eye(4)
    world_T_robot[:3, :3] = t3d_quat.quat2mat(ROBOT_QUATERNION)
    world_T_robot[:3, 3] = ROBOT_POSITION
    world_T_robot_inv = np.linalg.inv(world_T_robot)

    robot_poses = []
    for i in range(T):
        world_ee_pos = end_pose[i, :3]  # [x, y, z]

        # BUG FIX: the quaternion was wrapped as np.array([...]) producing a
        # (1, 4) array; transforms3d's quat2mat unpacks `w, x, y, z = q` and
        # fails on a 2-D input. Pass the flat 4-vector instead.
        world_ee_quat_wxyz = end_pose[i, 3:7]
        world_T_ee = np.eye(4)
        world_T_ee[:3, :3] = t3d_quat.quat2mat(world_ee_quat_wxyz)
        world_T_ee[:3, 3] = world_ee_pos

        robot_T_ee = world_T_robot_inv @ world_T_ee
        robot_pos_ee = robot_T_ee[:3, 3]
        robot_quat_ee_wxyz = t3d_quat.mat2quat(robot_T_ee[:3, :3])

        robot_poses.append(np.concatenate([robot_pos_ee, robot_quat_ee_wxyz]))
    robot_poses = np.stack(robot_poses, axis=0)
    if robot_poses.shape[0] == 1:
        return robot_poses[0]
    return robot_poses

def pcd_crop(points, endpose, cfg=pcd_config):
    """
    Crop a point cloud to the workspace box, with the near x-bound replaced by
    the end-effector's current x position.

    Args:
        points (np.ndarray): Point cloud data (N, 6) - [x, y, z, r, g, b].
        endpose (np.ndarray): End-effector pose in robot coordinates (7,);
            only endpose[0] (the x coordinate) is used here.
        cfg (PCDProcConfig): Configuration parameters (read-only).

    Returns:
        np.ndarray: Cropped point cloud (subset of rows of `points`).
    """
    # BUG FIX: the original wrote endpose[0] into cfg.work_space[0][0] in
    # place, permanently mutating the shared module-level pcd_config on every
    # call. Read the bounds into locals instead and leave cfg untouched.
    (_, x_max), (y_min, y_max), (z_min, z_max) = cfg.work_space
    x_min = endpose[0]
    inside = ((points[..., 0] > x_min) & (points[..., 0] < x_max) &
              (points[..., 1] > y_min) & (points[..., 1] < y_max) &
              (points[..., 2] > z_min) & (points[..., 2] < z_max))
    return points[inside]

def pcd_cluster(points, cfg=pcd_config):
    """
    Denoise and downsample a point cloud: random subsample, DBSCAN-based
    outlier removal, then farthest-point sampling to a fixed size.

    Args:
        points (np.ndarray): Point cloud data (N, 6) - [x, y, z, r, g, b].
        cfg (PCDProcConfig): Configuration parameters.

    Returns:
        np.ndarray: Sampled point cloud with cfg.n_points rows.
    """
    RANDOM_DROP_POINTS = cfg.random_drop_points
    OUTLIER_DISTANCE = cfg.outlier_distance
    OUTLIER_COUNT = cfg.outlier_count
    N_POINTS = cfg.n_points

    # Random subsample to bound DBSCAN's cost. ROBUSTNESS FIX: clouds smaller
    # than the target made np.random.choice(..., replace=False) raise; keep
    # the whole cloud in that case.
    n_keep = min(points.shape[0], RANDOM_DROP_POINTS)
    points = points[np.random.choice(points.shape[0], n_keep, replace=False)]
    points_xyz = points[..., :3]

    dbscan = DBSCAN(eps=OUTLIER_DISTANCE, min_samples=10)
    labels = dbscan.fit_predict(points_xyz)

    # Drop small clusters and DBSCAN noise (label -1).
    unique_labels, counts = np.unique(labels, return_counts=True)
    outlier_labels = unique_labels[counts < OUTLIER_COUNT]
    if -1 not in outlier_labels:
        outlier_labels = np.append(outlier_labels, -1)

    points = points[~np.isin(labels, outlier_labels)]
    points_xyz = points[..., :3]

    # Farthest-point sampling down to the fixed training size.
    # NOTE(review): assumes at least N_POINTS survive outlier removal -- the
    # caller guards for >= 1024 points only before cropping/clustering.
    sample_indices = fpsample.bucket_fps_kdline_sampling(points_xyz, N_POINTS, h=3)
    points = points[sample_indices]

    return points

def init_ik_solver():
    """
    Build an IK solver for the Franka arm from curobo's bundled robot config.

    Returns:
        IKSolver: Initialized IK solver.
    """
    tensor_args = TensorDeviceType()

    # Pull the kinematic description out of curobo's franka.yml config.
    kinematics = load_yaml(join_path(get_robot_configs_path(), "franka.yml"))["robot_cfg"]["kinematics"]
    robot_cfg = RobotConfig.from_basic(
        kinematics["urdf_path"],
        kinematics["base_link"],
        kinematics["ee_link"],
        tensor_args,
    )

    solver_cfg = IKSolverConfig.load_from_robot_config(
        robot_cfg,
        num_seeds=20,
        tensor_args=tensor_args,
        use_cuda_graph=True,
    )
    return IKSolver(solver_cfg)

def process_hdf5_file(file_path, output_dir, visualize=True):
    """
    Process a single HDF5 episode: build per-frame point clouds in the robot
    frame, derive state/action arrays, and save everything as a pickle.

    Args:
        file_path (str): Path to the HDF5 file.
        output_dir (str): Output directory for the resulting .pkl file.
        visualize (bool): Whether to dump HTML point-cloud visualizations for
            frames 1 and 100.

    Returns:
        dict: {'point_cloud', 'state', 'action'} as float32 arrays.
    """
    import pickle  # hoisted from the save site below so the dependency is visible up front

    cprint(f"Processing file: {file_path}", "green")
    ik_solver = init_ik_solver()

    with h5py.File(file_path, 'r') as f:
        cprint("Generating point cloud from depth image", "yellow")
        depth_data = f['observation/head_camera/depth'][:]
        scale = 1000.0  # depth values are compared against 2*scale, i.e. millimetres with a 2 m cutoff
        intrinsic_data = f['observation/head_camera/intrinsic_cv'][:]
        extrinsic_data = f['observation/head_camera/extrinsic_cv'][:]

        raw_rgb_data = f['observation/head_camera/rgb'][:]
        rgb_data = decode_rgb_data(raw_rgb_data)
        T = depth_data.shape[0]
        point_cloud_data = []

        # Forward kinematics: joint angles -> end-effector pose in the robot frame.
        joint_state = f['joint_action/left_arm'][:]
        joint_state = torch.from_numpy(joint_state).to(ik_solver.tensor_args.device).float().reshape(-1, 7)
        kin_state = ik_solver.fk(joint_state)
        robot_pose_ee = np.concatenate([kin_state.ee_position.cpu().numpy(), kin_state.ee_quaternion.cpu().numpy()], axis=1)

        # State = [ee position (3), ee quaternion (4), gripper (1)].
        gripper = f['endpose/left_gripper'][:]
        state = np.concatenate([robot_pose_ee, gripper.reshape(-1, 1)], axis=1)

        for t in tqdm(range(T), desc="Generating point cloud"):
            depth = depth_data[t]
            intrinsic = intrinsic_data[t]
            world_T_camera = extrinsic_data[t]
            # NOTE(review): swaps the y and z translation components of the
            # extrinsic in place -- presumably compensating for an axis
            # convention mismatch in the recorded data; confirm against the
            # simulator's camera convention.
            world_T_camera[1, 3], world_T_camera[2, 3] = world_T_camera[2, 3], world_T_camera[1, 3]

            robot_T_camera = transform_extrinsic_from_world_to_robot(world_T_camera)

            camera = CameraInfo(
                width=depth.shape[1],
                height=depth.shape[0],
                fx=intrinsic[0, 0],
                fy=intrinsic[1, 1],
                cx=intrinsic[0, 2],
                cy=intrinsic[1, 2],
                scale=scale
            )

            point_xyz = create_point_cloud_from_depth_image(depth, camera, organized=False)
            point_xyz = transform_point_cloud(point_xyz, robot_T_camera, format='3x4')

            # Drop invalid (zero) and far (beyond 2*scale) depth readings.
            valid_depth_mask = (depth.reshape(-1) > 0) & (depth.reshape(-1) < 2 * scale)
            point_xyz = point_xyz[valid_depth_mask]

            rgb_image = rgb_data[t]
            colors = rgb_image.reshape(-1, 3)[valid_depth_mask]

            point_cloud = np.concatenate([point_xyz, colors], axis=1)

            if len(point_cloud) < 1024:
                raise ValueError(f"Not enough points in point cloud: {len(point_cloud)}")
            point_cloud = pcd_crop(point_cloud, robot_pose_ee[t])
            point_cloud = pcd_cluster(point_cloud)

            point_cloud_data.append(point_cloud)

            # Dump a couple of spot-check frames as interactive HTML.
            if visualize and (t == 1 or t == 100):
                viz_dir = os.path.join("data/saw_simulation", "visualization")
                os.makedirs(viz_dir, exist_ok=True)

                base_name = os.path.splitext(os.path.basename(file_path))[0]

                html_path = os.path.join(viz_dir, f"{base_name}_frame{t}.html")
                vis = pcd_visualizer()
                vis.save_visualization_to_file(point_cloud, file_path=html_path)

        point_cloud_data = np.array(point_cloud_data)

        # Actions are per-step state deltas; drop the first frame so that
        # state/action/point_cloud stay aligned.
        if state.shape[0] > 1:
            action = state[1:] - state[:-1]
            state = state[1:]
            point_cloud_data = point_cloud_data[1:]
        else:
            action = np.zeros_like(state)

        data = {
            'point_cloud': point_cloud_data.astype(np.float32),
            'state': state.astype(np.float32),
            'action': action.astype(np.float32)
        }

        base_name = os.path.splitext(os.path.basename(file_path))[0]
        output_path = os.path.join(output_dir, f"{base_name}.pkl")

        # BUG FIX: the pickle handle was previously named `f`, shadowing the
        # still-open h5py handle inside its `with` block.
        with open(output_path, 'wb') as pkl_file:
            pickle.dump(data, pkl_file)

        cprint(f"Saved data to: {output_path}", "green")
        cprint(f"Point cloud shape: {point_cloud_data.shape}", "cyan")
        cprint(f"Robot state shape: {state.shape}", "cyan")
        cprint(f"Action shape: {action.shape}", "cyan")

        return data


def project_points_to_image(point_cloud, K, R=np.eye(3), T=np.zeros(3)):
    points_3d = point_cloud[:, :3]
    points_3d = (R @ points_3d.T).T + T
    
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]
    
    X, Y, Z = points_3d[:, 0], points_3d[:, 1], points_3d[:, 2]
    u = np.round(fx * X / Z + cx)
    v = np.round(fy * Y / Z + cy)

    return np.stack([u, v], axis=1).astype(np.int32)


def main():
    """
    Convert a fixed list of simulation episodes into pickled training data.
    """
    input_dir = "data/saw_simulation"
    output_dir = "data/datasets/sim_source/beat"
    os.makedirs(output_dir, exist_ok=True)

    hdf5_files = ["episode0.hdf5", "episode1.hdf5", "episode2.hdf5", "episode3.hdf5", "episode4.hdf5"]
    if not hdf5_files:
        cprint("No HDF5 files found", "red")
        return

    for file_name in tqdm(hdf5_files, desc="Processing HDF5 files"):
        process_hdf5_file(os.path.join(input_dir, file_name), output_dir)

    cprint("All files processed", "green")

def decode_rgb_data(rgb_data):
    """
    Decode rgb_data read from hdf5 (may be JPEG byte stream or raw array).

    Args:
        rgb_data (np.ndarray): Raw RGB data from hdf5.

    Returns:
        np.ndarray: Standard numpy array (N, H, W, 3, uint8).
    """
    encoded = rgb_data.dtype.kind in {'S', 'O'} or rgb_data.dtype == object
    if not encoded:
        # Already a raw image array: normalize dtype, sanity-check channels.
        arr = rgb_data
        if arr.dtype != np.uint8:
            arr = (arr * 255).clip(0, 255).astype(np.uint8)
        if arr.shape[-1] != 3:
            raise ValueError("Decoded array last dimension is not 3 channels")
        return arr

    # Per-frame JPEG byte streams: decode each one with OpenCV.
    frames = []
    for i, img_bytes in enumerate(rgb_data):
        if isinstance(img_bytes, np.ndarray):
            img_bytes = img_bytes.tobytes()
        # Trailing NUL padding comes from fixed-width hdf5 storage.
        img = cv2.imdecode(np.frombuffer(img_bytes.rstrip(b'\0'), np.uint8), cv2.IMREAD_COLOR)
        if img is None:
            print(f"Frame {i} decode failed, skipping")
            continue
        frames.append(img)
    if not frames:
        raise ValueError("No valid frames, cannot decode to numpy array")
    return np.stack(frames, axis=0)

# Script entry point.
if __name__ == '__main__':
    main() 