import pickle, os
import numpy as np
import pdb
from copy import deepcopy
import zarr
import shutil
import argparse
import yaml
import cv2
import h5py
import logging
from pathlib import Path

# Set up logging
# Module-level logger; basicConfig is a process-wide side effect and only
# applies if no handlers have been configured yet.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def decode_rgb_data(encoded_data):
    """Decode a (possibly null-padded) JPEG byte buffer into an RGB image.

    Returns an H x W x 3 uint8 array on success, or None when the buffer
    cannot be decoded.
    """
    try:
        # Strip trailing null-byte padding before handing the buffer to OpenCV.
        buffer = np.frombuffer(encoded_data.rstrip(b'\0'), np.uint8)
        decoded = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
        if decoded is None:
            return None
        # OpenCV decodes to BGR channel order; downstream expects RGB.
        return cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)
    except Exception as e:
        logger.warning(f"Failed to decode RGB data: {e}")
        return None


def load_hdf5_comprehensive(dataset_path):
    """Read every dataset of interest from one episode HDF5 file into a dict.

    The returned dict mirrors the HDF5 layout: the individual joint-action
    components plus 'vector', optional flat 'endpose' / 'pointcloud' /
    'observer_rgb' arrays, and a nested 'cameras' dict with one entry per
    camera group found under 'observation'.
    """
    if not os.path.isfile(dataset_path):
        print(f"Dataset does not exist at \n{dataset_path}\n")
        exit()

    data = {}

    with h5py.File(dataset_path, "r") as root:
        # Joint-space commands: per-arm / per-gripper pieces and the
        # concatenated 'vector' representation.
        if 'joint_action' in root:
            joint_action = root['joint_action']
            for key in ('left_gripper', 'left_arm', 'right_gripper', 'right_arm', 'vector'):
                data[key] = joint_action[key][()]

        # Optional flat datasets, copied verbatim when present.
        for key in ('endpose', 'pointcloud', 'observer_rgb'):
            if key in root:
                data[key] = root[key][()]

        # Per-camera groups: load every dataset each camera provides
        # (rgb, depth, intrinsics/extrinsics, segmentation, ...).
        if 'observation' in root:
            observation = root['observation']
            data['cameras'] = {
                camera_name: {
                    dataset_name: observation[camera_name][dataset_name][()]
                    for dataset_name in observation[camera_name].keys()
                }
                for camera_name in observation.keys()
            }

    return data


def find_minimum_pointcloud_size(data_list):
    """Return the smallest per-timestep point count across all episodes.

    Args:
        data_list: Episode dicts as produced by ``load_hdf5_comprehensive``;
            each may carry a 'pointcloud' entry that is a sequence of
            (num_points, ...) arrays.

    Returns:
        The minimum first-dimension size over every point cloud, or
        ``float('inf')`` when no episode contains point-cloud data
        (preserving the original sentinel so callers can detect "none").
    """
    # min() with a generator replaces the manual running-minimum loop;
    # default= keeps the original behavior for an empty input.
    return min(
        (
            cloud.shape[0]
            for data in data_list
            if 'pointcloud' in data
            for cloud in data['pointcloud']
        ),
        default=float('inf'),
    )


def main() -> None:
    """Convert per-episode HDF5 recordings into a single zarr dataset.

    Pipeline:
      1. First pass: load every episode file and compute the minimum
         point-cloud size across all timesteps.
      2. Second pass: flatten timesteps into (state, action) pairs, trim
         point clouds to the common size, and decode per-camera JPEG data.
      3. Write everything to a zarr store and add a
         data/point_cloud -> pcd/pointcloud symlink for training code.
    """
    parser = argparse.ArgumentParser(description="Process some episodes comprehensively.")
    parser.add_argument(
        "task_name",
        type=str,
        help="The name of the task (e.g., shoe_place)",
    )
    # NOTE(review): 'default=' on positional arguments is ignored by argparse —
    # both of these are always required on the command line.
    parser.add_argument("task_config", type=str, default="franka-panda+franka-panda-m0_b0_l0_h0_c0_D435")
    parser.add_argument("num", type=int, default=1)
 
    
    args = parser.parse_args()

    task_name = args.task_name
    num = args.num
    task_config = args.task_config

    # Hard-coded cluster path; episodes live under <task>/<config>/data.
    load_dir = "/cpfs04/user/gyh/rtdata-new/" + str(task_name) + "/" + str(task_config)+"/"+"data"

    total_count = 0
    save_dir = f"./data/{task_name}-{task_config}-{num}.zarr"
    print(f"Load directory: {load_dir}")
    print(f"Current working directory: {os.getcwd()}")
    print(f"Save directory: {save_dir}")
    
    # Rebuild the output store from scratch on every run.
    if os.path.exists(save_dir):
        shutil.rmtree(save_dir)

    current_ep = 0

    # First pass: collect all data and find minimum point cloud dimension
    all_episode_data = []
    episode_ends_arrays = []
    
    print("First pass: collecting all data and finding minimum point cloud dimension...")
    
    while current_ep < num:
        print(f"Processing episode: {current_ep + 1} / {num}", end="\r")

        load_path = os.path.join(load_dir, f"episode{current_ep}.hdf5")
        
        # Check if the episode file exists
        if not os.path.exists(load_path):
            print(f"\n=>Skipping episode {current_ep}: file does not exist")
            current_ep += 1
            continue
            
        episode_data = load_hdf5_comprehensive(load_path)
        
        # Skip episodes with empty data
        # ('vector' is absent when the file has no 'joint_action' group.)
        if len(episode_data.get('vector', [])) == 0:
            print(f"\n=>Skipping episode {current_ep}: empty data")
            current_ep += 1
            continue

        all_episode_data.append(episode_data)
        current_ep += 1
        
        # Calculate episode end
        # Episode contributes T-1 (state, action) pairs since action j is the
        # vector at timestep j+1.
        episode_length = len(episode_data['vector']) - 1  # Subtract 1 for action calculation
        total_count += episode_length
        episode_ends_arrays.append(total_count)

    print()
    
    # Check if we have any valid data
    if len(all_episode_data) == 0:
        print("No valid episodes found! All episodes are empty.")
        return
    
    # Find minimum point cloud dimension
    # NOTE(review): if no episode carries 'pointcloud', this is float('inf');
    # the trim below then never triggers, but L200's direct indexing would
    # already have to succeed — confirm 'pointcloud' is always present.
    min_points = find_minimum_pointcloud_size(all_episode_data)
    print(f"Found minimum point cloud size: {min_points} points")
    
    # Second pass: process and organize all data
    print("Second pass: processing and organizing all data...")
    
    # Initialize data containers
    all_states = []
    all_actions = []
    all_endposes = []
    all_pointclouds = []
    all_observer_rgb = []
    
    # Camera data containers
    # Assumes every episode shares the camera set of the first episode.
    camera_names = list(all_episode_data[0]['cameras'].keys())
    camera_data = {camera_name: {} for camera_name in camera_names}
    
    # Initialize camera data containers
    for camera_name in camera_names:
        camera_data[camera_name] = {
            'rgb': [],
            'depth': [],
            'intrinsic_cv': [],
            'extrinsic_cv': [],
            'cam2world_gl': [],
            'segmentation': []
        }
    
    for ep_idx, episode_data in enumerate(all_episode_data):
        print(f"Processing episode data {ep_idx + 1} / {len(all_episode_data)}", end="\r")
        
        # Get episode data
        vector_all = episode_data['vector']
        pointcloud_all = episode_data['pointcloud']
        endpose_all = episode_data.get('endpose', None)
        observer_rgb_all = episode_data.get('observer_rgb', None)
        
        # Process timesteps (exclude last timestep for actions)
        for j in range(len(vector_all) - 1):
            # States (current timestep)
            all_states.append(vector_all[j])
            
            # Actions (next timestep)
            all_actions.append(vector_all[j + 1])
            
            # End poses
            if endpose_all is not None:
                all_endposes.append(endpose_all[j])
            
            # Point clouds (trim to minimum size)
            pointcloud = pointcloud_all[j]
            if pointcloud.shape[0] > min_points:
                # Randomly sample min_points from the point cloud
                # NOTE(review): np.random.choice is unseeded — output is
                # nondeterministic across runs; confirm that is acceptable.
                indices = np.random.choice(pointcloud.shape[0], min_points, replace=False)
                trimmed_pc = pointcloud[indices]
            else:
                trimmed_pc = pointcloud
            all_pointclouds.append(trimmed_pc)
            
            # Observer RGB
            if observer_rgb_all is not None:
                # Decode observer RGB data if it's encoded
                encoded_observer_rgb = observer_rgb_all[j]
                decoded_observer_rgb = decode_rgb_data(encoded_observer_rgb)
                if decoded_observer_rgb is not None:
                    all_observer_rgb.append(decoded_observer_rgb)
                else:
                    # If decoding fails, try to use raw data
                    # NOTE(review): mixing raw bytes with decoded HxWx3 frames
                    # makes the list ragged; np.array() below may then produce
                    # an object array — confirm intended.
                    all_observer_rgb.append(encoded_observer_rgb)
            
            # Camera data
            for camera_name in camera_names:
                camera_info = episode_data['cameras'][camera_name]
                
                if 'rgb' in camera_info:
                    # Decode RGB data if it's encoded
                    encoded_rgb = camera_info['rgb'][j]
                    decoded_rgb = decode_rgb_data(encoded_rgb)
                    if decoded_rgb is not None:
                        camera_data[camera_name]['rgb'].append(decoded_rgb)
                    else:
                        # If decoding fails, try to use raw data
                        camera_data[camera_name]['rgb'].append(encoded_rgb)
                if 'depth' in camera_info:
                    camera_data[camera_name]['depth'].append(camera_info['depth'][j])
                if 'intrinsic_cv' in camera_info:
                    camera_data[camera_name]['intrinsic_cv'].append(camera_info['intrinsic_cv'][j])
                if 'extrinsic_cv' in camera_info:
                    camera_data[camera_name]['extrinsic_cv'].append(camera_info['extrinsic_cv'][j])
                if 'cam2world_gl' in camera_info:
                    camera_data[camera_name]['cam2world_gl'].append(camera_info['cam2world_gl'][j])
                # Handle both actor_segmentation and mesh_segmentation
                # (stored under a single unified 'segmentation' key).
                if 'actor_segmentation' in camera_info:
                    camera_data[camera_name]['segmentation'].append(camera_info['actor_segmentation'][j])
                elif 'mesh_segmentation' in camera_info:
                    camera_data[camera_name]['segmentation'].append(camera_info['mesh_segmentation'][j])

    print()
    
    # Convert to numpy arrays
    episode_ends_arrays = np.array(episode_ends_arrays)
    state_arrays = np.array(all_states)
    action_arrays = np.array(all_actions)
    pointcloud_arrays = np.array(all_pointclouds)
    
    if all_endposes:
        endpose_arrays = np.array(all_endposes)
    
    if all_observer_rgb:
        observer_rgb_arrays = np.array(all_observer_rgb)
    
    # Convert camera data to arrays
    for camera_name in camera_names:
        for data_type in camera_data[camera_name]:
            if camera_data[camera_name][data_type]:
                camera_data[camera_name][data_type] = np.array(camera_data[camera_name][data_type])

    print(f"Final shapes:")
    print(f"  States: {state_arrays.shape}")
    print(f"  Actions: {action_arrays.shape}")
    print(f"  Point clouds: {pointcloud_arrays.shape}")
    if all_endposes:
        print(f"  End poses: {endpose_arrays.shape}")
    if all_observer_rgb:
        print(f"  Observer RGB: {observer_rgb_arrays.shape}")
    
    # Print camera data shapes
    for camera_name in camera_names:
        print(f"  Camera {camera_name}:")
        for data_type, data_array in camera_data[camera_name].items():
            if len(data_array) > 0:
                print(f"    {data_type}: {data_array.shape}")

    # Create ZARR file
    zarr_root = zarr.group(save_dir)
    zarr_data = zarr_root.create_group("data")
    zarr_meta = zarr_root.create_group("meta")
    zarr_cameras = zarr_root.create_group("cameras")
    zarr_pcd = zarr_root.create_group("pcd")  # Create pcd group for processed pointcloud data

    compressor = zarr.Blosc(cname="zstd", clevel=3, shuffle=1)
    
    # Save core data
    # Chunk along time in slabs of 100 timesteps.
    state_chunk_size = (100, state_arrays.shape[1])
    action_chunk_size = (100, action_arrays.shape[1])
    pointcloud_chunk_size = (100, pointcloud_arrays.shape[1], pointcloud_arrays.shape[2])
    
    print("Creating datasets...")
    
    zarr_data.create_dataset(
        "state",
        data=state_arrays,
        chunks=state_chunk_size,
        dtype="float32",
        overwrite=True,
        compressor=compressor,
    )
    
    zarr_data.create_dataset(
        "action",
        data=action_arrays,
        chunks=action_chunk_size,
        dtype="float32",
        overwrite=True,
        compressor=compressor,
    )
    
    # Save pointcloud data to pcd/pointcloud instead of data/point_cloud
    zarr_pcd.create_dataset(
        "pointcloud",
        data=pointcloud_arrays,
        chunks=pointcloud_chunk_size,
        overwrite=True,
        compressor=compressor,
    )
    
    # Save end poses if available
    if all_endposes:
        endpose_chunk_size = (100, endpose_arrays.shape[1])
        zarr_data.create_dataset(
            "endpose",
            data=endpose_arrays,
            chunks=endpose_chunk_size,
            dtype="float32",
            overwrite=True,
            compressor=compressor,
        )
    
    # Save observer RGB if available
    if all_observer_rgb:
        observer_rgb_chunk_size = (100, observer_rgb_arrays.shape[1], observer_rgb_arrays.shape[2], observer_rgb_arrays.shape[3])
        zarr_data.create_dataset(
            "observer_rgb",
            data=observer_rgb_arrays,
            chunks=observer_rgb_chunk_size,
            dtype="uint8",
            overwrite=True,
            compressor=compressor,
        )
    
    # Save camera data
    for camera_name in camera_names:
        camera_group = zarr_cameras.create_group(camera_name)
        
        for data_type, data_array in camera_data[camera_name].items():
            if len(data_array) > 0:
                if data_type == 'rgb':
                    # Handle raw RGB images with chunking
                    rgb_chunk_size = (100, data_array.shape[1], data_array.shape[2], data_array.shape[3])
                    camera_group.create_dataset(
                        data_type,
                        data=data_array,
                        chunks=rgb_chunk_size,
                        dtype="uint8",
                        overwrite=True,
                        compressor=compressor,
                    )
                elif data_type == 'depth':
                    # Chunk depth images
                    depth_chunk_size = (100, data_array.shape[1], data_array.shape[2])
                    camera_group.create_dataset(
                        data_type,
                        data=data_array,
                        chunks=depth_chunk_size,
                        dtype="float32",
                        overwrite=True,
                        compressor=compressor,
                    )
                elif data_type == 'segmentation':
                    # Chunk segmentation images (similar to depth but uint8)
                    seg_chunk_size = (100, data_array.shape[1], data_array.shape[2], data_array.shape[3])
                    camera_group.create_dataset(
                        data_type,
                        data=data_array,
                        chunks=seg_chunk_size,
                        dtype="uint8",
                        overwrite=True,
                        compressor=compressor,
                    )
                else:
                    # Other camera parameters
                    param_chunk_size = (100,) + data_array.shape[1:]
                    camera_group.create_dataset(
                        data_type,
                        data=data_array,
                        chunks=param_chunk_size,
                        dtype="float32",
                        overwrite=True,
                        compressor=compressor,
                    )
    
    # Save episode metadata
    zarr_meta.create_dataset(
        "episode_ends",
        data=episode_ends_arrays,
        dtype="int64",
        overwrite=True,
        compressor=compressor,
    )
    
    # Save dataset info
    info = {
        'num_episodes': len(all_episode_data),
        'num_timesteps': len(state_arrays),
        'min_pointcloud_size': min_points,
        'camera_names': camera_names,
        'has_endpose': len(all_endposes) > 0,
        'has_observer_rgb': len(all_observer_rgb) > 0,
    }
    
    zarr_meta.attrs.update(info)
    
    # Create softlink from data/point_cloud to pcd/pointcloud for training compatibility
    print("Creating softlink for training compatibility...")
    
    # Get the zarr directory path
    zarr_path = Path(save_dir)
    data_point_cloud_path = zarr_path / "data" / "point_cloud"
    
    # Create relative softlink to pcd/pointcloud
    # Relative to the data/ directory, so the store stays relocatable.
    relative_target = "../pcd/pointcloud"
    
    try:
        os.symlink(relative_target, data_point_cloud_path)
        print(f"✓ Created softlink: data/point_cloud -> {relative_target}")
    except Exception as e:
        print(f"✗ Failed to create softlink: {e}")
    
    print("Dataset creation completed!")
    print(f"Saved to: {os.getcwd()}/{save_dir}")
    print(f"Total episodes: {info['num_episodes']}")
    print(f"Total timesteps: {info['num_timesteps']}")
    print(f"Camera data included: {info['camera_names']}")
    print(f"Pointcloud data saved to: pcd/pointcloud")
    print(f"Softlink created: data/point_cloud -> pcd/pointcloud")

if __name__ == "__main__":
    main() 