# Extract the datasets from an input HDF5 file; each dataset is saved as a separate file.
import h5py
import sys
import argparse
import os
import numpy as np
import cv2

def extract_hdf5_data(file_path, output_dir=None):
    """
    Extract every dataset in an HDF5 file into its own file.

    Datasets whose name contains 'images' are treated as frame sequences
    and re-encoded as an mp4 video; other ndarray datasets are written as
    text; anything else is written via str().

    Args:
        file_path: Path to the HDF5 file to read.
        output_dir: Optional output directory. Defaults to the HDF5
            file's base name (without extension) in the current
            working directory.
    """
    if output_dir is None:
        # Use the HDF5 file's base name as the output directory name.
        output_dir = os.path.splitext(os.path.basename(file_path))[0]

    # Create the output directory.
    os.makedirs(output_dir, exist_ok=True)

    def decode_frame(name, frame):
        """Return a displayable H x W (x C) image for one stored frame."""
        # Compressed frames are stored as flat byte buffers (ndim == 1);
        # uncompressed frames are already pixel arrays. Checking ndim
        # (rather than dtype == uint8) avoids sending uncompressed uint8
        # images into cv2.imdecode, which cannot decode them and
        # returns None.
        if 'compress' in name or frame.ndim == 1:
            img = cv2.imdecode(frame, 1)
            if img is None:
                raise ValueError("cv2.imdecode returned None (corrupt or non-image frame)")
            return img
        return frame

    def save_video(name, data, output_path):
        """Re-encode the frame sequence `data` as an mp4 video on disk."""
        # Create the image/video directory.
        image_dir = f"{output_path}_images"
        os.makedirs(image_dir, exist_ok=True)

        # Use the first frame to determine the video geometry.
        first_frame = decode_frame(name, data[0])
        height, width = first_frame.shape[:2]
        fps = 30  # frame rate is not stored in the file; assume 30 fps

        # Create the video writer.
        video_path = os.path.join(image_dir, 'video.mp4')
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(video_path, fourcc, fps, (width, height))
        try:
            # Decode (if needed) and write every frame.
            for frame in data:
                out.write(decode_frame(name, frame))
        finally:
            # Always release the writer, even when a frame fails to
            # decode, so the handle is not leaked and the partial
            # file is finalized.
            out.release()
        print(f"Saved video '{name}' to {video_path}")

    def save_dataset(name, obj):
        """h5py.visititems callback: save each Dataset node, skip Groups."""
        if not isinstance(obj, h5py.Dataset):
            return
        # Build the output path, replacing '/' with '_' so nested
        # dataset names stay inside the single output directory.
        output_path = os.path.join(output_dir, name.replace('/', '_'))
        try:
            data = obj[()]

            # Image datasets become a video; arrays become text files.
            if 'images' in name:
                save_video(name, data, output_path)
            elif isinstance(data, np.ndarray):
                # np.savetxt only accepts 1-D/2-D input; flatten higher
                # ranks to (rows, features) so saving does not raise.
                arr = data.reshape(data.shape[0], -1) if data.ndim > 2 else data
                np.savetxt(f"{output_path}.txt", arr, fmt='%s')
                print(f"Saved dataset '{name}' to {output_path}.txt")
            else:
                with open(f"{output_path}.txt", 'w', encoding='utf-8') as f:
                    f.write(str(data))
                print(f"Saved dataset '{name}' to {output_path}.txt")

        except Exception as e:
            # Best-effort extraction: report the failure and keep
            # processing the remaining datasets.
            print(f"Error saving dataset '{name}': {e}")

    try:
        with h5py.File(file_path, 'r') as f:
            print(f"\nExtracting datasets from {file_path} to {output_dir}/")
            f.visititems(save_dataset)

    except Exception as e:
        print(f"Error reading HDF5 file: {e}")

if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the extraction.
    cli = argparse.ArgumentParser(description='Extract HDF5 datasets to files')
    cli.add_argument('file_path', help='Path to HDF5 file')
    cli.add_argument('-o', '--output', help='Output directory path', default=None)

    options = cli.parse_args()
    extract_hdf5_data(options.file_path, options.output)

# python extract_hdf5.py /gemini/data-1/ACT/yogurt_new/episode_70.hdf5 -o ../data_analysis/yogurt_new_episode_70.hdf5
