#!/usr/bin/env python

import os
import sys
import logging
from pprint import pformat

import cv2
import h5py
import numpy as np
import torch

from lerobot.common.datasets.factory import make_dataset
from lerobot.common.utils.utils import init_logging
from lerobot.configs import parser
from lerobot.configs.train import TrainPipelineConfig


def _detect_camera_names(first_item):
    """Return the camera names present in one dataset sample.

    Camera image features live under keys shaped like
    ``observation.images.<camera_name>``; the suffix after the last dot
    is the camera name.
    """
    return [
        key.split(".")[-1]
        for key in first_item
        if key.startswith("observation.images.")
    ]


def _write_episode_hdf5(hdf5_path, action_arr, qpos_arr, image_arrays):
    """Write one episode to an ACT-style HDF5 file.

    Layout: ``action`` and ``observations/qpos`` (float32), plus one uint8
    dataset per camera under ``observations/images/<camera_name>``.
    All datasets are gzip-compressed; image datasets are chunked one frame
    at a time so individual frames can be read cheaply.
    """
    with h5py.File(hdf5_path, "w") as f:
        f.create_dataset(
            "action", data=action_arr, compression="gzip", compression_opts=4
        )
        f.create_dataset(
            "observations/qpos",
            data=qpos_arr,
            compression="gzip",
            compression_opts=4,
        )

        img_grp = f.create_group("observations/images")
        for cam_name, cam_arr in image_arrays.items():
            img_grp.create_dataset(
                cam_name,
                data=cam_arr,
                dtype=np.uint8,
                compression="gzip",
                compression_opts=4,
                # One (H, W, C) frame per chunk.
                chunks=(1, cam_arr.shape[1], cam_arr.shape[2], cam_arr.shape[3]),
            )


@parser.wrap()
def convert_dataset(cfg: TrainPipelineConfig):
    """Convert a lerobot dataset into one ACT-style HDF5 file per episode.

    Cameras are discovered dynamically from the first sample.  Each episode
    is written to ``cfg.output_dir`` as ``episode_<idx>.hdf5``; episodes
    with no decodable images are skipped with a warning.
    """
    cfg.validate()
    logging.info(pformat(cfg.to_dict()))

    dataset = make_dataset(cfg)
    logging.info(f"Loaded dataset with {len(dataset)} frames")

    # Discover all cameras from the first sample instead of hard-coding them.
    try:
        first_item = dataset[0]
        logging.info(f"Sample keys: {list(first_item.keys())}")
        camera_names = _detect_camera_names(first_item)
        logging.info(f"✅ Automatically detected cameras: {camera_names}")
    except Exception as e:
        logging.error(f"❌ Failed to get camera names from the dataset: {e}")
        return  # cannot continue without knowing the cameras

    if not camera_names:
        # Without this guard every episode would be silently skipped below.
        logging.error("❌ No 'observation.images.*' keys found; nothing to convert.")
        return

    episode_from = dataset.episode_data_index["from"]
    episode_to = dataset.episode_data_index["to"]

    save_dir = str(cfg.output_dir)
    os.makedirs(save_dir, exist_ok=True)

    for idx, (start, end) in enumerate(zip(episode_from, episode_to)):
        action_qpos_list, status_qpos_list = [], []
        # One frame list per camera, keyed by camera name.
        image_data = {name: [] for name in camera_names}

        for frame_idx in range(start.item(), end.item()):
            try:
                item = dataset[frame_idx]
            except Exception as e:
                logging.warning(
                    f"Error loading dataset[{frame_idx}] in episode {idx}: {e}"
                )
                continue

            # NOTE(review): assumes the action feature has a leading (time)
            # dimension and only the first step is wanted — confirm against
            # the dataset's delta_timestamps configuration.
            action_qpos_list.append(item["action"][0].numpy())
            status_qpos_list.append(item["observation.state"].numpy())

            for cam_name in camera_names:
                image_key = f"observation.images.{cam_name}"
                if image_key in item:
                    # CHW float tensor (presumably in [0, 1]) -> HWC uint8.
                    img_tensor = item[image_key]
                    image_data[cam_name].append(
                        (img_tensor * 255).byte().permute(1, 2, 0).numpy()
                    )

        if not any(image_data.values()):
            logging.warning(
                f"Episode {idx} has no valid images for any camera, skipping."
            )
            continue

        action_qpos_arr = np.array(action_qpos_list, dtype=np.float32)
        status_qpos_arr = np.array(status_qpos_list, dtype=np.float32)

        # Stack each camera's frames into a single (T, H, W, C) array.
        image_arrays = {
            cam_name: np.stack(frames)
            for cam_name, frames in image_data.items()
            if frames
        }

        hdf5_path = os.path.join(save_dir, f"episode_{idx}.hdf5")
        logging.info(f"Writing {hdf5_path}")
        _write_episode_hdf5(hdf5_path, action_qpos_arr, status_qpos_arr, image_arrays)

    logging.info("✅ All episodes converted successfully.")


if __name__ == "__main__":
    # Script entry point: configure logging first, then run the conversion.
    # CLI arguments are parsed into a TrainPipelineConfig by @parser.wrap().
    init_logging()
    convert_dataset()
