"""
Convert umi/dp dataset stored in .zarr files to lerobot dataset

# state is the concatenation of the 9D states of frames -3 and 0; action is the 10D pose relative to the trajectory start point + gripper_width
1. Change the dataset path
2. Change the name (repo_id) of the converted dataset
3. Change the task description
"""
import os
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
import shutil
import zarr
import numpy as np
import tensorflow_datasets as tfds
from termcolor import cprint
from tqdm import tqdm
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from utils.cowa_data_util import *
from utils.imagecodecs_numcodecs import register_codecs

register_codecs()

def lerobot_builder(data_dir: str):
    """Convert a UMI/DP dataset stored as a zarr store into a LeRobot dataset.

    The observation state (18 values) is the concatenation of the 9D pose
    representations of the frames at relative indices [-3, 0], expressed
    relative to the current frame.  The stored action (10 values) is the
    absolute 9D pose of the next frame plus the gripper width.

    Args:
        data_dir: Path to the input ``.zarr`` store.

    Side effects:
        Deletes and recreates ``../data/<repo_id>`` and writes the converted
        dataset there, plus a ``stats_temp.json`` with action statistics.
    """
    # Clean up any existing dataset in the output directory.
    output_path = os.path.join(os.path.dirname(__file__), "../data/")
    repo_id = "cowa_umi_button_lerobot_fix1"
    root = os.path.join(output_path, repo_id)
    if os.path.exists(root):
        cprint(f"Output path {root} already exists. Deleting it.", 'red')
        shutil.rmtree(root)

    # Create the LeRobot dataset and declare the features it will store.
    dataset = LeRobotDataset.create(
        repo_id=repo_id,
        root=root,
        robot_type="cowa",
        fps=30,
        features={
            "observation.image.image": {
                "dtype": 'video',
                "shape": (224, 224, 3),
                "names": ["height", "width", "channels"],
            },
            "observation.state": {
                "dtype": "float32",
                "shape": (18,),
                "names": ["state"],
            },
            "action": {
                "dtype": "float32",
                "shape": (10,),
                "names": ["action"],
            },
        },
        image_writer_threads=10,
        image_writer_processes=5,
    )

    cowa_dataset = zarr.open(data_dir, mode='r')
    episode_ends = cowa_dataset['/meta/episode_ends'][:]
    robot0_eef_pos = cowa_dataset['/data/robot0_eef_pos'][:]
    robot0_eef_rot_axis_angle = cowa_dataset['/data/robot0_eef_rot_axis_angle'][:]
    robot0_gripper_width = cowa_dataset['/data/robot0_gripper_width'][:]
    # Full per-frame state: [x, y, z, axis-angle(3), gripper_width] -> 7 values.
    # Use np.concatenate: np.concat is only an alias introduced in NumPy 2.0.
    robot0_full_state = np.concatenate(
        [robot0_eef_pos, robot0_eef_rot_axis_angle, robot0_gripper_width], axis=-1)
    obs_pose_repr = 'relative'
    # Keep a handle to the (lazy) image array; slices are read per episode below.
    camera0_rgb = cowa_dataset['/data/camera0_rgb']

    # Loop over zarr episodes and write them to the LeRobot dataset.
    # You can modify this for your own data format.
    action_cache = []
    last_episode_end = 0
    task_description = "Press the button on the top left of the coffee machine"
    # task_description = "Put the block on the right into the box on the right, and put the block on the left into the box on the left"
    for episode in tqdm(range(len(episode_ends)), desc="Processing episodes", unit="episode"):
        episode_end = episode_ends[episode]
        episode_states = robot0_full_state[last_episode_end:episode_end]
        # Decompress the whole episode's images with one read instead of one
        # chunked zarr read per frame.
        episode_images = camera0_rgb[last_episode_end:episode_end]
        for step in range(last_episode_end, episode_end):
            episode_step = step - last_episode_end

            # History states at relative frames [-3, 0]; presumably
            # out-of-range indices are padded via idx_padding — confirm
            # against sample_chunk's implementation.
            state_sample = sample_chunk(episode_states, episode_step,
                                        idx_relate=[-3, 0], idx_padding=0)
            # Future states at relative frames 1..16.
            action_sample = sample_chunk(episode_states, episode_step,
                                         idx_relate=np.arange(1, 17, dtype=np.int32),
                                         idx_padding=-1)

            pose_mat = pose_to_mat(state_sample[..., 0:6])

            # Express the history poses relative to the current frame
            # (pose_mat[-1] is the pose at relative index 0).
            obs_pose_mat = convert_pose_mat_rep(
                pose_mat,
                base_pose_mat=pose_mat[-1],
                pose_rep=obs_pose_repr,
                backward=False)
            obs_pose = mat_to_pose10d(obs_pose_mat)
            obs = obs_pose.flatten()

            action_mat = pose_to_mat(action_sample[..., 0: 6])
            action_pose_mat = convert_pose_mat_rep(
                action_mat,
                base_pose_mat=pose_mat[-1],
                pose_rep=obs_pose_repr,
                backward=False)
            action_pose = mat_to_pose10d(action_pose_mat)
            action_gripper = action_sample[..., 6: 7]
            # Relative action chunk (16 future frames x 10 values); only used
            # for the statistics computed after the loop.
            action = np.concatenate([action_pose, action_gripper], axis=-1)

            # Absolute pose + gripper width of the next frame — this is what
            # gets stored as the per-frame action.
            action_abs_pose = pose_to_mat(action_sample[0, 0: 6])
            action_abs10d = mat_to_pose10d(action_abs_pose)
            action_abs = np.concatenate([action_abs10d, action_gripper[0]], axis=-1)

            dataset.add_frame(
                {
                    "observation.image.image": episode_images[episode_step],
                    "observation.state": obs.astype(np.float32),
                    "action": action_abs.astype(np.float32),
                    "task": task_description,
                }
            )

            action_cache.append(action)
        dataset.save_episode()
        last_episode_end = episode_end

    # NOTE(review): the statistics below are computed over the *relative*
    # action chunks, while the dataset stores *absolute* actions — confirm
    # this mismatch is intended before using stats_temp.json for normalization.
    all_action = np.stack(action_cache, axis=0).reshape(-1, 10)
    action_statistics = {
        "mean": np.mean(all_action, axis=0).tolist(),
        "std": np.std(all_action, axis=0).tolist(),
        "min": np.min(all_action, axis=0).tolist(),
        "max": np.max(all_action, axis=0).tolist(),
        "q01": np.quantile(all_action, 0.01, axis=0).tolist(),
        "q99": np.quantile(all_action, 0.99, axis=0).tolist(),
    }

    # Save the action statistics next to the converted dataset.
    import json
    with open(os.path.join(root, 'stats_temp.json'), 'w') as f:
        json.dump(action_statistics, f, indent=4)

    print('done')


if __name__ == "__main__":
    # Entry point: convert the zarr store at ../data/0324button.zarr
    # (path resolved relative to this script's directory).
    lerobot_builder(data_dir=os.path.join(os.path.dirname(__file__), '../data/0324button.zarr'))