"""
Regenerates a LIBERO dataset (HDF5 files) by replaying demonstrations in the environments.

Notes:
    - We save image observations at IMAGE_RESOLUTION (currently 512x512px) instead of 128x128.
    - We filter out transitions with "no-op" (zero) actions that do not change the robot's state.
    - We filter out unsuccessful demonstrations.
    - In the LIBERO HDF5 data -> RLDS data conversion (not shown here), we rotate the images by
    180 degrees because we observe that the environments return images that are upside down
    on our platform.

Usage:
    python experiments/robot/libero/regenerate_libero_dataset.py \
        --libero_task_suite [ libero_spatial | libero_object | libero_goal | libero_10 ] \
        --libero_raw_data_dir <PATH TO RAW HDF5 DATASET DIR> \
        --libero_target_dir <PATH TO TARGET DIR>

    Example (LIBERO-Spatial):
        python experiments/robot/libero/regenerate_libero_dataset.py \
            --libero_task_suite libero_spatial \
            --libero_raw_data_dir ./LIBERO/libero/datasets/libero_spatial \
            --libero_target_dir ./LIBERO/libero/datasets/libero_spatial_no_noops

"""

import argparse
import json
import os
import re

import h5py
import copy
import numpy as np
import robosuite.utils.transform_utils as T
import tqdm
from libero.libero import benchmark
from libero.libero import get_libero_path
from libero.libero.envs import OffScreenRenderEnv
from lerobot.datasets.libero_noops.libero.libero_utils import (
    get_libero_dummy_action,
    get_libero_env,
    quat2axisangle
)

# Square resolution (px) of saved camera observations.
# NOTE(review): currently 512; earlier runs used 256 (commented value below) — confirm intended.
IMAGE_RESOLUTION = 512
# IMAGE_RESOLUTION = 256


def is_noop(action, prev_action=None, threshold=1e-4):
    """Return True if ``action`` is a no-op that should be filtered out.

    An action counts as a no-op when both hold:
        (1) every dimension except the last (the gripper command) is near zero
            (L2 norm below ``threshold``), and
        (2) the gripper command equals the previous timestep's gripper command.

    Criterion (2) matters because a robot that holds still while opening or
    closing its gripper is NOT idle; the previous gripper command serves as a
    proxy for the current gripper state. For the first action of an episode
    (``prev_action is None``) only criterion (1) is applied.
    """
    arm_is_still = np.linalg.norm(action[:-1]) < threshold

    # First action of the episode: no previous gripper command to compare to.
    if prev_action is None:
        return arm_is_still

    # Normal case: arm must be still AND the gripper command must be unchanged.
    return arm_is_still and action[-1] == prev_action[-1]

def get_cls_name(names):
    """Convert raw simulator object names into human-readable class names.

    Digits and the substrings 'main'/'vis' are stripped from each name, and the
    remaining underscore-separated parts are joined with single spaces, e.g.
    'wooden_cabinet_1_main' -> 'wooden cabinet'.

    Fix: the original joined ALL split segments, so consecutive underscores
    produced duplicate/trailing spaces (e.g. 'basket_1' -> 'basket  '); empty
    segments are now filtered out before joining.
    """
    ignore_pattern = re.compile(r"\d|main|vis")
    out_names = []
    for raw_name in names:
        cleaned = ignore_pattern.sub("_", raw_name)
        parts = [part for part in cleaned.split("_") if part]
        out_names.append(" ".join(parts))
    return out_names

def selected_list(body_names_all, selected_names):
    """Select bodies whose name contains any of the given key substrings.

    Returns:
        A pair ``(cls_names, mask)`` where ``cls_names`` are the matched body
        names converted to readable class names via ``get_cls_name``, and
        ``mask`` is a boolean list over ``body_names_all`` marking which
        entries matched.
    """
    matched_names = []
    mask = []
    for candidate in body_names_all:
        hit = any(key in candidate for key in selected_names)
        mask.append(hit)
        if hit:
            matched_names.append(candidate)
    return get_cls_name(matched_names), mask

def main(args):
    print(f"Regenerating {args.libero_task_suite} dataset!")

    # Create target directory
    if os.path.isdir(args.libero_target_dir):
        user_input = input(f"Target directory already exists at path: {args.libero_target_dir}\nEnter 'y' to overwrite the directory, or anything else to exit: ")
        if user_input != 'y':
            exit()
    os.makedirs(args.libero_target_dir, exist_ok=True)

    # Prepare JSON file to record success/false and initial states per episode
    metainfo_json_dict = {}
    metainfo_json_out_path = os.path.join(args.libero_target_dir, "..", f"{args.libero_task_suite}_metainfo.json")
    with open(metainfo_json_out_path, "w") as f:
        # Just test that we can write to this file (we overwrite it later)
        json.dump(metainfo_json_dict, f)

    # Get task suite
    benchmark_dict = benchmark.get_benchmark_dict()
    task_suite = benchmark_dict[args.libero_task_suite]()
    num_tasks_in_suite = task_suite.n_tasks

    # Setup
    num_replays = 0
    num_success = 0
    num_noops = 0

    for task_id in tqdm.tqdm(range(num_tasks_in_suite)):
        # Get task in suite
        task = task_suite.get_task(task_id)
        env, task_description = get_libero_env(task, "llava", resolution=IMAGE_RESOLUTION)

        # Get dataset for task
        orig_data_path = os.path.join(args.libero_raw_data_dir, f"{task.name}_demo.hdf5")
        assert os.path.exists(orig_data_path), f"Cannot find raw data file {orig_data_path}."
        orig_data_file = h5py.File(orig_data_path, "r")
        orig_data = orig_data_file["data"]
        print(f"Begin demos of task: {task.name}")

        # Create new HDF5 file for regenerated demos
        new_data_path = os.path.join(args.libero_target_dir, f"{task.name}_demo.hdf5")
        new_data_file = h5py.File(new_data_path, "w")
        grp = new_data_file.create_group("data")

        for i in range(len(orig_data.keys())):
            # Get demo data
            demo_data = orig_data[f"demo_{i}"]
            orig_actions = demo_data["actions"][()]
            orig_states = demo_data["states"][()]

            # Reset environment, set initial state, and wait a few steps for environment to settle
            env.reset()
            env.set_init_state(orig_states[0])
            for _ in range(10):
                obs, reward, done, info = env.step(get_libero_dummy_action("llava"))

            # Set up new data lists
            states = []
            actions = []
            ee_states = []
            ee_quat_states = []
            obj_states = []
            obj_quat_states = []
            gripper_states = []
            gripper_distance = []
            joint_states = []
            agentview_images = []
            eye_in_hand_images = []

            # Set up state lists
            fixtures_keys = env.env.fixtures_dict.keys()
            objects_keys = env.env.objects_dict.keys()
            selected_names = list(fixtures_keys) + list(objects_keys)
            geom_size_id=[]
            for sn in selected_names:
                if sn+'_g0' in env.env.sim.model.geom_names:
                    geom_size_id.append(env.env.sim.model.geom_name2id(sn+'_g0'))
                else:
                    for kkk in env.env.sim.model.geom_names: # usually is  sn+'_base_vis' 
                        if kkk is None: continue
                        if sn in kkk:
                            geom_size_id.append(env.env.sim.model.geom_name2id(kkk))
                            break
            geom_size = env.env.sim.model.geom_size[geom_size_id]

            # Replay original demo actions in environment and record observations
            for _, action in enumerate(orig_actions):
                # Skip transitions with no-op actions
                prev_action = actions[-1] if len(actions) > 0 else None
                if is_noop(action, prev_action):
                    print(f"\tSkipping no-op action: {action}")
                    num_noops += 1
                    continue

                # if states == []:
                #     # In the first timestep, since we're using the original initial state to initialize the environment,
                #     # copy the initial state (first state in episode) over from the original HDF5 to the new one
                #     states.append(orig_states[0])
                #     # robot_states.append(demo_data["robot_states"][0])
                # else:
                #     # For all other timesteps, get state from environment and record it
                #     states.append(env.sim.get_state().flatten())
                # Prepare observations dict
                state = np.concatenate(
                    (
                        obs["robot0_eef_pos"],
                        quat2axisangle(obs["robot0_eef_quat"]),
                        obs["robot0_gripper_qpos"],
                    )
                )
                states.append(state)

                # Record original action (from demo)
                actions.append(action)
                ee_quat_states.append(np.concatenate([obs["robot0_eef_pos"], obs["robot0_eef_quat"]]))
                ee_states.append(np.hstack((obs["robot0_eef_pos"], T.quat2axisangle(obs["robot0_eef_quat"]),)))

                '''
                注意：
                1. selected_names和obs['object-state']的关键字高度重合。前者在后者中会以'basket_1_pos', 'basket_1_quat', 'basket_1_to_robot0_eef_pos', 'basket_1_to_robot0_eef_quat'的形式对应后者。有3+4+3+4个数字，最后这些数据全部汇总在'object-state'中，对比'robot0_eef_pos'，第三组数据偏移量也基本是正确的。
                2. 关于物体大小等数据应该在geom_size中获取，对比了xml文件与其关键字geom是完全一致的。geom_names的话也是和selected_names是对应起来的。命名方式是*—g[0-n]这种形式，g0代表了整体的外形。geom_names或geom_name2id取索引，geom_size中拿尺寸。这个尺寸好像是半尺寸https://zhuanlan.zhihu.com/p/640110929
                '''
                # Record data returned by environment
                if "robot0_gripper_qpos" in obs:
                    gripper_states.append(obs["robot0_gripper_qpos"])
                # https://github.com/ARISE-Initiative/robosuite/issues/406
                finger1_col = env.sim.data.geom_xpos[env.sim.model.geom_name2id("gripper0_finger1_collision")]
                finger2_col = env.sim.data.geom_xpos[env.sim.model.geom_name2id("gripper0_finger2_collision")]
                distance = np.sum((finger1_col - finger2_col) ** 2, axis=0)
                gripper_distance.append(distance)

                joint_states.append(obs["robot0_joint_pos"])
                obj_state, obj_state_quat = [], []
                for sn in selected_names:
                    geom_state = env.env.object_states_dict[sn].get_geom_state()
                    obj_pos = geom_state['pos'] # obs[sn+'_pos'] only work for non-fixture
                    obj_quat = np.roll(geom_state['quat'], -1) # obs[sn+'_quat'] -- so does quat
                    obj_state_quat.append(np.hstack([obj_pos, obj_quat]))
                    obj_state.append(np.hstack([obj_pos, T.quat2axisangle(obj_quat)]))
                obj_states.append(obj_state)
                obj_quat_states.append(obj_state_quat)
                
                agentview_image = obs["agentview_image"]
                eye_in_hand_image = obs["robot0_eye_in_hand_image"]
                agentview_images.append(copy.deepcopy(agentview_image[::-1,::-1]))
                eye_in_hand_images.append(copy.deepcopy(eye_in_hand_image[::-1,::-1]))

                # Execute demo action in environment
                obs, reward, done, info = env.step(action.tolist())# 跳进去看看obs的获取是怎么来的，特别关注有没有一些数据获取过程是可以对应起来的。bddl定义了各个目标的位置

            # At end of episode, save replayed trajectories to new HDF5 files (only keep successes)
            if done:
                dones = np.zeros(len(actions)).astype(np.uint8)
                dones[-1] = 1
                rewards = np.zeros(len(actions)).astype(np.uint8)
                rewards[-1] = 1
                assert len(actions) == len(agentview_images)

                ep_data_grp = grp.create_group(f"demo_{i}")
                ep_data_grp.create_dataset("actions", data=actions)
                ep_data_grp.create_dataset("states", data=np.stack(states))
                # ep_data_grp.create_dataset("robot_states", data=np.stack(robot_states, axis=0))
                ep_data_grp.create_dataset("rewards", data=rewards)
                ep_data_grp.create_dataset("dones", data=dones)
                ###########################################################################################
                obs_grp = ep_data_grp.create_group("obs")
                obs_grp.create_dataset("gripper_states", data=np.stack(gripper_states, axis=0)) # L*2 并不完全对称，几乎没有变化，应该和夹具的力度有关，action[-1]是闭合
                obs_grp.create_dataset("joint_states", data=np.stack(joint_states, axis=0))     #当前位置
                obs_grp.create_dataset("ee_quat_states", data=np.stack(ee_quat_states, axis=0)) #带四元组
                obs_grp.create_dataset("ee_states", data=np.stack(ee_states, axis=0))           #不带四元组
                obs_grp.create_dataset("ee_pos", data=np.stack(ee_states, axis=0)[:, :3])
                obs_grp.create_dataset("ee_ori", data=np.stack(ee_states, axis=0)[:, 3:])
                obs_grp.create_dataset("agentview_rgb", data=np.stack(agentview_images, axis=0))
                obs_grp.create_dataset("eye_in_hand_rgb", data=np.stack(eye_in_hand_images, axis=0))
                # cls,pos,quat, scale
                obs_grp.create_dataset("cls_names", data=selected_names)                        #n_obj，和长度无关
                obs_grp.create_dataset("obj_states", data=np.stack(obj_states, axis=0))         #不带四元组，位置+角度
                obs_grp.create_dataset("obj_quat_states", data=np.stack(obj_quat_states, axis=0))#带四元组，位置+quat
                obs_grp.create_dataset("geom_size", data=geom_size)                             #n_obj，和长度无关，物体的尺寸
                ###########################################################################################
                # body_pos_all = np.stack(body_pos_all)
                # body_quat_all = np.stack(body_quat_all)
                # site_pos_all = np.stack(site_pos_all)
                # site_quat_all = np.stack(site_quat_all)
                # state_grp = ep_data_grp.create_group("state_obj")
                # state_grp.create_dataset("body_pos", data=body_pos_all[:,body_ids,:])
                # state_grp.create_dataset("body_quat", data=body_quat_all[:,body_ids,:])
                # state_grp.create_dataset("body_names", data=body_names_star)
                # state_grp.create_dataset("site_pos", data=site_pos_all)
                # state_grp.create_dataset("site_quat", data=site_quat_all)
                # state_grp.create_dataset("site_names", data=site_names_all)
                # # state_grp.create_dataset("fixtures", data=list(env.env.fixtures_dict.keys()))
                # # state_grp.create_dataset("objects", data=list(env.env.objects_dict.keys()))
                # ###########################################################################################
                # mesh_pos_all = np.stack(mesh_pos_all)
                # mesh_quat_all = np.stack(mesh_quat_all)
                # mesh_scale_all = np.stack(mesh_scale_all)
                # state_mesh = ep_data_grp.create_group("state_mesh")
                # state_mesh.create_dataset("mesh_pos", data=mesh_pos_all[:,mesh_ids,:])
                # state_mesh.create_dataset("mesh_quat", data=mesh_quat_all[:,mesh_ids,:])
                # state_mesh.create_dataset("mesh_scale", data=mesh_scale_all[:,mesh_ids,:])
                # state_mesh.create_dataset("mesh_names", data=mesh_names_star)
                ###########################################################################################
                # get pos quat这是错误的示范，这些信息是对模型stl的调整，不是最终呈现出的样子
                # body_pos = env.env.sim.model.body_pos
                # body_quat = env.env.sim.model.body_quat
                # site_pos = env.env.sim.model.site_pos
                # site_quat = env.env.sim.model.site_quat
                # mesh_pos = env.env.sim.model.mesh_pos
                # mesh_quat = env.env.sim.model.mesh_quat
                # mesh_scale = env.env.sim.model.mesh_scale
                # body_pos_all.append(body_pos)
                # body_quat_all.append(body_quat)
                # site_pos_all.append(site_pos)
                # site_quat_all.append(site_quat)
                # mesh_pos_all.append(mesh_pos)
                # mesh_quat_all.append(mesh_quat)
                # mesh_scale_all.append(mesh_scale)
                # 示范2
                # obj_pos = env.env.sim.model.body_pos
                # obj_quat = env.env.sim.model.body_quat
                # obj_pos_all.append(body_pos)
                # obj_quat_all.append(body_quat)

                num_success += 1

            num_replays += 1

            # Record success/false and initial environment state in metainfo dict
            task_key = task_description.replace(" ", "_")
            episode_key = f"demo_{i}"
            if task_key not in metainfo_json_dict:
                metainfo_json_dict[task_key] = {}
            if episode_key not in metainfo_json_dict[task_key]:
                metainfo_json_dict[task_key][episode_key] = {}
            metainfo_json_dict[task_key][episode_key]["success"] = bool(done)
            metainfo_json_dict[task_key][episode_key]["initial_state"] = orig_states[0].tolist()

            # Write metainfo dict to JSON file
            # (We repeatedly overwrite, rather than doing this once at the end, just in case the script crashes midway)
            with open(metainfo_json_out_path, "w") as f:
                json.dump(metainfo_json_dict, f, indent=2)

            # Count total number of successful replays so far
            print(
                f"Total # episodes replayed: {num_replays}, Total # successes: {num_success} ({num_success / num_replays * 100:.1f} %)"
            )

            # Report total number of no-op actions filtered out so far
            print(f"  Total # no-op actions filtered out: {num_noops}")

        # Close HDF5 files
        orig_data_file.close()
        new_data_file.close()
        print(f"Saved regenerated demos for task '{task_description}' at: {new_data_path}")

    print(f"Dataset regeneration complete! Saved new dataset at: {args.libero_target_dir}")
    print(f"Saved metainfo JSON at: {metainfo_json_out_path}")
    print('\n\n-----------------------------------------\n\n')


if __name__ == "__main__":
    # Build the command-line interface
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--libero_task_suite",
        type=str,
        required=True,
        choices=["libero_spatial", "libero_object", "libero_goal", "libero_10", "libero_90"],
        help="LIBERO task suite. Example: libero_spatial",
    )
    cli.add_argument(
        "--libero_raw_data_dir",
        type=str,
        required=True,
        help="Path to directory containing raw HDF5 dataset. Example: ./LIBERO/libero/datasets/libero_spatial",
    )
    cli.add_argument(
        "--libero_target_dir",
        type=str,
        required=True,
        help="Path to regenerated dataset directory. Example: ./LIBERO/libero/datasets/libero_spatial_no_noops",
    )

    # Parse arguments and start data regeneration
    main(cli.parse_args())
