import os
from os.path import join, dirname, abspath
import numpy as np
import pickle
from isaacgym import gymapi
from isaacgym import gymutil
from isaacgym import gymtorch
import torch
import trimesh
import time
import argparse
from tqdm import tqdm
from legged_gym.scripts.diff_quat import vec6d_to_quat, quat_to_matrix, vec6d_to_matrix, flip_quat_by_w, broadcast_quat_apply, broadcast_quat_multiply
import open3d as o3d


# Energy budgets used to grade success under increasingly permissive limits:
# a sequence passes budget i only while its cumulative-max energy metric
# (see compute_regularizers / compute_cumu_max) stays below the threshold.
ENERGY_JAYLON_THRESHOLDS = [2.5e5, 5e5, 1e6, 2e6, 4e6, 8e6]


def init_common_eval_results():
    """Create the per-sequence metric dict shared by every task evaluator.

    Returns a dict mapping metric name -> empty list; evaluators append one
    entry per sequence.  Includes one "success_energy_i" key per threshold
    in ENERGY_JAYLON_THRESHOLDS.
    """
    metric_names = [
        "episode_length",              # second
        "torque",                      # torque's unit
        "success",                     # True / False
        "success_average_on_energies", # float32
    ]
    # One success flag per energy budget.
    metric_names += ["success_energy_{}".format(str(i)) for i in range(len(ENERGY_JAYLON_THRESHOLDS))]
    return {name: [] for name in metric_names}


def get_target_sitting_height(p, radius=0.05):
    """
    p: a posed chair/sofa/bed dense point cloud, dtype = np.float32, shape = (N, 3)
    radius: half-width (m) of the square window around the xy bounding-box center.

    Returns the maximum z among points strictly inside the window, i.e. the
    estimated height of the sitting surface.  Raises ValueError when no point
    falls inside the window (empty-slice max).
    """
    mins = p.min(axis=0)
    maxs = p.max(axis=0)
    cx = (mins[0] + maxs[0]) * 0.5
    cy = (mins[1] + maxs[1]) * 0.5
    # Strict inequalities on both sides, matching a square window of side 2*radius.
    near_cx = (p[:, 0] > cx - radius) & (p[:, 0] < cx + radius)
    near_cy = (p[:, 1] > cy - radius) & (p[:, 1] < cy + radius)
    return p[near_cx & near_cy][:, 2].max()


def point_distances(A, B):
    """
    A: np.float32, shape = (N, 2/3), unit: m
    B: np.float32, shape = (M, 2/3), unit: m
    return np.float32, shape = (N, M), unit: m

    Pairwise Euclidean distances via the ||a||^2 + ||b||^2 - 2 a.b expansion;
    the clip guards against tiny negative values from floating-point error.
    """
    sq_norms_a = (A ** 2).sum(axis=1)[:, None]   # (N, 1), broadcast over columns
    sq_norms_b = (B ** 2).sum(axis=1)[None, :]   # (1, M), broadcast over rows
    cross = A @ B.T                              # (N, M)
    sq_dists = (sq_norms_a + sq_norms_b - 2 * cross).clip(0, None)
    return sq_dists ** 0.5


def compute_regularizers(seq_results):
    """Compute per-frame regularizer statistics for a tracked sequence.

    seq_results: dict containing
        "tracked_torques": np.ndarray, shape (N_frame, N_dof)
        "tracked_dof_vels": np.ndarray, shape (N_frame, N_dof)

    Returns a tuple of three arrays, each shape (N_frame,):
        mean_torque        -- mean |torque| over DOFs
        mean_dof_vel       -- mean |dof velocity| over DOFs
        mean_energy_jaylon -- mean squared mechanical power (torque*vel)^2 over DOFs
    """
    torques = seq_results["tracked_torques"]
    dof_vels = seq_results["tracked_dof_vels"]
    mean_torque = np.abs(torques).mean(axis=1)
    mean_dof_vel = np.abs(dof_vels).mean(axis=1)
    mean_energy_jaylon = np.square(torques * dof_vels).mean(axis=1)
    # Bug fix: previously returned mean_torque twice and silently dropped
    # mean_dof_vel.  Callers in this file only use indices 0 and 2, so the
    # fix is backward-compatible.
    return mean_torque, mean_dof_vel, mean_energy_jaylon


def compute_cumu_max(energy):
    """Running maximum of `energy`, floored at 0.

    energy: np.ndarray, shape (N_frame,)
    Returns a float64 array of the same shape where
        out[i] = max(0, energy[0], ..., energy[i]).

    Vectorized with np.maximum.accumulate instead of a Python loop; clipping
    each element at 0 first reproduces the original max_energy = 0 seed, and
    the float64 cast matches the original np.zeros output dtype.
    """
    return np.maximum.accumulate(np.clip(energy, 0, None)).astype(np.float64)


def judge_success_with_keeping_time_threshold(valid, keeping_time_threshold=1.0, fps=50):
    """Success iff the trailing streak of valid frames is held long enough.

    valid: bool/int np.ndarray, shape (N_frame,); 0 marks an invalid frame.
    keeping_time_threshold: required held duration in seconds (strict >).
    fps: frames per second of the rollout.

    NOTE: when every frame is valid the streak is measured from frame 0,
    i.e. (N_frame - 1) / fps, matching the original convention.
    """
    n_frames = valid.shape[0]
    invalid_idxs = np.where(valid == 0)[0]
    # Last frame that broke the condition; 0 when the whole rollout is valid.
    last_invalid = invalid_idxs.max() if invalid_idxs.size > 0 else 0
    held_seconds = (n_frames - 1 - last_invalid) / fps
    return held_seconds > keeping_time_threshold


def evaluation_sit_chair(results, fps=50, H1_scene_distance_threshold=0.27, keeping_time_threshold=0.3):
    """Evaluate "sit on chair / sofa" sequences.

    A frame is valid when the root is between 0 and H1_scene_distance_threshold
    meters above the estimated seat surface and horizontally within 5 cm of the
    scene.  Success requires the trailing valid streak to exceed
    keeping_time_threshold seconds; success is additionally graded under each
    energy budget in ENERGY_JAYLON_THRESHOLDS.

    results: list of per-sequence dicts (tracked states, torques, scene mesh).
    Returns a dict of per-sequence metric lists (see init_common_eval_results).
    """
    eval_results = init_common_eval_results()
    eval_results["min_H1_scene_z_distance"] = []  # m

    for seq_results in tqdm(results):
        N_frame = seq_results["tracked_next_states"].shape[0]
        if N_frame < 10:
            # Degenerate rollout; skip it entirely (no metrics recorded).
            print("error", N_frame)
            continue
        episode_length = N_frame / fps  # seconds

        regularizers = compute_regularizers(seq_results)
        cumu_max_energy_jaylon = compute_cumu_max(regularizers[2])

        # Densely sample the scene surface so distances can be computed point-wise.
        scene_mesh = o3d.geometry.TriangleMesh(vertices=o3d.utility.Vector3dVector(seq_results["scene_mesh"]["vertices"]), triangles=o3d.utility.Vector3iVector(seq_results["scene_mesh"]["faces"]))
        scene_pcd = scene_mesh.sample_points_uniformly(number_of_points=10000)
        scene_pts = np.float32(scene_pcd.points)  # (10000, 3)
        try:
            target_sitting_height = get_target_sitting_height(scene_pts)
        except ValueError:  # was a bare `except:`; .max() raises ValueError on an empty window
            # No sampled point within the default 5 cm window -> retry wider.
            target_sitting_height = get_target_sitting_height(scene_pts, radius=0.10)

        if ("global_information_tracking" in seq_results) and (not seq_results["global_information_tracking"]):  # for proprioception tracking
            H1_root_positions = seq_results["tracked_next_root_states"][:, :3]  # (N_frame, 3)
        else:  # for global information tracking
            # NOTE(review): obs slice 338:341 assumed to be the global root
            # position -- confirm against the tracker's observation packing.
            H1_root_positions = seq_results["tracked_next_states"][:, 338:341]  # (N_frame, 3)
        # 3D root-to-scene distance: only used by the commented-out alternative
        # validity check below; kept for reference.
        H1_scene_distance = point_distances(H1_root_positions, scene_pts).min(axis=1)  # (N_frame,)
        H1_scene_z_distance = H1_root_positions[:, 2] - target_sitting_height
        H1_scene_xy_distance = point_distances(H1_root_positions[:, :2], scene_pts[:, :2]).min(axis=1)  # (N_frame,)
        # H1_scene_distance_valid = (H1_scene_distance < H1_scene_distance_threshold) & (H1_scene_xy_distance < 0.05)
        H1_scene_distance_valid = (0.0 < H1_scene_z_distance) & (H1_scene_z_distance < H1_scene_distance_threshold) & (H1_scene_xy_distance < 0.05)

        eval_results["episode_length"].append(episode_length)
        eval_results["min_H1_scene_z_distance"].append(H1_scene_z_distance.min())
        eval_results["torque"].append(regularizers[0].mean())
        eval_results["success"].append(judge_success_with_keeping_time_threshold(H1_scene_distance_valid, keeping_time_threshold=keeping_time_threshold, fps=fps))
        success_with_energy_metrics = []
        for i, ejt in enumerate(ENERGY_JAYLON_THRESHOLDS):
            # A frame only counts while the cumulative-max energy stays in budget.
            success_flag = judge_success_with_keeping_time_threshold(H1_scene_distance_valid & (cumu_max_energy_jaylon < ejt), keeping_time_threshold=keeping_time_threshold, fps=fps)
            success_with_energy_metrics.append(success_flag)
            eval_results["success_energy_{}".format(str(i))].append(success_flag)
        eval_results["success_average_on_energies"].append(np.mean(np.float32(success_with_energy_metrics)))

    return eval_results


def evaluation_lie_bed(results, fps=50, H1_scene_distance_threshold=0.40, xy_threshold=0.05, keeping_time_threshold=0.3):
    """Evaluate "lie on bed" sequences.

    A frame is valid when the root height is within (0.05,
    H1_scene_distance_threshold) meters above the bed surface, the root is
    horizontally within xy_threshold of the bed, and BOTH ankles are
    horizontally over the bed (i.e. the whole body is on the mattress).
    Success requires the trailing valid streak to exceed
    keeping_time_threshold seconds, graded per energy budget as well.

    results: list of per-sequence dicts (tracked states, torques, scene mesh).
    Returns a dict of per-sequence metric lists (see init_common_eval_results).
    """
    eval_results = init_common_eval_results()
    eval_results["min_H1_scene_z_distance"] = []  # m

    for seq_results in tqdm(results):
        N_frame = seq_results["tracked_next_states"].shape[0]
        episode_length = N_frame / fps  # seconds

        regularizers = compute_regularizers(seq_results)
        cumu_max_energy_jaylon = compute_cumu_max(regularizers[2])

        # Densely sample the scene surface so distances can be computed point-wise.
        scene_mesh = o3d.geometry.TriangleMesh(vertices=o3d.utility.Vector3dVector(seq_results["scene_mesh"]["vertices"]), triangles=o3d.utility.Vector3iVector(seq_results["scene_mesh"]["faces"]))
        scene_pcd = scene_mesh.sample_points_uniformly(number_of_points=10000)
        scene_pts = np.float32(scene_pcd.points)  # (10000, 3)
        target_sitting_height = get_target_sitting_height(scene_pts, radius=0.10)

        if ("global_information_tracking" in seq_results) and (not seq_results["global_information_tracking"]):  # for proprioception tracking
            H1_root_positions = seq_results["tracked_next_root_states"][:, :3]  # (N_frame, 3)
            H1_root_rotations = quat_to_matrix(torch.from_numpy(seq_results["tracked_next_root_states"][:, 3:7]))  # (N_frame, 3, 3)
        else:  # for global information tracking
            # NOTE(review): obs slices 338:341 / 341:345 assumed to be global
            # root position / quaternion -- confirm against the obs packing.
            H1_root_positions = seq_results["tracked_next_states"][:, 338:341]  # (N_frame, 3)
            H1_root_rotations = quat_to_matrix(torch.from_numpy(seq_results["tracked_next_states"][:, 341:345]))  # (N_frame, 3, 3)
        H1_scene_z_distance = H1_root_positions[:, 2] - target_sitting_height
        H1_scene_xy_distance = point_distances(H1_root_positions[:, :2], scene_pts[:, :2]).min(axis=1)  # (N_frame,)
        H1_scene_distance_valid = (0.05 < H1_scene_z_distance) & (H1_scene_z_distance < H1_scene_distance_threshold) & (H1_scene_xy_distance < xy_threshold)
        # Root-frame ankle offsets (obs slices 53:56 / 68:71) rotated into the
        # world frame and translated by the root position.
        H1_left_ankle_positions = torch.einsum('bi,bij->bj', torch.from_numpy(seq_results["tracked_next_states"][:, 53:56]), H1_root_rotations.permute(0, 2, 1)).detach().cpu().numpy() + H1_root_positions
        H1_right_ankle_positions = torch.einsum('bi,bij->bj', torch.from_numpy(seq_results["tracked_next_states"][:, 68:71]), H1_root_rotations.permute(0, 2, 1)).detach().cpu().numpy() + H1_root_positions
        H1_left_ankle_xy_valid = point_distances(H1_left_ankle_positions[:, :2], scene_pts[:, :2]).min(axis=1) < xy_threshold
        H1_right_ankle_xy_valid = point_distances(H1_right_ankle_positions[:, :2], scene_pts[:, :2]).min(axis=1) < xy_threshold
        # H1_valid = H1_scene_distance_valid
        H1_valid = H1_scene_distance_valid & H1_left_ankle_xy_valid & H1_right_ankle_xy_valid

        eval_results["episode_length"].append(episode_length)
        eval_results["min_H1_scene_z_distance"].append(H1_scene_z_distance.min())
        eval_results["torque"].append(regularizers[0].mean())
        eval_results["success"].append(judge_success_with_keeping_time_threshold(H1_valid, keeping_time_threshold=keeping_time_threshold, fps=fps))
        success_with_energy_metrics = []
        for i, ejt in enumerate(ENERGY_JAYLON_THRESHOLDS):
            # A frame only counts while the cumulative-max energy stays in budget.
            success_flag = judge_success_with_keeping_time_threshold(H1_valid & (cumu_max_energy_jaylon < ejt), keeping_time_threshold=keeping_time_threshold, fps=fps)
            success_with_energy_metrics.append(success_flag)
            eval_results["success_energy_{}".format(str(i))].append(success_flag)
        eval_results["success_average_on_energies"].append(np.mean(np.float32(success_with_energy_metrics)))

    return eval_results


def evaluation_lie_sofa(results, fps=50, H1_scene_distance_threshold=0.40, xy_threshold=0.05, keeping_time_threshold=0.3):
    """Evaluate "lie on sofa" sequences.

    Same structure as evaluation_lie_bed, but instead of requiring both
    ankles horizontally over the furniture, it requires each ankle's world z
    to exceed half of the root-to-surface height -- i.e. the legs must be
    raised onto the sofa rather than hanging off it.

    results: list of per-sequence dicts (tracked states, torques, scene mesh).
    Returns a dict of per-sequence metric lists (see init_common_eval_results).
    """
    eval_results = init_common_eval_results()
    eval_results["min_H1_scene_z_distance"] = []  # m

    for seq_results in tqdm(results):
        N_frame = seq_results["tracked_next_states"].shape[0]
        episode_length = N_frame / fps  # seconds

        regularizers = compute_regularizers(seq_results)
        cumu_max_energy_jaylon = compute_cumu_max(regularizers[2])

        # Densely sample the scene surface so distances can be computed point-wise.
        scene_mesh = o3d.geometry.TriangleMesh(vertices=o3d.utility.Vector3dVector(seq_results["scene_mesh"]["vertices"]), triangles=o3d.utility.Vector3iVector(seq_results["scene_mesh"]["faces"]))
        scene_pcd = scene_mesh.sample_points_uniformly(number_of_points=10000)
        scene_pts = np.float32(scene_pcd.points)  # (10000, 3)
        target_sitting_height = get_target_sitting_height(scene_pts, radius=0.10)

        if ("global_information_tracking" in seq_results) and (not seq_results["global_information_tracking"]):  # for proprioception tracking
            H1_root_positions = seq_results["tracked_next_root_states"][:, :3]  # (N_frame, 3)
            H1_root_rotations = quat_to_matrix(torch.from_numpy(seq_results["tracked_next_root_states"][:, 3:7]))  # (N_frame, 3, 3)
        else:  # for global information tracking
            # NOTE(review): obs slices 338:341 / 341:345 assumed to be global
            # root position / quaternion -- confirm against the obs packing.
            H1_root_positions = seq_results["tracked_next_states"][:, 338:341]  # (N_frame, 3)
            H1_root_rotations = quat_to_matrix(torch.from_numpy(seq_results["tracked_next_states"][:, 341:345]))  # (N_frame, 3, 3)
        H1_scene_z_distance = H1_root_positions[:, 2] - target_sitting_height
        H1_scene_xy_distance = point_distances(H1_root_positions[:, :2], scene_pts[:, :2]).min(axis=1)  # (N_frame,)
        H1_scene_distance_valid = (0.05 < H1_scene_z_distance) & (H1_scene_z_distance < H1_scene_distance_threshold) & (H1_scene_xy_distance < xy_threshold)
        # Root-frame ankle offsets (obs slices 53:56 / 68:71) rotated into the
        # world frame and translated by the root position.
        H1_left_ankle_positions = torch.einsum('bi,bij->bj', torch.from_numpy(seq_results["tracked_next_states"][:, 53:56]), H1_root_rotations.permute(0, 2, 1)).detach().cpu().numpy() + H1_root_positions
        H1_right_ankle_positions = torch.einsum('bi,bij->bj', torch.from_numpy(seq_results["tracked_next_states"][:, 68:71]), H1_root_rotations.permute(0, 2, 1)).detach().cpu().numpy() + H1_root_positions
        # Ankles must be lifted at least half as high as the root's clearance.
        H1_left_ankle_z_valid = H1_left_ankle_positions[:, 2] > (H1_scene_z_distance * 0.5)
        H1_right_ankle_z_valid = H1_right_ankle_positions[:, 2] > (H1_scene_z_distance * 0.5)
        # H1_valid = H1_scene_distance_valid
        H1_valid = H1_scene_distance_valid & H1_left_ankle_z_valid & H1_right_ankle_z_valid

        eval_results["episode_length"].append(episode_length)
        eval_results["min_H1_scene_z_distance"].append(H1_scene_z_distance.min())
        eval_results["torque"].append(regularizers[0].mean())
        eval_results["success"].append(judge_success_with_keeping_time_threshold(H1_valid, keeping_time_threshold=keeping_time_threshold, fps=fps))
        success_with_energy_metrics = []
        for i, ejt in enumerate(ENERGY_JAYLON_THRESHOLDS):
            # A frame only counts while the cumulative-max energy stays in budget.
            success_flag = judge_success_with_keeping_time_threshold(H1_valid & (cumu_max_energy_jaylon < ejt), keeping_time_threshold=keeping_time_threshold, fps=fps)
            success_with_energy_metrics.append(success_flag)
            eval_results["success_energy_{}".format(str(i))].append(success_flag)
        eval_results["success_average_on_energies"].append(np.mean(np.float32(success_with_energy_metrics)))

    return eval_results


def evaluation_touch_point(results, fps=50, hand_distance_threshold=0.10, keeping_time_threshold=1.0):
    """Evaluate "touch target point" sequences.

    Hand positions are reconstructed by applying a fixed 0.30 m forward
    offset in each elbow's frame (elbow frames are chained as
    root * elbow-to-root).  A frame is valid when BOTH hands are within
    hand_distance_threshold meters of their kinematic task targets; success
    requires the trailing valid streak to exceed keeping_time_threshold
    seconds, graded per energy budget as well.

    results: list of per-sequence dicts (tracked states, torques, task targets).
    Returns a dict of per-sequence metric lists (see init_common_eval_results).
    """
    eval_results = init_common_eval_results()
    # Populated only by the commented-out tracking-metric variant below;
    # left in place so downstream consumers keep seeing the keys.
    eval_results["tracking_keeping_time"] = []  # second
    eval_results["tracking_success"] = []  # True / False


    for seq_results in tqdm(results):
        N_frame = seq_results["tracked_next_states"].shape[0]
        episode_length = N_frame / fps  # seconds

        regularizers = compute_regularizers(seq_results)
        cumu_max_energy_jaylon = compute_cumu_max(regularizers[2])

        # Hand point expressed in the elbow frame (0.30 m along local +x).
        hand_pos_to_elbow = torch.tensor([0.30, 0, 0]).to(torch.float32)
        if ("global_information_tracking" in seq_results) and (not seq_results["global_information_tracking"]):  # for proprioception tracking
            H1_root_positions = seq_results["tracked_next_root_states"][:, :3]  # (N_frame, 3)
            H1_root_rotations = quat_to_matrix(torch.from_numpy(seq_results["tracked_next_root_states"][:, 3:7]))  # (N_frame, 3, 3)
        else:  # for global information tracking
            # NOTE(review): obs slices 338:341 / 341:345 assumed to be global
            # root position / quaternion -- confirm against the obs packing.
            H1_root_positions = seq_results["tracked_next_states"][:, 338:341]  # (N_frame, 3)
            H1_root_rotations = quat_to_matrix(torch.from_numpy(seq_results["tracked_next_states"][:, 341:345]))  # (N_frame, 3, 3)
        # Elbow positions: root-frame offsets rotated into world, plus root position.
        H1_left_elbow_positions = torch.einsum('bi,bij->bj', torch.from_numpy(seq_results["tracked_next_states"][:, 83:86]), H1_root_rotations.permute(0, 2, 1)) + H1_root_positions  # (N_frame, 3)
        H1_right_elbow_positions = torch.einsum('bi,bij->bj', torch.from_numpy(seq_results["tracked_next_states"][:, 95:98]), H1_root_rotations.permute(0, 2, 1)) + H1_root_positions  # (N_frame, 3)
        # Elbow orientations come as 6D rotation vectors relative to the root.
        H1_left_elbow_rotations_to_root = vec6d_to_matrix(torch.from_numpy(seq_results["tracked_next_states"][:, 188:194]).reshape(N_frame, 3, 2))  # (N_frame, 3, 3)
        H1_right_elbow_rotations_to_root = vec6d_to_matrix(torch.from_numpy(seq_results["tracked_next_states"][:, 212:218]).reshape(N_frame, 3, 2))  # (N_frame, 3, 3)
        H1_left_elbow_rotations = torch.einsum('bij,bjk->bik', H1_root_rotations, H1_left_elbow_rotations_to_root)  # (N_frame, 3, 3)
        H1_right_elbow_rotations = torch.einsum('bij,bjk->bik', H1_root_rotations, H1_right_elbow_rotations_to_root)  # (N_frame, 3, 3)
        H1_left_hand_position = torch.matmul(hand_pos_to_elbow, H1_left_elbow_rotations.permute(0, 2, 1)) + H1_left_elbow_positions  # (N_frame, 3)
        H1_right_hand_position = torch.matmul(hand_pos_to_elbow, H1_right_elbow_rotations.permute(0, 2, 1)) + H1_right_elbow_positions  # (N_frame, 3)

        # # tracking
        # target_left_elbow_positions = torch.from_numpy(seq_results["tracked_next_states"][:, 453:456])  # (N_frame, 3)
        # target_right_elbow_positions = torch.from_numpy(seq_results["tracked_next_states"][:, 465:468])  # (N_frame, 3)
        # target_left_elbow_rotations = vec6d_to_matrix(torch.from_numpy(seq_results["tracked_next_states"][:, 558:564]).reshape(N_frame, 3, 2))  # (N_frame, 3, 3)
        # target_right_elbow_rotations = vec6d_to_matrix(torch.from_numpy(seq_results["tracked_next_states"][:, 582:588]).reshape(N_frame, 3, 2))  # (N_frame, 3, 3)
        # target_left_hand_position = torch.matmul(hand_pos_to_elbow, target_left_elbow_rotations.permute(0, 2, 1)) + target_left_elbow_positions  # (N_frame, 3)
        # target_right_hand_position = torch.matmul(hand_pos_to_elbow, target_right_elbow_rotations.permute(0, 2, 1)) + target_right_elbow_positions  # (N_frame, 3)
        # left_hand_distance = ((H1_left_hand_position - target_left_hand_position)**2).sum(dim=-1)**0.5
        # right_hand_distance = ((H1_right_hand_position - target_right_hand_position)**2).sum(dim=-1)**0.5
        # H1_valid = (left_hand_distance < hand_distance_threshold) & (right_hand_distance < hand_distance_threshold)
        # if H1_valid.all():
        #     last_distance_invalid_frame_idx = 0
        # else:
        #     last_distance_invalid_frame_idx = np.where(H1_valid == 0)[0].max()
        # keeping_time = (N_frame - 1 - last_distance_invalid_frame_idx) / fps
        # keeping_time_valid = keeping_time > keeping_time_threshold
        # success_flag = keeping_time_valid
        # eval_results["episode_length"].append(episode_length)
        # eval_results["tracking_keeping_time"].append(keeping_time)
        # eval_results["tracking_success"].append(success_flag)

        # task: compare reconstructed hands against the fixed task targets
        # (first 3 entries = left-hand target, next 3 = right-hand target).
        task_target_hand_positions = seq_results["kinematic_task_target_hand_positions"]
        left_hand_distance = ((H1_left_hand_position - task_target_hand_positions[0:3])**2).sum(dim=-1)**0.5
        right_hand_distance = ((H1_right_hand_position - task_target_hand_positions[3:6])**2).sum(dim=-1)**0.5
        H1_valid = (left_hand_distance < hand_distance_threshold).detach().cpu().numpy() & (right_hand_distance < hand_distance_threshold).detach().cpu().numpy()

        eval_results["episode_length"].append(episode_length)
        eval_results["torque"].append(regularizers[0].mean())
        eval_results["success"].append(judge_success_with_keeping_time_threshold(H1_valid, keeping_time_threshold=keeping_time_threshold, fps=fps))
        success_with_energy_metrics = []
        for i, ejt in enumerate(ENERGY_JAYLON_THRESHOLDS):
            # A frame only counts while the cumulative-max energy stays in budget.
            success_flag = judge_success_with_keeping_time_threshold(H1_valid & (cumu_max_energy_jaylon < ejt), keeping_time_threshold=keeping_time_threshold, fps=fps)
            success_with_energy_metrics.append(success_flag)
            eval_results["success_energy_{}".format(str(i))].append(success_flag)
        eval_results["success_average_on_energies"].append(np.mean(np.float32(success_with_energy_metrics)))

    return eval_results


def evaluation(results, task, fps=50):
    """Dispatch to the task-specific evaluator.

    task: one of "sit" (chair/sofa), "lie" (bed), "lie_sofa", "touch".
    Raises NotImplementedError for any other task name.
    """
    evaluators = {
        "sit": evaluation_sit_chair,       # sit chair, sit sofa
        "lie": evaluation_lie_bed,         # lie bed
        "lie_sofa": evaluation_lie_sofa,   # lie sofa
        "touch": evaluation_touch_point,
    }
    if task not in evaluators:
        raise NotImplementedError
    return evaluators[task](results, fps=fps)


def visualize(results):
    """Replay tracked and retargeted motions side by side in an Isaac Gym viewer.

    For each sequence, one env is created containing the scene triangle mesh
    and two H1 actors: the tracked motion and (tinted green) the retargeted
    reference motion.  Both actors are posed kinematically every frame from
    the recorded observations (base fixed, gravity disabled).  Blocks until
    all frames have played or the viewer window is closed.

    results: list of per-sequence dicts with "tracker_inputs", "scene_mesh",
    and (for proprioception tracking) "root_states".

    NOTE(review): the observation slice layout used below (0:19 dof pos,
    338:341 root pos, 341:345 root quat, 38:98 body positions, ...) is
    assumed from the tracker's obs packing -- confirm against the env code.
    """
    N_env = len(results)

    # setup gym envs
    gym = gymapi.acquire_gym()
    custom_parameters = [
        {"name": "--controller", "type": str, "default": "ik",
        "help": "Controller to use for Franka. Options are {ik, osc}"},
        {"name": "--show_axis", "action": "store_true", "help": "Visualize DOF axis"},
        {"name": "--speed_scale", "type": float, "default": 1.0, "help": "Animation speed scale"},
        {"name": "--num_envs", "type": int, "default": -1, "help": "Number of environments to create"},
        {"name": "--task", "type": str, "default": None, "help": "和main一致"},
    ]
    args = gymutil.parse_arguments(
        description="test",
        custom_parameters=custom_parameters,
    )
    device = args.sim_device if args.use_gpu_pipeline else 'cpu'
    sim_params = gymapi.SimParams()
    # Low sim rate: actors are posed directly, physics fidelity is irrelevant here.
    sim_fps = 5
    sim_params.dt = dt = 1.0 / sim_fps
    gymutil.parse_sim_config({"gravity": [0.0, 0.0, -9.81], "up_axis": 1}, sim_params)  # 0 is y, 1 is z
    if args.physics_engine == gymapi.SIM_PHYSX:
        sim_params.physx.solver_type = 1
        sim_params.physx.num_position_iterations = 6
        sim_params.physx.num_velocity_iterations = 0
        sim_params.physx.num_threads = args.num_threads
        sim_params.physx.use_gpu = args.use_gpu
    else:
        raise Exception("This example can only be used with PhysX")
    sim_params.use_gpu_pipeline = False
    if args.use_gpu_pipeline:
        print("WARNING: Forcing CPU pipeline.")
    sim = gym.create_sim(args.compute_device_id, args.graphics_device_id, args.physics_engine, sim_params)
    if sim is None:
        raise Exception("Failed to create sim")
    viewer = gym.create_viewer(sim, gymapi.CameraProperties())
    if viewer is None:
        raise Exception("Failed to create viewer")
    asset_root = join(dirname(abspath(__file__)), "../../resources/robots")
    # h1_asset_file = "h1/urdf/h1_fix_upper_body.urdf"
    h1_asset_file = "h1/urdf/h1.urdf"
    asset_options = gymapi.AssetOptions()
    asset_options.armature = 0.01
    # Fixed base + no gravity: actors are driven purely by the recorded poses.
    asset_options.fix_base_link = True
    asset_options.disable_gravity = True
    asset_options.flip_visual_attachments = False  # set to True when using .dae visual meshes
    h1_asset = gym.load_asset(sim, asset_root, h1_asset_file, asset_options)
    h1_dof_names = gym.get_asset_dof_names(h1_asset)
    h1_dof_props = gym.get_asset_dof_properties(h1_asset)
    h1_num_dofs = gym.get_asset_dof_count(h1_asset)
    h1_dof_states = np.zeros(h1_num_dofs, dtype=gymapi.DofState.dtype)
    h1_dof_types = [gym.get_asset_dof_type(h1_asset, i) for i in range(h1_num_dofs)]
    h1_dof_positions = h1_dof_states['pos']
    h1_lower_limits = h1_dof_props["lower"]
    h1_upper_limits = h1_dof_props["upper"]
    h1_ranges = h1_upper_limits - h1_lower_limits
    h1_mids = 0.3 * (h1_upper_limits + h1_lower_limits)
    h1_stiffnesses = h1_dof_props['stiffness']
    h1_dampings = h1_dof_props['damping']
    h1_armatures = h1_dof_props['armature']
    h1_has_limits = h1_dof_props['hasLimits']
    h1_dof_props['hasLimits'] = np.array([True]*h1_num_dofs)
    num_envs = N_env
    num_per_row = int(np.sqrt(num_envs))
    env_lower = gymapi.Vec3(0., 0., 0.)
    env_upper = gymapi.Vec3(0., 0., 0.)
    print("Creating %d environments" % num_envs)
    plane_params = gymapi.PlaneParams()
    plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
    gym.add_ground(sim, plane_params)

    # set env origins: lay envs out on a grid, `spacing` meters apart
    env_origins = torch.zeros(N_env, 3, device=device, requires_grad=False)
    num_cols = np.floor(np.sqrt(N_env))
    num_rows = np.ceil(N_env / num_cols)
    xx, yy = torch.meshgrid(torch.arange(num_rows), torch.arange(num_cols))
    spacing = 5.0
    env_origins[:, 0] = spacing * xx.flatten()[:N_env]
    env_origins[:, 1] = spacing * yy.flatten()[:N_env]
    env_origins[:, 2] = 0.

    # set object params
    object_asset_options = gymapi.AssetOptions()
    object_asset_options.fix_base_link = True
    object_asset_options.use_mesh_materials = True
    object_asset_options.mesh_normal_mode = gymapi.COMPUTE_PER_VERTEX
    object_asset_options.override_inertia = True
    object_asset_options.override_com = True
    object_asset_options.vhacd_enabled = False
    object_pose = gymapi.Transform()

    envs = []
    actor_handles = []                    # tracked-motion actors (even sim indices)
    retargeted_motion_actor_handles = []  # retargeted reference actors (odd sim indices)

    for i in range(N_env):

        seq = results[i]

        # create env
        env = gym.create_env(sim, env_lower, env_upper, int(np.sqrt(N_env)))
        envs.append(env)

        # add object: the scene mesh is inserted as a static triangle mesh at the env origin
        object_vertices, object_faces = np.float32(seq["scene_mesh"]["vertices"]).copy(), np.uint32(seq["scene_mesh"]["faces"]).copy()
        tm_params = gymapi.TriangleMeshParams()
        tm_params.nb_vertices = object_vertices.shape[0]
        tm_params.nb_triangles = object_faces.shape[0]
        object_info = {"vertices": object_vertices, "faces": object_faces, "tm_params": tm_params}
        env_ori = env_origins[i].detach().cpu().numpy()
        object_info["tm_params"].transform.p.x = env_ori[0] + 0.0
        object_info["tm_params"].transform.p.y = env_ori[1] + 0.0
        object_info["tm_params"].transform.p.z = 0.0
        gym.add_triangle_mesh(sim, object_info["vertices"].flatten(), object_info["faces"].flatten(), object_info["tm_params"])

        # add actor
        pose = gymapi.Transform()
        pose.p = gymapi.Vec3(0.0, 0.0, 1.05)
        pose.r = gymapi.Quat(*[0, 0, 0, 1])
        
        actor_handle = gym.create_actor(env, h1_asset, pose, "actor", i*2, 1)
        actor_handles.append(actor_handle)

        # Second actor shows the retargeted reference motion, tinted green.
        actor_handle = gym.create_actor(env, h1_asset, pose, "actor", i*2+1, 1)
        for link_idx in range(25):
            gym.set_rigid_body_color(env, actor_handle, link_idx, gymapi.MeshType.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(0.0, 0.8, 0.0))
        retargeted_motion_actor_handles.append(actor_handle)

        # set default DOF positions
        gym.set_actor_dof_states(env, actor_handle, h1_dof_states, gymapi.STATE_ALL)
    
    print("Creating a camera")
    cam_pos = gymapi.Vec3(0, -3, 2.0)
    cam_target = gymapi.Vec3(0, 3, 0)
    gym.viewer_camera_look_at(viewer, envs[0], cam_pos, cam_target)
    
    gym.prepare_sim(sim)

    result_obss = [results[i]["tracker_inputs"] for i in range(N_env)]
    N_frames = [result_obss[i].shape[0] for i in range(N_env)]
    # Sequences of different lengths are replayed together; shorter ones
    # freeze on their last frame (min(frame_idx, N_frames[i]-1) below).
    max_N_frame = np.int32(N_frames).max()

    overall_pred_left_hand_positions = None
    overall_pred_right_hand_positions = None
    for frame_idx in tqdm(range(max_N_frame)):
        gym.simulate(sim)
        gym.fetch_results(sim, True)

        actor_root_state = gym.acquire_actor_root_state_tensor(sim)
        root_states = gymtorch.wrap_tensor(actor_root_state)

        pred_left_hand_positions = np.zeros((N_env, 3))
        pred_right_hand_positions = np.zeros((N_env, 3))

        for i in range(N_env):

            # set global pose of the tracked-motion actor
            if ("global_information_tracking" in results[i]) and (not results[i]["global_information_tracking"]):  # for proprioception tracking
                t = torch.from_numpy(results[i]["root_states"][min(frame_idx, N_frames[i]-1), 0:3]).to(device) + env_origins[i]
                q_xyzw = torch.from_numpy(results[i]["root_states"][min(frame_idx, N_frames[i]-1), 3:7]).to(device)
            else:  # for global information tracking
                t = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 338:341]).to(device) + env_origins[i]
                q_xyzw = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 341:345]).to(device)
            root_states[i*2, :3] = t
            root_states[i*2, 3:7] = q_xyzw
            root_reset_actors_indices = torch.tensor([gym.get_actor_index(envs[i], actor_handles[i], gymapi.DOMAIN_SIM)]).to(dtype=torch.int32)
            gym.set_actor_root_state_tensor_indexed(sim, gymtorch.unwrap_tensor(root_states), gymtorch.unwrap_tensor(root_reset_actors_indices), 1)

            # set global pose of the retargeted reference actor
            if ("global_information_tracking" in results[i]) and (not results[i]["global_information_tracking"]):  # for proprioception tracking
                # Reference pose is given relative to the proprioceptive root; compose it with the tracked root.
                t_to_proprio_root = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 395:398]).to(device)
                q_xyzw_to_proprio_root = vec6d_to_quat(torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 455:461]).reshape(1, 3, 2)).reshape(4).to(device)
                t = broadcast_quat_apply(root_states[i*2, 3:7].to(device), t_to_proprio_root) + root_states[i*2, :3].to(device)
                q_xyzw = flip_quat_by_w(broadcast_quat_multiply(root_states[i*2, 3:7].to(device), q_xyzw_to_proprio_root)).to(device)
            else:
                t = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 408:411]).to(device) + env_origins[i]
                q_xyzw = vec6d_to_quat(torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 468:474]).reshape(1, 3, 2)).reshape(4).to(device)
            root_states[i*2+1, :3] = t
            root_states[i*2+1, 3:7] = q_xyzw
            root_reset_actors_indices = torch.tensor([gym.get_actor_index(envs[i], retargeted_motion_actor_handles[i], gymapi.DOMAIN_SIM)]).to(dtype=torch.int32)
            gym.set_actor_root_state_tensor_indexed(sim, gymtorch.unwrap_tensor(root_states), gymtorch.unwrap_tensor(root_reset_actors_indices), 1)

            # compute mean hand linear velocities
            pred = result_obss[i][min(frame_idx, N_frames[i]-1), 38:98].reshape(20, 3)  # shape = (20, 3)
            pred_R = result_obss[i][min(frame_idx, N_frames[i]-1), 98:218].reshape(20, 3, 2)  # shape = (20, 3, 2)
            pred_R = vec6d_to_matrix(torch.from_numpy(pred_R)).detach().cpu().numpy()  # shape = (20, 3, 3)
            # Hand point = 0.30 m along the elbow frame's x axis (bodies 15/19 = elbows, presumably).
            hand_pos_to_elbow = np.float32([0.30, 0, 0])
            pred_left_hand_positions[i] = hand_pos_to_elbow @ pred_R[15].T + pred[15]
            pred_right_hand_positions[i] = hand_pos_to_elbow @ pred_R[19].T + pred[19]

            # set joint angles
            h1_dof_states['pos'] = result_obss[i][min(frame_idx, N_frames[i]-1), 0:19]
            gym.set_actor_dof_states(envs[i], actor_handles[i], h1_dof_states, gymapi.STATE_POS)

            if ("global_information_tracking" in results[i]) and (not results[i]["global_information_tracking"]):  # for proprioception tracking
                h1_dof_states['pos'] = result_obss[i][min(frame_idx, N_frames[i]-1), 357:376]
            else:
                h1_dof_states['pos'] = result_obss[i][min(frame_idx, N_frames[i]-1), 370:389]
            gym.set_actor_dof_states(envs[i], retargeted_motion_actor_handles[i], h1_dof_states, gymapi.STATE_POS)

            # Debug printout for the first env only.
            if i == 0:
                print("xxxxxxxxxxxxx")
                print("[tracked_motion] root_pose =", root_states[i*2, 0:7])
                print("[tracked_motion] dof_pos =", result_obss[i][min(frame_idx, N_frames[i]-1), 0:19])
                print("[retargeted_motion] root_pose =", root_states[i*2+1, 0:7])
                if ("global_information_tracking" in results[i]) and (not results[i]["global_information_tracking"]):  # for proprioception tracking
                    print("[retargeted_motion] dof_pos =", result_obss[i][min(frame_idx, N_frames[i]-1), 357:376])
                else:
                    print("[retargeted_motion] dof_pos =", result_obss[i][min(frame_idx, N_frames[i]-1), 370:389])
                
                # # compute contact
                # # gym.simulate(sim)
                # # gym.fetch_results(sim, True)
                # contacts = gym.get_env_rigid_contacts(envs[i])
                # print(contacts.dtype)
                # print(contacts)

        # Accumulate per-frame hand positions to report mean finite-difference velocities.
        if overall_pred_left_hand_positions is None:
            overall_pred_left_hand_positions = pred_left_hand_positions[None, ...]
            overall_pred_right_hand_positions = pred_right_hand_positions[None, ...]
        else:
            overall_pred_left_hand_positions = np.concatenate((overall_pred_left_hand_positions, pred_left_hand_positions[None, ...]), axis=0)
            overall_pred_right_hand_positions = np.concatenate((overall_pred_right_hand_positions, pred_right_hand_positions[None, ...]), axis=0)
            mean_left_hand_velocity = np.abs((overall_pred_left_hand_positions[1:] - overall_pred_left_hand_positions[:-1]) * 50).sum(axis=-1).mean()
            mean_right_hand_velocity = np.abs((overall_pred_right_hand_positions[1:] - overall_pred_right_hand_positions[:-1]) * 50).sum(axis=-1).mean()
            print("[mean hand velocities]", mean_left_hand_velocity, mean_right_hand_velocity)

        gym.step_graphics(sim)
        gym.draw_viewer(viewer, sim, True)
        gym.clear_lines(viewer)
        gym.sync_frame_time(sim)

        if gym.query_viewer_has_closed(viewer):
            break
        
        # time.sleep(3)

    gym.destroy_viewer(viewer)
    gym.destroy_sim(sim)


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("--task", default=None, type=str)  # "sit" / "lie" / "lie_sofa" / "touch" / "vis"
    parser.add_argument("--result_file_path", default=None, type=str)
    args = parser.parse_args()
    task = args.task

    result_file_path = "./tracking_results.pkl" if args.result_file_path is None else args.result_file_path
    # NOTE(review): pickle.load executes arbitrary code embedded in the file;
    # only load result files from trusted sources.
    # Fix: use a context manager so the file handle is closed after loading.
    with open(result_file_path, "rb") as result_file:
        results = pickle.load(result_file)
    # The result list is split half/half into training and testing sequences
    # (see the two evaluation passes below).
    N_train = len(results) // 2
    N_test = len(results) - N_train

    if task == "vis":
        print("###### visualization ######")
        visualize(results[0:100])
        exit(0)

    print("###### evaluate results on training motions ######")
    eval_results = evaluation(results[0 : N_train], task=task, fps=50)
    # Debug helpers kept for reference:
    # sample_ids = list(np.random.choice(np.where(np.float32(eval_results["success"]) > 0.5)[0], 20))
    # vis_results = [results[i] for i in sample_ids]
    # visualize(vis_results)
    # print(eval_results)
    # if "tracking_success" in eval_results:
    #     print("tracking success rate =", np.float32(eval_results["tracking_success"]).mean())
    print("success rate =", np.float32(eval_results["success"]).mean())
    for i, ejt in enumerate(ENERGY_JAYLON_THRESHOLDS):
        print("success rate with energy_metric_{} =".format(str(i)), np.float32(eval_results["success_energy_{}".format(str(i))]).mean())
    print("average success rate on energy metrics =", np.float32(eval_results["success_average_on_energies"]).mean())

    print("###### evaluate results on testing scenes and motions ######")
    eval_results = evaluation(results[N_train : N_train + N_test], task=task, fps=50)
    # print(eval_results)
    print("success rate =", np.float32(eval_results["success"]).mean())
    for i, ejt in enumerate(ENERGY_JAYLON_THRESHOLDS):
        print("success rate with energy_metric_{} =".format(str(i)), np.float32(eval_results["success_energy_{}".format(str(i))]).mean())
    print("average success rate on energy metrics =", np.float32(eval_results["success_average_on_energies"]).mean())
