import os
from os.path import join, dirname, abspath
import numpy as np
import pickle
from isaacgym import gymapi
from isaacgym import gymutil
from isaacgym import gymtorch
import torch
import trimesh
import time
import argparse
from transforms3d.quaternions import quat2mat
from tqdm import tqdm
from legged_gym.scripts.diff_quat import vec6d_to_quat, quat_to_matrix, vec6d_to_matrix, flip_quat_by_w, broadcast_quat_apply, broadcast_quat_multiply
from legged_gym.scripts.result_evaluation import ENERGY_JAYLON_THRESHOLDS, init_common_eval_results, compute_regularizers, compute_cumu_max, point_distances
import open3d as o3d


def q_xyzw_to_R(q):
    """Convert an xyzw-ordered quaternion to a 3x3 rotation matrix.

    transforms3d's quat2mat expects wxyz ordering, so the components are
    reordered before conversion.
    """
    x, y, z, w = q[0], q[1], q[2], q[3]
    return quat2mat([w, x, y, z])


def get_pts_from_box_urdf(urdf_path, N_point=5000):
    """Sample a surface point cloud from a box-primitive URDF.

    Scans the URDF text for the last `xyz="..."` attribute (box center offset)
    and the last `box size="..."` attribute (box extents), then uniformly
    samples points on the box surface with open3d.

    Args:
        urdf_path: path to a URDF file describing a box primitive.
        N_point: number of surface points to sample.

    Returns:
        (N_point, 3) float32 array of surface points, centered at the box
        center and shifted by the parsed xyz offset.

    Raises:
        ValueError: if the URDF contains no xyz or box-size attribute
            (previously this surfaced as an opaque NameError).
    """
    xyz = None
    size = None
    with open(urdf_path, "r") as f:
        for line in f:
            line = line.strip()
            if len(line) == 0:
                continue
            if line.find("xyz=") > -1:
                xyz = np.float32([float(x) for x in line.split("\"")[1].split(" ")])  # (3,)
            if line.find("box size=") > -1:
                size = np.float32([float(x) for x in line.split("\"")[1].split(" ")])  # (3,)
    if xyz is None or size is None:
        raise ValueError("URDF '{}' is missing xyz and/or box size attributes".format(urdf_path))

    # open3d places the box with one corner at the origin, so subtract half
    # the size to center it before applying the parsed xyz offset.
    mesh = o3d.geometry.TriangleMesh.create_box(width=size[0], height=size[1], depth=size[2])
    pcd = mesh.sample_points_uniformly(number_of_points=N_point)
    pts = np.float32(pcd.points) - (size * 0.5).reshape(1, 3) + xyz.reshape(1, 3)
    return pts


def evaluation_carry_box(results, fps=50, final_object_distance_threshold=0.30, keeping_time_threshold=0.3, task_success_z_threshold=0.20, hand_object_distance_threshold=0.10):
    """Compute tracking/task success metrics for carry-box sequences.

    Args:
        results: iterable of per-sequence dicts containing at least
            "tracked_next_states" (N_frame, state_dim) and "object_urdf_path";
            proprioception-tracking runs additionally carry
            "global_information_tracking" = False and "tracked_next_root_states".
        fps: frame rate used to convert frame counts to seconds.
        final_object_distance_threshold: max object position error (m) for a
            frame to count as successfully tracked.
        keeping_time_threshold: minimum trailing duration (s) of valid object
            tracking required for tracking success.
        task_success_z_threshold: minimum object lift height (m) for task success.
        hand_object_distance_threshold: max final hand-to-object-surface
            distance (m) for task success.

    Returns:
        dict mapping metric names to per-sequence lists; extends the common
        eval results with "object_urdf_path", "final_object_distance" and
        "tracking_success".
    """
    eval_results = init_common_eval_results()
    eval_results["object_urdf_path"] = []  # str
    eval_results["final_object_distance"] = []  # m
    eval_results["tracking_success"] = []  # True / False

    for seq_results in tqdm(results):
        N_frame = seq_results["tracked_next_states"].shape[0]
        episode_length = N_frame / fps  # seconds
        # episodes shorter than 2 s are treated as early terminations
        episode_length_valid = episode_length > 2.0

        regularizers = compute_regularizers(seq_results)
        # regularizers[0] is used below as a torque term; regularizers[2] is
        # presumably the per-frame "jaylon" energy — TODO confirm in
        # compute_regularizers
        cumu_max_energy_jaylon = compute_cumu_max(regularizers[2])

        # sample the box surface so hand-object distance is measured against
        # the object surface rather than its center
        obj_model_pts = get_pts_from_box_urdf(seq_results["object_urdf_path"], N_point=5000)

        # NOTE(review): slices below assume a fixed state layout — e.g.
        # [-26:-23] object position and [-23:-19] object quaternion (xyzw),
        # relative to the robot root in proprioception mode — confirm against
        # the state builder.
        if ("global_information_tracking" in seq_results) and (not seq_results["global_information_tracking"]):  # for proprioception tracking
            # proprioception states store the object pose in the root frame;
            # compose with the tracked root pose to recover world-frame poses
            t_root = torch.from_numpy(seq_results["tracked_next_root_states"][0, 0:3])
            q_xyzw_root = torch.from_numpy(seq_results["tracked_next_root_states"][0, 3:7])
            t_obj_to_root = torch.from_numpy(seq_results["tracked_next_states"][0, -26:-23])
            t_initial = (broadcast_quat_apply(q_xyzw_root, t_obj_to_root) + t_root).detach().cpu().numpy()
            t_root = torch.from_numpy(seq_results["tracked_next_root_states"][-1, 0:3])
            q_xyzw_root = torch.from_numpy(seq_results["tracked_next_root_states"][-1, 3:7])
            R_root = q_xyzw_to_R(q_xyzw_root.detach().cpu().numpy())
            t_obj_to_root = torch.from_numpy(seq_results["tracked_next_states"][-1, -26:-23])
            q_xyzw_obj_to_root = torch.from_numpy(seq_results["tracked_next_states"][-1, -23:-19])
            R_obj_to_root = q_xyzw_to_R(q_xyzw_obj_to_root.detach().cpu().numpy())
            t_last = (broadcast_quat_apply(q_xyzw_root, t_obj_to_root) + t_root).detach().cpu().numpy()
            R_last = R_root @ R_obj_to_root
            obj_lift_height = t_last[2] - t_initial[2]
        else:
            # global-information states already hold world-frame object pose
            t_last = seq_results["tracked_next_states"][-1, -26:-23]
            R_last = q_xyzw_to_R(seq_results["tracked_next_states"][-1, -23:-19])
            obj_lift_height = seq_results["tracked_next_states"][-1, -24] - seq_results["tracked_next_states"][0, -24]

        # print(N_frame, obj_lift_height)
        # tracked vs reference object position — [-13:-10] is presumably the
        # reference object position; verify against the state layout
        obj_position_diff = seq_results["tracked_next_states"][:, -26:-23] - seq_results["tracked_next_states"][:, -13:-10]  # in world space
        obj_position_error = (obj_position_diff**2).sum(axis=-1)**0.5
        obj_position_valid = obj_position_error < final_object_distance_threshold

        # tracking success requires the object error to stay valid for the
        # trailing keeping_time_threshold seconds of the episode
        if obj_position_valid.all():
            last_invalid_frame_idx = 0
        else:
            last_invalid_frame_idx = np.where(obj_position_valid == 0)[0].max()
        keeping_time = (N_frame - 1 - last_invalid_frame_idx) / fps
        keeping_time_valid = keeping_time > keeping_time_threshold

        # get hand positions
        # hand point is taken 0.30 m along the elbow frame's x axis
        hand_pos_to_elbow = torch.tensor([0.30, 0, 0]).to(torch.float32)
        if ("global_information_tracking" in seq_results) and (not seq_results["global_information_tracking"]):  # for proprioception tracking
            H1_root_positions = seq_results["tracked_next_root_states"][:, :3]  # (N_frame, 3)
            H1_root_rotations = quat_to_matrix(torch.from_numpy(seq_results["tracked_next_root_states"][:, 3:7]))  # (N_frame, 3, 3)
        else:  # for global information tracking
            H1_root_positions = seq_results["tracked_next_states"][:, 338:341]  # (N_frame, 3)
            H1_root_rotations = quat_to_matrix(torch.from_numpy(seq_results["tracked_next_states"][:, 341:345]))  # (N_frame, 3, 3)
        # elbow positions/rotations are stored in the root frame; transform to world
        H1_left_elbow_positions = torch.einsum('bi,bij->bj', torch.from_numpy(seq_results["tracked_next_states"][:, 83:86]), H1_root_rotations.permute(0, 2, 1)) + H1_root_positions  # (N_frame, 3)
        H1_right_elbow_positions = torch.einsum('bi,bij->bj', torch.from_numpy(seq_results["tracked_next_states"][:, 95:98]), H1_root_rotations.permute(0, 2, 1)) + H1_root_positions  # (N_frame, 3)
        H1_left_elbow_rotations_to_root = vec6d_to_matrix(torch.from_numpy(seq_results["tracked_next_states"][:, 188:194]).reshape(N_frame, 3, 2))  # (N_frame, 3, 3)
        H1_right_elbow_rotations_to_root = vec6d_to_matrix(torch.from_numpy(seq_results["tracked_next_states"][:, 212:218]).reshape(N_frame, 3, 2))  # (N_frame, 3, 3)
        H1_left_elbow_rotations = torch.einsum('bij,bjk->bik', H1_root_rotations, H1_left_elbow_rotations_to_root)  # (N_frame, 3, 3)
        H1_right_elbow_rotations = torch.einsum('bij,bjk->bik', H1_root_rotations, H1_right_elbow_rotations_to_root)  # (N_frame, 3, 3)
        H1_left_hand_position = torch.matmul(hand_pos_to_elbow, H1_left_elbow_rotations.permute(0, 2, 1)) + H1_left_elbow_positions  # (N_frame, 3)
        H1_right_hand_position = torch.matmul(hand_pos_to_elbow, H1_right_elbow_rotations.permute(0, 2, 1)) + H1_right_elbow_positions  # (N_frame, 3)

        # distance from either hand to the nearest sampled point on the box
        final_obj_pts = (obj_model_pts @ R_last.T) + t_last.reshape(1, 3)  # (N_point, 3)
        final_hand_positions = torch.cat((H1_left_hand_position[-1:], H1_right_hand_position[-1:]), dim=0).detach().cpu().numpy()  # (2, 3)
        final_hand_obj_distance = point_distances(final_hand_positions, final_obj_pts).min()

        # task success: long-enough episode, object lifted, and a hand still
        # close to the object at the end
        H1_valid = episode_length_valid & (obj_lift_height > task_success_z_threshold) & (final_hand_obj_distance < hand_object_distance_threshold)

        eval_results["object_urdf_path"].append(seq_results["object_urdf_path"])
        eval_results["episode_length"].append(episode_length)
        eval_results["torque"].append(regularizers[0].mean())
        eval_results["final_object_distance"].append(obj_position_error[-1])
        eval_results["tracking_success"].append(episode_length_valid & keeping_time_valid)
        eval_results["success"].append(H1_valid)
        # success gated by increasingly strict cumulative-energy thresholds
        sucess_with_energy_metrics = []
        for i, ejt in enumerate(ENERGY_JAYLON_THRESHOLDS):
            success_flag = H1_valid & (cumu_max_energy_jaylon[-1] < ejt)
            sucess_with_energy_metrics.append(success_flag)
            eval_results["success_energy_{}".format(str(i))].append(success_flag)
        eval_results["success_average_on_energies"].append(np.mean(np.float32(sucess_with_energy_metrics)))
        
        # # (for evaluating RL)
        # obj_lift_height = seq_results["tracked_next_states"][-1, 378] - seq_results["tracked_next_states"][0, 378]
        # eval_results["task_success"].append(episode_length_valid & (obj_lift_height > task_success_z_threshold))

    return eval_results


def visualize(results, visualize_reference_motion=True):
    """Replay tracked (and optionally reference) motions in an Isaac Gym viewer.

    Creates one env per result sequence, each containing an H1 humanoid and
    the carried object; when visualize_reference_motion is True a second,
    green-tinted humanoid and object show the retargeted reference motion.
    States are set kinematically each frame (actors are fixed-base with
    gravity disabled), so no real physics is simulated.

    Args:
        results: list of per-sequence dicts with "tracker_inputs",
            "object_urdf_path", and optionally "root_states" /
            "global_information_tracking" for proprioception runs.
        visualize_reference_motion: also display the reference motion actors.
    """
    N_env = len(results)

    # setup gym envs
    gym = gymapi.acquire_gym()
    custom_parameters = [
        {"name": "--controller", "type": str, "default": "ik",
        "help": "Controller to use for Franka. Options are {ik, osc}"},
        {"name": "--show_axis", "action": "store_true", "help": "Visualize DOF axis"},
        {"name": "--speed_scale", "type": float, "default": 1.0, "help": "Animation speed scale"},
        {"name": "--num_envs", "type": int, "default": -1, "help": "Number of environments to create"},
    ]
    args = gymutil.parse_arguments(
        description="test",
        custom_parameters=custom_parameters,
    )
    device = args.sim_device if args.use_gpu_pipeline else 'cpu'
    sim_params = gymapi.SimParams()
    # low sim rate is fine here: states are written directly each frame,
    # physics stepping only drives the viewer
    sim_fps = 5
    sim_params.dt = dt = 1.0 / sim_fps
    gymutil.parse_sim_config({"gravity": [0.0, 0.0, -9.81], "up_axis": 1}, sim_params)  # 0 is y, 1 is z
    if args.physics_engine == gymapi.SIM_PHYSX:
        sim_params.physx.solver_type = 1
        sim_params.physx.num_position_iterations = 6
        sim_params.physx.num_velocity_iterations = 0
        sim_params.physx.num_threads = args.num_threads
        sim_params.physx.use_gpu = args.use_gpu
    else:
        raise Exception("This example can only be used with PhysX")
    # CPU pipeline is required so root-state tensors can be edited in place below
    sim_params.use_gpu_pipeline = False
    if args.use_gpu_pipeline:
        print("WARNING: Forcing CPU pipeline.")
    sim = gym.create_sim(args.compute_device_id, args.graphics_device_id, args.physics_engine, sim_params)
    if sim is None:
        raise Exception("Failed to create sim")
    viewer = gym.create_viewer(sim, gymapi.CameraProperties())
    if viewer is None:
        raise Exception("Failed to create viewer")
    asset_root = join(dirname(abspath(__file__)), "../../resources/robots")
    # h1_asset_file = "h1/urdf/h1_fix_upper_body.urdf"
    h1_asset_file = "h1/urdf/h1.urdf"
    asset_options = gymapi.AssetOptions()
    asset_options.armature = 0.01
    # fixed base + no gravity: actors are posed kinematically every frame
    asset_options.fix_base_link = True
    asset_options.disable_gravity = True
    asset_options.flip_visual_attachments = False  # set to True when using .dae mesh files
    h1_asset = gym.load_asset(sim, asset_root, h1_asset_file, asset_options)
    h1_dof_names = gym.get_asset_dof_names(h1_asset)
    h1_dof_props = gym.get_asset_dof_properties(h1_asset)
    h1_num_dofs = gym.get_asset_dof_count(h1_asset)
    h1_dof_states = np.zeros(h1_num_dofs, dtype=gymapi.DofState.dtype)
    h1_dof_types = [gym.get_asset_dof_type(h1_asset, i) for i in range(h1_num_dofs)]
    h1_dof_positions = h1_dof_states['pos']
    h1_lower_limits = h1_dof_props["lower"]
    h1_upper_limits = h1_dof_props["upper"]
    h1_ranges = h1_upper_limits - h1_lower_limits
    h1_mids = 0.3 * (h1_upper_limits + h1_lower_limits)
    h1_stiffnesses = h1_dof_props['stiffness']
    h1_dampings = h1_dof_props['damping']
    h1_armatures = h1_dof_props['armature']
    h1_has_limits = h1_dof_props['hasLimits']
    # force joint limits on for all DOFs regardless of the URDF
    h1_dof_props['hasLimits'] = np.array([True]*h1_num_dofs)
    num_envs = N_env
    num_per_row = int(np.sqrt(num_envs))
    env_lower = gymapi.Vec3(0., 0., 0.)
    env_upper = gymapi.Vec3(0., 0., 0.)
    print("Creating %d environments" % num_envs)
    plane_params = gymapi.PlaneParams()
    plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
    gym.add_ground(sim, plane_params)

    # set env origins
    # lay envs out on a grid with 5 m spacing (applied manually to poses below)
    env_origins = torch.zeros(N_env, 3, device=device, requires_grad=False)
    num_cols = np.floor(np.sqrt(N_env))
    num_rows = np.ceil(N_env / num_cols)
    xx, yy = torch.meshgrid(torch.arange(num_rows), torch.arange(num_cols))
    spacing = 5.0
    env_origins[:, 0] = spacing * xx.flatten()[:N_env]
    env_origins[:, 1] = spacing * yy.flatten()[:N_env]
    env_origins[:, 2] = 0.

    # set object params
    object_asset_options = gymapi.AssetOptions()
    object_asset_options.fix_base_link = True
    object_asset_options.use_mesh_materials = True
    object_asset_options.mesh_normal_mode = gymapi.COMPUTE_PER_VERTEX
    object_asset_options.override_inertia = True
    object_asset_options.override_com = True
    object_asset_options.vhacd_enabled = False
    object_pose = gymapi.Transform()

    envs = []
    actor_handles = []
    retargeted_motion_actor_handles = []
    object_actor_handles = []
    retargeted_motion_object_actor_handles = []
    # per env: tracked humanoid + object, plus reference humanoid + object when enabled
    actor_num_per_env = 4 if visualize_reference_motion else 2

    for i in range(N_env):

        seq = results[i]
        print(seq.keys())

        # create env
        env = gym.create_env(sim, env_lower, env_upper, int(np.sqrt(N_env)))
        envs.append(env)

        # add actor
        pose = gymapi.Transform()
        pose.p = gymapi.Vec3(0.0, 0.0, 1.05)
        pose.r = gymapi.Quat(*[0, 0, 0, 1])
        
        actor_handle = gym.create_actor(env, h1_asset, pose, "actor", i*actor_num_per_env, 1)
        gym.set_actor_dof_states(env, actor_handle, h1_dof_states, gymapi.STATE_ALL)  # set default DOF positions
        actor_handles.append(actor_handle)
        if visualize_reference_motion:
            # reference-motion humanoid, tinted green to distinguish it
            actor_handle = gym.create_actor(env, h1_asset, pose, "actor", i*actor_num_per_env+1, 1)
            for link_idx in range(25):
                gym.set_rigid_body_color(env, actor_handle, link_idx, gymapi.MeshType.MESH_VISUAL_AND_COLLISION, gymapi.Vec3(0.0, 0.8, 0.0))
            gym.set_actor_dof_states(env, actor_handle, h1_dof_states, gymapi.STATE_ALL)  # set default DOF positions
            retargeted_motion_actor_handles.append(actor_handle)

        # add object
        object_asset = gym.load_asset(sim, dirname(seq["object_urdf_path"]), seq["object_urdf_path"].split("/")[-1], gymapi.AssetOptions())
        object_initial_pose = gymapi.Transform()
        object_initial_pose.r = gymapi.Quat(0, 0, 0, 1)
        object_initial_pose.p = gymapi.Vec3(env_origins[i, 0], env_origins[i, 1], 0.0)
        object_actor_handle = gym.create_actor(env, object_asset, object_initial_pose, "object", i*actor_num_per_env+2, 0, 0)
        # random light color per env for the tracked object
        c = 0.5 + 0.5 * np.random.random(3)
        color = gymapi.Vec3(c[0], c[1], c[2])
        gym.set_rigid_body_color(env, object_actor_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, color)
        object_actor_handles.append(object_actor_handle)

        if visualize_reference_motion:
            # reference-motion object, solid green
            object_asset = gym.load_asset(sim, dirname(seq["object_urdf_path"]), seq["object_urdf_path"].split("/")[-1], gymapi.AssetOptions())
            object_initial_pose = gymapi.Transform()
            object_initial_pose.r = gymapi.Quat(0, 0, 0, 1)
            object_initial_pose.p = gymapi.Vec3(env_origins[i, 0], env_origins[i, 1], 0.0)
            object_actor_handle = gym.create_actor(env, object_asset, object_initial_pose, "object", i*actor_num_per_env+3, 0, 0)
            color = gymapi.Vec3(0.0, 1.0, 0.0)
            gym.set_rigid_body_color(env, object_actor_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, color)
            retargeted_motion_object_actor_handles.append(object_actor_handle)
    
    print("Creating a camera")
    cam_pos = gymapi.Vec3(0, -3, 2.0)
    cam_target = gymapi.Vec3(0, 3, 0)
    gym.viewer_camera_look_at(viewer, envs[0], cam_pos, cam_target)
    
    gym.prepare_sim(sim)

    result_obss = [results[i]["tracker_inputs"] for i in range(N_env)]
    N_frames = [result_obss[i].shape[0] for i in range(N_env)]
    # play until the longest sequence ends; shorter sequences freeze on their
    # last frame via min(frame_idx, N_frames[i]-1)
    max_N_frame = np.int32(N_frames).max()

    # NOTE(review): the hard-coded column slices below (338:341 root pos,
    # 341:345 root quat, 0:19 DOF pos, etc.) assume a fixed tracker-input
    # layout — confirm against the observation builder.
    for frame_idx in tqdm(range(max_N_frame)):
        gym.simulate(sim)
        gym.fetch_results(sim, True)

        actor_root_state = gym.acquire_actor_root_state_tensor(sim)
        root_states = gymtorch.wrap_tensor(actor_root_state)

        for i in range(N_env):
            # set global pose
            if ("global_information_tracking" in results[i]) and (not results[i]["global_information_tracking"]):  # for proprioception tracking
                t = torch.from_numpy(results[i]["root_states"][min(frame_idx, N_frames[i]-1), 0:3]).to(device) + env_origins[i]
                q_xyzw = torch.from_numpy(results[i]["root_states"][min(frame_idx, N_frames[i]-1), 3:7]).to(device)
            else:  # for global information tracking
                t = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 338:341]).to(device) + env_origins[i]
                q_xyzw = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 341:345]).to(device)
            root_states[i*actor_num_per_env, :3] = t
            root_states[i*actor_num_per_env, 3:7] = q_xyzw
            root_reset_actors_indices = torch.tensor([gym.get_actor_index(envs[i], actor_handles[i], gymapi.DOMAIN_SIM)]).to(dtype=torch.int32)
            gym.set_actor_root_state_tensor_indexed(sim, gymtorch.unwrap_tensor(root_states), gymtorch.unwrap_tensor(root_reset_actors_indices), 1)

            if visualize_reference_motion:
                if ("global_information_tracking" in results[i]) and (not results[i]["global_information_tracking"]):  # for proprioception tracking
                    # reference pose is stored relative to the proprioceptive
                    # root; compose with the tracked root pose set above
                    t_to_proprio_root = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 395:398]).to(device)
                    q_xyzw_to_proprio_root = vec6d_to_quat(torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 455:461]).reshape(1, 3, 2)).reshape(4).to(device)
                    t = broadcast_quat_apply(root_states[i*actor_num_per_env, 3:7].to(device), t_to_proprio_root) + root_states[i*actor_num_per_env, :3].to(device)
                    q_xyzw = flip_quat_by_w(broadcast_quat_multiply(root_states[i*actor_num_per_env, 3:7].to(device), q_xyzw_to_proprio_root)).to(device)
                else:
                    t = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 408:411]).to(device) + env_origins[i]
                    q_xyzw = vec6d_to_quat(torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 468:474]).reshape(1, 3, 2)).reshape(4).to(device)
                root_states[i*actor_num_per_env+1, :3] = t
                root_states[i*actor_num_per_env+1, 3:7] = q_xyzw
                root_reset_actors_indices = torch.tensor([gym.get_actor_index(envs[i], retargeted_motion_actor_handles[i], gymapi.DOMAIN_SIM)]).to(dtype=torch.int32)
                gym.set_actor_root_state_tensor_indexed(sim, gymtorch.unwrap_tensor(root_states), gymtorch.unwrap_tensor(root_reset_actors_indices), 1)

            # set joint angles
            h1_dof_states['pos'] = result_obss[i][min(frame_idx, N_frames[i]-1), 0:19]
            gym.set_actor_dof_states(envs[i], actor_handles[i], h1_dof_states, gymapi.STATE_POS)

            if visualize_reference_motion:
                if ("global_information_tracking" in results[i]) and (not results[i]["global_information_tracking"]):  # for proprioception tracking
                    h1_dof_states['pos'] = result_obss[i][min(frame_idx, N_frames[i]-1), 357:376]
                else:
                    h1_dof_states['pos'] = result_obss[i][min(frame_idx, N_frames[i]-1), 370:389]
                gym.set_actor_dof_states(envs[i], retargeted_motion_actor_handles[i], h1_dof_states, gymapi.STATE_POS)

            # set object global pose
            if ("global_information_tracking" in results[i]) and (not results[i]["global_information_tracking"]):  # for proprioception tracking
                t_to_proprio_root = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 701:704]).to(device)
                q_xyzw_to_proprio_root = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 704:708]).to(device)
                t = broadcast_quat_apply(root_states[i*actor_num_per_env, 3:7].to(device), t_to_proprio_root) + root_states[i*actor_num_per_env, :3].to(device)
                q_xyzw = flip_quat_by_w(broadcast_quat_multiply(root_states[i*actor_num_per_env, 3:7].to(device), q_xyzw_to_proprio_root)).to(device)
            else:
                t = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 714:717]).to(device) + env_origins[i]
                q_xyzw = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 717:721]).to(device)
            # the object's row in root_states depends on how many actors each env has
            if visualize_reference_motion:
                root_states[i*actor_num_per_env+2, :3] = t
                root_states[i*actor_num_per_env+2, 3:7] = q_xyzw
            else:
                root_states[i*actor_num_per_env+1, :3] = t
                root_states[i*actor_num_per_env+1, 3:7] = q_xyzw
            root_reset_actors_indices = torch.tensor([gym.get_actor_index(envs[i], object_actor_handles[i], gymapi.DOMAIN_SIM)]).to(dtype=torch.int32)
            gym.set_actor_root_state_tensor_indexed(sim, gymtorch.unwrap_tensor(root_states), gymtorch.unwrap_tensor(root_reset_actors_indices), 1)

            if visualize_reference_motion:
                # reference object pose
                if ("global_information_tracking" in results[i]) and (not results[i]["global_information_tracking"]):  # for proprioception tracking
                    t_to_proprio_root = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 714:717]).to(device)
                    q_xyzw_to_proprio_root = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 717:721]).to(device)
                    t = broadcast_quat_apply(root_states[i*actor_num_per_env, 3:7].to(device), t_to_proprio_root) + root_states[i*actor_num_per_env, :3].to(device)
                    q_xyzw = flip_quat_by_w(broadcast_quat_multiply(root_states[i*actor_num_per_env, 3:7].to(device), q_xyzw_to_proprio_root)).to(device)
                else:
                    t = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 727:730]).to(device) + env_origins[i]
                    q_xyzw = torch.from_numpy(result_obss[i][min(frame_idx, N_frames[i]-1), 730:734]).to(device)
                root_states[i*actor_num_per_env+3, :3] = t
                root_states[i*actor_num_per_env+3, 3:7] = q_xyzw
                root_reset_actors_indices = torch.tensor([gym.get_actor_index(envs[i], retargeted_motion_object_actor_handles[i], gymapi.DOMAIN_SIM)]).to(dtype=torch.int32)
                gym.set_actor_root_state_tensor_indexed(sim, gymtorch.unwrap_tensor(root_states), gymtorch.unwrap_tensor(root_reset_actors_indices), 1)

            # debug printout for the first env only
            if i == 0:
                print("xxxxxxxxxxxxx")
                print("[tracked_motion] root_pose =", result_obss[i][min(frame_idx, N_frames[i]-1), 338:345])
                print("[tracked_motion] dof_pos =", result_obss[i][min(frame_idx, N_frames[i]-1), 0:19])
                # error = np.mean(np.square(result_obss[i][min(frame_idx, N_frames[i]-1), 10:19] - 0.0))
                # print("upper body error:", error, np.exp(-4*error))
                # print("[retargeted_motion] root_pose =", result_obss[i][min(frame_idx, N_frames[i]-1), 408:411], result_obss[i][min(frame_idx, N_frames[i]-1), 468:472])
                # print("[retargeted_motion] dof_pos =", result_obss[i][min(frame_idx, N_frames[i]-1), 370:389])

        gym.step_graphics(sim)
        gym.draw_viewer(viewer, sim, True)
        gym.clear_lines(viewer)
        gym.sync_frame_time(sim)

        if gym.query_viewer_has_closed(viewer):
            break
        
        # if frame_idx >= 30:
        #     time.sleep(100)

    gym.destroy_viewer(viewer)
    gym.destroy_sim(sim)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--result_file_path", default=None, type=str)
    args = parser.parse_args()

    result_file_path = "./tracking_results.pkl" if args.result_file_path is None else args.result_file_path
    # NOTE: pickle.load is unsafe on untrusted data; only load trusted result files.
    with open(result_file_path, "rb") as f:
        results = pickle.load(f)
    # first half of the sequences are training motions, the remainder testing
    N_train = len(results) // 2
    N_test = len(results) - N_train

    def print_success_rates(eval_results):
        """Print overall and per-energy-threshold success rates for one split."""
        print("success rate =", np.float32(eval_results["success"]).mean())
        for i in range(len(ENERGY_JAYLON_THRESHOLDS)):
            print("success rate with energy_metric_{} =".format(str(i)), np.float32(eval_results["success_energy_{}".format(str(i))]).mean())
        print("average success rate on energy metrics =", np.float32(eval_results["success_average_on_energies"]).mean())

    # # optional visualization of the first sequences:
    # visualize(results[0:50])

    print("###### evaluate results on training motions ######")
    eval_results = evaluation_carry_box(results[0 : N_train], fps=50)
    print_success_rates(eval_results)

    print("###### evaluate results on testing scenes and motions ######")
    eval_results = evaluation_carry_box(results[N_train : N_train + N_test], fps=50)
    print_success_rates(eval_results)

