import numpy as np
import torch
from scipy.spatial.transform import Rotation as R
from numprint import p
from curobo.types.base import TensorDeviceType
from curobo.types.math import Pose
from curobo.types.robot import RobotConfig
from curobo.util_file import get_robot_configs_path, join_path, load_yaml
from curobo.wrap.reacher.ik_solver import IKSolver, IKSolverConfig
import gc
import zarr
import sys

def init_ik_solver(embodiment_name, config_path=None):
    """
    Initialize the inverse kinematics solver for a given embodiment.

    Args:
        embodiment_name (str): Name of the embodiment whose curobo YAML
            config to load (e.g. "franka-panda", "piper").
        config_path (str, optional): Explicit path to a curobo YAML config.
            Defaults to the RoboTwin assets layout for ``embodiment_name``.

    Returns:
        IKSolver: Initialized IK solver.
    """
    tensor_args = TensorDeviceType()
    print("get_robot_configs_path", get_robot_configs_path())
    if config_path is None:
        config_path = f'/home/algo/geyiheng/RoboTwin/assets/embodiments/{embodiment_name}/curobo.yml'
    config_file = load_yaml(config_path)
    kinematics = config_file["robot_cfg"]["kinematics"]
    robot_cfg = RobotConfig.from_basic(
        kinematics["urdf_path"],
        kinematics["base_link"],
        kinematics["ee_link"],
        tensor_args,
    )
    ik_config = IKSolverConfig.load_from_robot_config(
        robot_cfg,
        num_seeds=20,  # random restarts per IK query
        tensor_args=tensor_args,
        use_cuda_graph=True,
    )
    return IKSolver(ik_config)
class FrankaFK:
    """Forward-kinematics sanity-check helper built on a curobo IK solver.

    Poses are 7D vectors [x, y, z, qx, qy, qz, qw] (scalar-last quaternion,
    the scipy convention), while curobo's FK returns scalar-first
    quaternions [qw, qx, qy, qz] — ``fk_qxyzw`` does the reordering.
    """

    def __init__(self, embodiment_name):
        self.embodiment_name = embodiment_name
        self.ik_solver = init_ik_solver(embodiment_name)

    def fk_qxyzw(self, jointstate):
        """Run FK and return end-effector pose(s) as [x, y, z, qx, qy, qz, qw].

        Args:
            jointstate (torch.Tensor): Joint configuration(s) on the solver's
                device. NOTE(review): indexing below assumes the FK result is
                batched (2-D); callers pass both 1-D and (1, dof) tensors, so
                curobo presumably batches 1-D input internally — confirm.

        Returns:
            torch.Tensor: (batch, 7) pose tensor, position then scalar-last
            quaternion.
        """
        kin_state = self.ik_solver.fk(jointstate)
        # curobo returns [w, x, y, z]; reorder to scipy's [x, y, z, w].
        quat_xyzw = torch.cat(
            [kin_state.ee_quaternion[:, 1:], kin_state.ee_quaternion[:, 0:1]], dim=-1
        )
        return torch.cat([kin_state.ee_position, quat_xyzw], dim=-1)

    def large_test(self):
        """Sample random joint configurations and print their FK poses."""
        n = 1
        q_sample = self.ik_solver.sample_configs(n)
        print(q_sample.type(), q_sample.shape)
        # sample_configs(n) already yields exactly the batch we feed to FK.
        results = self.fk_qxyzw(q_sample).cpu().numpy()
        print(results)

    def realworld_test(self):
        """Dispatch to the embodiment-specific home-pose sanity check.

        Raises:
            ValueError: If ``self.embodiment_name`` has no registered check.
        """
        if self.embodiment_name == "franka-panda":
            self.franka_realworld_test()
        elif self.embodiment_name == "piper":
            self.piper_realworld_test()
        else:
            raise ValueError(f"Invalid embodiment name: {self.embodiment_name}")

    def _report_home_pose(self, js_home, real_pose7d):
        """Print the FK pose at ``js_home`` next to the measured real pose.

        Args:
            js_home (torch.Tensor): Home joint state on the solver's device.
            real_pose7d: Measured home pose [x, y, z, qx, qy, qz, qw]
                (array-like; subtracted element-wise from the FK result).
        """
        calced_pose7d = self.fk_qxyzw(js_home)
        p.print("ARM_JS_HOME", js_home)
        p.print("real home pose7D", real_pose7d)
        p.print("fk results pose7D", calced_pose7d)
        p.print("diff you want to subtract from fk results to get real world pose",
                calced_pose7d.cpu().numpy() - real_pose7d)

    def franka_realworld_test(self):
        """Compare FK at the Franka home joints against the measured home pose."""
        ARM_HOME_XYZRPY = np.array([0.49, 0.01, 0.44, 3.14, 0, 0.785])
        rotation = R.from_euler('xyz', ARM_HOME_XYZRPY[3:6])
        ARM_HOME_XYZ_QXYZW = np.concatenate([ARM_HOME_XYZRPY[:3], rotation.as_quat()])
        ARM_JS_HOME = torch.tensor(
            [-0.174, -0.03, 0.17, -2.155, 0.023, 2.058, 0.764], device='cuda'
        )
        self._report_home_pose(ARM_JS_HOME, ARM_HOME_XYZ_QXYZW)

    def piper_realworld_test(self):
        """Compare FK at the Piper zero joint state against the measured home pose."""
        ARM_HOME_XYZ_QXYZW = [0.056127, 0.0, 0.213266, 0.0, 0.01294573, 0.0, 0.9999162]
        ARM_JS_HOME = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], device='cuda')
        self._report_home_pose(ARM_JS_HOME, ARM_HOME_XYZ_QXYZW)

    def zarr_test(self, zarr_path="/home/algo/geyiheng/zarrtraj2/grab-banana-demo-eq.zarr"):
        """Print, per recorded timestep, the offset between FK and the logged EE pose.

        Args:
            zarr_path (str): Zarr store containing 'eelocal/state' and
                'qpos/state' arrays; only the first 7 columns of each are used.
        """
        zarr_data = zarr.open(zarr_path, mode='r')
        ee_state = zarr_data['eelocal/state'][:, :7].astype(np.float32)
        qpos_state = zarr_data['qpos/state'][:, :7].astype(np.float32)
        for timestep in range(ee_state.shape[0]):
            p.print("current timestep", timestep)
            current_ee_pose = ee_state[timestep]
            current_qpos = torch.tensor(
                qpos_state[timestep], dtype=torch.float32, device='cuda'
            )
            calc_pose_np = self.fk_qxyzw(current_qpos.unsqueeze(0)).squeeze(0).cpu().numpy()
            pos_diff = calc_pose_np[:3] - current_ee_pose[:3]
            # Relative rotation (scalar-last quaternion) from calculated to real.
            calc_quat = R.from_quat(calc_pose_np[3:7])
            real_quat = R.from_quat(current_ee_pose[3:7])
            quat_diff = (calc_quat.inv() * real_quat).as_quat()
            p.print("diff you want to subtract from fk results to get real world pose",
                    np.concatenate([pos_diff, quat_diff]))
if __name__ == "__main__":
    # Usage: python <this_file>.py <embodiment_name>   e.g. franka-panda | piper
    if len(sys.argv) < 2:
        sys.exit("usage: python <this_file>.py <embodiment_name>")
    franka_fk = FrankaFK(sys.argv[1])
    franka_fk.realworld_test()