# Demo: using pinocchio together with mujoco for impedance control
# Mind how data is stored in, and converted between, the two libraries
# mujoco reads the end-effector contact force and direction, plus the joint torques
# From the force magnitude/direction and the configured target setpoint, compute the desired end-effector velocity and direction
# then map that velocity into joint space


import pinocchio as pin
import mujoco
import mujoco.viewer as viewer
import numpy as np
from os.path import dirname, join
import time
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation




# History buffers, filled inside the viewer loop for post-run plotting.
# NOTE(review): vel_diff_history is never appended to anywhere in this file.
vel_diff_history = []
pos_diff_history = []

# Load the arm model in pinocchio (used for kinematics and Jacobians).
modelpath = join(dirname(__file__), 'Dof6Arm_P.xml')
pin_model = pin.RobotWrapper.BuildFromMJCF(modelpath)

# Load the full scene in mujoco (used for simulation and sensor readout).
model = mujoco.MjModel.from_xml_path(join(dirname(__file__),'scene_P.xml')) 
data = mujoco.MjData(model)

# Reset the simulation state to the "home" keyframe defined in the XML.
key_id = model.key("home").id
mujoco.mj_resetDataKeyframe(model, data, key_id)

print(f"qpos : {data.qpos}")
print(f"type of qpos : {type(data.qpos)}")

# Damped-least-squares regularization term used in the IK solve (6x6).
damping: float = 1e-3
diag = damping * np.eye(6)
qa_max = 5.0  # NOTE(review): unused in this file
vmax = 1  # joint-velocity bound used to rate-limit the IK command

dt_ik = 0.2 # IK integration step size
IT_MAX = 100 # maximum IK iterations per call
convergence_threshold = 1e-3  # 6D pose-error norm below which IK is considered converged
kv = 1  # NOTE(review): unused in this file

# Handles into the mujoco model.
# NOTE(review): site_id, base_id, goal_pos, end_effector_id and jac are
# never referenced elsewhere in this file.
site_id = model.site('site_Hand').id
base_id = model.body('BigArm1').id
goal_pos = [0.5, 0.5, 0.5]
end_effector_id = model.body('Hand').id
jac = np.zeros((6, model.nv))

# Index of the "target" mocap body whose pose the IK tracks.
mocap_id = model.body("target").mocapid[0]

# Both pinocchio and mujoco internal states must be refreshed before use.
pin.framesForwardKinematics(pin_model.model, pin_model.data, data.qpos)
pin.updateFramePlacements(pin_model.model, pin_model.data) 
mujoco.mj_step(model, data)            # step xdot= f(x,u)
mujoco.mj_forward(model, data)

# Joint position sensor names, in joint order (shoulder -> hand).
pos_sensor_names = [
        "Motor_Shoulder_pos", 
        "Motor_BigArm1_pos",
        "Motor_BigArm2_pos",
        "Motor_SmallArm1_pos",
        "Motor_SmallArm2_pos",
        "Motor_Hand_pos"
        ]

# Joint velocity sensor names (same ordering as pos_sensor_names).
# NOTE(review): unused in this file.
vel_sensor_names = [
        "Motor_Shoulder_vel", 
        "Motor_BigArm1_vel",
        "Motor_BigArm2_vel",
        "Motor_SmallArm1_vel",
        "Motor_SmallArm2_vel",
        "Motor_Hand_vel"
        ]

# Joint torque sensor names.
# NOTE(review): unused in this file, and it lists only 5 joints — the
# "Motor_Hand_Torque" entry is missing relative to the other two lists.
torque_sensor_names = [
    "Motor_Shoulder_Torque",
    "Motor_BigArm1_Torque", 
    "Motor_BigArm2_Torque",
    "Motor_SmallArm1_Torque",
    "Motor_SmallArm2_Torque"
]

# Initial target pose T_t0 (SE3) taken from the "target" mocap body.
mocap_pos = data.mocap_pos[mocap_id]
mocap_quat = data.mocap_quat[mocap_id]

# MuJoCo stores mocap_quat in [w, x, y, z] order, while scipy's
# Rotation.from_quat expects scalar-last [x, y, z, w]; reorder accordingly.
# Bug fix: the original passed the [x, y, z, w] sequence to pin.Quaternion,
# whose scalar constructor takes (w, x, y, z) — producing a wrong rotation.
# Using scipy here also matches the conversion done inside ik().
mocap_rot = Rotation.from_quat(
    [mocap_quat[1], mocap_quat[2], mocap_quat[3], mocap_quat[0]]
).as_matrix()

# NOTE: a `global` statement at module level is a no-op and was dropped;
# T_t0 is a module-level name either way.  (T_t0 is not read elsewhere in
# this file — ik() recomputes the target pose on every call.)
T_t0 = pin.SE3(mocap_rot, mocap_pos)




# TODO (hard to do for now): account for the mocap velocity and fold the velocity error into the IK
# TODO: add dynamics compensation and an integral-error term

def ik(model, data):
    """Differential inverse kinematics toward the mocap target pose.

    Iterates a damped-least-squares IK in pinocchio until the 'site_Hand'
    frame matches the pose of the mocap target body, then returns a
    rate-limited joint-position command toward that solution.

    Parameters
    ----------
    model : mujoco.MjModel for the scene (unused directly; kept for symmetry).
    data  : mujoco.MjData — read for mocap pose and joint position sensors.

    Returns
    -------
    ctrl : np.ndarray of joint position targets suitable for data.ctrl.
    """
    start = time.time()

    # Function attributes serve as persistent state across calls:
    # the last IK solution and whether it is already converged.
    if not hasattr(ik, 'ik_qstar'):  # initialize the previous command on first call
        ik.ik_qstar = np.zeros(pin_model.model.nq)
    if not hasattr(ik, 'converged'):
        ik.converged = False
    # Refresh frame placements at the stored solution to test convergence.
    pin.framesForwardKinematics(pin_model.model, pin_model.data, ik.ik_qstar)
    pin.updateFramePlacements(pin_model.model, pin_model.data)
    # Target pose T_t (SE3) from the mocap body.
    mocap_pos = data.mocap_pos[mocap_id]
    mocap_quat = data.mocap_quat[mocap_id]
    # mujoco mocap_quat order is [w, x, y, z]; scipy expects [x, y, z, w].
    mocap_rot = Rotation.from_quat([mocap_quat[1], mocap_quat[2], mocap_quat[3], mocap_quat[0]]).as_matrix()
    T_t = pin.SE3(mocap_rot, mocap_pos)
    # Target pose expressed relative to the current hand-site frame.
    T_bt = pin_model.data.oMf[pin_model.model.getFrameId("site_Hand")].actInv(T_t)
    # 6D log-map pose error vector (linear + angular).
    e_q = pin.log(T_bt).vector
    if np.linalg.norm(e_q) < convergence_threshold:
        ik.converged = True
        print("Convergence achieved!")
    else:
        ik.converged = False

    # Reading joint angles from sensors is closer to a real robot than data.qpos.
    qpos_sensors = np.array([data.sensor(pos_sensor_name).data[0] for pos_sensor_name in pos_sensor_names])


    if not ik.converged :
        
        pin.framesForwardKinematics(pin_model.model, pin_model.data, qpos_sensors)
        pin.updateFramePlacements(pin_model.model, pin_model.data)

        ik_q = qpos_sensors # joint configuration iterated by the IK loop

        i = 0

        while True:
            # Relative pose T_bt between the current site pose T_b and the target.
            T_bt = pin_model.data.oMf[pin_model.model.getFrameId("site_Hand")].actInv(T_t)
            # 6D pose error vector.
            e_q = pin.log(T_bt).vector
            # (original note: lighten the computation while the error is still large)
            
            if np.linalg.norm(e_q) < convergence_threshold:
                ik.converged = True
                ik.ik_qstar = ik_q
                break
            if i>IT_MAX:
                ik.converged = False
                break
            
            i += 1

            # Frame Jacobian J_b of site_Hand at the current IK iterate.
            J_b = pin.computeFrameJacobian(
                pin_model.model, 
                pin_model.data, 
                ik_q, 
                pin_model.model.getFrameId("site_Hand"), 
                pin.ReferenceFrame.LOCAL_WORLD_ALIGNED  # pick the frame suiting the error definition
            )
            
            J_l = -pin.Jlog6(T_bt.inverse())
            # Solve J_error v* = -e(q) for the update direction v* below.
            J_error = J_l.dot(J_b) # error Jacobian

            # Damped least squares (Levenberg–Marquardt style regularized solve).
            v_star = np.linalg.solve(J_error.T.dot(J_error) + diag, -J_error.T.dot(e_q))

            # Integrate the velocity step on the configuration manifold.
            ik_q = pin.integrate(pin_model.model, ik_q, v_star*dt_ik)
            pin.forwardKinematics(pin_model.model, pin_model.data, ik_q)
            pin.updateFramePlacements(pin_model.model, pin_model.data)
        


    # Rate-limit the commanded step: scale delta_q so that no joint moves
    # more than vmax*dt_ik*IT_MAX in one control tick.
    delta_q = ik.ik_qstar - qpos_sensors
    max_delta = np.max(np.abs(delta_q))
    delta_q_new = vmax*dt_ik*IT_MAX*delta_q/max(max_delta, vmax*dt_ik*IT_MAX)
    ctrl = delta_q_new + qpos_sensors
    print(f"IK cost time: {time.time() - start}")

    return ctrl




# Main simulation loop: run IK each tick, step the physics, and record the
# per-joint tracking error for plotting once the viewer window is closed.
# Fixes vs. original: the handle no longer shadows the imported `viewer`
# module; a dead per-step sensor read was removed; the y-axis unit label
# was corrected from 'rad/s' to 'rad' (this is a position error).
with viewer.launch_passive(model, data) as sim_view:
    # Initialize the camera view to that of the free camera.
    mujoco.mjv_defaultFreeCamera(model, sim_view.cam)
    sim_view.opt.frame = mujoco.mjtFrame.mjFRAME_SITE
    step = 0
    time_array = []
    while sim_view.is_running():

        data.ctrl = ik(model, data)  # apply the IK position command
        mujoco.mj_step(model, data)            # step xdot = f(x, u)
        mujoco.mj_forward(model, data)
        # Joint position tracking error: actual minus commanded.
        pos_diff = data.qpos - data.ctrl
        pos_diff_history.append(pos_diff.copy())  # keep a copy — data.qpos is mutated in place
        step += 1
        t_now = model.opt.timestep * step
        time_array.append(t_now)
        sim_view.sync()
        time.sleep(model.opt.timestep)

    # Plot one error curve per joint after the viewer closes.
    pos_diff_array = np.array(pos_diff_history)
    for j in range(pos_diff_array.shape[1]):
        plt.plot(time_array, pos_diff_array[:, j])

    plt.title('Joint Position Tracking Errors')
    plt.xlabel('Time (s)')
    plt.ylabel('Error (rad)')
    plt.grid(True)
    plt.show()