# Copyright 2025 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Joystick task for Unitree G1."""

from typing import Any, Dict, Optional, Union

import jax
import jax.numpy as jp
from jax import vmap
from jax._src.scipy.spatial.transform import Rotation
from ml_collections import config_dict
from mujoco import mjx
from mujoco.mjx._src import math
import numpy as np

from mujoco_playground._src import collision
from mujoco_playground._src import gait
from mujoco_playground._src import mjx_env
from mujoco_playground._src.collision import geoms_colliding
from mujoco_playground._src.locomotion.g1 import base as g1_base
from mujoco_playground._src.locomotion.g1 import g1_constants as consts
from mujoco_playground._src.locomotion.g1.reward import HubRewardJAX
from mujoco_playground._src.reward import tolerance
from brax.math import quat_mul, quat_inv, quat_mul_np


def default_config() -> config_dict.ConfigDict:
    """Default configuration for the G1 trajectory-tracking task."""
    noise_scales = config_dict.create(
        joint_pos=0.03,
        joint_vel=1.5,
        gravity=0.05,
        linvel=0.1,
        gyro=0.2,
    )
    reward_scales = config_dict.create(
        # Tracking related rewards.
        balance_support=1.0,
        balance_mismatch=1.0,
        balance_close=1.0,
        tracking_body_position=1.0,
        tracking_body_rotation=1.0,
        tracking_body_velocity=1.0,
        tracking_body_angular_velocity=1.0,
        tracking_dof_position=1.0,
        tracking_dof_velocity=1.0,
        limit_dof_position_limits=1.0,
        limit_termination=1.0,
        regularization_torque=1.0,
        regularization_dof_velocity=1.0,
        regularization_dof_acceleration=1.0,
        regularization_action_rate=1.0,
        regularization_feet_air_time=1.0,
        regularization_feet_contact_force=1.0,
        regularization_stumble=1.0,
        regularization_slippage=1.0,
        regularization_feet_orientation=1.0,
        regularization_in_the_air=1.0,
    )
    return config_dict.create(
        ctrl_dt=0.02,
        sim_dt=0.002,
        episode_length=1000,
        action_repeat=1,
        action_scale=0.1,
        history_len=1,
        obs_history_len=5,
        restricted_joint_range=False,
        soft_joint_pos_limit_factor=0.95,
        noise_config=config_dict.create(
            level=1.0,  # Set to 0.0 to disable noise.
            scales=noise_scales,
        ),
        reward_config=config_dict.create(
            scales=reward_scales,
            tracking_sigma_qpos=0.25,
            tracking_sigma_qvel=0.25,
            max_foot_height=0.15,
            base_height_target=0.5,
            max_contact_force=500.0,
        ),
        push_config=config_dict.create(
            enable=True,
            interval_range=[5.0, 10.0],
            magnitude_range=[0.1, 2.0],
        ),
        command_config=config_dict.create(
            # Uniform distribution for command amplitude.
            a=[1.0, 0.8, 1.0],
            # Probability of not zeroing out new command.
            b=[0.9, 0.25, 0.5],
        ),
        lin_vel_x=[-1.0, 1.0],
        lin_vel_y=[-0.5, 0.5],
        ang_vel_yaw=[-1.0, 1.0],
    )


from pathlib import Path


def get_project_root():
    """Walk upward from this file to the directory holding pyproject.toml or .git.

    Returns:
        Path of the project root.

    Raises:
        FileNotFoundError: if the filesystem root is reached without a match.
    """
    markers = ("pyproject.toml", ".git")
    candidate = Path(__file__).parent
    while True:
        if any((candidate / marker).exists() for marker in markers):
            return candidate
        if candidate.parent == candidate:  # reached the filesystem root
            raise FileNotFoundError("找不到项目根目录！")
        candidate = candidate.parent

def quat_conj(q):
    """Return the conjugate of a (w, x, y, z) quaternion (vector part negated)."""
    w, x, y, z = q
    return np.array([w, -x, -y, -z])

def global_to_local_xquat(xquat):
    """Convert global body quaternions to quaternions relative to the root body.

    Args:
        xquat: global quaternions, shape [step, nbody, 4], scalar-first
            (w, x, y, z); index 0 is the world body, index 1 the root.

    Returns:
        Root-relative quaternions, shape [step, nbody - 1, 4] (world excluded).
    """
    step, nbody, _ = xquat.shape
    local_xquat = np.zeros((step, nbody - 1, 4))
    for t in range(step):
        root_q = xquat[t, 1]
        # Iterate over every body except the world body (index 0).
        for i in range(1, nbody):
            # Left-multiplying by the root conjugate expresses body i in the
            # root frame. (Removed an unused `quat_inv(root_q)` computation
            # that was dead code in the original.)
            local_xquat[t, i - 1] = quat_mul_np(quat_conj(root_q), xquat[t, i])
    return local_xquat

class Traj(g1_base.G1Env):
    """Track a joystick command."""

    def __init__(
            self,
            task: str = "traj",
            config: config_dict.ConfigDict = default_config(),
            config_overrides: Optional[Dict[str, Union[str, int, list[Any]]]] = None,
    ):
        """Initialize the environment and load the reference trajectory.

        Args:
            task: key passed to ``consts.task_to_xml`` to pick the MJCF model.
            config: task configuration (see ``default_config``).
            config_overrides: optional values overriding entries of ``config``.
        """
        super().__init__(
            xml_path=consts.task_to_xml(task).as_posix(),
            config=config,
            config_overrides=config_overrides,
        )
        self._post_init()

        # Load the reference trajectory; only frames 2000:2500 (500 frames) are used.
        traj = jp.load(str(get_project_root()) + '/npz/g1/dance1_subject1.xpos.npz')
        traj_qpos = traj['qpos'][2000:2500]
        # # Assuming traj.shape == (T, D)
        # D = traj_qpos.shape[0]
        # traj_qpos = jp.concatenate([
        #     traj_qpos[:, :20],  # (20, D)
        #     jp.zeros((D, 2)),  # insert zero columns
        #     traj_qpos[:, 20:25],
        #     jp.zeros((D, 2)),
        #     traj_qpos[:, 25:],
        #     jp.zeros((D, 2)),
        # ], axis=-1)  # concatenate along the last axis
        self.obs_traj_qpos = jp.array(traj_qpos)
        # Actuated joints only (the freejoint occupies qpos[:7]).
        self.ctrl_traj_qpos = jp.array(traj_qpos[:, 7:])

        traj_qvel = traj['qvel'][2000:2500]
        # Assuming traj.shape == (T, D)
        # D = traj_qvel.shape[0]
        # traj_qvel = jp.concatenate([
        #     traj_qvel[:, :19],  # (20, D)
        #     jp.zeros((D, 2)),  # insert zero columns
        #     traj_qvel[:, 19:24],
        #     jp.zeros((D, 2)),
        #     traj_qvel[:, 24:],
        #     jp.zeros((D, 2)),
        # ], axis=-1)  # concatenate along the last axis
        self.obs_traj_qvel = jp.array(traj_qvel)

        traj_geom_xpos = traj['geom_xpos'][2000:2500]
        self.obs_traj_geom_xpos = jp.array(traj_geom_xpos)

        traj_xpos = traj['xpos'][2000:2500]
        self.obs_traj_xpos = jp.array(traj_xpos)

        traj_xquat = traj['xquat'][2000:2500]
        self.obs_traj_xquat = jp.array(traj_xquat)
        # Body orientations expressed relative to the root body (world excluded).
        self.obs_traj_local_xquat_no_world = jp.array(global_to_local_xquat(traj_xquat))

        traj_cvel = traj['cvel'][2000:2500]
        self.obs_traj_cvel = jp.array(traj_cvel)

        traj_xmat = traj['xmat'][2000:2500]
        self.obs_traj_xmat = jp.array(traj_xmat)

        # self.obs_traj_local_pos_no_world = jp.dot(self.obs_traj_xpos[:, 1:] - self.obs_traj_xpos[:, 0:1], self.obs_traj_xmat[:, 1].reshape(-1, 3, 3))

        # Precompute root-relative body positions in batches to bound memory use.
        batch_size = 256  # adjust according to GPU memory
        local_pos = []
        for i in range(0, len(self.obs_traj_xpos), batch_size):
            # NOTE(review): positions are taken relative to body 1 (the root),
            # unlike the commented-out variant above which used body 0 — confirm.
            batch_rel_pos = self.obs_traj_xpos[i:i + batch_size, 1:] - self.obs_traj_xpos[i:i + batch_size, 1:2]
            batch_rot = self.obs_traj_xmat[i:i + batch_size, 1].reshape(-1, 3, 3)
            local_pos.append(jp.matmul(batch_rel_pos, batch_rot))
        self.obs_traj_local_pos_no_world = jp.concatenate(local_pos, axis=0)

        # jax.debug.print("obs_traj_xquat={}", self.obs_traj_xquat.shape)
        # jax.debug.print("obs_traj_local_xquat_no_world={}", self.obs_traj_local_xquat_no_world.shape)

        # Height bounds of the reference trajectory, used by is_health().
        self.traj_height_min = jp.min(self.obs_traj_qpos[:, 2])
        self.traj_height_max = jp.max(self.obs_traj_qpos[:, 2])

    def _post_init(self) -> None:
        """Cache model constants: default pose, joint limits and body/site/geom ids."""
        knees_bent = self._mj_model.keyframe("knees_bent")
        self._init_q = jp.array(knees_bent.qpos)
        self._default_pose = jp.array(knees_bent.qpos[7:])

        # Note: First joint is freejoint.
        self._lowers, self._uppers = self.mj_model.jnt_range[1:].T

        def _joint_qpos_idx(joint_name: str):
            # qpos index relative to actuated joints (the freejoint fills qpos[:7]).
            return self._mj_model.joint(f"{joint_name}_joint").qposadr - 7

        self._waist_indices = jp.array(
            [_joint_qpos_idx(name) for name in ("waist_yaw", "waist_roll", "waist_pitch")]
        )

        arm_joint_names = (
            "shoulder_roll",
            "shoulder_yaw",
            "wrist_roll",
            "wrist_pitch",
            "wrist_yaw",
        )
        self._arm_indices = jp.array([
            _joint_qpos_idx(f"{side}_{name}")
            for side in ("left", "right")
            for name in arm_joint_names
        ])

        self._hip_indices = jp.array([
            _joint_qpos_idx(f"{side}_{name}")
            for side in ("left", "right")
            for name in ("hip_roll", "hip_yaw")
        ])

        self._knee_indices = jp.array(
            [_joint_qpos_idx(f"{side}_knee") for side in ("left", "right")]
        )

        self._torso_body_id = self._mj_model.body(consts.ROOT_BODY).id
        self._torso_mass = self._mj_model.body_subtreemass[self._torso_body_id]
        self._torso_imu_site_id = self._mj_model.site("imu_in_torso").id
        self._pelvis_imu_site_id = self._mj_model.site("imu_in_pelvis").id

        self._feet_site_id = np.array(
            [self._mj_model.site(name).id for name in consts.FEET_SITES]
        )
        self._hands_site_id = np.array(
            [self._mj_model.site(name).id for name in consts.HAND_SITES]
        )
        self._floor_geom_id = self._mj_model.geom("floor").id
        self._feet_geom_id = np.array(
            [self._mj_model.geom(name).id for name in consts.FEET_GEOMS]
        )

        # Addresses of the feet global-linvel sensor readings in data.sensordata.
        foot_linvel_sensor_adr = []
        for site in consts.FEET_SITES:
            sensor_id = self._mj_model.sensor(f"{site}_global_linvel").id
            start = self._mj_model.sensor_adr[sensor_id]
            dim = self._mj_model.sensor_dim[sensor_id]
            foot_linvel_sensor_adr.append(list(range(start, start + dim)))
        self._foot_linvel_sensor_adr = jp.array(foot_linvel_sensor_adr)

        self._left_foot_geom_name = "left_foot"
        self._right_foot_geom_name = "right_foot"
        self._left_hand_geom_id = self._mj_model.geom("left_hand_collision").id
        self._right_hand_geom_id = self._mj_model.geom("right_hand_collision").id
        self._left_foot_geom_id = self._mj_model.geom("left_foot").id
        self._right_foot_geom_id = self._mj_model.geom("right_foot").id
        self._left_shin_geom_id = self._mj_model.geom("left_shin").id
        self._right_shin_geom_id = self._mj_model.geom("right_shin").id
        self._left_thigh_geom_id = self._mj_model.geom("left_thigh").id
        self._right_thigh_geom_id = self._mj_model.geom("right_thigh").id

        self.hub_reward = HubRewardJAX(self._mj_model)

    def reset(self, rng: jax.Array) -> mjx_env.State:
        """Reset with a randomized root pose, scaled joint pose and root velocity."""
        qpos = self._init_q
        qvel = jp.zeros(self.mjx_model.nv)

        # Root position: x, y perturbed by U(-0.5, 0.5).
        rng, key = jax.random.split(rng)
        dxy = jax.random.uniform(key, (2,), minval=-0.5, maxval=0.5)
        qpos = qpos.at[0:2].add(dxy)

        # Root yaw: U(-3.14, 3.14), composed with the keyframe orientation.
        rng, key = jax.random.split(rng)
        yaw = jax.random.uniform(key, (1,), minval=-3.14, maxval=3.14)
        yaw_quat = math.axis_angle_to_quat(jp.array([0, 0, 1]), yaw)
        qpos = qpos.at[3:7].set(math.quat_mul(qpos[3:7], yaw_quat))

        # Joint angles: scaled elementwise by U(0.5, 1.5).
        rng, key = jax.random.split(rng)
        joint_scale = jax.random.uniform(key, (29,), minval=0.5, maxval=1.5)
        qpos = qpos.at[7:].multiply(joint_scale)

        # Root linear/angular velocity: U(-0.5, 0.5) per component.
        rng, key = jax.random.split(rng)
        qvel = qvel.at[0:6].set(
            jax.random.uniform(key, (6,), minval=-0.5, maxval=0.5)
        )

        data = mjx_env.init(self.mjx_model, qpos=qpos, qvel=qvel, ctrl=qpos[7:])

        rng, cmd_rng = jax.random.split(rng)

        # Sample the interval (seconds, then steps) between random pushes.
        rng, push_rng = jax.random.split(rng)
        push_interval = jax.random.uniform(
            push_rng,
            minval=self._config.push_config.interval_range[0],
            maxval=self._config.push_config.interval_range[1],
        )
        push_interval_steps = jp.round(push_interval / self.dt).astype(jp.int32)

        info = {
            "rng": rng,
            "step": 0,
            "feet_air_time": jp.zeros(2),
            "last_act": jp.zeros(self.mjx_model.nu),
            "last_last_act": jp.zeros(self.mjx_model.nu),
            "last_contact": jp.zeros(2, dtype=bool),
            "motor_targets": jp.zeros(self.mjx_model.nu),
            "push": jp.array([0.0, 0.0]),
            "push_step": 0,
            "push_interval_steps": push_interval_steps,
        }

        # One metric slot per reward term, plus raw tracking-error diagnostics.
        metrics = {
            f"reward/{k}": jp.zeros(())
            for k in self._config.reward_config.scales.keys()
        }
        metrics["qpos_diff"] = jp.zeros(())
        metrics["qvel_diff"] = jp.zeros(())

        contact = jp.array([
            geoms_colliding(data, geom_id, self._floor_geom_id)
            for geom_id in self._feet_geom_id
        ])
        obs = self._get_obs(data, info, contact)
        # obs = self.get_history_obs(obs, info)
        reward, done = jp.zeros(2)
        return mjx_env.State(data, obs, reward, done, metrics, info)

    def step(self, state: mjx_env.State, action: jax.Array) -> mjx_env.State:
        """Advance one control step: optional random push, physics, reward, bookkeeping."""
        state.info["rng"], push1_rng, push2_rng = jax.random.split(
            state.info["rng"], 3
        )
        # A push direction/magnitude is sampled every step but only applied when
        # the push interval elapses (and pushes are enabled in the config).
        push_theta = jax.random.uniform(push1_rng, maxval=2 * jp.pi)
        push_magnitude = jax.random.uniform(
            push2_rng,
            minval=self._config.push_config.magnitude_range[0],
            maxval=self._config.push_config.magnitude_range[1],
        )
        push = jp.array([jp.cos(push_theta), jp.sin(push_theta)])
        push *= (
                jp.mod(state.info["push_step"] + 1, state.info["push_interval_steps"])
                == 0
        )
        push *= self._config.push_config.enable
        qvel = state.data.qvel
        # The push perturbs the root's planar linear velocity directly.
        qvel = qvel.at[:2].set(push * push_magnitude + qvel[:2])
        data = state.data.replace(qvel=qvel)
        state = state.replace(data=data)
        # Actions are offsets from the default pose, scaled by action_scale.
        motor_targets = self._default_pose + action * self._config.action_scale
        data = mjx_env.step(
            self.mjx_model, state.data, motor_targets, self.n_substeps
        )
        state.info["motor_targets"] = motor_targets
        contact = jp.array([
            geoms_colliding(data, geom_id, self._floor_geom_id)
            for geom_id in self._feet_geom_id
        ])
        contact_filt = contact | state.info["last_contact"]
        # A foot is in "first contact" when it touches down after being airborne.
        first_contact = (state.info["feet_air_time"] > 0.0) * contact_filt
        state.info["feet_air_time"] += self.dt

        obs = self._get_obs(data, state.info, contact)
        # obs = self.get_history_obs(obs, state.info)
        done = self._get_termination(data)

        rewards = self._get_reward(
            data, action, state.info, state.metrics, first_contact, done
        )
        # com_reward = self._reward_support_polygon(data, self.obs_traj_geom_xpos[state.info['step']])
        # jax.debug.print("com_reward={}", com_reward)
        rewards = {
            k: v * self._config.reward_config.scales[k] for k, v in rewards.items()
        }
        # jax.debug.print("rewards = {}", rewards)
        reward = sum(rewards.values()) * self.dt
        # Mean absolute tracking error against the current reference frame.
        state.metrics["qpos_diff"] = jp.mean(jp.abs(self.obs_traj_qpos[state.info["step"], 7:] - data.qpos[7:]))
        state.metrics["qvel_diff"] = jp.mean(jp.abs(self.obs_traj_qvel[state.info["step"], 6:] - data.qvel[6:]))

        state.info["push"] = push
        state.info["step"] += 1
        state.info["push_step"] += 1
        state.info["last_last_act"] = state.info["last_act"]
        state.info["last_act"] = action
        state.info["rng"], cmd_rng = jax.random.split(state.info["rng"])
        # Wrap the reference index on termination or after 500 steps.
        # NOTE(review): the loaded slice has 500 frames (indices 0..499) but the
        # index can reach 500 before wrapping; JAX clamps out-of-range gathers —
        # confirm the intended bound.
        state.info["step"] = jp.where(
            done | (state.info["step"] > 500),
            0,
            state.info["step"],
        )
        state.info["feet_air_time"] *= ~contact
        state.info["last_contact"] = contact
        for k, v in rewards.items():
            state.metrics[f"reward/{k}"] = v

        done = done.astype(reward.dtype)
        state = state.replace(data=data, obs=obs, reward=reward, done=done)
        return state

    def _get_termination(self, data: mjx.Data) -> jax.Array:
        """Terminate when the torso has flipped over or the state went non-finite."""
        fallen = self.get_gravity(data, "torso")[-1] < 0.0
        state_is_nan = jp.isnan(data.qpos).any() | jp.isnan(data.qvel).any()
        return fallen | state_is_nan

    def _get_obs(
            self, data: mjx.Data, info: dict[str, Any], contact: jax.Array
    ) -> mjx_env.Observation:
        """Build actor ("state") and critic ("privileged_state") observations.

        The actor observation stacks global body poses/velocities, their errors
        against reference frame ``info["step"]``, the reference frame expressed
        in the root-local frame, and the previous action. The privileged
        observation additionally exposes noiseless sensor readings.
        """
        gyro = self.get_gyro(data, "pelvis")
        info["rng"], noise_rng = jax.random.split(info["rng"])
        # NOTE(review): the noisy_* values below are computed (and consume RNG)
        # but are not included in either observation vector — confirm whether
        # the actor observation should use them instead of the raw values.
        noisy_gyro = (
                gyro
                + (2 * jax.random.uniform(noise_rng, shape=gyro.shape) - 1)
                * self._config.noise_config.level
                * self._config.noise_config.scales.gyro
        )

        gravity = data.site_xmat[self._pelvis_imu_site_id].T @ jp.array([0, 0, -1])
        info["rng"], noise_rng = jax.random.split(info["rng"])
        noisy_gravity = (
                gravity
                + (2 * jax.random.uniform(noise_rng, shape=gravity.shape) - 1)
                * self._config.noise_config.level
                * self._config.noise_config.scales.gravity
        )

        joint_angles = data.qpos[7:]
        info["rng"], noise_rng = jax.random.split(info["rng"])
        noisy_joint_angles = (
                joint_angles
                + (2 * jax.random.uniform(noise_rng, shape=joint_angles.shape) - 1)
                * self._config.noise_config.level
                * self._config.noise_config.scales.joint_pos
        )

        joint_vel = data.qvel[6:]
        info["rng"], noise_rng = jax.random.split(info["rng"])
        noisy_joint_vel = (
                joint_vel
                + (2 * jax.random.uniform(noise_rng, shape=joint_vel.shape) - 1)
                * self._config.noise_config.level
                * self._config.noise_config.scales.joint_vel
        )

        linvel = self.get_local_linvel(data, "pelvis")
        info["rng"], noise_rng = jax.random.split(info["rng"])
        noisy_linvel = (
                linvel
                + (2 * jax.random.uniform(noise_rng, shape=linvel.shape) - 1)
                * self._config.noise_config.level
                * self._config.noise_config.scales.linvel
        )

        state = jp.hstack([
            data.xpos[1:].flatten(),
            data.xquat[1:].flatten(),
            data.cvel[1:].flatten(),
            # Tracking errors against the current reference frame.
            (data.xpos[1:] - self.obs_traj_xpos[info['step']][1:]).flatten(),
            (data.xquat[1:] - self.obs_traj_xquat[info['step']][1:]).flatten(),
            (data.cvel[1:] - self.obs_traj_cvel[info['step']][1:]).flatten(),
            # Reference body poses expressed in the reference root frame.
            self.obs_traj_local_pos_no_world[info['step']].flatten(),
            self.obs_traj_local_xquat_no_world[info['step']].flatten(),
            info["last_act"].flatten(),
        ])

        accelerometer = self.get_accelerometer(data, "pelvis")
        global_angvel = self.get_global_angvel(data, "pelvis")
        feet_vel = data.sensordata[self._foot_linvel_sensor_adr].ravel()
        root_height = data.qpos[2]

        privileged_state = jp.hstack([
            state,
            gyro,  # 3
            accelerometer,  # 3
            gravity,  # 3
            linvel,  # 3
            global_angvel,  # 3
            joint_angles,
            joint_vel,
            root_height,  # 1
            data.actuator_force,  # 29
            contact,  # 2
            feet_vel,  # 4*3
            info["feet_air_time"],  # 2
        ])

        return {
            "state": state,
            # Fix: previously this returned `state`, discarding the computed
            # privileged observation entirely.
            "privileged_state": privileged_state,
        }

    def get_history_obs(self, obs, info):
        """Append the current observation to a rolling history and return stacked obs."""
        current = obs['state']
        if 'obs_history' in info:
            # Drop the oldest frame, append the newest.
            history = info['obs_history']
            history = jp.concatenate([history[current.shape[0]:], current])
        else:
            # First call: start from an all-zero history of obs_history_len frames.
            history = jp.tile(jp.zeros_like(current), self._config.obs_history_len)

        info['obs_history'] = history
        return {
            "state": jp.concatenate([current, history]),
            "privileged_state": obs['privileged_state'],
        }

    def is_health(self, data):
        """True while root height stays within 90%-110% of the reference height range."""
        height = data.qpos[2]
        lower_bound = self.traj_height_min * 0.9
        upper_bound = self.traj_height_max * 1.1
        return (lower_bound < height) & (height < upper_bound)

    def _get_reward(
            self,
            data: mjx.Data,
            action: jax.Array,
            info: dict[str, Any],
            metrics: dict[str, Any],
            first_contact: jax.Array,
            done: jax.Array,
    ) -> dict[str, jax.Array]:
        """Compute all unscaled reward terms for the current step.

        Returns a dict keyed by names in ``reward_config.scales``; the caller
        (``step``) multiplies each term by its configured scale.
        """
        del metrics  # Unused.
        l_contact_force = mjx_env.get_sensor_data(
            self.mj_model, data, "left_foot_force"
        )
        r_contact_force = mjx_env.get_sensor_data(
            self.mj_model, data, "right_foot_force"
        )
        balance_support, balance_mismatch, balance_close = self._reward_support_polygon(data, self.obs_traj_geom_xpos[info["step"]])
        # NOTE(review): "regularization_in_the_air" has a configured scale but is
        # commented out below, so its metric always stays zero — confirm intentional.
        return {
            "balance_support": balance_support,
            "balance_mismatch": balance_mismatch,
            "balance_close": balance_close,
            "tracking_body_position": self._reward_tracking_body_position(data, info),
            "tracking_body_rotation": self._reward_tracking_body_rotation(data, info),
            # "tracking_body_rotation": self._reward_tracking_body_rotation(data.xquat, self.obs_traj_xquat[info["step"]]),
            "tracking_body_velocity": self._reward_tracking_body_velocity(data, info),
            "tracking_body_angular_velocity": self._reward_tracking_body_angular_velocity(data, info),
            "tracking_dof_position": self._reward_tracking_dof_qpos(data.qpos, self.obs_traj_qpos[info["step"]]),
            "tracking_dof_velocity":self._reward_tracking_dof_qvel(data.qvel, self.obs_traj_qvel[info["step"]]),

            "limit_dof_position_limits":self._cost_joint_pos_limits(data),
            "limit_termination": self._cost_termination_limits(done),

            "regularization_torque": self._cost_torques_regularization(data),
            "regularization_dof_velocity": self._cost_dof_velocity_regularization(data),
            "regularization_dof_acceleration": self._cost_dof_acceleration_regularization(data),
            "regularization_action_rate": self._cost_action_rate_regularization(action, info["last_act"]),
            "regularization_feet_air_time": self._reward_feet_air_time_regularization(info["feet_air_time"], first_contact),
            "regularization_feet_contact_force": self._cost_contact_force_regularization(data, l_contact_force, r_contact_force),
            "regularization_stumble": self._cost_stumble_regularization(data, l_contact_force, r_contact_force),
            "regularization_slippage": self._cost_slippage_regularization(data, l_contact_force, r_contact_force),
            "regularization_feet_orientation": self._cost_orientation_regularization(data),
            # "regularization_in_the_air": self._cost_in_the_air_regularization(l_contact_force, r_contact_force),
        }

    def _cost_collision(self, data: mjx.Data) -> jax.Array:
        """Flag hand-thigh self-collision on either side."""
        left_hit = collision.geoms_colliding(
            data, self._left_hand_geom_id, self._left_thigh_geom_id
        )
        right_hit = collision.geoms_colliding(
            data, self._right_hand_geom_id, self._right_thigh_geom_id
        )
        return jp.any(left_hit | right_hit)

    # def _reward_tracking_qpos(self,
    #                      agent_pos: jax.Array,
    #                      expert_pos: jax.Array) -> jax.Array:
    #     joint_ranges = self._uppers - self._lowers
    #     joint_ranges = jp.maximum(joint_ranges, 1e-8)
    #     diff = jp.sum(jp.square((agent_pos[7:] - expert_pos[7:]) / joint_ranges))
    #     return jp.exp(-diff / self._config.reward_config.tracking_sigma_qpos)
    #
    #
    # def _reward_tracking_qvel(self,
    #                      agent_vel: jax.Array,
    #                      expert_vel: jax.Array) -> jax.Array:
    #     diff = jp.sum(jp.square((agent_vel[6:] - expert_vel[6:]) / (8*jp.pi)))
    #     return jp.exp(-diff / self._config.reward_config.tracking_sigma_qvel)

    def _reward_health(self, data) -> jax.Array:
        """Reward of 1.0 while the robot height is within the healthy range."""
        healthy = self.is_health(data)
        return healthy

    # def _reward_tracking_qpos(self, expert_pos: jax.Array, agent_pos: jax.Array):
    #     pos_error = jp.linalg.norm(agent_pos[7:] - expert_pos[7:])
    #     pos_tolerance = tolerance(pos_error, bounds=(0.0, 0.0), margin=1.0, sigmoid="linear")
    #     return pos_tolerance
    #
    # def _reward_tracking_qvel(self, expert_vel: jax.Array, agent_vel: jax.Array):
    #     # 位置和速度的误差
    #     vel_error = jp.linalg.norm(agent_vel[6:] - expert_vel[6:])
    #     # jax.debug.print("pos_diff = {}", pos_error)
    #     # jax.debug.print("vel_diff = {}", vel_error)
    #     vel_tolerance = tolerance(vel_error, bounds=(0.0, 0.0), margin=10.0, sigmoid="linear")
    #     return vel_tolerance

    def _cost_termination_limits(self, done: jax.Array) -> jax.Array:
        """One-off penalty of -60 applied on the termination step."""
        return -60.0 * done

    def _reward_alive(self) -> jax.Array:
        """Constant survival bonus."""
        alive_bonus = jp.array(1.0)
        return alive_bonus

    # finished
    def _reward_tracking_body_position(self, data: mjx.Data, info):
        """Gaussian reward on root-relative body-position error vs. the reference."""
        root_rot = data.xmat[1].reshape(3, 3)
        agent_local_pos = (data.xpos[1:] - data.xpos[1:2]) @ root_rot
        expert_local_pos = self.obs_traj_local_pos_no_world[info["step"]]
        err = jp.linalg.norm(agent_local_pos - expert_local_pos)
        sigma_pos = 0.6
        weight = 30
        # exp(-||e||^2 / sigma^2), scaled by weight.
        return weight * jp.exp(-err ** 2 / sigma_pos ** 2)

    # def _reward_tracking_body_rotation(self, agent_xquat, expert_xquat):
    #     rot_batch1 = Rotation.from_quat(agent_xquat[1:])  # (N,4)数组
    #     rot_batch2 = Rotation.from_quat(expert_xquat[1:])  # (N,4)数组
    #     diff = rot_batch2 * rot_batch1.inv()
    #     # norm
    #     diff = diff.magnitude() / jp.pi
    #     # mean
    #     diff = jp.mean(diff)
    #     # goss
    #     sigma_pos = 0.3
    #     weight = 20
    #     jax.debug.print("body_rotation={}", [diff, weight * jp.exp(-diff ** 2 / sigma_pos ** 2)])
    #     return weight*jp.exp(-diff ** 2 / sigma_pos ** 2)

    # finished
    def _reward_tracking_body_rotation(self, data: mjx.Data, info):
        """Gaussian reward on root-relative body-rotation error vs. the reference.

        Uses the mean geodesic angle (normalized by pi) between the agent's and
        the reference's root-relative body orientations.
        """
        # Root-relative orientation of every non-world body, scalar-first
        # (w, x, y, z) as produced by brax quat_mul/quat_inv on MuJoCo xquat.
        agent_xquat = vmap(quat_mul, in_axes=(None, 0))(quat_inv(data.xquat[1]), data.xquat[1:])
        expert_xquat = self.obs_traj_local_xquat_no_world[info['step']]
        # Fix: Rotation.from_quat expects scalar-last (x, y, z, w) quaternions,
        # but MuJoCo/brax quats are scalar-first — reorder components first.
        to_xyzw = jp.array([1, 2, 3, 0])
        rot_agent = Rotation.from_quat(agent_xquat[:, to_xyzw])  # (N, 4)
        rot_expert = Rotation.from_quat(expert_xquat[:, to_xyzw])  # (N, 4)
        diff = rot_expert * rot_agent.inv()
        # Geodesic angle per body, normalized to [0, 1], then averaged.
        diff = jp.mean(diff.magnitude() / jp.pi)
        sigma_pos = 0.3
        weight = 20
        return weight * jp.exp(-diff ** 2 / sigma_pos ** 2)

    # finished
    def _reward_tracking_body_velocity(self, data: mjx.Data, info):
        """Gaussian reward on local-frame body velocity tracking error (cvel[:, :3]).

        NOTE(review): MuJoCo cvel stores angular velocity in components [:3] and
        linear in [3:]; this uses [:3] — confirm the naming matches intent.
        """
        step_idx = info["step"]
        agent_vel = self.global_to_local_vel(data.xmat[1:], data.cvel[1:, :3])
        expert_vel = self.global_to_local_vel(
            self.obs_traj_xmat[step_idx][1:], self.obs_traj_cvel[step_idx][1:, :3]
        )
        err = jp.linalg.norm(agent_vel - expert_vel)
        sigma_pos = 3
        weight = 8
        return weight * jp.exp(-err ** 2 / sigma_pos ** 2)

    # finished
    def _reward_tracking_body_angular_velocity(self, data: mjx.Data, info):
        """Gaussian reward on local-frame angular velocity tracking error (cvel[:, 3:]).

        NOTE(review): MuJoCo cvel stores angular velocity in components [:3] and
        linear in [3:]; this uses [3:] — confirm the naming matches intent.
        """
        step_idx = info["step"]
        agent_angvel = self.global_to_local_vel(data.xmat[1:], data.cvel[1:, 3:])
        expert_angvel = self.global_to_local_vel(
            self.obs_traj_xmat[step_idx][1:], self.obs_traj_cvel[step_idx][1:, 3:]
        )
        err = jp.linalg.norm(agent_angvel - expert_angvel)
        sigma_pos = 10.0
        weight = 8
        return weight * jp.exp(-err ** 2 / sigma_pos ** 2)

    def _reward_tracking_dof_qpos(self, agent_pos, expert_pos):
        """Gaussian reward on joint-position tracking error (freejoint excluded)."""
        joint_err = agent_pos[7:] - expert_pos[7:]
        err_norm = jp.linalg.norm(joint_err)
        # exp(-||e||^2 / 0.7^2), scaled by 32.
        return 32 * jp.exp(-err_norm ** 2 / 0.7 ** 2)

    def _reward_tracking_dof_qvel(self, agent_qvel, expert_qvel):
        """Gaussian reward on joint-velocity tracking error (freejoint excluded)."""
        joint_err = agent_qvel[6:] - expert_qvel[6:]
        err_norm = jp.linalg.norm(joint_err)
        # exp(-||e||^2 / 10^2), scaled by 16.
        return 16 * jp.exp(-err_norm ** 2 / 10 ** 2)

    def _cost_joint_pos_limits(self, data: mjx.Data) -> jax.Array:
        """Penalize joints outside their position limits (-30 per violation, averaged)."""
        joint_pos = data.qpos[7:]
        out_of_range = (joint_pos < self._lowers) | (joint_pos > self._uppers)
        return jp.mean(out_of_range.astype(jp.float32) * (-30.0))

    def _cost_torques_regularization(self, data: mjx.Data) -> jax.Array:
        """Small penalty proportional to actuator torque magnitude."""
        torque_norm = jp.linalg.norm(data.qfrc_actuator[6:])
        return -2.5e-5 * torque_norm

    def _cost_dof_velocity_regularization(self, data: mjx.Data) -> jax.Array:
        """Penalty on squared joint-velocity norm."""
        vel_sq = jp.linalg.norm(data.qvel[6:]) ** 2
        return -1e-3 * vel_sq

    def _cost_dof_acceleration_regularization(self, data: mjx.Data) -> jax.Array:
        """Penalty proportional to joint-acceleration norm."""
        acc_norm = jp.linalg.norm(data.qacc[6:])
        return -3.0e-6 * acc_norm

    def _cost_action_rate_regularization(
            self, act: jax.Array, last_act: jax.Array
    ) -> jax.Array:
        """Penalty on the squared norm of the action change between steps."""
        delta = act - last_act
        return -1.5 * jp.linalg.norm(delta) ** 2

    def _reward_feet_air_time_regularization(
            self,
            air_time: jax.Array,
            first_contact: jax.Array,
    ) -> jax.Array:
        """Reward swing phases longer than 0.25s for feet that just touched down."""
        rewarded_air_time = (air_time - 0.25) * first_contact
        return 250.0 * jp.max(rewarded_air_time)


    def _cost_contact_force_regularization(self, data: mjx.Data, l_contact_force, r_contact_force) -> jax.Array:
        """Squared penalty on foot contact forces, normalized by max_contact_force."""
        cap = self._config.reward_config.max_contact_force
        left_sq = jp.linalg.norm(l_contact_force / cap) ** 2
        right_sq = jp.linalg.norm(r_contact_force / cap) ** 2
        return -0.2 * (right_sq + left_sq)

    def _cost_stumble_regularization(self, data: mjx.Data, l_contact_force, r_contact_force):
        def _single_penalty(force: jax.Array) -> jax.Array:
            """对单脚力的惩罚"""
            f_xy = jp.linalg.norm(force[:2])  # sqrt(Fx²+Fy²)
            f_z = force[2] + 1e-6  # 防止除 0
            # 条件成立 -> 1，否则 0
            trigger = jp.where(f_xy > 5.0 * f_z, 1.0, 0.0)
            return trigger * (-3e-4)

        l_pen = _single_penalty(l_contact_force)
        r_pen = _single_penalty(r_contact_force)
        return l_pen + r_pen  # 两脚同时检查

    # Penalizes horizontal foot velocity while that foot carries vertical load.
    def _cost_slippage_regularization(self,  data: mjx.Data, l_contact_force, r_contact_force):
        """Foot-slip cost: squared planar velocity of a foot in contact (fz >= 1 N).

        NOTE(review): MuJoCo documents `cvel` as [3 angular; 3 linear], so
        `[:3]` is the *angular* part — yet it is passed here as the linear
        velocity (and `[3:]` as angular). The rest of this file uses the same
        swapped convention (see `_reward_tracking_body_angular_velocity`), so
        this may be intentional, but confirm against the mjData.cvel layout.
        """
        l_vel, l_ang = self.agent_world_to_root_vel(data, data.cvel[self._mj_model.body("left_ankle_roll_link").id][:3], data.cvel[self._mj_model.body("left_ankle_roll_link").id][3:])
        r_vel, r_ang = self.agent_world_to_root_vel(data, data.cvel[self._mj_model.body("right_ankle_roll_link").id][:3], data.cvel[self._mj_model.body("right_ankle_roll_link").id][3:])
        weight = -30.0
        def _single(v: jax.Array, fz: jax.Array) -> jax.Array:
            # A foot counts as "in contact" when its vertical force >= 1 N.
            in_contact = fz >= 1.0
            v_sq = jp.square(jp.linalg.norm(v))
            return jp.where(in_contact,  weight * v_sq, 0.0)
        # Only the planar (xy) components of each foot's velocity are penalized.
        return _single(l_vel[:2], l_contact_force[2]) + _single(r_vel[:2], r_contact_force[2])

    def _cost_orientation_regularization(self, data: mjx.Data) -> jax.Array:
        """Foot-orientation penalty applied while a foot is near the ground.

        For each foot whose geom is below 5 cm, adds |z-component of that
        foot's up-vector sensor| scaled by the weight.

        BUG FIX: the right foot previously read the LEFT foot's sensor
        ("left_foot_upvector"); it now reads "right_foot_upvector",
        mirroring the left/right "*_foot_force" sensor naming used elsewhere
        in this file.

        NOTE(review): penalizing |up_z| grows as the foot becomes flat
        relative to world z — confirm the sensor's frame convention matches
        that intent.
        """
        left_up = mjx_env.get_sensor_data(
            self.mj_model, data, "left_foot_upvector"
        )
        right_up = mjx_env.get_sensor_data(
            self.mj_model, data, "right_foot_upvector"
        )

        left_z = data.geom_xpos[self._mj_model.geom("left_foot").id][2]
        right_z = data.geom_xpos[self._mj_model.geom("right_foot").id][2]

        # Gate each foot's term on being close to the ground (< 5 cm).
        left_indicator = jp.where(left_z < 0.05, 1.0, 0.0)
        right_indicator = jp.where(right_z < 0.05, 1.0, 0.0)

        left_gz_norm = jp.abs(left_up[2])
        right_gz_norm = jp.abs(right_up[2])

        weight = -62.5
        return weight*(
                left_gz_norm * left_indicator +
                right_gz_norm * right_indicator
        )


    def _cost_in_the_air_regularization(
            self,
            l_contact_force,
            r_contact_force
    ) -> jax.Array:
        weight = -50
        return weight*(((l_contact_force[2] < 1.0) & (r_contact_force[2] < 1.0)).astype(jp.float32))

    def _reward_support_polygon(self, data, traj_geom_xpos):
        """Support-polygon shaping terms.

        Returns a 3-tuple:
          * 160 * reward_com: Gaussian reward for keeping the CoM (xy, root
            frame) over the stance foot during reference single support.
          * -250 * cost_mismatch: per foot whose measured contact state
            disagrees with the reference trajectory's implied contact.
          * -1000 * cost_close: hinge penalty when the feet are closer than
            0.16 m.
        """
        left_pos = self.agent_geom_xyz_xpos_to_domain(data, self._mj_model.geom("left_foot").id)
        right_pos = self.agent_geom_xyz_xpos_to_domain(data, self._mj_model.geom("right_foot").id)
        left_pos_xy = left_pos[:2]
        # BUG FIX: was `left_pos[:2]` (copy-paste), which pointed the
        # right-stance support center at the LEFT foot.
        right_pos_xy = right_pos[:2]
        left_pos_z = data.geom_xpos[self._mj_model.geom("left_foot").id][2]
        right_pos_z = data.geom_xpos[self._mj_model.geom("right_foot").id][2]

        # The lower foot is treated as the stance foot; equal heights fall
        # through to a zero support center.
        left_contact = left_pos_z < right_pos_z
        right_contact = left_pos_z > right_pos_z

        support_center = jp.where(
            left_contact,
            left_pos_xy,
            jp.where(
                right_contact,
                right_pos_xy,
                jp.array([0.0, 0.0])
            )
        )

        # Reference is in single support when its foot-height gap exceeds 5 cm.
        height_left_sub_right = traj_geom_xpos[self._mj_model.geom("left_foot").id][2] - traj_geom_xpos[self._mj_model.geom("right_foot").id][2]
        is_single_support = (jp.abs(height_left_sub_right) > 0.05).astype(jp.float32)

        com_xy = self.agent_world_to_root_xy(data, data.subtree_com[1])
        com_dist = jp.linalg.norm(com_xy - support_center)
        sigma = 0.1
        reward_com = jp.exp(-com_dist ** 2 / sigma ** 2) * is_single_support

        # A foot is "grounded" when its vertical force sensor reads >= 1 N.
        left_grounded_contact = mjx_env.get_sensor_data(
            self.mj_model, data, "left_foot_force"
        )[2] >= 1.0
        right_grounded_contact = mjx_env.get_sensor_data(
            self.mj_model, data, "right_foot_force"
        )[2] >= 1.0

        # Reference contact: the lower reference foot during single support,
        # both feet during double support.
        traj_left_grounded_contact = jp.where(
            is_single_support,
            height_left_sub_right < 0,
            True
        )
        traj_right_grounded_contact = jp.where(
            is_single_support,
            height_left_sub_right > 0,
            True
        )
        cost_mismatch = (left_grounded_contact != traj_left_grounded_contact).astype(jp.float32) + (right_grounded_contact != traj_right_grounded_contact).astype(jp.float32)

        feet_dist = jp.linalg.norm(left_pos - right_pos)
        cost_close = jp.maximum(0.16 - feet_dist, 0.0)
        return 160*reward_com, (-250)*cost_mismatch, (-1000)*cost_close

    def agent_world_to_root_vel(self, data, world_linear_vel, world_angular_vel):
        """Rotates world-frame linear and angular velocities into the root frame."""
        world_to_root = data.xmat[1].reshape(3, 3).T
        return world_to_root @ world_linear_vel, world_to_root @ world_angular_vel

    def agent_geom_xyz_xpos_to_domain(self, data, geom_id):
        """Geom position expressed in the root body's frame, clipped to +-1e5."""
        offset = data.geom_xpos[geom_id] - data.xpos[1]
        root_rot = data.xmat[1].reshape(3, 3)
        local_xyz = root_rot.T @ offset
        return jp.clip(local_xyz, -1e5, 1e5)

    def agent_world_to_root_xy(self, data: "mjx.Data", world_pos):
        """XY components of a world-frame point expressed in the root frame."""
        root_rot_t = data.xmat[1].reshape(3, 3).T
        return (root_rot_t @ (world_pos - data.xpos[1]))[:2]

    def global_to_local_vel(self, xmat, cvel):
        """Rotates a batch of world-frame vectors into a single body's frame.

        NOTE(review): only `xmat[1]` (one rotation) is applied to every row
        of `cvel`; callers pass `xmat` already sliced with `[1:]`, so this
        selects body index 2's frame — confirm that (and not the root) is
        the intended frame.
        """
        rot = xmat[1].reshape(3, 3)
        return jp.einsum('ij,bj->bi', rot.T, cvel)







