# Copyright 2025 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Joystick task for Unitree G1."""

from typing import Any, Dict, Optional, Union

import jax
import jax.numpy as jp
from ml_collections import config_dict
from mujoco import mjx
from mujoco.mjx._src import math
import numpy as np

from mujoco_playground._src import collision
from mujoco_playground._src import gait
from mujoco_playground._src import mjx_env
from mujoco_playground._src.collision import geoms_colliding
from mujoco_playground._src.locomotion.g1 import base as g1_base
from mujoco_playground._src.locomotion.g1 import g1_constants as consts
from mujoco_playground._src.reward import tolerance


def default_config() -> config_dict.ConfigDict:
  """Build the default configuration for the G1 trajectory-tracking task."""
  # Observation-noise scales (multiplied by `level`; set level=0.0 to disable).
  noise = config_dict.create(
      level=1.0,  # Set to 0.0 to disable noise.
      scales=config_dict.create(
          joint_pos=0.03,
          joint_vel=1.5,
          gravity=0.05,
          linvel=0.1,
          gyro=0.2,
      ),
  )

  # Reward weights and shaping parameters.
  reward = config_dict.create(
      scales=config_dict.create(
          # Tracking related rewards.
          tracking_qpos=2.0,
          tracking_qvel=10.0,
          alive=0.0,
          termination=-100.0,
          collision=-0.1,
          health=4.0,
      ),
      tracking_sigma_qpos=0.25,
      tracking_sigma_qvel=0.25,
      max_foot_height=0.15,
      base_height_target=0.5,
      max_contact_force=500.0,
  )

  # Random external pushes applied to the base during training.
  push = config_dict.create(
      enable=True,
      interval_range=[5.0, 10.0],
      magnitude_range=[0.1, 2.0],
  )

  # Joystick command sampling parameters.
  command = config_dict.create(
      # Uniform distribution for command amplitude.
      a=[1.0, 0.8, 1.0],
      # Probability of not zeroing out new command.
      b=[0.9, 0.25, 0.5],
  )

  return config_dict.create(
      ctrl_dt=0.02,
      sim_dt=0.002,
      episode_length=1000,
      action_repeat=1,
      action_scale=0.1,
      history_len=1,
      restricted_joint_range=False,
      soft_joint_pos_limit_factor=0.95,
      noise_config=noise,
      reward_config=reward,
      push_config=push,
      command_config=command,
      lin_vel_x=[-1.0, 1.0],
      lin_vel_y=[-0.5, 0.5],
      ang_vel_yaw=[-1.0, 1.0],
  )

from pathlib import Path


def get_project_root() -> Path:
  """Return the project root by walking up from this file.

  Starting at this file's directory, ascend parent directories until one
  containing ``pyproject.toml`` or ``.git`` is found.

  Returns:
    Path of the first ancestor directory containing a project marker.

  Raises:
    FileNotFoundError: if the filesystem root is reached without finding
      any marker.
  """
  # resolve() canonicalizes symlinks / relative paths so the upward walk is
  # well-defined regardless of how the module was imported.
  current = Path(__file__).resolve().parent
  markers = ("pyproject.toml", ".git")
  while not any((current / marker).exists() for marker in markers):
    if current.parent == current:  # Reached the filesystem root.
      raise FileNotFoundError("找不到项目根目录！")
    current = current.parent
  return current


class Traj(g1_base.G1Env):
  """Track a pre-recorded reference motion trajectory on the Unitree G1.

  The policy outputs residual joint-position offsets around a reference
  qpos trajectory loaded from an .npz file; rewards are based on how
  closely the simulated qpos/qvel track the reference.
  """

  def __init__(
      self,
      task: str = "traj",
      # NOTE(review): default_config() is evaluated once at class-definition
      # time, so the same ConfigDict instance is shared by all instances that
      # use the default — confirm it is never mutated in place.
      config: config_dict.ConfigDict = default_config(),
      config_overrides: Optional[Dict[str, Union[str, int, list[Any]]]] = None,
  ):
    super().__init__(
        xml_path=consts.task_to_xml(task).as_posix(),
        config=config,
        config_overrides=config_overrides,
    )
    self._post_init()

    # Load the reference (expert) trajectory used as the tracking target.
    traj = jp.load(str(get_project_root()) + '/npz/g1/dance1_subject1.npz')
    traj_qpos = traj['qpos'][2000:2500]
    # traj_qpos has shape (T, D); after the slice above T == 500 frames.
    # NOTE: despite the name, D below is the TIME dimension (shape[0]), which
    # is the correct row count for the zero-padding columns inserted next.
    D = traj_qpos.shape[0]
    # The recorded trajectory is missing some joints present in this model
    # (presumably wrist joints — TODO confirm against the npz joint layout);
    # insert two zero columns at each gap so the width matches model nq.
    traj_qpos = jp.concatenate([
        traj_qpos[:, :20],
        jp.zeros((D, 2)),  # two zero-padded joint columns
        traj_qpos[:, 20:25],
        jp.zeros((D, 2)),
        traj_qpos[:, 25:],
        jp.zeros((D, 2)),
    ], axis=-1)  # concatenation is along the joint (feature) axis
    self.obs_traj_qpos  = jp.array(traj_qpos)
    # Actuated-joint reference (drops the 7-dof floating-base qpos prefix).
    self.ctrl_traj_qpos = jp.array(traj_qpos[:, 7:])

    traj_qvel = traj['qvel'][2000:2500]
    # Same padding scheme for qvel; indices shift by one because the base
    # velocity occupies 6 (not 7) entries.
    D = traj_qvel.shape[0]  # time dimension T, as above
    traj_qvel = jp.concatenate([
        traj_qvel[:, :19],
        jp.zeros((D, 2)),  # two zero-padded joint columns
        traj_qvel[:, 19:24],
        jp.zeros((D, 2)),
        traj_qvel[:, 24:],
        jp.zeros((D, 2)),
    ], axis=-1)  # concatenation is along the joint (feature) axis
    self.obs_traj_qvel = jp.array(traj_qvel)

    # Base-height envelope of the reference motion; used by is_health() to
    # decide whether the robot is still within a plausible height band.
    self.traj_height_min = jp.min(self.obs_traj_qpos[:, 2])
    self.traj_height_max = jp.max(self.obs_traj_qpos[:, 2])

  def _post_init(self) -> None:
    """Cache model indices, joint limits, and sensor addresses."""
    self._init_q = jp.array(self._mj_model.keyframe("knees_bent").qpos)
    self._default_pose = jp.array(
        self._mj_model.keyframe("knees_bent").qpos[7:]
    )

    # Note: First joint is freejoint.
    self._lowers, self._uppers = self.mj_model.jnt_range[1:].T
    c = (self._lowers + self._uppers) / 2
    r = self._uppers - self._lowers
    # Soft limits shrink the joint range symmetrically about its center.
    self._soft_lowers = c - 0.5 * r * self._config.soft_joint_pos_limit_factor
    self._soft_uppers = c + 0.5 * r * self._config.soft_joint_pos_limit_factor

    # qposadr - 7 converts absolute qpos addresses into indices within the
    # actuated-joint slice qpos[7:] (freejoint occupies the first 7 entries).
    waist_indices = []
    waist_joint_names = [
        "waist_yaw",
        "waist_roll",
        "waist_pitch",
    ]
    for joint_name in waist_joint_names:
      waist_indices.append(
          self._mj_model.joint(f"{joint_name}_joint").qposadr - 7
      )
    self._waist_indices = jp.array(waist_indices)

    arm_indices = []
    arm_joint_names = [
        "shoulder_roll",
        "shoulder_yaw",
        "wrist_roll",
        "wrist_pitch",
        "wrist_yaw",
    ]
    for side in ["left", "right"]:
      for joint_name in arm_joint_names:
        arm_indices.append(
            self._mj_model.joint(f"{side}_{joint_name}_joint").qposadr - 7
        )
    self._arm_indices = jp.array(arm_indices)

    hip_indices = []
    hip_joint_names = [
        "hip_roll",
        "hip_yaw",
    ]
    for side in ["left", "right"]:
      for joint_name in hip_joint_names:
        hip_indices.append(
            self._mj_model.joint(f"{side}_{joint_name}_joint").qposadr - 7
        )
    self._hip_indices = jp.array(hip_indices)

    knee_indices = []
    knee_joint_names = ["knee"]
    for side in ["left", "right"]:
      for joint_name in knee_joint_names:
        knee_indices.append(
            self._mj_model.joint(f"{side}_{joint_name}_joint").qposadr - 7
        )
    self._knee_indices = jp.array(knee_indices)

    # Per-joint weights (29 entries, matching qpos[7:]). NOTE(review): these
    # weights are defined here but not referenced by any visible reward —
    # possibly left over from a previous reward formulation.
    # fmt: off
    self._weights = jp.array([
        0.01, 1.0, 1.0, 0.01, 1.0, 1.0,  # left leg.
        0.01, 1.0, 1.0, 0.01, 1.0, 1.0,  # right leg.
        1.0, 1.0, 1.0,  # waist.
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,  # left arm.
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,  # right arm.
    ])
    # fmt: on

    self._torso_body_id = self._mj_model.body(consts.ROOT_BODY).id
    self._torso_mass = self._mj_model.body_subtreemass[self._torso_body_id]
    self._torso_imu_site_id = self._mj_model.site("imu_in_torso").id
    self._pelvis_imu_site_id = self._mj_model.site("imu_in_pelvis").id

    self._feet_site_id = np.array(
        [self._mj_model.site(name).id for name in consts.FEET_SITES]
    )
    self._hands_site_id = np.array(
        [self._mj_model.site(name).id for name in consts.HAND_SITES]
    )
    self._floor_geom_id = self._mj_model.geom("floor").id
    self._feet_geom_id = np.array(
        [self._mj_model.geom(name).id for name in consts.FEET_GEOMS]
    )

    # Addresses of the per-foot global linear-velocity sensor readings in
    # data.sensordata, one row of [adr, adr+dim) indices per foot site.
    foot_linvel_sensor_adr = []
    for site in consts.FEET_SITES:
      sensor_id = self._mj_model.sensor(f"{site}_global_linvel").id
      sensor_adr = self._mj_model.sensor_adr[sensor_id]
      sensor_dim = self._mj_model.sensor_dim[sensor_id]
      foot_linvel_sensor_adr.append(
          list(range(sensor_adr, sensor_adr + sensor_dim))
      )
    self._foot_linvel_sensor_adr = jp.array(foot_linvel_sensor_adr)

    # Geom ids used by the (currently disabled) collision cost.
    self._left_hand_geom_id = self._mj_model.geom("left_hand_collision").id
    self._right_hand_geom_id = self._mj_model.geom("right_hand_collision").id
    self._left_foot_geom_id = self._mj_model.geom("left_foot").id
    self._right_foot_geom_id = self._mj_model.geom("right_foot").id
    self._left_shin_geom_id = self._mj_model.geom("left_shin").id
    self._right_shin_geom_id = self._mj_model.geom("right_shin").id
    self._left_thigh_geom_id = self._mj_model.geom("left_thigh").id
    self._right_thigh_geom_id = self._mj_model.geom("right_thigh").id

  def reset(self, rng: jax.Array) -> mjx_env.State:
    """Reset the environment with a randomized initial state.

    Randomizes base xy position, yaw, joint angles (multiplicative noise
    around the keyframe pose), and base velocity, then samples the push
    schedule and zero-initializes the bookkeeping info dict.
    """
    qpos = self._init_q
    qvel = jp.zeros(self.mjx_model.nv)

    # x=+U(-0.5, 0.5), y=+U(-0.5, 0.5), yaw=U(-3.14, 3.14).
    rng, key = jax.random.split(rng)
    dxy = jax.random.uniform(key, (2,), minval=-0.5, maxval=0.5)
    qpos = qpos.at[0:2].set(qpos[0:2] + dxy)
    rng, key = jax.random.split(rng)
    yaw = jax.random.uniform(key, (1,), minval=-3.14, maxval=3.14)
    quat = math.axis_angle_to_quat(jp.array([0, 0, 1]), yaw)
    new_quat = math.quat_mul(qpos[3:7], quat)
    qpos = qpos.at[3:7].set(new_quat)

    # qpos[7:]=*U(0.5, 1.5)
    rng, key = jax.random.split(rng)
    qpos = qpos.at[7:].set(
        qpos[7:] * jax.random.uniform(key, (29,), minval=0.5, maxval=1.5)
    )

    # d(xyzrpy)=U(-0.5, 0.5)
    rng, key = jax.random.split(rng)
    qvel = qvel.at[0:6].set(
        jax.random.uniform(key, (6,), minval=-0.5, maxval=0.5)
    )

    data = mjx_env.init(self.mjx_model, qpos=qpos, qvel=qvel, ctrl=qpos[7:])

    # NOTE(review): cmd_rng is split off but never used here — likely a
    # leftover from the joystick-command variant of this task.
    rng, cmd_rng = jax.random.split(rng)

    # Sample push interval.
    rng, push_rng = jax.random.split(rng)
    push_interval = jax.random.uniform(
        push_rng,
        minval=self._config.push_config.interval_range[0],
        maxval=self._config.push_config.interval_range[1],
    )
    push_interval_steps = jp.round(push_interval / self.dt).astype(jp.int32)

    info = {
        "rng": rng,
        "step": 0,  # index into the reference trajectory
        "last_act": jp.zeros(self.mjx_model.nu),
        "last_last_act": jp.zeros(self.mjx_model.nu),
        "motor_targets": jp.zeros(self.mjx_model.nu),
        "push": jp.array([0.0, 0.0]),
        "push_step": 0,
        "push_interval_steps": push_interval_steps,
    }

    # Metrics include one entry per configured reward scale; scales without a
    # matching term in _get_reward (e.g. alive, collision) stay at zero.
    metrics = {}
    for k in self._config.reward_config.scales.keys():
      metrics[f"reward/{k}"] = jp.zeros(())
    metrics["qpos_diff"] = jp.zeros(())
    metrics["qvel_diff"] = jp.zeros(())
    obs = self._get_obs(data, info)
    reward, done = jp.zeros(2)
    return mjx_env.State(data, obs, reward, done, metrics, info)

  def step(self, state: mjx_env.State, action: jax.Array) -> mjx_env.State:
    """Advance one control step: apply pushes, track the reference, reward.

    The action is squashed with tanh and applied as a scaled residual on
    top of the reference joint-position targets for the current frame.
    """
    state.info["rng"], push1_rng, push2_rng = jax.random.split(
        state.info["rng"], 3
    )
    # Random planar push: direction uniform in [0, 2pi), magnitude uniform in
    # the configured range; only applied every push_interval_steps steps.
    push_theta = jax.random.uniform(push1_rng, maxval=2 * jp.pi)
    push_magnitude = jax.random.uniform(
        push2_rng,
        minval=self._config.push_config.magnitude_range[0],
        maxval=self._config.push_config.magnitude_range[1],
    )
    push = jp.array([jp.cos(push_theta), jp.sin(push_theta)])
    push *= (
        jp.mod(state.info["push_step"] + 1, state.info["push_interval_steps"])
        == 0
    )
    push *= self._config.push_config.enable
    qvel = state.data.qvel
    qvel = qvel.at[:2].set(push * push_magnitude + qvel[:2])
    data = state.data.replace(qvel=qvel)
    state = state.replace(data=data)
    # print(self.ctrl_traj[state.info["step"]].shape)
    # norm_action = jp.tanh(action)*(self._uppers - self._lowers)
    # Residual control: reference pose + tanh-squashed action * action_scale.
    norm_action = jp.tanh(action)
    motor_targets = self.ctrl_traj_qpos[state.info["step"]] + norm_action*self._config.action_scale
    data = mjx_env.step(
        self.mjx_model, state.data, motor_targets, self.n_substeps
    )
    state.info["motor_targets"] = motor_targets

    obs = self._get_obs(data, state.info)
    # NOTE(review): logical AND means the episode ends only when the robot is
    # BOTH outside the healthy height band AND falling/NaN — confirm OR was
    # not intended (as written, an unhealthy height alone never terminates).
    done = (~self.is_health(data)) & self._get_termination(data)

    rewards = self._get_reward(
        data, state.info, state.metrics, done
    )
    rewards = {
        k: v * self._config.reward_config.scales[k] for k, v in rewards.items()
    }
    # jax.debug.print("rewards = {}", rewards)
    reward = sum(rewards.values()) * self.dt
    # Mean absolute tracking errors over the actuated joints (for logging).
    state.metrics["qpos_diff"] = jp.mean(jp.abs(self.obs_traj_qpos[state.info["step"], 7:] - data.qpos[7:]))
    state.metrics["qvel_diff"] = jp.mean(jp.abs(self.obs_traj_qvel[state.info["step"], 6:] - data.qvel[6:]))

    state.info["push"] = push
    state.info["step"] += 1
    state.info["push_step"] += 1
    state.info["last_last_act"] = state.info["last_act"]
    state.info["last_act"] = norm_action * self._config.action_scale
    state.info["rng"], cmd_rng = jax.random.split(state.info["rng"])
    # Wrap the trajectory index on termination or after 500 steps (the loaded
    # reference has 500 frames). NOTE(review): "step" can reach 500 before the
    # wrap applies on the next call; JAX clamps out-of-bounds gather indices,
    # so frame 499 is effectively repeated once — confirm this is acceptable.
    state.info["step"] = jp.where(
        done | (state.info["step"] > 500),
        0,
        state.info["step"],
    )

    for k, v in rewards.items():
      state.metrics[f"reward/{k}"] = v

    done = done.astype(reward.dtype)
    state = state.replace(data=data, obs=obs, reward=reward, done=done)
    return state

  def _get_termination(self, data: mjx.Data) -> jax.Array:
    """True when the torso has flipped past horizontal or state has NaNs."""
    # Gravity projected into the torso frame; z <= 0 means the torso's up
    # axis points at or below the horizon.
    fall_termination = self.get_gravity(data, "torso")[-1] < 0.0
    return (
        fall_termination
        | jp.isnan(data.qpos).any()
        | jp.isnan(data.qvel).any()
    )

  def _get_obs(
      self, data: mjx.Data, info: dict[str, Any], reset=False
  ) -> mjx_env.Observation:
    """Build the policy observation (and a privileged variant).

    NOTE(review): all noisy_* quantities below are computed but NOT used in
    the returned observation (the clean signals are used instead), and
    joint-angle noise is disabled entirely — confirm whether observation
    noise is meant to be off for this task.
    """
    gyro = self.get_gyro(data, "pelvis")
    info["rng"], noise_rng = jax.random.split(info["rng"])
    noisy_gyro = (
        gyro
        + (2 * jax.random.uniform(noise_rng, shape=gyro.shape) - 1)
        * self._config.noise_config.level
        * self._config.noise_config.scales.gyro
    )

    gravity = data.site_xmat[self._pelvis_imu_site_id].T @ jp.array([0, 0, -1])
    info["rng"], noise_rng = jax.random.split(info["rng"])
    noisy_gravity = (
        gravity
        + (2 * jax.random.uniform(noise_rng, shape=gravity.shape) - 1)
        * self._config.noise_config.level
        * self._config.noise_config.scales.gravity
    )

    joint_angles = data.qpos[7:]
    info["rng"], noise_rng = jax.random.split(info["rng"])
    noisy_joint_angles = joint_angles  # noise intentionally (?) disabled

    joint_vel = data.qvel[6:]
    info["rng"], noise_rng = jax.random.split(info["rng"])
    # NOTE(review): unlike gyro/gravity above, this noise term is not scaled
    # by noise_config.level or scales.joint_vel — confirm it is deliberate.
    noisy_joint_vel = (
        joint_vel
        + (2 * jax.random.uniform(noise_rng, shape=joint_vel.shape) - 1)
    )


    linvel = self.get_local_linvel(data, "pelvis")
    info["rng"], noise_rng = jax.random.split(info["rng"])
    noisy_linvel = (
        linvel
        + (2 * jax.random.uniform(noise_rng, shape=linvel.shape) - 1)
    )

    # Policy state: base kinematics plus tracking errors against the
    # reference frame at info["step"].
    state = jp.hstack([
        linvel,  # 3
        gyro,  # 3
        gravity,  # 3
        joint_angles-self.ctrl_traj_qpos[info['step']],  # 29
        joint_vel-self.obs_traj_qvel[info['step'], 6:],  # 29
    ])

    accelerometer = self.get_accelerometer(data, "pelvis")
    global_angvel = self.get_global_angvel(data, "pelvis")
    root_height = data.qpos[2]

    privileged_state = jp.hstack([
        state,
        gyro,  # 3
        accelerometer,  # 3
        gravity,  # 3
        linvel,  # 3
        global_angvel,  # 3
        joint_angles if reset else self.ctrl_traj_qpos[info["step"]],
        joint_vel,
        root_height,  # 1
        data.actuator_force,  # 29
    ])

    # NOTE(review): privileged_state is computed above but the plain state is
    # returned under both keys — confirm whether "privileged_state" should be
    # privileged_state instead.
    return {
        "state": state,
        "privileged_state": state,
    }

  def is_health(self, data: mjx.Data) -> jax.Array:
      # Healthy when the base height stays within 90%..110% of the reference
      # trajectory's height envelope.
      return (self.traj_height_min * 0.9 < data.qpos[2]) & (data.qpos[2] < self.traj_height_max * 1.1)

  def _get_reward(
      self,
      data: mjx.Data,
      info: dict[str, Any],
      metrics: dict[str, Any],
      done: jax.Array,
  ) -> dict[str, jax.Array]:
    """Return the dict of unscaled reward terms for the current frame."""
    del metrics  # Unused.
    return {
        # Tracking rewards.
        "tracking_qpos": self._reward_tracking_qpos(
            self.obs_traj_qpos[info["step"]], data.qpos
        ),
        # "tracking_tolerance": self._reward_tracking_tolerance(self.obs_traj_qpos[info["step"]], data.qpos, self.obs_traj_qvel[info["step"]], data.qvel),
        "health": self._reward_health(data),
        "tracking_qvel": self._reward_tracking_qvel(
            self.obs_traj_qvel[info["step"]], data.qvel
        ),
        # "alive": self._reward_alive(),
        "termination": self._cost_termination(done),
        # "collision": self._cost_collision(data),
    }

  def _cost_collision(self, data: mjx.Data) -> jax.Array:
    """Penalize hand-thigh self-collisions (currently disabled in rewards)."""
    c = collision.geoms_colliding(
        data, self._left_hand_geom_id, self._left_thigh_geom_id
    )
    c |= collision.geoms_colliding(
        data, self._right_hand_geom_id, self._right_thigh_geom_id
    )
    return jp.any(c)

  # def _reward_tracking_qpos(self,
  #                      agent_pos: jax.Array,
  #                      expert_pos: jax.Array) -> jax.Array:
  #     joint_ranges = self._uppers - self._lowers
  #     joint_ranges = jp.maximum(joint_ranges, 1e-8)
  #     diff = jp.sum(jp.square((agent_pos[7:] - expert_pos[7:]) / joint_ranges))
  #     return jp.exp(-diff / self._config.reward_config.tracking_sigma_qpos)
  #
  #
  # def _reward_tracking_qvel(self,
  #                      agent_vel: jax.Array,
  #                      expert_vel: jax.Array) -> jax.Array:
  #     diff = jp.sum(jp.square((agent_vel[6:] - expert_vel[6:]) / (8*jp.pi)))
  #     return jp.exp(-diff / self._config.reward_config.tracking_sigma_qvel)

  def _reward_health(self, data: mjx.Data) -> jax.Array:
      # Boolean healthy flag used directly as a reward (scaled by config).
      return self.is_health(data)

  def _reward_tracking_qpos(self, expert_pos: jax.Array, agent_pos: jax.Array):
      # Reward in [0, 1]: 1 at zero joint-position error, decaying linearly
      # over a margin of 1.0 (Euclidean norm over the actuated joints).
      pos_error = jp.linalg.norm(agent_pos[7:] - expert_pos[7:])
      pos_tolerance = tolerance(pos_error, bounds=(0.0, 0.0), margin=1.0, sigmoid="linear")
      return pos_tolerance

  def _reward_tracking_qvel(self, expert_vel: jax.Array, agent_vel: jax.Array):
      # Joint-velocity tracking error (excludes the 6-dof floating base).
      vel_error = jp.linalg.norm(agent_vel[6:] - expert_vel[6:])
      # jax.debug.print("pos_diff = {}", pos_error)
      # jax.debug.print("vel_diff = {}", vel_error)
      # Wider margin than qpos: velocities have a larger natural scale.
      vel_tolerance = tolerance(vel_error, bounds=(0.0, 0.0), margin=10.0, sigmoid="linear")
      return vel_tolerance

  def _cost_termination(self, done: jax.Array) -> jax.Array:
    """Pass-through of the done flag; scaled negatively via the config."""
    return done

  def _reward_alive(self) -> jax.Array:
    """Constant alive bonus (scale is 0.0 by default, so it is a no-op)."""
    return jp.array(1.0)
