from gymnasium import spaces
import mujoco
import numpy as np
from scipy.spatial.transform import Rotation as R
import os


class PiperEnv:
    """MuJoCo reaching environment for the Piper robot arm.

    The agent drives the arm's actuators so that the end-effector site
    ("ee") reaches the target site ("target").  The API follows the
    classic gym convention: ``step`` returns ``(obs, reward, done, info)``
    and ``reset`` returns ``(obs, info)``.
    """

    def __init__(self, render_mode=None):
        """Load the Piper MJCF model and set up spaces and bookkeeping.

        render_mode: None, "human" (interactive viewer), or "rgb_array"
                     (offscreen frames via render()).
        """
        # Path to the MJCF model, relative to the working directory.
        self.xml_path = "piper_description/mujoco_model/piper_description.xml"

        # Load the model, allocate simulation state, and run forward
        # kinematics once so site poses are valid before the first step.
        self.model = mujoco.MjModel.from_xml_path(self.xml_path)
        self.data = mujoco.MjData(self.model)
        mujoco.mj_forward(self.model, self.data)

        # Snapshot of the initial state, restored by reset().
        self.qpos_init = self.data.qpos.copy()
        self.qvel_init = self.data.qvel.copy()
        self.time_init = self.data.time

        # Site ids: "ee" is the end-effector, "target" the goal marker.
        self.ee_site_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_SITE, "ee")
        self.target_site_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_SITE, "target")

        # Workspace box (meters) used to sample target points.
        xyz_low_limit, xyz_high_limit = np.array([-1, -1, 0]), np.array([1, 1, 1])
        self.target_space = spaces.Box(low=xyz_low_limit, high=xyz_high_limit, shape=(3,), dtype=np.float32)

        # One action per actuator, bounded by the actuator control range.
        self.action_low, self.action_high = self.model.actuator_ctrlrange.T.astype("float32")
        self.n_actions = self.model.nu
        self.action_space = spaces.Box(low=self.action_low, high=self.action_high, shape=(self.n_actions,), dtype=np.float32)

        # Observations: joint positions (nq) + ee pose (6) + target pose (6).
        self.n_obs = len(self.get_obs())
        self.max_episode_steps = 1000
        self.current_step = 0

        # Distance to the target at the previous reward computation,
        # used by the dense progress reward.
        self.last_dist2tar = self.get_dist_to_target()
        self.success_dist = 0.005
        self.sparse_reward = False

        self.render_mode = render_mode

        # Rendering resources are created lazily on first use.
        # Fix: self.viewer was read by _render_frame()/close() but never
        # initialized, raising AttributeError before the first render.
        self.viewer = None
        # Fix: offscreen frame size was referenced but never defined.
        self.render_width = 640
        self.render_height = 480

    def get_dist_to_target(self):
        """Return the Euclidean distance between the ee and target poses.

        NOTE(review): the distance is taken over the full 6-d pose, so
        radians are mixed with meters — confirm this is intended.
        """
        ee_pos = self.get_site_pos(self.ee_site_id)
        target_pos = self.get_site_pos(self.target_site_id)
        return np.sqrt(np.sum((ee_pos - target_pos) ** 2, axis=-1))

    def set_target_site(self, xyz, quat=None):
        """Move the target site and restart from the model's initial state.

        xyz: target position in meters (world frame).
        quat: optional (w, x, y, z) quaternion for the target, in the
              world frame.  When omitted, the current model quat is kept.

        The model holds the nominal pose, data the current one; a fresh
        MjData is created so the sim restarts from the model's initial
        state, then mj_forward propagates the new target pose.
        """
        self.model.site_pos[self.target_site_id] = xyz
        self.data = mujoco.MjData(self.model)  # fetch initial data
        # Fix: `if quat:` is truthy for any non-empty list and raises on
        # numpy arrays; test for None explicitly.
        if quat is not None:
            self.model.site_quat[self.target_site_id] = quat
        mujoco.mj_forward(self.model, self.data)

    def get_site_pos(self, site_id: int):
        """Return the world-frame pose of a site as (x, y, z, rx, ry, rz).

        Positions are in meters; rotations are "xyz" Euler angles in
        radians.
        """
        # Fix: validate the id (mj_name2id returns -1 for unknown names)
        # and correct the typo'd attribute (self.moel) in the message.
        assert 0 <= site_id < self.model.nsite, f"Only accept site id in [0, {self.model.nsite})"
        xpos = self.data.site_xpos[site_id]
        # Fix: the rotation matrix was always read from the end-effector
        # site, so the target site's orientation came out wrong.
        rot_matrix = self.data.site_xmat[site_id].reshape((3, 3))
        rpos = R.from_matrix(rot_matrix).as_euler("xyz")
        # np.concatenate, not np.concat: the alias only exists in NumPy >= 2.0.
        return np.concatenate([xpos, rpos], axis=-1)

    def get_obs(self):
        """Return the observation: joints + ee pose (6d) + target pose (6d).

        Positions are in meters, rotations in radians.  The
        (x, y, z, rx, ry, rz) layout matches what the Piper SDK returns.
        """
        joints = self.data.qpos
        ee_pos = self.get_site_pos(self.ee_site_id)
        target_pos = self.get_site_pos(self.target_site_id)
        return np.concatenate([joints, ee_pos, target_pos], axis=-1)

    def _get_reward(self):
        """Return the reward for the current state.

        Sparse mode: 1 when within success_dist of the target, else 0.
        Dense mode: distance progress since the previous call, plus a
        +10 bonus on reaching the target.
        """
        new_dist = self.get_dist_to_target()
        achieved_tar = int(new_dist < self.success_dist)
        if self.sparse_reward:
            return achieved_tar

        dist_progress = self.last_dist2tar - new_dist
        # Fix: last_dist2tar was never updated, so "progress" was always
        # measured against the initial distance instead of the last step.
        self.last_dist2tar = new_dist
        return dist_progress + 10 * achieved_tar  # final reward scale

    def _is_done(self):
        """Return True when the episode should end."""
        # reached the maximal number of steps
        if self.current_step >= self.max_episode_steps:
            return True

        # reached the goal
        if self.get_dist_to_target() < self.success_dist:
            return True
        return False

    def get_info(self):
        """Return diagnostics: current distance and success flag."""
        dist = self.get_dist_to_target()

        info = {"dist2tar": dist, "success": dist < self.success_dist}
        return info

    def reset(self, random_start: bool = False):
        """Restore the initial state and return (observation, info)."""
        self.current_step = 0

        if random_start:
            # TODO: randomize the initial joint configuration.
            pass

        # Reset the simulation state; the target site is kept.
        self.data.qpos[:] = self.qpos_init
        self.data.qvel[:] = self.qvel_init
        self.data.time = self.time_init
        mujoco.mj_forward(self.model, self.data)

        # Fix: re-baseline the progress reward so the first step of a new
        # episode is not compared against the previous episode's distance.
        self.last_dist2tar = self.get_dist_to_target()

        # TODO: randomly set the target locations
        observation = self.get_obs()
        info = self.get_info()

        # Render if requested.
        if self.render_mode == "human":
            self._render_frame()
        return observation, info

    def step(self, action):
        """Apply one control action and advance the simulation.

        Returns the classic gym 4-tuple (obs, reward, done, info).
        """
        action = np.clip(action, self.action_low, self.action_high)

        # Fix: the action was written to data.qpos, which teleports the
        # joints (and is shape-mismatched whenever nq != nu).  Actions
        # are actuator controls — they are clipped to actuator_ctrlrange
        # above — so they belong in data.ctrl.
        self.data.ctrl[:] = action
        mujoco.mj_step(self.model, self.data)
        self.current_step += 1

        if self.render_mode == "human":
            self._render_frame()

        return self.get_obs(), self._get_reward(), self._is_done(), self.get_info()

    def render(self):
        """Return an RGB frame when render_mode == "rgb_array"."""
        if self.render_mode == "rgb_array":
            return self._render_frame()

    def _render_frame(self):
        """Render one frame (interactive viewer and/or offscreen image)."""
        if self.viewer is None and self.render_mode is not None:
            from mujoco import viewer

            self.viewer = viewer.launch_passive(self.model, self.data)

            # Camera placement for the interactive viewer.
            if self.render_mode == "human":
                self.viewer.cam.lookat[0] = 0.3  # x position
                self.viewer.cam.lookat[1] = 0  # y position
                self.viewer.cam.lookat[2] = 0.2  # z position
                self.viewer.cam.distance = 0.8  # distance to look-at point
                self.viewer.cam.elevation = -30  # elevation angle
                self.viewer.cam.azimuth = 90  # azimuth angle

        if self.render_mode == "rgb_array":
            # Lazily create the offscreen renderer.
            # Fix: frame size is a constructor argument of mujoco.Renderer;
            # Renderer.render() takes no size arguments.
            if not hasattr(self, "render_context_offscreen"):
                self.render_context_offscreen = mujoco.Renderer(
                    self.model, height=self.render_height, width=self.render_width
                )

            # Update the scene from the current simulation state.
            self.render_context_offscreen.update_scene(self.data)

            # Render and return the RGB image.
            return self.render_context_offscreen.render()

    def close(self):
        """Release rendering resources; safe to call more than once."""
        if self.viewer is not None:
            self.viewer.close()
            self.viewer = None
        if hasattr(self, "render_context_offscreen"):
            self.render_context_offscreen.close()
            del self.render_context_offscreen


# Quick smoke test: roll out a random policy in the environment.
if __name__ == "__main__":

    env = PiperEnv()
    # env = PiperEnv(render_mode="human")  # enable for interactive viewing
    rewards = []

    # Random-policy rollout for 1000 steps, resetting on episode end.
    obs, info = env.reset()
    for _ in range(1000):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        rewards.append(reward)
        if done:
            obs, info = env.reset()

    env.close()
