import jax
import jax.numpy as jp
from functools import partial


class HubRewardJAX:
    """JAX-jittable reward terms for a humanoid balance/tracking task.

    Groups the reward into balance shaping, reference tracking, hard-limit
    penalties, and regularization terms, then returns their weighted sum.
    Weight and sigma constants come from the paper's Table 5.

    NOTE(review): most helpers index ``data`` as a dict of batched arrays
    (e.g. ``data['left_foot_pos']`` of shape (N, 3)), while
    ``_compute_com_reward`` / ``compute_support_polygon_center`` also read
    struct-style fields (``data.qpos``, ``data.cfrc_ext``). Confirm the
    actual ``data`` container satisfies both access patterns.
    """

    def __init__(self, _mj_model):
        # MuJoCo model handle; used for geom ids and (static) geom positions.
        self._mj_model = _mj_model
        self._left_foot_geom_id = self._mj_model.geom("left_foot").id
        self._right_foot_geom_id = self._mj_model.geom("right_foot").id
        self._left_foot_geom_name = "left_foot"
        self._right_foot_geom_name = "right_foot"

        # Reward weights (from Table 5).
        self.weight_com = 160.0
        self.weight_foot_contact = -250.0
        self.weight_close_feet = -1000.0
        self.weight_pos = 30.0
        self.weight_rot = 20.0
        self.weight_vel = 8.0
        self.weight_ang_vel = 8.0
        self.weight_dof_pos = 32.0
        self.weight_dof_vel = 16.0

        # Standard deviations for the exp(-err^2 / sigma^2) tracking kernels.
        self.sigma_com = 0.1
        self.sigma_pos = 0.6
        self.sigma_rot = 0.3
        self.sigma_vel = 3.0
        self.sigma_ang_vel = 10.0
        self.sigma_dof_pos = 0.7
        self.sigma_dof_vel = 10.0

    @partial(jax.jit, static_argnums=(0,))
    def compute_rewards(self, data, action, prev_action):
        """Compute all reward terms and their weighted total (JAX-jittable).

        Args:
            data: simulation/reference state (see class NOTE on its schema).
            action: current policy action, shape (N, action_dim).
            prev_action: previous policy action, same shape as ``action``.

        Returns:
            (total_reward, reward_dict): the weighted sum and the dict of
            individual unweighted/pre-weighted terms for logging.
        """
        reward_dict = {}

        # 1. Balance shaping rewards.
        reward_dict['com_reward'] = self._compute_com_reward(data)
        reward_dict['foot_contact_penalty'] = self._compute_foot_contact_mismatch(data)
        reward_dict['close_feet_penalty'] = self._compute_close_feet_penalty(data)

        # 2. Reference tracking rewards.
        reward_dict['pos_reward'] = self._compute_position_reward(data)
        reward_dict['rot_reward'] = self._compute_rotation_reward(data)
        reward_dict['vel_reward'] = self._compute_velocity_reward(data)
        reward_dict['ang_vel_reward'] = self._compute_angular_velocity_reward(data)
        reward_dict['dof_pos_reward'] = self._compute_dof_position_reward(data)
        reward_dict['dof_vel_reward'] = self._compute_dof_velocity_reward(data)

        # 3. Hard-limit penalties (weights already applied inside the helpers).
        reward_dict['torque_limit_penalty'] = self._compute_torque_limit_penalty(data)
        reward_dict['dof_pos_limit_penalty'] = self._compute_dof_pos_limit_penalty(data)
        reward_dict['dof_vel_limit_penalty'] = self._compute_dof_vel_limit_penalty(data)
        reward_dict['termination_penalty'] = self._compute_termination_penalty(data)

        # 4. Regularization terms (weights already applied inside the helpers).
        reward_dict['torque_reg'] = self._compute_torque_regularization(data)
        reward_dict['dof_vel_reg'] = self._compute_dof_velocity_regularization(data)
        reward_dict['dof_acc_reg'] = self._compute_dof_acceleration_regularization(data)
        reward_dict['action_rate_reg'] = self._compute_action_rate_regularization(action, prev_action)
        reward_dict['feet_air_time_reward'] = self._compute_feet_air_time_reward(data)
        reward_dict['feet_contact_force_reg'] = self._compute_feet_contact_force_regularization(data)
        reward_dict['stumble_penalty'] = self._compute_stumble_penalty(data)
        reward_dict['slippage_penalty'] = self._compute_slippage_penalty(data)
        reward_dict['feet_orientation_penalty'] = self._compute_feet_orientation_penalty(data)
        reward_dict['in_air_penalty'] = self._compute_in_air_penalty(data)

        # Weighted sum. Groups 1-2 are scaled here; groups 3-4 carry their
        # own coefficients inside the helper functions.
        total_reward = (
                reward_dict['com_reward'] * self.weight_com +
                reward_dict['foot_contact_penalty'] * self.weight_foot_contact +
                reward_dict['close_feet_penalty'] * self.weight_close_feet +
                reward_dict['pos_reward'] * self.weight_pos +
                reward_dict['rot_reward'] * self.weight_rot +
                reward_dict['vel_reward'] * self.weight_vel +
                reward_dict['ang_vel_reward'] * self.weight_ang_vel +
                reward_dict['dof_pos_reward'] * self.weight_dof_pos +
                reward_dict['dof_vel_reward'] * self.weight_dof_vel +
                reward_dict['torque_limit_penalty'] +
                reward_dict['dof_pos_limit_penalty'] +
                reward_dict['dof_vel_limit_penalty'] +
                reward_dict['termination_penalty'] +
                reward_dict['torque_reg'] +
                reward_dict['dof_vel_reg'] +
                reward_dict['dof_acc_reg'] +
                reward_dict['action_rate_reg'] +
                reward_dict['feet_air_time_reward'] +
                reward_dict['feet_contact_force_reg'] +
                reward_dict['stumble_penalty'] +
                reward_dict['slippage_penalty'] +
                reward_dict['feet_orientation_penalty'] +
                reward_dict['in_air_penalty']
        )

        return total_reward, reward_dict

    def _compute_com_reward(self, data):
        """Center-of-mass reward: keep the CoM over the support polygon center.

        FIX: the original signature took an extra ``model`` argument, but the
        call site in ``compute_rewards`` passes only ``data`` (and
        ``compute_support_polygon_center`` likewise takes only ``data``).
        """
        com_pos = data.qpos[:2]  # base (x, y) as a CoM proxy
        support_center = self.compute_support_polygon_center(data)
        left_foot = data['left_foot_pos'][:, :2]
        right_foot = data['right_foot_pos'][:, :2]

        # Single support heuristic: feet horizontally more than 5 cm apart.
        # NOTE(review): per-axis |dx|,|dy| yields shape (N, 2), which does not
        # broadcast against the (N,) kernel below -- a scalar foot distance
        # (norm over the last axis) was presumably intended; confirm shapes.
        feet_dist = jp.abs(left_foot - right_foot)
        is_single_support = (feet_dist > 0.05).astype(jp.float32)

        # NOTE(review): com_pos is (2,) here, so axis=1 only works if the
        # inputs are actually batched -- verify against the caller.
        com_dist = jp.linalg.norm(com_pos - support_center, axis=1)
        return jp.exp(-com_dist ** 2 / self.sigma_com ** 2) * is_single_support

    def _compute_foot_contact_mismatch(self, data):
        """Count feet (0, 1, or 2) whose contact state differs from the reference."""
        robot_left = data['robot_left_contact']
        robot_right = data['robot_right_contact']
        ref_left = data['ref_left_contact']
        ref_right = data['ref_right_contact']

        mismatch = (robot_left != ref_left).astype(jp.float32) + \
                   (robot_right != ref_right).astype(jp.float32)
        return mismatch

    def _compute_close_feet_penalty(self, data):
        """Penalty that grows linearly when the feet are closer than 0.16 m."""
        left_foot = data['left_foot_pos']
        right_foot = data['right_foot_pos']

        dist = jp.linalg.norm(left_foot - right_foot, axis=1)
        return jp.maximum(0.16 - dist, 0.0)

    def _compute_position_reward(self, data):
        """Gaussian kernel on the body-position tracking error."""
        pos_diff = jp.linalg.norm(data['body_pos'] - data['ref_body_pos'], axis=1)
        return jp.exp(-pos_diff ** 2 / self.sigma_pos ** 2)

    def _compute_rotation_reward(self, data):
        """Gaussian kernel on the quaternion angle between body and reference."""
        q1 = data['body_rot']
        q2 = data['ref_body_rot']

        # Geodesic angle between unit quaternions; |dot| folds the q/-q
        # double cover, clip guards arccos against rounding outside [-1, 1].
        dot = jp.clip(jp.sum(q1 * q2, axis=1), -1.0, 1.0)
        angle = 2 * jp.arccos(jp.abs(dot))
        return jp.exp(-angle ** 2 / self.sigma_rot ** 2)

    def _compute_velocity_reward(self, data):
        """Gaussian kernel on the linear-velocity tracking error."""
        vel_diff = jp.linalg.norm(data['body_vel'] - data['ref_body_vel'], axis=1)
        return jp.exp(-vel_diff ** 2 / self.sigma_vel ** 2)

    def _compute_angular_velocity_reward(self, data):
        """Gaussian kernel on the angular-velocity tracking error."""
        ang_vel_diff = jp.linalg.norm(data['body_ang_vel'] - data['ref_body_ang_vel'], axis=1)
        return jp.exp(-ang_vel_diff ** 2 / self.sigma_ang_vel ** 2)

    def _compute_dof_position_reward(self, data):
        """Gaussian kernel on the joint-position tracking error."""
        pos_diff = jp.linalg.norm(data['dof_pos'] - data['ref_dof_pos'], axis=1)
        return jp.exp(-pos_diff ** 2 / self.sigma_dof_pos ** 2)

    def _compute_dof_velocity_reward(self, data):
        """Gaussian kernel on the joint-velocity tracking error."""
        vel_diff = jp.linalg.norm(data['dof_vel'] - data['ref_dof_vel'], axis=1)
        return jp.exp(-vel_diff ** 2 / self.sigma_dof_vel ** 2)

    def _compute_torque_limit_penalty(self, data):
        """-0.5 if any joint torque lies outside its [min, max] limits."""
        torque = data['torque']
        min_limit = data['torque_limits'][:, 0]
        max_limit = data['torque_limits'][:, 1]

        out_of_limits = jp.any((torque < min_limit) | (torque > max_limit), axis=1)
        return out_of_limits.astype(jp.float32) * -0.5

    def _compute_dof_pos_limit_penalty(self, data):
        """-30 if any joint position lies outside its [min, max] limits."""
        pos = data['dof_pos']
        min_limit = data['dof_pos_limits'][:, 0]
        max_limit = data['dof_pos_limits'][:, 1]

        out_of_limits = jp.any((pos < min_limit) | (pos > max_limit), axis=1)
        return out_of_limits.astype(jp.float32) * -30.0

    def _compute_dof_vel_limit_penalty(self, data):
        """-12 if any joint velocity lies outside its [min, max] limits."""
        vel = data['dof_vel']
        min_limit = data['dof_vel_limits'][:, 0]
        max_limit = data['dof_vel_limits'][:, 1]

        out_of_limits = jp.any((vel < min_limit) | (vel > max_limit), axis=1)
        return out_of_limits.astype(jp.float32) * -12.0

    def _compute_termination_penalty(self, data):
        """-60 on the step where the episode terminates."""
        return data['terminated'].astype(jp.float32) * -60.0

    def _compute_torque_regularization(self, data):
        """Small penalty on the torque vector norm."""
        return -2.5e-5 * jp.linalg.norm(data['torque'], axis=1)

    def _compute_dof_velocity_regularization(self, data):
        """Small penalty on squared joint-velocity norm."""
        return -1e-3 * jp.linalg.norm(data['dof_vel'], axis=1) ** 2

    def _compute_dof_acceleration_regularization(self, data):
        """Small penalty on the joint-acceleration norm."""
        return -3e-6 * jp.linalg.norm(data['dof_acc'], axis=1)

    def _compute_action_rate_regularization(self, action, prev_action):
        """Penalty on squared action change between consecutive steps."""
        return -1.5 * jp.linalg.norm(action - prev_action, axis=1) ** 2

    def _compute_feet_air_time_reward(self, data):
        """Reward air time above 0.25 s (negative below it)."""
        return (data['feet_air_time'] - 0.25) * 250.0

    def _compute_feet_contact_force_regularization(self, data):
        """Penalty on squared foot contact-force norm (discourage hard impacts)."""
        return -0.2 * jp.linalg.norm(data['feet_contact_force'], axis=1) ** 2

    def _compute_stumble_penalty(self, data):
        """Small penalty when the front-foot force dominates the rear (stumble)."""
        front_force = data['front_foot_force']
        rear_force = data['rear_foot_force']
        return (front_force > 5 * rear_force).astype(jp.float32) * -3e-4

    def _compute_slippage_penalty(self, data):
        """Penalize foot velocity while the foot is in contact (force >= 1 N)."""
        has_contact = (data['feet_contact_force'] >= 1.0).astype(jp.float32)
        return -30.0 * jp.linalg.norm(data['feet_vel'], axis=1) ** 2 * has_contact

    def _compute_feet_orientation_penalty(self, data):
        """Penalize non-flat foot orientation when the foot is near the ground (< 5 cm)."""
        near_ground = (data['feet_height'] < 0.05).astype(jp.float32)
        return -62.5 * jp.abs(data['feet_orient'][:, 0]) * near_ground

    def _compute_in_air_penalty(self, data):
        """-50 when both feet are off the ground (both contact forces < 1 N)."""
        both_in_air = (data['feet_contact_force'][:, 0] < 1.0) & \
                      (data['feet_contact_force'][:, 1] < 1.0)
        return both_in_air.astype(jp.float32) * -50.0

    def compute_support_polygon_center(self, data, force_threshold=1.0):
        """Return the (x, y) center of the current support polygon.

        Both feet in contact -> midpoint of the feet; one foot -> that foot;
        neither -> origin. Contact is detected via the vertical external
        force on each foot geom exceeding ``force_threshold``.

        FIX: removed a leftover ``jax.debug.print("yyyyy=...")`` debug
        statement (a per-step host callback inside a jitted path).
        """
        left_force = data.cfrc_ext[self._left_foot_geom_id][2]
        right_force = data.cfrc_ext[self._right_foot_geom_id][2]
        left_contact = left_force > force_threshold
        right_contact = right_force > force_threshold

        # NOTE(review): these are the *static* geom positions from the model,
        # not the simulated world positions (e.g. data.geom_xpos) -- confirm
        # this is intentional.
        left_pos = self._mj_model.geom(self._left_foot_geom_name).pos[:2]
        right_pos = self._mj_model.geom(self._right_foot_geom_name).pos[:2]

        # Branchless selection so the function stays jittable.
        center = jp.where(
            left_contact & right_contact,
            (left_pos + right_pos) / 2,
            jp.where(
                left_contact,
                left_pos,
                jp.where(
                    right_contact,
                    right_pos,
                    jp.array([0.0, 0.0])
                )
            )
        )
        return center