import torch
import numpy as np


class HubReward:
    """Per-step reward terms for a humanoid balance / motion-tracking task.

    Groups the reward into four families -- balance shaping, reference
    tracking, hard-limit penalties, and regularization -- with the
    weights and Gaussian standard deviations taken from Table 5 of the
    paper this implementation follows.

    All state tensors are assumed to be batched along dim 0 (one row
    per environment) -- TODO confirm exact shapes against the caller;
    in particular whether the ``feet_*`` tensors are per-foot ``(N, 2)``
    or single 3-vectors ``(N, 3)`` (the per-method notes flag where the
    layout is ambiguous).
    """

    def __init__(self, device='cuda'):
        """Store reward weights and Gaussian-kernel standard deviations.

        Args:
            device: torch device identifier. Stored for callers'
                convenience; no tensor is allocated here and the
                computations themselves never read it.
        """
        self.device = device

        # Reward weights (from Table 5). A negative weight turns the
        # corresponding non-negative term into a penalty.
        self.weight_com = 160.0            # center-of-mass reward
        self.weight_foot_contact = -250.0  # foot-contact mismatch penalty
        self.weight_close_feet = -1000.0   # feet-too-close penalty
        self.weight_pos = 30.0             # body position tracking
        self.weight_rot = 20.0             # body rotation tracking
        self.weight_vel = 8.0              # body linear-velocity tracking
        self.weight_ang_vel = 8.0          # body angular-velocity tracking
        self.weight_dof_pos = 32.0         # DoF position tracking
        self.weight_dof_vel = 16.0         # DoF velocity tracking
        # The remaining penalty / regularization terms carry their fixed
        # weights inside their own _compute_* methods.

        # Gaussian-kernel standard deviations (Table 5 notes).
        self.sigma_com = 0.1
        self.sigma_pos = 0.6
        self.sigma_rot = 0.3
        self.sigma_vel = 3.0
        self.sigma_ang_vel = 10.0
        self.sigma_dof_pos = 0.7
        self.sigma_dof_vel = 10.0

        # reward_dict key -> weight, used by compute_rewards(). Terms
        # absent from this map are summed with weight 1.0 because their
        # fixed weight is already applied inside their compute method.
        self._weight_by_key = {
            'com_reward': self.weight_com,
            'foot_contact_penalty': self.weight_foot_contact,
            'close_feet_penalty': self.weight_close_feet,
            'pos_reward': self.weight_pos,
            'rot_reward': self.weight_rot,
            'vel_reward': self.weight_vel,
            'ang_vel_reward': self.weight_ang_vel,
            'dof_pos_reward': self.weight_dof_pos,
            'dof_vel_reward': self.weight_dof_vel,
        }

    def compute_rewards(self, state_dict, action, prev_action):
        """Compute every reward component and their weighted sum.

        Args:
            state_dict: dict holding the current robot state and the
                reference-motion state (see the individual _compute_*
                methods for the keys each term reads).
            action: current action tensor.
            prev_action: previous-step action tensor (for smoothness).

        Returns:
            total_reward: weighted sum of all components.
            reward_dict: per-component (unweighted, except for the
                terms that bake their weight in) values for logging.
        """
        reward_dict = {}

        # 1. Balance shaping rewards
        reward_dict['com_reward'] = self._compute_com_reward(state_dict)
        reward_dict['foot_contact_penalty'] = self._compute_foot_contact_mismatch(state_dict)
        reward_dict['close_feet_penalty'] = self._compute_close_feet_penalty(state_dict)

        # 2. Tracking rewards
        reward_dict['pos_reward'] = self._compute_position_reward(state_dict)
        reward_dict['rot_reward'] = self._compute_rotation_reward(state_dict)
        reward_dict['vel_reward'] = self._compute_velocity_reward(state_dict)
        reward_dict['ang_vel_reward'] = self._compute_angular_velocity_reward(state_dict)
        reward_dict['dof_pos_reward'] = self._compute_dof_position_reward(state_dict)
        reward_dict['dof_vel_reward'] = self._compute_dof_velocity_reward(state_dict)

        # 3. Penalties
        reward_dict['torque_limit_penalty'] = self._compute_torque_limit_penalty(state_dict)
        reward_dict['dof_pos_limit_penalty'] = self._compute_dof_pos_limit_penalty(state_dict)
        reward_dict['dof_vel_limit_penalty'] = self._compute_dof_vel_limit_penalty(state_dict)
        reward_dict['termination_penalty'] = self._compute_termination_penalty(state_dict)

        # 4. Regularization terms
        reward_dict['torque_reg'] = self._compute_torque_regularization(state_dict)
        reward_dict['dof_vel_reg'] = self._compute_dof_velocity_regularization(state_dict)
        reward_dict['dof_acc_reg'] = self._compute_dof_acceleration_regularization(state_dict)
        reward_dict['action_rate_reg'] = self._compute_action_rate_regularization(action, prev_action)
        reward_dict['feet_air_time_reward'] = self._compute_feet_air_time_reward(state_dict)
        reward_dict['feet_contact_force_reg'] = self._compute_feet_contact_force_regularization(state_dict)
        reward_dict['stumble_penalty'] = self._compute_stumble_penalty(state_dict)
        reward_dict['slippage_penalty'] = self._compute_slippage_penalty(state_dict)
        reward_dict['feet_orientation_penalty'] = self._compute_feet_orientation_penalty(state_dict)
        reward_dict['in_air_penalty'] = self._compute_in_air_penalty(state_dict)

        # Weighted sum. Terms not in _weight_by_key already carry their
        # fixed weight, so they enter with a neutral factor of 1.0.
        total_reward = 0.0
        for key, value in reward_dict.items():
            total_reward += value * self._weight_by_key.get(key, 1.0)

        return total_reward, reward_dict

    def _compute_com_reward(self, state_dict):
        """Gaussian reward for keeping the CoM's horizontal projection
        near the support-polygon center.

        NOTE(review): the gate is named "single support" but it fires
        when the feet are more than 5 cm apart in the horizontal plane,
        which reads more like a double-support / wide-stance check --
        confirm the intended semantics against the paper.
        """
        com_pos = state_dict['com_pos']                            # (N, 3) CoM position
        support_polygon_center = state_dict['support_polygon_center']  # (N, 2) polygon center
        left_foot_pos = state_dict['left_foot_pos']                # (N, 3)
        right_foot_pos = state_dict['right_foot_pos']              # (N, 3)

        # Horizontal (xy) components only; height is irrelevant here.
        com_xy = com_pos[:, :2]
        support_xy = support_polygon_center

        # Gate on horizontal foot separation (> 5 cm).
        feet_dist_xy = torch.norm(left_foot_pos[:, :2] - right_foot_pos[:, :2], dim=1)
        is_single_support = (feet_dist_xy > 0.05).float()

        # Gaussian kernel on the CoM-to-center distance.
        com_dist = torch.norm(com_xy - support_xy, dim=1)
        reward = torch.exp(-com_dist ** 2 / self.sigma_com ** 2) * is_single_support

        return reward

    def _compute_foot_contact_mismatch(self, state_dict):
        """Count feet whose contact state disagrees with the reference.

        Returns 0, 1, or 2 per environment; weighted negatively
        (weight_foot_contact) by compute_rewards.
        """
        robot_left_contact = state_dict['robot_left_contact']    # contact flag (force >= 1 N)
        robot_right_contact = state_dict['robot_right_contact']
        ref_left_contact = state_dict['ref_left_contact']        # reference contact flags
        ref_right_contact = state_dict['ref_right_contact']

        # XOR per foot: 1 where robot and reference disagree.
        left_mismatch = (robot_left_contact != ref_left_contact).float()
        right_mismatch = (robot_right_contact != ref_right_contact).float()

        penalty = left_mismatch + right_mismatch
        return penalty

    def _compute_close_feet_penalty(self, state_dict):
        """Hinge penalty for feet closer than 16 cm (3D distance).

        Zero beyond 16 cm, grows linearly as the feet approach each
        other; weighted negatively (weight_close_feet) by the caller.
        """
        left_foot_pos = state_dict['left_foot_pos']
        right_foot_pos = state_dict['right_foot_pos']

        feet_dist = torch.norm(left_foot_pos - right_foot_pos, dim=1)
        # max(0.16 - d, 0) as a clamp -- identical to the elementwise max.
        penalty = torch.clamp(0.16 - feet_dist, min=0.0)
        return penalty

    def _compute_position_reward(self, state_dict):
        """Gaussian reward on body-position tracking error."""
        body_pos = state_dict['body_pos']        # current body position
        ref_pos = state_dict['ref_body_pos']     # reference body position

        pos_diff = torch.norm(body_pos - ref_pos, dim=1)
        reward = torch.exp(-pos_diff ** 2 / self.sigma_pos ** 2)
        return reward

    def _compute_rotation_reward(self, state_dict):
        """Gaussian reward on body-orientation tracking error.

        Orientations are treated as quaternions (see
        _quaternion_angular_diff); the kernel acts on the geodesic
        angle between current and reference rotation.
        """
        body_rot = state_dict['body_rot']        # current orientation quaternion
        ref_rot = state_dict['ref_body_rot']     # reference orientation quaternion

        rot_diff = self._quaternion_angular_diff(body_rot, ref_rot)
        reward = torch.exp(-rot_diff ** 2 / self.sigma_rot ** 2)
        return reward

    def _quaternion_angular_diff(self, q1, q2):
        """Geodesic angle (radians) between two unit quaternions.

        Uses angle = 2*acos(|<q1, q2>|); the absolute value handles the
        q / -q double cover, and the clamp guards acos against floating
        point drift just outside [-1, 1].
        """
        dot_product = torch.sum(q1 * q2, dim=1)
        dot_product = torch.clamp(dot_product, -1.0, 1.0)
        angle_diff = 2 * torch.acos(torch.abs(dot_product))
        return angle_diff

    def _compute_velocity_reward(self, state_dict):
        """Gaussian reward on body linear-velocity tracking error."""
        body_vel = state_dict['body_vel']
        ref_vel = state_dict['ref_body_vel']

        vel_diff = torch.norm(body_vel - ref_vel, dim=1)
        reward = torch.exp(-vel_diff ** 2 / self.sigma_vel ** 2)
        return reward

    def _compute_angular_velocity_reward(self, state_dict):
        """Gaussian reward on body angular-velocity tracking error."""
        body_ang_vel = state_dict['body_ang_vel']
        ref_ang_vel = state_dict['ref_body_ang_vel']

        ang_vel_diff = torch.norm(body_ang_vel - ref_ang_vel, dim=1)
        reward = torch.exp(-ang_vel_diff ** 2 / self.sigma_ang_vel ** 2)
        return reward

    def _compute_dof_position_reward(self, state_dict):
        """Gaussian reward on joint-position tracking error
        (norm taken across all DoFs jointly)."""
        dof_pos = state_dict['dof_pos']
        ref_dof_pos = state_dict['ref_dof_pos']

        pos_diff = torch.norm(dof_pos - ref_dof_pos, dim=1)
        reward = torch.exp(-pos_diff ** 2 / self.sigma_dof_pos ** 2)
        return reward

    def _compute_dof_velocity_reward(self, state_dict):
        """Gaussian reward on joint-velocity tracking error."""
        dof_vel = state_dict['dof_vel']
        ref_dof_vel = state_dict['ref_dof_vel']

        vel_diff = torch.norm(dof_vel - ref_dof_vel, dim=1)
        reward = torch.exp(-vel_diff ** 2 / self.sigma_dof_vel ** 2)
        return reward

    def _compute_torque_limit_penalty(self, state_dict):
        """-0.5 per environment where ANY joint torque leaves its
        [min, max] range (fixed weight applied here)."""
        torque = state_dict['torque']                 # (N, n_dof)
        torque_limits = state_dict['torque_limits']   # (n_dof, 2): [min, max] per joint

        out_of_limits = ((torque < torque_limits[:, 0]) | (torque > torque_limits[:, 1])).any(dim=1).float()
        penalty = out_of_limits * -0.5
        return penalty

    def _compute_dof_pos_limit_penalty(self, state_dict):
        """-30 per environment where ANY joint position leaves its
        [min, max] range (fixed weight applied here)."""
        dof_pos = state_dict['dof_pos']
        dof_limits = state_dict['dof_pos_limits']     # (n_dof, 2): [min, max] per joint

        out_of_limits = ((dof_pos < dof_limits[:, 0]) | (dof_pos > dof_limits[:, 1])).any(dim=1).float()
        penalty = out_of_limits * -30.0
        return penalty

    def _compute_dof_vel_limit_penalty(self, state_dict):
        """-12 per environment where ANY joint velocity leaves its
        [min, max] range (fixed weight applied here)."""
        dof_vel = state_dict['dof_vel']
        dof_vel_limits = state_dict['dof_vel_limits']  # (n_dof, 2): [min, max] per joint

        out_of_limits = ((dof_vel < dof_vel_limits[:, 0]) | (dof_vel > dof_vel_limits[:, 1])).any(dim=1).float()
        penalty = out_of_limits * -12.0
        return penalty

    def _compute_termination_penalty(self, state_dict):
        """-60 on episode termination (e.g. a fall); 0 otherwise."""
        terminated = state_dict['terminated'].float()
        penalty = terminated * -60.0
        return penalty

    def _compute_torque_regularization(self, state_dict):
        """L2-norm torque regularizer (encourages small torques)."""
        torque = state_dict['torque']
        reg = -2.5e-5 * torch.norm(torque, dim=1)
        return reg

    def _compute_dof_velocity_regularization(self, state_dict):
        """Squared-norm joint-velocity regularizer."""
        dof_vel = state_dict['dof_vel']
        reg = -1e-3 * torch.norm(dof_vel, dim=1) ** 2
        return reg

    def _compute_dof_acceleration_regularization(self, state_dict):
        """L2-norm joint-acceleration regularizer."""
        dof_acc = state_dict['dof_acc']
        reg = -3e-6 * torch.norm(dof_acc, dim=1)
        return reg

    def _compute_action_rate_regularization(self, action, prev_action):
        """Squared action-difference regularizer (smooth control)."""
        action_diff = torch.norm(action - prev_action, dim=1)
        reg = -1.5 * action_diff ** 2
        return reg

    def _compute_feet_air_time_reward(self, state_dict):
        """Air-time shaping term (from the RSS paper): positive beyond
        0.25 s in the air, negative below.

        NOTE(review): if feet_air_time is per-foot (N, 2) this term is
        per-foot too, unlike the (N,)-shaped terms -- confirm the
        caller's tensor layout.
        """
        feet_air_time = state_dict['feet_air_time']
        reward = (feet_air_time - 0.25) * 250.0
        return reward

    def _compute_feet_contact_force_regularization(self, state_dict):
        """Squared-norm regularizer on foot contact forces
        (discourages hard impacts)."""
        feet_force = state_dict['feet_contact_force']
        reg = -0.2 * torch.norm(feet_force, dim=1) ** 2
        return reg

    def _compute_stumble_penalty(self, state_dict):
        """Small penalty when the front-foot force dominates the rear
        (force on the front > 5x the rear, a stumble signature)."""
        front_foot_force = state_dict['front_foot_force']
        rear_foot_force = state_dict['rear_foot_force']

        stumble = (front_foot_force > 5 * rear_foot_force).float()
        penalty = stumble * -3e-4
        return penalty

    def _compute_slippage_penalty(self, state_dict):
        """Penalize foot velocity while the foot is loaded (slipping).

        NOTE(review): norm(feet_vel, dim=1) collapses dim 1 while
        has_contact keeps the per-foot layout of feet_contact_force;
        if feet_vel is (N, 3) and feet_contact_force is (N, 2) the
        product broadcasts to (N, 2) -- confirm intended shapes.
        """
        feet_vel = state_dict['feet_vel']
        feet_force = state_dict['feet_contact_force']

        # Only penalize sliding when the foot actually bears load (>= 1 N).
        has_contact = (feet_force >= 1.0).float()
        slippage = torch.norm(feet_vel, dim=1) ** 2 * has_contact
        penalty = -30.0 * slippage
        return penalty

    def _compute_feet_orientation_penalty(self, state_dict):
        """Penalize non-flat feet near the ground.

        feet_orient[:, 0] is presumably the gravity vector's x-component
        in the foot frame (zero when the sole is level) -- TODO confirm.
        """
        feet_orient = state_dict['feet_orient']
        feet_height = state_dict['feet_height']

        # Only active within 5 cm of the ground.
        near_ground = (feet_height < 0.05).float()
        penalty = -62.5 * torch.abs(feet_orient[:, 0]) * near_ground
        return penalty

    def _compute_in_air_penalty(self, state_dict):
        """-50 when BOTH feet are unloaded (< 1 N each), i.e. the robot
        is airborne; expects feet_contact_force as per-foot (N, 2)."""
        feet_force = state_dict['feet_contact_force']
        both_in_air = (feet_force[:, 0] < 1.0) & (feet_force[:, 1] < 1.0)
        penalty = both_in_air.float() * -50.0
        return penalty