# Copyright (c) 2022-2025, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Common functions that can be used to define rewards for the learning environment.

The functions can be passed to the :class:`isaaclab.managers.RewardTermCfg` object to
specify the reward function and its parameters.
"""

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

from isaaclab.managers import SceneEntityCfg
from isaaclab.sensors import ContactSensor
from isaaclab.utils.math import quat_rotate_inverse, yaw_quat, euler_xyz_from_quat

if TYPE_CHECKING:
    from isaaclab.envs import ManagerBasedRLEnv


def feet_air_time(
    env: ManagerBasedRLEnv, command_name: str, sensor_cfg: SceneEntityCfg, threshold: float
) -> torch.Tensor:
    """Reward long steps taken by the feet using L2-kernel.

    Rewards the agent for steps whose air time exceeds ``threshold``: each foot that just
    touched down contributes ``last_air_time - threshold``. This encourages the robot to
    lift its feet off the ground and take proper steps.

    If the commanded planar velocity is (near-)zero, the reward is zero.
    """
    contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
    # feet that made first contact during this step contribute their last air time
    touched_down = contact_sensor.compute_first_contact(env.step_dt)[:, sensor_cfg.body_ids]
    airborne_duration = contact_sensor.data.last_air_time[:, sensor_cfg.body_ids]
    step_reward = ((airborne_duration - threshold) * touched_down).sum(dim=1)
    # gate out the reward when the xy velocity command is (near-)zero
    command_xy = env.command_manager.get_command(command_name)[:, :2]
    return step_reward * (torch.norm(command_xy, dim=1) > 0.1)


def feet_air_time_positive_biped(env, command_name: str, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor:
    """Reward long steps taken by the feet for bipeds.

    Rewards the agent for keeping exactly one foot on the ground at a time and for
    sustaining the current gait phase (stance or swing) up to ``threshold`` seconds.

    If the commanded planar velocity is (near-)zero, the reward is zero.
    """
    contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
    air_time = contact_sensor.data.current_air_time[:, sensor_cfg.body_ids]
    contact_time = contact_sensor.data.current_contact_time[:, sensor_cfg.body_ids]
    in_contact = contact_time > 0.0
    # per foot: duration of the current mode (contact time if touching, air time otherwise)
    in_mode_time = torch.where(in_contact, contact_time, air_time)
    # True for envs where exactly one foot is on the ground (single support phase)
    single_stance = in_contact.int().sum(dim=1) == 1
    # shortest current-mode duration across the feet; zero unless in single stance
    reward = torch.where(single_stance.unsqueeze(-1), in_mode_time, 0.0).min(dim=1).values
    # cap the reward so there is no incentive to hold a phase beyond the threshold
    reward = reward.clamp(max=threshold)
    # gate out the reward when the xy velocity command is (near-)zero
    command_xy = env.command_manager.get_command(command_name)[:, :2]
    return reward * (torch.norm(command_xy, dim=1) > 0.1)


def feet_slide(env, sensor_cfg: SceneEntityCfg, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
    """Penalize feet sliding.

    Penalizes the planar (xy) speed of the feet while they are in contact with the
    ground: the penalty is the per-foot speed norm masked by a binary contact flag and
    summed over the feet, so feet in the air are never penalized.
    """
    contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
    # a foot counts as in contact if any force in the sensor history window exceeds 1 N
    force_history = contact_sensor.data.net_forces_w_history[:, :, sensor_cfg.body_ids, :]
    in_contact = force_history.norm(dim=-1).max(dim=1).values > 1.0
    asset = env.scene[asset_cfg.name]
    # planar (xy) linear speed of each foot body in the world frame
    foot_speed = asset.data.body_lin_vel_w[:, asset_cfg.body_ids, :2].norm(dim=-1)
    return (foot_speed * in_contact).sum(dim=1)

def head_keep_level(env, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
    """Reward keeping the asset's base level about the pitch axis.

    The reward is ``cos(pitch)`` of the root orientation: it is maximal (1.0) when the
    base is level and decreases as the base pitches forward or backward.
    """
    asset = env.scene[asset_cfg.name]
    # only pitch is needed; roll and yaw from the conversion are discarded
    _, pitch, _ = euler_xyz_from_quat(asset.data.root_quat_w)
    return torch.cos(pitch)

def track_lin_vel_xy_yaw_frame_exp(
    env, std: float, command_name: str, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Reward tracking of linear velocity commands (xy axes) in the gravity-aligned robot frame using an exponential kernel."""
    asset = env.scene[asset_cfg.name]
    # express the world-frame root velocity in the yaw-only (gravity-aligned) frame
    vel_yaw_frame = quat_rotate_inverse(yaw_quat(asset.data.root_quat_w), asset.data.root_lin_vel_w[:, :3])
    # squared tracking error of the commanded xy velocity
    command_xy = env.command_manager.get_command(command_name)[:, :2]
    lin_vel_error = torch.square(command_xy - vel_yaw_frame[:, :2]).sum(dim=1)
    return torch.exp(-lin_vel_error / std**2)

def track_ang_vel_z_world_exp(
    env, command_name: str, std: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Reward tracking of angular velocity commands (yaw) in world frame using exponential kernel."""
    asset = env.scene[asset_cfg.name]
    # squared error between the commanded and the actual world-frame yaw rate
    commanded_yaw_rate = env.command_manager.get_command(command_name)[:, 2]
    ang_vel_error = torch.square(commanded_yaw_rate - asset.data.root_ang_vel_w[:, 2])
    return torch.exp(-ang_vel_error / std**2)

def track_ang_vel_y_exp(
    env: ManagerBasedRLEnv,
    std: float,
    vel_gain: float = 1.5,
    harmonics_ratio: tuple = (0.7, 0.3),
    max_ang_vel: float = 3.0,
    command_name: str = "base_velocity",
    asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Y-axis angular-velocity tracking reward with velocity-coupled harmonics (duck/pendulum gait).

    The target pitch rate is derived from the body-frame lateral velocity:

    1. Dynamic phase: the swing frequency is driven by the lateral speed (f = |vel_y| / step_length).
    2. Harmonic superposition: a fundamental plus a second harmonic imitates a biological gait.
    3. Stability constraint: the target is clamped to ``max_ang_vel`` to prevent tipping over.
    4. Direction sensitivity: the squared error is doubled when the actual swing opposes the target.

    Args:
        std: Base standard deviation of the exponential kernel.
        vel_gain: Linear-velocity to angular-velocity gain (default 1.5).
        harmonics_ratio: Weights of the fundamental and second harmonic (default 0.7:0.3).
        max_ang_vel: Maximum allowed target angular velocity in rad/s.
        command_name: NOTE(review): currently unused by this term — kept for signature
            compatibility with the other tracking rewards; verify this is intended.
    """
    asset = env.scene[asset_cfg.name]

    # body-frame lateral (y) linear velocity
    vel_y = asset.data.root_lin_vel_b[:, 1]

    # swing frequency driven by the lateral speed; 0.3 m is a typical duck step length
    step_length = 0.3
    swing_freq = (vel_y.abs() / step_length).clamp(0.5, 4.0)
    # NOTE(review): the phase is recomputed from scratch each call with a hard-coded
    # 0.01 s interval (it does not accumulate across steps) — confirm this is intended.
    phase = 2 * torch.pi * swing_freq * 0.01

    # fundamental + second-harmonic target angular velocity
    k1, k2 = harmonics_ratio
    ang_vel_target = vel_gain * vel_y * (k1 * torch.sin(phase) + k2 * torch.sin(2 * phase))

    # clamp the target for stability
    ang_vel_target = ang_vel_target.clamp(-max_ang_vel, max_ang_vel)

    # actual body-frame pitch rate
    ang_vel_actual = asset.data.root_ang_vel_b[:, 1]

    # double the squared error whenever the actual swing opposes the target direction
    direction_penalty = 1.0 + (ang_vel_target * ang_vel_actual < 0).float()
    ang_vel_error = direction_penalty * torch.square(ang_vel_target - ang_vel_actual)

    # widen the kernel at higher lateral speeds (relaxed tracking when moving fast)
    adaptive_std = std**2 + 0.05 * vel_y.abs()

    return torch.exp(-ang_vel_error / adaptive_std)