# Copyright (c) 2022-2025, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

from isaaclab.assets import Articulation, RigidObject
from isaaclab.managers import SceneEntityCfg
from isaaclab.sensors import FrameTransformer
from isaaclab.utils.math import combine_frame_transforms

if TYPE_CHECKING:
    from isaaclab.envs import ManagerBasedRLEnv

"""
    该函数用于判断目标物体是否被抬起到指定高度,并生成二进制奖励信号。当物体高度超过设定阈值时返回1.0,否则返回0.0
"""
def object_is_lifted(
    env: ManagerBasedRLEnv, minimal_height: float, object_cfg: SceneEntityCfg = SceneEntityCfg("object")
) -> torch.Tensor:
    """Reward the agent for lifting the object above the minimal height."""
    object: RigidObject = env.scene[object_cfg.name]
    return torch.where(object.data.root_pos_w[:, 2] > minimal_height, 1.0, 0.0)

def object_is_lifted_for_pick_place(
    env: ManagerBasedRLEnv,
    minimal_height: float,
    place_threshold: float,  # object-to-goal distance below which the lift reward is switched off
    command_name: str,
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
) -> torch.Tensor:
    """Reward lifting the object above ``minimal_height``, gated off near the goal.

    The binary lift reward is suppressed once the object is within
    ``place_threshold`` (3D distance) of the commanded goal, so the policy is
    free to lower the object for placement.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    # commanded goal position, expressed in the robot base frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    # transform the goal into the world frame using the robot root pose
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # 3D distance between the object and the goal
    goal_dist = torch.norm(goal_pos_w[:, :3] - obj.data.root_pos_w[:, :3], dim=1)
    lifted = torch.where(obj.data.root_pos_w[:, 2] > minimal_height, 1.0, 0.0)
    return lifted * (goal_dist > place_threshold)

def object_is_placed_for_pick_place(
    env: ManagerBasedRLEnv,
    minimal_height: float,
    place_threshold: float,  # object-to-goal distance below which placing is rewarded
    command_name: str,
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
) -> torch.Tensor:
    """Reward setting the object down once it is close to the commanded goal.

    The reward is 1.0 only when the object is both at or below
    ``minimal_height`` and within ``place_threshold`` (3D distance) of the goal.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    # commanded goal position in the robot base frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    # express the goal in the world frame
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # 3D distance between the object and the goal
    goal_dist = torch.norm(goal_pos_w[:, :3] - obj.data.root_pos_w[:, :3], dim=1)
    set_down = torch.where(obj.data.root_pos_w[:, 2] <= minimal_height, 1.0, 0.0)
    return set_down * (goal_dist <= place_threshold)

"""
    reward = exp(-distance^2 / (2*std^2))
    当末端距离物体越近,奖励值越高 最大为1 

    作用阶段：早期引导机械臂靠近物体

    参数建议：
    std=0.1 表示当距离为0.1米时奖励衰减到 0.76
    控制奖励随距离增加的衰减速度,std值越大,奖励随距离下降越平缓
"""
def object_ee_distance(
    env: ManagerBasedRLEnv,
    std: float,
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
    ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame"),
) -> torch.Tensor:
    """Reward the agent for reaching the object using tanh-kernel."""
    # extract the used quantities (to enable type-hinting)
    object: RigidObject = env.scene[object_cfg.name]
    ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name]
    # Target object position: (num_envs, 3)
    cube_pos_w = object.data.root_pos_w
    # End-effector position: (num_envs, 3)
    ee_w = ee_frame.data.target_pos_w[..., 0, :]
    # Distance of the end-effector to the object: (num_envs,)
    object_ee_distance = torch.norm(cube_pos_w - ee_w, dim=1)

    return 1 - torch.tanh(object_ee_distance / std)

def object_ee_distance_for_pick_place(
    env: ManagerBasedRLEnv,
    std: float,
    finish_place_threshold: float,  # object-to-goal distance below which placement counts as finished
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
    ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame"),
) -> torch.Tensor:
    """Reward the end-effector approaching the object with a tanh kernel,
    gated off once the object has reached the goal so the gripper can release.
    """
    # resolve scene entities (also enables type hinting)
    robot: Articulation = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name]
    # end-effector to object distance: (num_envs,)
    ee_pos_w = ee_frame.data.target_pos_w[..., 0, :]
    reach_dist = torch.norm(obj.data.root_pos_w - ee_pos_w, dim=1)
    # commanded goal in the world frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # 3D distance between the object and the goal
    goal_dist = torch.norm(goal_pos_w[:, :3] - obj.data.root_pos_w[:, :3], dim=1)
    # tanh reaching kernel, masked out once the object is at the goal
    return (1 - torch.tanh(reach_dist / std)) * (goal_dist > finish_place_threshold)

def object_ee_distance_for_back(
    env: ManagerBasedRLEnv,
    std: float,
    finish_place_threshold: float,  # object-to-goal distance below which placement counts as finished
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
    ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame"),
) -> torch.Tensor:
    """Reward the end-effector moving away from the object after placement,
    encouraging the gripper to release and retreat to its default pose.
    """
    # resolve scene entities (also enables type hinting)
    robot: Articulation = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name]
    # end-effector to object distance: (num_envs,)
    ee_pos_w = ee_frame.data.target_pos_w[..., 0, :]
    retreat_dist = torch.norm(obj.data.root_pos_w - ee_pos_w, dim=1)
    # commanded goal in the world frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # 3D distance between the object and the goal
    goal_dist = torch.norm(goal_pos_w - obj.data.root_pos_w[:, :3], dim=1)
    # reward growing ee-object separation, active only once the object is placed
    return torch.tanh(retreat_dist / std) * (goal_dist <= finish_place_threshold)

def object_goal_distance(
    env: ManagerBasedRLEnv,
    std: float,
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Reward tracking the commanded goal pose using a tanh kernel on the
    object-to-goal distance: ``1 - tanh(d / std)``.
    """
    # resolve scene entities (also enables type hinting)
    robot: RigidObject = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    # commanded goal position in the robot base frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    # express the goal in the world frame
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # object-to-goal distance: (num_envs,)
    goal_dist = torch.norm(goal_pos_w - obj.data.root_pos_w[:, :3], dim=1)
    return 1 - torch.tanh(goal_dist / std)

def object_goal_distance_when_pick(
    env: ManagerBasedRLEnv,
    std: float,
    place_threshold: float,
    minimal_height: float,
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Reward carrying the lifted object toward the goal using a tanh kernel
    on the planar (xy) object-to-goal distance.

    Active only while the object is lifted above ``minimal_height`` and still
    farther than ``place_threshold`` from the goal in the xy plane.
    """
    # resolve scene entities (also enables type hinting)
    robot: Articulation = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    # commanded goal position in the world frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # planar (xy) object-to-goal distance: (num_envs,)
    planar_dist = torch.norm(goal_pos_w[:, :2] - obj.data.root_pos_w[:, :2], dim=1)
    # tracking kernel, gated on "lifted" and "not yet near the goal"
    lifted = obj.data.root_pos_w[:, 2] > minimal_height
    return lifted * (1 - torch.tanh(planar_dist / std)) * (planar_dist > place_threshold)

def object_goal_distance_when_place(
    env: ManagerBasedRLEnv,
    std: float,
    place_threshold: float,
    minimal_height: float,
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Reward fine goal tracking during the placing phase, using a tanh kernel
    on the planar (xy) object-to-goal distance.

    Active only once the object is within ``place_threshold`` of the goal in
    the xy plane and has been lowered to or below ``minimal_height``.
    """
    # resolve scene entities (also enables type hinting)
    robot: Articulation = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    # commanded goal position in the world frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # planar (xy) object-to-goal distance: (num_envs,)
    planar_dist = torch.norm(goal_pos_w[:, :2] - obj.data.root_pos_w[:, :2], dim=1)
    # tracking kernel, gated on "near the goal" and "lowered for placement"
    lowered = obj.data.root_pos_w[:, 2] <= minimal_height
    return (planar_dist <= place_threshold) * (1 - torch.tanh(planar_dist / std)) * lowered

def ee_vel_limit(
    env: ManagerBasedRLEnv,
    vel_limit: float,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot", body_names=""),
) -> torch.Tensor:
    """Penalize end-effector linear speed exceeding ``vel_limit``.

    Returns, per environment, the summed excess speed (above the limit) over
    the bodies selected by ``robot_cfg.body_ids``; zero when within the limit.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    # linear speed of each selected body: (num_envs, num_bodies)
    body_speed = torch.norm(robot.data.body_link_lin_vel_w[:, robot_cfg.body_ids, :], dim=-1)
    # only the portion above the limit is penalized
    excess = torch.clip(body_speed - vel_limit, min=0.0)
    return torch.sum(excess, dim=1)

def joint_default_pos_penalize(
    env: ManagerBasedRLEnv,
    finish_place_threshold: float,
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot", joint_names=".*"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Penalize deviation from the default joint positions after placement.

    The penalty (L2 joint-position error w.r.t. defaults) applies only once the
    object is within ``finish_place_threshold`` of the commanded goal, driving
    the arm back to its default configuration.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    # L2 norm of the joint-position error w.r.t. the default configuration
    joint_error = torch.linalg.norm(
        robot.data.joint_pos[:, robot_cfg.joint_ids] - robot.data.default_joint_pos[:, robot_cfg.joint_ids], dim=1
    )
    # commanded goal position in the world frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # 3D object-to-goal distance: (num_envs,)
    goal_dist = torch.norm(goal_pos_w[:, :3] - obj.data.root_pos_w[:, :3], dim=1)
    return (goal_dist <= finish_place_threshold) * joint_error

def joint_default_pos_penalize_always(
    env: ManagerBasedRLEnv,
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot", joint_names=".*"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Penalize deviation of the selected joints from their default positions.

    Unlike :func:`joint_default_pos_penalize`, this penalty is applied at every
    step, independent of the object/goal state.

    Note:
        ``command_name`` and ``object_cfg`` are retained for signature
        compatibility with the gated variant but are not used; the dead
        goal-distance computation of the original implementation was removed.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    # L2 norm of the joint-position error w.r.t. the default configuration
    joint_error = torch.linalg.norm(
        robot.data.joint_pos[:, robot_cfg.joint_ids] - robot.data.default_joint_pos[:, robot_cfg.joint_ids], dim=1
    )
    return joint_error


def joint_default_pos_reward(
    env: ManagerBasedRLEnv,
    std: float,
    finish_place_threshold: float,
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot", joint_names=".*"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Reward returning the joints to their default positions after placement.

    A tanh kernel on the joint-position error is scaled by a linear proximity
    factor that ramps from 0 (object at ``finish_place_threshold`` from the
    goal, or farther) to 1 (object exactly at the goal).
    """
    robot: Articulation = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    # L2 norm of the joint-position error w.r.t. the default configuration
    joint_error = torch.linalg.norm(
        robot.data.joint_pos[:, robot_cfg.joint_ids] - robot.data.default_joint_pos[:, robot_cfg.joint_ids], dim=1
    )
    # commanded goal position in the world frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # 3D object-to-goal distance: (num_envs,)
    goal_dist = torch.norm(goal_pos_w - obj.data.root_pos_w[:, :3], dim=1)
    # linear ramp in [0, 1]: 1 at the goal, 0 at/beyond the threshold
    proximity = torch.clamp(finish_place_threshold - goal_dist, 0.0, finish_place_threshold) / finish_place_threshold
    return (1 - torch.tanh(joint_error / std)) * proximity

def open_gripper(
    env: ManagerBasedRLEnv,
    finish_place_threshold: float,
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot", joint_names=""),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Reward opening the gripper once the object reaches the goal region.

    Grants a binary bonus when the selected (gripper) joints are within 0.01 of
    their default positions, scaled by a linear proximity factor based on the
    planar (xy) object-to-goal distance.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    # L2 norm of the selected joints' error w.r.t. their default positions
    joint_error = torch.linalg.norm(
        robot.data.joint_pos[:, robot_cfg.joint_ids] - robot.data.default_joint_pos[:, robot_cfg.joint_ids], dim=1
    )
    # commanded goal position in the world frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # planar (xy) object-to-goal distance: (num_envs,)
    planar_dist = torch.norm(goal_pos_w[:, :2] - obj.data.root_pos_w[:, :2], dim=1)
    # linear ramp in [0, 1]: 1 at the goal, 0 at/beyond the threshold
    proximity = torch.clamp(finish_place_threshold - planar_dist, 0.0, finish_place_threshold) / finish_place_threshold
    at_default = torch.where(joint_error < 0.01, 1.0, 0.0)
    return at_default * proximity

def height_penalize(
    env: ManagerBasedRLEnv,
    height_maximum: float,  # maximum allowed height above the goal point
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Penalize the object rising more than ``height_maximum`` above the goal.

    The penalty is the amount by which the object's z position exceeds the goal
    height plus ``height_maximum``; zero when within the allowed band.
    """
    robot: Articulation = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    # commanded goal position in the world frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # excess height above the allowed ceiling (goal z + height_maximum)
    ceiling = goal_pos_w[:, 2] + height_maximum
    return torch.clamp(obj.data.root_pos_w[:, 2] - ceiling, min=0.0)

def object_goal_arrival(
    env: ManagerBasedRLEnv,
    finish_place_threshold: float,
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Binary reward when the object arrives within ``finish_place_threshold``
    of the commanded goal (3D distance).
    """
    robot: Articulation = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    # commanded goal position in the world frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # 3D object-to-goal distance: (num_envs,)
    goal_dist = torch.norm(goal_pos_w[:, :3] - obj.data.root_pos_w[:, :3], dim=1)
    return torch.where(goal_dist < finish_place_threshold, 1.0, 0.0)

def object_velocity_penalize(
    env: ManagerBasedRLEnv,
    finish_place_threshold: float,
    command_name: str,
    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
    object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
    """Penalize residual object speed once it is placed at the goal.

    Returns the object's linear speed, masked to environments where the object
    is within ``finish_place_threshold`` of the commanded goal (3D distance).
    """
    robot: Articulation = env.scene[robot_cfg.name]
    obj: RigidObject = env.scene[object_cfg.name]
    # commanded goal position in the world frame
    goal_pos_b = env.command_manager.get_command(command_name)[:, :3]
    goal_pos_w, _ = combine_frame_transforms(
        robot.data.root_state_w[:, :3], robot.data.root_state_w[:, 3:7], goal_pos_b
    )
    # 3D object-to-goal distance: (num_envs,)
    goal_dist = torch.norm(goal_pos_w[:, :3] - obj.data.root_pos_w[:, :3], dim=1)
    # object linear speed in the world frame: (num_envs,)
    obj_speed = torch.norm(obj.data.root_lin_vel_w[:, :3], dim=1)
    return obj_speed * (goal_dist < finish_place_threshold)