#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2025/07/22 16:00
# @Author  : Yida Hao
# @File    : pred_joint_action_srv.py

import os

# Optional dev-mode hook: when LEROBOT_ROS2_LEROBOT_INJECTION=true, prepend the
# lerobot source trees listed in the package's paths.yaml to sys.path so the
# checked-out lerobot is imported instead of any installed copy.
if os.environ.get('LEROBOT_ROS2_LEROBOT_INJECTION', 'false') == 'true':
    import sys
    import yaml
    from ament_index_python.packages import get_package_share_directory

    config_path = get_package_share_directory('policy_management') + '/config/paths.yaml'
    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)
    # Insert deps first, then lerobot itself, so lerobot_path ends up at
    # sys.path[0] — same precedence as the original pair of inserts.
    for key in ('lerobot_deps_path', 'lerobot_path'):
        path = config['paths'][key]
        if path not in sys.path:
            sys.path.insert(0, path)


from rclpy.node import Node
from pathlib import Path
from interfaces.srv import PredJointAction
from interfaces.msg import CamImages
from sensor_msgs.msg import JointState
from std_msgs.msg import Bool

from lerobot.datasets.utils import hw_to_dataset_features
from lerobot.policies.factory import make_pre_post_processors
from lerobot.policies.utils import build_inference_frame
from lerobot.policies.act.modeling_act import ACTPolicy

from torch import Tensor
import torch

from cv_bridge import CvBridge
import numpy as np
import lerobot

# Report which lerobot installation was actually imported (useful to confirm
# whether the sys.path injection above took effect).
print(f"lerobot path: {lerobot.__file__}")

"""Action prediction service: given robot observations, predict joint actions."""

# TODO: wrap these settings with ros2 args instead of hard-coding them here
# Path to the pretrained ACT policy checkpoint directory.
policy_path_act = Path("/home/ch3cooh/Workspace/TrainedModels/act_car_arm/pretrained_model")

# Name under which the prediction service is registered.
SRV_NAME = 'pred_joint_action_service'
# Shared converter between sensor_msgs images and OpenCV/numpy arrays.
cvbridge = CvBridge()

class PredJointActionService(Node):
    """ROS2 service node that predicts joint actions from robot observations.

    Loads a pretrained ACT policy once at startup and answers
    ``PredJointAction`` requests: joint states + camera images in,
    a ``JointState`` of predicted positions out.
    """

    def __init__(self):
        super().__init__(f'{SRV_NAME}_srv')
        policy_path = policy_path_act
        self.policy = ACTPolicy.from_pretrained(policy_path)
        # Processors translate between raw observation frames and the
        # policy's tensor inputs/outputs. (Fix: pass the local policy_path
        # consistently instead of mixing it with the module-level global.)
        self.preprocess, self.postprocess = make_pre_post_processors(self.policy.config, policy_path)

        self.get_logger().info(f"ACT om enabled: {getattr(self.policy.config, 'is_ascend_om_enabled', False)}")

        self.srv = self.create_service(PredJointAction, SRV_NAME, self.srv_callback)

        self.get_logger().info(f"{ACTPolicy.__name__} loaded from {policy_path}")
        self.get_logger().info(f"input features: {self.policy.config.input_features}")
        self.get_logger().info(f"output features: {self.policy.config.output_features}")
        self.get_logger().info("service ready")

    def srv_callback(self, request, response):
        """Service entry point: delegate to pred_action, which fills `response` in place."""
        self.pred_action(request, response)
        self.get_logger().info(f"service called, predicted action: {response.joint_action.position}")
        return response

    def pred_action(self, request, response):
        """Run one inference step of the policy and populate `response`.

        Observations are only rebuilt when the request's
        ``is_curr_pred_need_obs`` flag is set; otherwise an empty dict is
        passed (presumably the policy replays a queued action chunk —
        verify against the policy's select_action contract).
        """
        self.get_logger().info("pred_action called")

        obs = {}

        if request.is_curr_pred_need_obs.data:
            obs = self.make_obs(request.joint_state, request.cam_images)

        action = self.policy.select_action(obs)
        action = self.postprocess(action)

        # Tensor -> sensor_msgs.
        # Fix: move the tensor to host memory before .numpy() — make_obs
        # builds the inference frame on CUDA, and Tensor.numpy() raises a
        # TypeError on GPU tensors. .cpu() is a no-op for CPU tensors, so
        # this is backward compatible.
        # NOTE(review): if postprocess returns a batched (1, dof) tensor,
        # tolist() yields a nested list — confirm the expected shape.
        response.joint_action = JointState()
        response.joint_action.name = request.joint_state.name
        response.is_next_pred_need_obs = Bool()
        response.is_next_pred_need_obs.data = self.policy.is_next_pred_need_obs()
        response.joint_action.position = action.detach().cpu().numpy().tolist()

        self.get_logger().info(f'output action: {response.joint_action}')

        return response

    def make_obs(self, states: JointState, cam_imgs: CamImages) -> dict[str, Tensor]:
        """Convert ROS messages into the preprocessed observation frame.

        Args:
            states: joint names/positions; each position becomes a scalar
                float32 tensor keyed by its joint name.
            cam_imgs: named camera images, converted via cv_bridge into
                uint8 tensors (layout as produced by imgmsg_to_cv2 —
                verify encoding against the publisher).

        Returns:
            The observation dict after running the policy's preprocessor.
        """
        obs = {}

        # JointState -> Tensor (one float32 scalar per joint name)
        for i, name in enumerate(states.name):
            obs[name] = torch.from_numpy(np.array(states.position[i], dtype=np.float32))

        # Camera -> Tensor (uint8)
        for i, name in enumerate(cam_imgs.name):
            image = cvbridge.imgmsg_to_cv2(cam_imgs.image[i])
            image = np.array(image, dtype=np.uint8)
            obs[name] = torch.from_numpy(image)

        # Feature specs describe how raw observations map onto dataset keys.
        action_features = hw_to_dataset_features(self.policy.config.output_features, "action")
        obs_features = hw_to_dataset_features(self.policy.config.input_features, "observation")
        dataset_features = {**action_features, **obs_features}
        obs_frame = build_inference_frame(
            observation=obs, ds_features=dataset_features, device=torch.device('cuda')  # TODO: make device configurable
        )
        obs = self.preprocess(obs_frame)

        return obs
    