#!/usr/bin/env python3

"""
Base detection camera TF publisher for ROS2.
Initializes the camera directly through pyrealsense2; handles camera setup and
publishing of the ideal_camera TF.
All AprilTag / YOLO detection nodes inherit from this class.
"""

import rclpy
import std_msgs.msg
import cv2
import numpy as np
from rclpy.node import Node
from cv_bridge import CvBridge
from tf2_ros import TransformBroadcaster
from scipy.spatial.transform import Rotation
from geometry_msgs.msg import TransformStamped
from sensor_msgs.msg import Image, PointCloud2, PointField
from sensor_msgs_py import point_cloud2 as pc2

from manipulation.scripts.constants.camera import CAMERA_CONFIG
from manipulation.scripts.detection.cameras import BaseCamera, D435Camera, DummyCamera
from manipulation.scripts.detection.commans.utils import fill_holes_inpaint

AXIS_COLORS = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]  # BGR colors: x axis red, y green, z blue (used by draw_axis_in_image)

class BaseDetectionNode(Node):
    """ Base class for detection nodes that handle camera initialization and TF publishing.
    Subclasses should do:
    1. Use `pub_detected_img` to publish detected images and implement their own detection logic.
    2. Update `targets_list` to store detected targets, which is a list of dictionaries with keys
    ```
    {
        'child_frame_id': str,  # Frame ID of the detected object
        'R': np.ndarray,  # Rotation matrix of the detected object in camera frame
        't': np.ndarray,  # Translation vector of the detected object in camera frame
    }
    ```
    3. Implement `detect` method to perform detection logic and update `targets_list`.
    """

    # Explicit registry of supported camera drivers. Looking the 'camera_class'
    # parameter up here replaces the original globals()/eval() resolution of a
    # runtime-supplied string, which is unsafe and hides typos until runtime.
    CAMERA_CLASSES = {
        'D435Camera': D435Camera,
        'DummyCamera': DummyCamera,
    }

    def __init__(self, node_name='base_detection_node'):
        super().__init__(node_name)
        self.declare_parameter('camera_class', 'D435Camera')  # Options: ['D435Camera', 'DummyCamera']
        self.declare_parameter('camera_config', 'rgbd')  # Options: ['rgb', 'rgbd']
        self.declare_parameter('publish_tf_topic', True)  # Send to ROS1
        self.declare_parameter('publish_tfbroadcaster', True)  # Show on ROS2
        self.declare_parameter('publish_rgb_image', True)  # Publish RGB image to /camera/color/image_raw
        self.declare_parameter('publish_depth_image', True)  # Publish Depth image to /camera/depth/image_raw
        self.declare_parameter('publish_point_cloud', False)  # Publish PointCloud2 to /camera/depth/points
        camera_class = self.get_parameter('camera_class').get_parameter_value().string_value
        camera_cfg_name = self.get_parameter('camera_config').get_parameter_value().string_value
        self.publish_tf_topic = self.get_parameter('publish_tf_topic').get_parameter_value().bool_value
        self.publish_tfbroadcaster = self.get_parameter('publish_tfbroadcaster').get_parameter_value().bool_value
        self.publish_rgb_image = self.get_parameter('publish_rgb_image').get_parameter_value().bool_value
        self.publish_depth_image = self.get_parameter('publish_depth_image').get_parameter_value().bool_value
        self.publish_point_cloud = self.get_parameter('publish_point_cloud').get_parameter_value().bool_value

        if camera_cfg_name not in CAMERA_CONFIG:
            raise ValueError(f"Camera config '{camera_cfg_name}' is not defined, use {CAMERA_CONFIG.keys()}")
        camera_cfg = CAMERA_CONFIG[camera_cfg_name]
        if camera_class not in self.CAMERA_CLASSES:
            raise ValueError(f"Camera class '{camera_class}' is not defined, use [D435Camera, DummyCamera].")
        self.camera: BaseCamera = self.CAMERA_CLASSES[camera_class](
            **camera_cfg,
            node=self
        )

        self.cv_bridge = CvBridge()
        self.tf_broadcaster = TransformBroadcaster(self)
        self.pub_tf = self.create_publisher(TransformStamped, 'camera_detection_tf', 1)
        self.pub_rgb_img = self.create_publisher(Image, 'camera/color/image_raw', 1)
        self.pub_depth_img = self.create_publisher(Image, 'camera/depth/image_raw', 1)
        self.pub_point_cloud = self.create_publisher(PointCloud2, 'camera/depth/points', 1)

        # Use / Update by subclass
        self.pub_detected_img = self.create_publisher(Image, 'detected_image', 1)
        self.targets_list = []

        # Log the actual (possibly subclass) name; the original message said
        # 'CameraTFPublisher', a stale name left over from a rename.
        self.get_logger().info(f"{type(self).__name__} initialized with camera class: {camera_class}, config: {camera_cfg_name}")

    def publish_ideal_camera_tf(self):
        """ Publish the 'camera' -> 'ideal_camera' transform.

        The ideal camera frame is the camera frame rotated by euler xyz
        (-90, 0, -90) degrees; presumably this maps the optical frame to the
        ROS body convention (x forward, z up) — TODO(review): confirm.
        """
        # The original built a partially-filled TransformStamped here and then
        # immediately overwrote it with the builder call below; that dead code
        # has been removed.
        rot = Rotation.from_euler('xyz', [-90, 0, -90], degrees=True)
        tf_msg = self.build_tf_msg_from_quat_transform(
            frame_id='camera',
            child_frame_id='ideal_camera',
            quaternion=rot.as_quat(),
            translation=[0.0, 0.0, 0.0]
        )
        if self.publish_tf_topic:
            self.pub_tf.publish(tf_msg)
        if self.publish_tfbroadcaster:
            self.tf_broadcaster.sendTransform(tf_msg)

    def draw_axis_in_image(self, img, R, t, text=None):
        """ Draw a 5 cm coordinate frame (x red, y green, z blue) in the image.
        Args:
            img (np.ndarray): The image to draw on.
            R (np.ndarray): The rotation matrix of the detected object in camera frame.
            t (np.ndarray): The translation vector of the detected object in camera frame.
            text (str, optional): Label drawn next to the projected origin.
        Returns:
            img (np.ndarray): The image with the coordinate frame drawn on it.
        """
        # Origin plus one 5 cm point along each axis, expressed in the object frame.
        axis_world_pos = np.array([[0, 0, 0], [0.05, 0, 0], [0, 0.05, 0], [0, 0, 0.05]], np.float32)
        axis_img_pos = self.camera.world_pos2img_pos(axis_world_pos, R, t)
        origin_img_pos = axis_img_pos[0]
        for idx, color in enumerate(AXIS_COLORS):
            img = cv2.line(img, origin_img_pos, axis_img_pos[idx + 1], color, thickness=2)
        img = cv2.circle(img, origin_img_pos, radius=5, color=(255, 255, 255), thickness=-1)
        if text:
            cv2.putText(img, text, (origin_img_pos[0] + 10, origin_img_pos[1] + 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        return img

    def publish_tf_from_detect_info(self, child_frame_id, info):
        """ Publish the TF of a detected object relative to 'ideal_camera'.
        Args:
            child_frame_id (str): The child frame ID for the TF.
            info (dict): The detected tag information containing 'R' and 't'.
        """
        # NOTE: the original docstring claimed an image was returned; this
        # method only publishes and returns None.
        R, t = info['R'], info['t']
        q = Rotation.from_matrix(R).as_quat()

        tf_msg = self.build_tf_msg_from_quat_transform(
            frame_id='ideal_camera',
            child_frame_id=child_frame_id,
            quaternion=q,
            translation=t.flatten()
        )
        if self.publish_tf_topic:
            self.pub_tf.publish(tf_msg)
        if self.publish_tfbroadcaster:
            self.tf_broadcaster.sendTransform(tf_msg)

    def get_frame(self):
        """ Grab one (rgb, depth) frame from the camera and publish the enabled topics.
        Returns:
            tuple: (rgb_img, depth_img); depth_img may be None (RGB-only config).
        """
        rgb_img, depth_img = self.camera.get_frame()
        frame_id = 'camera'
        now = self.get_clock().now().to_msg()

        if self.publish_rgb_image:
            rgb_msg = self.cv_bridge.cv2_to_imgmsg(rgb_img, encoding='bgr8')
            rgb_msg.header.stamp = now
            rgb_msg.header.frame_id = frame_id
            self.pub_rgb_img.publish(rgb_msg)

        if depth_img is not None:
            # depth_img = fill_holes_inpaint(depth_img)  # feeding the raw depth to graspnet works better
            if self.publish_depth_image:
                depth_msg = self.cv_bridge.cv2_to_imgmsg(depth_img, encoding='16UC1')
                depth_msg.header.stamp = now
                depth_msg.header.frame_id = frame_id
                self.pub_depth_img.publish(depth_msg)

            if self.publish_point_cloud:
                pc_msg = self._create_point_cloud_from_rgbd(rgb_img, depth_img, now, frame_id)
                self.pub_point_cloud.publish(pc_msg)

        return rgb_img, depth_img

    def _create_point_cloud_from_rgbd(self, rgb_img, depth_img, stamp, frame_id):
        """ Create a PointCloud2 message (xyz + packed rgb) from RGB and Depth images. """
        xs_pos = np.array(np.meshgrid(np.arange(self.camera.width), np.arange(self.camera.height)), dtype=np.int32)
        xs_pos = xs_pos.reshape(2, -1).T  # (N, 2) pixel coordinates (u, v)
        xs_camera = self.camera.img_pos2camera_pos(xs_pos, depth_img)  # (N, 3)
        colors = rgb_img[xs_pos[:, 1], xs_pos[:, 0]]  # (N, 3), BGR channel order
        fields = [
            PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
            PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
            PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
            PointField(name='rgb', offset=12, datatype=PointField.UINT32, count=1),
        ]
        # Pack BGR into one uint32 0x00RRGGBB per point. Vectorized: the
        # original per-pixel Python loop was a hot spot at full resolution.
        bgr = colors.astype(np.uint32)
        rgb_packed = (bgr[:, 2] << 16) | (bgr[:, 1] << 8) | bgr[:, 0]
        points = [(x, y, z, rgb) for (x, y, z), rgb in zip(xs_camera, rgb_packed)]
        header = std_msgs.msg.Header(stamp=stamp, frame_id=frame_id)
        return pc2.create_cloud(header, fields, points)

    def build_tf_msg_from_quat_transform(self, frame_id, child_frame_id, quaternion, translation):
        """ Build a stamped TF message from a quaternion (x, y, z, w) and a translation (x, y, z). """
        tf_msg = TransformStamped()
        tf_msg.header.stamp = self.get_clock().now().to_msg()
        tf_msg.header.frame_id = frame_id
        tf_msg.child_frame_id = child_frame_id
        tf_msg.transform.translation.x = float(translation[0])
        tf_msg.transform.translation.y = float(translation[1])
        tf_msg.transform.translation.z = float(translation[2])
        tf_msg.transform.rotation.x = float(quaternion[0])
        tf_msg.transform.rotation.y = float(quaternion[1])
        tf_msg.transform.rotation.z = float(quaternion[2])
        tf_msg.transform.rotation.w = float(quaternion[3])
        return tf_msg

    def detect(self):
        """ This method should be implemented by subclasses to perform detection logic.
        It should update `self.targets_list` with detected targets.
        """
        raise NotImplementedError("Subclasses must implement the detect method.")

if __name__ == '__main__':
    # Standalone entry point: run the base node by itself (useful for checking
    # the camera image / point-cloud publishers; subclasses add detection).
    rclpy.init()
    detection_node = BaseDetectionNode()
    rclpy.spin(detection_node)
    detection_node.destroy_node()
    rclpy.shutdown()
