# vision_node.py — graduation project build, v2.2
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import numpy as np
from ultralytics import YOLO
import math
import os
from ament_index_python.packages import get_package_share_directory
from geometry_msgs.msg import Point, Vector3
from my_vision_interfaces.msg import TargetPose

def rvec_to_quaternion(rvec):
    """Convert a Rodrigues rotation vector to a unit quaternion.

    Args:
        rvec: rotation vector as any array-like of 3 floats (list, tuple,
            (3,) or (3,1) ndarray); its norm is the rotation angle in
            radians and its direction the rotation axis.

    Returns:
        ``[qx, qy, qz, qw]`` as a list of floats. A (near-)zero rotation
        yields the identity quaternion ``[0.0, 0.0, 0.0, 1.0]``.
    """
    # Accept plain sequences too (original required an ndarray for .flatten()).
    rvec = np.asarray(rvec, dtype=float).ravel()
    angle = np.linalg.norm(rvec)
    if angle < 1e-9:
        return [0.0, 0.0, 0.0, 1.0]
    axis = rvec / angle
    sin_half = math.sin(angle / 2)
    qx, qy, qz = (axis * sin_half).tolist()
    qw = math.cos(angle / 2)
    return [qx, qy, qz, qw]

class VisionNode(Node):
    """YOLO-supervised detect-and-track vision node.

    Per frame:
      1. While TRACKING, advance a CSRT tracker; fall back to SEARCHING on
         tracker failure or when the box drifts into the frame margin.
      2. While SEARCHING (or every ``RE_DETECT_INTERVAL`` frames as a
         sanity re-calibration), run YOLO; a confident detection
         (re-)initialises the tracker and the Kalman filter.
      3. For a valid box, smooth the centre with the Kalman filter and the
         size with an EMA, solve PnP for the 6-DoF pose, and publish a
         TargetPose plus an annotated image.
    """

    def __init__(self):
        super().__init__('vision_node')
        self.bridge = CvBridge()
        self.image_subscription = self.create_subscription(Image, '/image_raw', self.image_callback, 10)
        self.annotated_image_publisher = self.create_publisher(Image, '/image_annotated', 10)
        self.pose_publisher = self.create_publisher(TargetPose, '/target_pose', 10)

        # Two-state machine: SEARCHING runs YOLO, TRACKING runs the CSRT tracker.
        self.STATE_SEARCHING = "SEARCHING"
        self.STATE_TRACKING = "TRACKING"
        self.current_state = self.STATE_SEARCHING

        self.get_logger().info('正在加载模型和标定数据...')
        package_share_directory = get_package_share_directory('my_vision_pkg')
        model_path = os.path.join(package_share_directory, 'best.pt')
        calibration_path = os.path.join(package_share_directory, 'calibration_data.npz')
        self.model = YOLO(model_path)
        with np.load(calibration_path) as data:
            self.camera_matrix = data['camera_matrix']
            self.dist_coeffs = data['dist_coeffs']

        # Physical size of the planar target (mm) and its 3D corner model
        # (origin at the centre, z = 0 plane), corner order TL, TR, BR, BL.
        self.OBJECT_WIDTH_MM = 62.0
        self.OBJECT_HEIGHT_MM = 110.0
        half_w, half_h = self.OBJECT_WIDTH_MM / 2, self.OBJECT_HEIGHT_MM / 2
        self.object_points = np.array(
            [[-half_w, -half_h, 0], [half_w, -half_h, 0],
             [half_w, half_h, 0], [-half_w, half_h, 0]], dtype=np.float32)
        # Axis endpoints (mm) used only to draw the pose triad.
        self.axis_points = np.float32([[0, 0, 0], [50, 0, 0], [0, 50, 0], [0, 0, -50]])

        # Constant-velocity Kalman filter over the box centre: state (x, y, vx, vy).
        self.tracker = None
        self.kalman = cv2.KalmanFilter(4, 2)
        self.kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
        self.kalman.transitionMatrix = np.array(
            [[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
        self.kalman.processNoiseCov = np.eye(4, dtype=np.float32) * 0.03

        # Exponential moving average over the box size.
        self.smoothed_w, self.smoothed_h = 0, 0
        self.ema_alpha = 0.3

        # Last YOLO classification, reused while tracking between detections.
        self.last_known_class_name = ""
        self.last_known_confidence = 0.0
        self.frame_count = 0
        self.RE_DETECT_INTERVAL = 30  # frames between periodic YOLO re-calibrations

        self.get_logger().info('视觉节点已启动！')

    def image_callback(self, msg):
        """Per-frame entry point: track/detect, estimate pose, publish."""
        try:
            frame = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
        except CvBridgeError as e:
            self.get_logger().error(f'CV Bridge Error: {e}')
            return

        self.frame_count += 1
        frame_h, frame_w = frame.shape[:2]
        margin = 15  # boxes touching this border band are considered unreliable

        # --- YOLO-supervised detect-track state machine ---
        # FIX: carry the measured box explicitly instead of probing locals().
        bbox_measured = None

        # 1. Advance the tracker while in TRACKING.
        if self.current_state == self.STATE_TRACKING:
            bbox_measured = self._update_tracker(frame, frame_w, frame_h, margin)

        # 2. Run YOLO when searching, or periodically to re-calibrate.
        is_recalibrating = self.frame_count % self.RE_DETECT_INTERVAL == 0
        if self.current_state == self.STATE_SEARCHING or is_recalibrating:
            detected = self._detect_with_yolo(frame, frame_w, frame_h, margin)
            if detected is not None:
                bbox_measured = detected

        # 3. With a valid box this frame, solve and publish the pose.
        if self.current_state == self.STATE_TRACKING and bbox_measured is not None:
            self._estimate_and_publish_pose(frame, bbox_measured)

        # Publish the annotated image (converted BGR -> RGB, encoded 'rgb8').
        try:
            annotated_rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.annotated_image_publisher.publish(self.bridge.cv2_to_imgmsg(annotated_rgb_frame, 'rgb8'))
        except CvBridgeError as e:
            self.get_logger().error(f'发布图像时转换失败: {e}')

    def _update_tracker(self, frame, frame_w, frame_h, margin):
        """Advance the CSRT tracker one frame.

        Returns the tracked bbox ``(x, y, w, h)`` or None; drops the state
        back to SEARCHING when the tracker fails or the box reaches the
        frame margin.
        """
        success, bbox = self.tracker.update(frame)
        if not success:
            self.current_state = self.STATE_SEARCHING
            return None
        x_t, y_t, w_t, h_t = map(int, bbox)
        if x_t < margin or y_t < margin or (x_t + w_t) > (frame_w - margin) or (y_t + h_t) > (frame_h - margin):
            self.current_state = self.STATE_SEARCHING
            return None
        return bbox

    def _detect_with_yolo(self, frame, frame_w, frame_h, margin):
        """Run YOLO; on a confident detection away from the border,
        (re-)initialise the tracker and Kalman filter.

        Returns the new bbox ``(x, y, w, h)`` or None. When nothing is
        detected at all while TRACKING (periodic re-calibration), the
        tracker is assumed to have drifted and the state resets to
        SEARCHING.
        """
        if self.current_state == self.STATE_SEARCHING:
            cv2.putText(frame, "STATE: SEARCHING", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # NOTE: the model was trained on BGR input, so no RGB conversion here.
        results = self.model(frame, verbose=False)

        # Keep the single most confident detection above the 0.75 threshold.
        best_box = None
        max_conf = 0.0
        for r in results:
            for b in r.boxes:
                conf = float(b.conf[0])
                if conf > max_conf and conf > 0.75:
                    max_conf = conf
                    best_box = b

        if best_box is not None:
            x1, y1, x2, y2 = best_box.xyxy[0].cpu().numpy().astype(int)
            if x1 > margin and y1 > margin and x2 < (frame_w - margin) and y2 < (frame_h - margin):
                bbox_yolo = (x1, y1, x2 - x1, y2 - y1)
                self.tracker = cv2.TrackerCSRT_create()
                self.tracker.init(frame, bbox_yolo)
                # Re-seed the Kalman filter at the detection centre, zero velocity.
                self.kalman.statePost = np.array([x1 + (x2 - x1) / 2, y1 + (y2 - y1) / 2, 0, 0], dtype=np.float32)
                self.smoothed_w, self.smoothed_h = (x2 - x1), (y2 - y1)
                self.current_state = self.STATE_TRACKING
                self.last_known_class_name = self.model.names[int(best_box.cls[0])]
                self.last_known_confidence = float(best_box.conf[0])
                self.get_logger().info("YOLO已初始化/校准跟踪器!")
                return bbox_yolo
        elif self.current_state == self.STATE_TRACKING:
            self.current_state = self.STATE_SEARCHING
            self.get_logger().info("YOLO校准失败，跟踪器可能已漂移，重置状态。")
        return None

    def _estimate_and_publish_pose(self, frame, bbox):
        """Smooth the measured box, solve PnP, and publish a TargetPose;
        draws the smoothed box and the pose axes onto *frame*."""
        # Kalman-smooth the centre; EMA-smooth the size.
        kalman_measurement = np.array([bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2], dtype=np.float32)
        self.kalman.correct(kalman_measurement)
        self.kalman.predict()
        # FIX: flatten() yields plain scalars whether statePost is (4,) or
        # (4,1); int() on a 1-element array is deprecated in NumPy >= 1.25.
        state = self.kalman.statePost.flatten()
        center_x_s, center_y_s = float(state[0]), float(state[1])
        w_m, h_m = bbox[2], bbox[3]
        self.smoothed_w = self.ema_alpha * w_m + (1 - self.ema_alpha) * self.smoothed_w
        self.smoothed_h = self.ema_alpha * h_m + (1 - self.ema_alpha) * self.smoothed_h
        x_s = int(center_x_s - self.smoothed_w / 2)
        y_s = int(center_y_s - self.smoothed_h / 2)
        w_s, h_s = int(self.smoothed_w), int(self.smoothed_h)

        # Image corners in the same TL, TR, BR, BL order as object_points.
        image_points = np.array([[x_s, y_s], [x_s + w_s, y_s], [x_s + w_s, y_s + h_s], [x_s, y_s + h_s]], dtype=np.float32)
        try:
            success_pnp, rvec, tvec = cv2.solvePnP(self.object_points, image_points, self.camera_matrix, self.dist_coeffs)
            if success_pnp:
                pose_msg = TargetPose()
                tvec_msg = Point()
                rvec_msg = Vector3()
                pose_msg.object_name = self.last_known_class_name
                pose_msg.confidence = self.last_known_confidence
                # tvec is in mm (object model units); publish metres.
                tvec_msg.x = tvec[0][0] / 1000.0
                tvec_msg.y = tvec[1][0] / 1000.0
                tvec_msg.z = tvec[2][0] / 1000.0
                rvec_msg.x = rvec[0][0]
                rvec_msg.y = rvec[1][0]
                rvec_msg.z = rvec[2][0]
                pose_msg.tvec = tvec_msg
                pose_msg.rvec = rvec_msg
                self.pose_publisher.publish(pose_msg)

                cv2.rectangle(frame, (x_s, y_s), (x_s + w_s, y_s + h_s), (0, 255, 0), 2)
                imgpts, _ = cv2.projectPoints(self.axis_points, rvec, tvec, self.camera_matrix, self.dist_coeffs)
                if not np.isnan(imgpts).any():
                    origin = tuple(imgpts[0].ravel().astype(int))
                    cv2.line(frame, origin, tuple(imgpts[1].ravel().astype(int)), (0, 0, 255), 3)  # X axis, red
                    cv2.line(frame, origin, tuple(imgpts[2].ravel().astype(int)), (0, 255, 0), 3)  # Y axis, green
                    cv2.line(frame, origin, tuple(imgpts[3].ravel().astype(int)), (255, 0, 0), 3)  # Z axis, blue
        except cv2.error:
            # Best-effort: a degenerate box can make solvePnP throw; skip the frame.
            pass

def main(args=None):
    """Entry point: spin the VisionNode until interrupted, then clean up.

    FIX: the original never destroyed the node or shut rclpy down when
    ``spin`` raised (e.g. on Ctrl-C); cleanup now runs unconditionally.
    """
    rclpy.init(args=args)
    vision_node = VisionNode()
    try:
        rclpy.spin(vision_node)
    except KeyboardInterrupt:
        pass  # Ctrl-C is the normal way to stop the node
    finally:
        vision_node.destroy_node()
        rclpy.shutdown()


if __name__ == '__main__':
    main()
