#!/usr/bin/env python3

import math
import os

import numpy as np
import rospy
import tf.transformations as tf
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Header
from vision_msgs.msg import (  # pip install ros-noetic-vision-msgs
    BoundingBox2D,
    Detection2D,
    Detection2DArray,
    ObjectHypothesisWithPose,
)

# Import calibration utility
from calibrate_parser import initialize_calibration


class MockYOLO:
    """Mock YOLO detector for simulation.

    Projects a fixed world-frame target through a pinhole camera model
    (intrinsics loaded from ``calibrate.txt``) using the drone's current
    pose, and publishes the result as a ``vision_msgs/Detection2DArray``
    on ``/yolo/detections`` at 10 Hz.
    """

    # Numeric class id published for the simulated target.
    # (ObjectHypothesisWithPose.id is an int64 in ROS1 vision_msgs, so a
    # string label like "target" cannot be used here.)
    TARGET_CLASS_ID = 0
    TARGET_SCORE = 0.95  # fixed high confidence for the mock detection

    def __init__(self):
        rospy.init_node('mock_yolo_node', anonymous=True)
        rospy.loginfo("Mock YOLO Node Initializing...")

        self.rate = rospy.Rate(10)  # YOLO detection rate

        # --- Parameters ---
        # Camera intrinsics: K (3x3) plus image dimensions in pixels.
        script_dir = rospy.get_param('~script_dir')
        calibration_file_path = os.path.join(script_dir, 'calibrate.txt')
        self.K_matrix, self.image_width, self.image_height = initialize_calibration(calibration_file_path)
        if self.K_matrix is None or self.image_width == 0 or self.image_height == 0:
            rospy.logerr("Failed to load camera calibration for Mock YOLO! Shutting down.")
            rospy.signal_shutdown("Camera calibration error.")
            return

        # Simulated target in world (NED) coordinates.
        self.target_world_pos = np.array([5.0, 0.0, -1.0])  # 5 m North, 0 m East, 1 m BELOW origin (Z-down)
        self.target_real_size = 0.5  # meters; approximate real-world size used to synthesize a bbox

        self.drone_current_pose = np.zeros(6)  # [x, y, z, roll, pitch, yaw]

        # --- Publishers ---
        self.detection_pub = rospy.Publisher('/yolo/detections', Detection2DArray, queue_size=1)

        # --- Subscribers ---
        # NOTE(review): MAVROS publishes local_position/pose in ENU, while the
        # projection below treats the pose as NED — confirm the convention
        # actually used by the upstream simulation.
        rospy.Subscriber('/uav1/mavros/local_position/pose', PoseStamped, self._drone_pose_cb)

        rospy.loginfo("Mock YOLO Node Ready.")

    def _drone_pose_cb(self, msg: PoseStamped):
        """Cache the latest drone position and Euler attitude (roll, pitch, yaw)."""
        self.drone_current_pose[0] = msg.pose.position.x
        self.drone_current_pose[1] = msg.pose.position.y
        self.drone_current_pose[2] = msg.pose.position.z  # Z-down

        q = [msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w]
        (self.drone_current_pose[3], self.drone_current_pose[4], self.drone_current_pose[5]) = tf.euler_from_quaternion(
            q)

    def _world_to_image(self, world_point: np.ndarray, drone_pose: np.ndarray):
        """Project a world point onto the image plane.

        Assumes the camera is rigidly attached to the drone, forward-looking
        along the body X axis with no mounting offset. (An IBVS setup with a
        gimbal would additionally need the gimbal angles here.)

        Args:
            world_point: 3-vector, target position in the world (NED) frame.
            drone_pose: 6-vector [x, y, z, roll, pitch, yaw] of the drone.

        Returns:
            ((u, v, bbox_size_pixels), depth) when the point projects inside
            the image, otherwise (None, None).
        """
        drone_pos = drone_pose[0:3]
        drone_roll, drone_pitch, drone_yaw = drone_pose[3:6]

        # Rotation body->world from the drone's Euler angles; its transpose
        # maps world-frame vectors into the body frame.
        R_ned_body = tf.quaternion_matrix(tf.quaternion_from_euler(drone_roll, drone_pitch, drone_yaw))[:3, :3]
        R_body_ned = R_ned_body.T  # Transpose is inverse for rotation matrix

        # Vector from drone to target, expressed first in world then in body frame.
        vec_world = world_point - drone_pos
        vec_body = R_body_ned @ vec_world

        # Body frame (PX4/NED convention): X-forward, Y-right, Z-down.
        # Standard camera frame: X-right, Y-down, Z-forward. Hence:
        #   cam X = body Y,  cam Y = body Z,  cam Z = body X.
        p_cam = np.array([vec_body[1], vec_body[2], vec_body[0]])

        # Point must be in front of the camera (positive depth).
        if p_cam[2] <= 0:
            return None, None

        # Pinhole projection with intrinsics K.
        fx, fy = self.K_matrix[0, 0], self.K_matrix[1, 1]
        cx, cy = self.K_matrix[0, 2], self.K_matrix[1, 2]

        u = fx * (p_cam[0] / p_cam[2]) + cx
        v = fy * (p_cam[1] / p_cam[2]) + cy

        if 0 <= u < self.image_width and 0 <= v < self.image_height:
            # Apparent size shrinks inversely with depth: size_px ~ s * fx / Z.
            bbox_size_pixels = (self.target_real_size * fx) / p_cam[2]
            # Clamp to a sane pixel range so the bbox never degenerates or
            # swallows the whole image.
            bbox_size_pixels = max(10,
                                   min(bbox_size_pixels, min(self.image_width, self.image_height) * 0.8))

            return (u, v, bbox_size_pixels), p_cam[2]  # (center_u, center_v, size), depth
        else:
            return None, None  # Point is outside image bounds

    def run(self):
        """Publish a (possibly empty) detection array at the node rate until shutdown."""
        rospy.loginfo("Mock YOLO Node Running.")
        while not rospy.is_shutdown():
            detection_array_msg = Detection2DArray()
            detection_array_msg.header.stamp = rospy.Time.now()
            detection_array_msg.header.frame_id = "camera_frame"

            projected_info, depth = self._world_to_image(self.target_world_pos, self.drone_current_pose)

            if projected_info is not None:
                u, v, size = projected_info

                detection = Detection2D()
                detection.header = detection_array_msg.header

                bbox = BoundingBox2D()
                bbox.center.x = u
                bbox.center.y = v
                bbox.size_x = size
                bbox.size_y = size  # Assuming square bounding box

                detection.bbox = bbox

                # BUGFIX: Detection2D has no 'id' or 'score' attributes (ROS
                # messages use __slots__, so assigning them raised
                # AttributeError at runtime). Class id and confidence belong
                # in the 'results' hypothesis list instead.
                hypothesis = ObjectHypothesisWithPose()
                hypothesis.id = self.TARGET_CLASS_ID
                hypothesis.score = self.TARGET_SCORE
                detection.results.append(hypothesis)

                detection_array_msg.detections.append(detection)
                rospy.loginfo_throttle(1.0,
                                       f"Mock YOLO: Detected target at U={u:.1f}, V={v:.1f}, Size={size:.1f}, Depth={depth:.1f}m")
            else:
                rospy.loginfo_throttle(1.0, "Mock YOLO: Target not in view.")

            self.detection_pub.publish(detection_array_msg)
            self.rate.sleep()


def _main():
    # Build the node and enter its publishing loop.
    MockYOLO().run()


if __name__ == '__main__':
    try:
        _main()
    except rospy.ROSInterruptException:
        # Normal exit path on Ctrl-C / node shutdown.
        pass

