import cv2
import argparse
import numpy as np
import threading
import queue
import time
import copy
import os
import sys

# Dynamic target following: updates the dynamic follow goal point from
# monocular-vision AI inference results.

import math
from geometry_msgs.msg import PoseStamped, Quaternion
from rclpy.duration import Duration
# from nav2_simple_commander.robot_navigator import BasicNavigator, TaskResult
from jobot_ai_msgs.msg import DetectionResult, DetectionResultArray
import tf2_ros
# import tf_transformations
from visualization_msgs.msg import Marker


# ros2
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from geometry_msgs.msg import Twist
from cv_bridge import CvBridge
from std_msgs.msg import Int32
from rclpy.executors import MultiThreadedExecutor
from std_srvs.srv import SetBool

from ament_index_python.packages import get_package_share_directory
# Resolve the installed package share directory; fall back to source-tree mode
# when the package is not installed (e.g. running straight from the workspace).
try:
    package_share_directory = get_package_share_directory('jobot_mono_follow')
except Exception:  # narrow from bare except: never swallow SystemExit/KeyboardInterrupt
    package_share_directory = ''
    print('NO INSTALL MODE')

# Make sibling modules (spacemit_cv) importable when running from source.
current_file_path = os.path.abspath(__file__)
current_dir = os.path.dirname(current_file_path)
sys.path.append(current_dir)
# Local (non-installed) modules live next to this file.
from spacemit_cv import AGVDetection

class GoalUpdateFollowPoint(Node):
    """Dynamic follow-target node.

    Converts monocular-vision person detections into goal poses in the 'map'
    frame, publishing them on /goal_update together with an RViz marker.  A
    'toggle_follow' SetBool service enables/disables tracking at runtime.
    """

    def __init__(self):
        super().__init__('goal_update_follow_point_node')

        # Service toggling AI tracking on/off.
        self.srv = self.create_service(SetBool, 'toggle_follow', self.callback)
        self.ai_enabled = False  # tracking starts paused
        self.get_logger().info('AI 跟踪控制服务已启动, 当前状态为暂停')
        self.lock = threading.Lock()  # guards ai_enabled across threads

        self.declare_parameter('video_device', '/dev/video20')
        self.declare_parameter('publish_result_img', False)
        # Speed set-points read by the control loop in main().  These were
        # previously missing, which made main() crash with AttributeError on
        # follow_control.linear_x_set / angular_z_set.
        self.declare_parameter('linear_x_set', 0.2)
        self.declare_parameter('angular_z_set', 0.2)

        self.video_device = self.get_parameter('video_device').get_parameter_value().string_value
        self.publish_result_img = self.get_parameter('publish_result_img').get_parameter_value().bool_value
        self.linear_x_set = self.get_parameter('linear_x_set').get_parameter_value().double_value
        self.angular_z_set = self.get_parameter('angular_z_set').get_parameter_value().double_value

        self.image_publisher = self.create_publisher(Image, 'detection_image', 30)
        self.bridge = CvBridge()

        # Velocity publisher backing publish_velocity(); previously missing
        # even though main() calls follow_control.publish_velocity().
        self.cmd_vel_publisher = self.create_publisher(Twist, 'cmd_vel', 10)

        self.goal_update_publisher_ = self.create_publisher(PoseStamped, '/goal_update', 10)
        self.goal_update_marker_publisher = self.create_publisher(Marker, 'visualization_marker', 10)

        # Monocular-geometry calibration constants.
        self.pixel_w = 640    # image width  [px]
        self.pixel_h = 480    # image height [px]
        self.person_h = 1.7   # assumed person height [m]
        self.distance = 0.8   # calibration distance  [m]
        self.width = 0.6      # calibration width     [m]
        self.theta_h = 0.35   # camera-tilt height leg   [m]
        self.theta_d = 0.76   # camera-tilt distance leg [m]

        self.tf_buffer = tf2_ros.Buffer()
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer, self)
        self.timer = self.create_timer(1.0, self.timer_callback)

        # Last published goal pose ...
        self.goal_update_pose = PoseStamped()
        self.goal_update_pose.pose.position.x = 0.0
        self.goal_update_pose.pose.position.y = 0.0
        self.goal_update_pose.pose.orientation = Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)

        # ... and the freshest candidate pose produced by detections.
        self.goal_update_pose_temp = PoseStamped()
        self.goal_update_pose_temp.pose.position.x = 0.0
        self.goal_update_pose_temp.pose.position.y = 0.0
        self.goal_update_pose_temp.pose.orientation = Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)

    def publish_velocity(self, linear_x, angular_z):
        """Publish a Twist command on cmd_vel (main() uses this to stop the robot)."""
        msg = Twist()
        msg.linear.x = float(linear_x)
        msg.angular.z = float(angular_z)
        self.cmd_vel_publisher.publish(msg)

    def callback(self, request, response):
        """SetBool service handler: enable/disable AI tracking."""
        enabled = bool(request.data)
        with self.lock:
            self.ai_enabled = enabled
        # Decide the reply from the local value; the previous code re-read
        # self.ai_enabled outside the lock, racing with concurrent toggles.
        if enabled:
            msg = 'AI 跟踪模块已开启'
        else:
            msg = 'AI 跟踪模块已关闭'

        self.get_logger().info(f'收到请求: data={request.data} -> {msg}')
        response.success = True
        response.message = msg
        return response

    def publish_image(self, frame):
        """Publish an OpenCV BGR frame as a ROS 2 Image message."""
        msg = self.bridge.cv2_to_imgmsg(frame, encoding='bgr8')
        self.image_publisher.publish(msg)

    def check_tolerance(self, pose1, pose2):
        """Return True when the two poses differ enough -- but not implausibly
        much -- to justify republishing the goal.

        Upper bounds reject large jumps (bad detections / TF glitches).
        Currently not called by timer_callback (kept for re-enabling).
        """
        _, _, yaw1 = self.quaternion_to_euler(
            Quaternion(x=pose1.pose.orientation.x, y=pose1.pose.orientation.y, z=pose1.pose.orientation.z, w=pose1.pose.orientation.w)
        )
        _, _, yaw2 = self.quaternion_to_euler(
            Quaternion(x=pose2.pose.orientation.x, y=pose2.pose.orientation.y, z=pose2.pose.orientation.z, w=pose2.pose.orientation.w)
        )
        d_x = abs(pose1.pose.position.x - pose2.pose.position.x)
        d_y = abs(pose1.pose.position.y - pose2.pose.position.y)
        d_theta = abs(yaw1 - yaw2)

        if (0.2 < d_x < 2.0) or (0.2 < d_y < 2.0) or (0.2 < d_theta < 1.0):
            return True
        return False

    def timer_callback(self):
        """Periodically publish the newest goal pose and its RViz marker."""
        # NOTE: assigning temp into goal_update_pose aliases both names to one
        # object, so the inequality only fires again once inference produces a
        # fresh candidate object -- this suppresses redundant republishing.
        if self.goal_update_pose_temp is not None and self.goal_update_pose_temp != self.goal_update_pose:
            self.goal_update_pose = self.goal_update_pose_temp
            # Goal visualisation marker (red sphere in the map frame).
            marker = Marker()
            marker.header.frame_id = 'map'
            marker.header.stamp = self.get_clock().now().to_msg()
            marker.ns = 'goal_update_marker'
            marker.id = 0
            marker.type = marker.SPHERE
            marker.action = marker.ADD
            marker.pose = self.goal_update_pose.pose
            marker.scale.x = 0.2
            marker.scale.y = 0.2
            marker.scale.z = 0.2
            marker.color.a = 1.0
            marker.color.r = 1.0
            marker.color.g = 0.0
            marker.color.b = 0.0
            self.goal_update_marker_publisher.publish(marker)
            # Refresh the stamp, then publish the goal itself.
            self.goal_update_pose.header.stamp = self.get_clock().now().to_msg()
            self.goal_update_publisher_.publish(self.goal_update_pose)
            self.get_logger().info(f'更新目标点: x={self.goal_update_pose.pose.position.x}, y={self.goal_update_pose.pose.position.y} yaw={self.goal_update_pose.pose.orientation.z}')
        else:
            self.get_logger().info("等待更新目标点")

    def inference_result_callback(self, msg):
        """Update the candidate goal from a DetectionResultArray message.

        NOTE(review): no subscription in this file wires this callback up --
        confirm it is subscribed elsewhere or intentionally unused.
        """
        for result in msg.results:
            # Only confident person detections move the follow target.
            if result.label == 'person' and result.conf > 0.7:
                self.goal_update_pose_temp = self.calculate_goal_pose(result.x_min, result.y_min, result.width, result.height)

    def deg360_to_rad_pi(self, angle_deg):
        """Convert 0~360 degrees (CCW positive) to radians in [-pi, pi]."""
        angle_deg = angle_deg % 360  # clamp into [0, 360)
        if angle_deg > 180:
            angle_deg -= 360  # map to [-180, 180]
        return math.radians(angle_deg)

    def normalize_angle(self, angle):
        """Normalize an arbitrary angle (radians) to [-pi, pi)."""
        return (angle + math.pi) % (2 * math.pi) - math.pi

    def euler_to_quaternion(self, roll, pitch, yaw):
        """Convert roll/pitch/yaw (radians) to a geometry_msgs Quaternion."""
        q = Quaternion()
        cy = math.cos(yaw * 0.5)
        sy = math.sin(yaw * 0.5)
        cp = math.cos(pitch * 0.5)
        sp = math.sin(pitch * 0.5)
        cr = math.cos(roll * 0.5)
        sr = math.sin(roll * 0.5)
        q.w = cr * cp * cy + sr * sp * sy
        q.x = sr * cp * cy - cr * sp * sy
        q.y = cr * sp * cy + sr * cp * sy
        q.z = cr * cp * sy - sr * sp * cy
        return q

    def quaternion_to_euler(self, q):
        """Return (roll, pitch, yaw) in radians.

        Only yaw is actually computed; roll and pitch are reported as 0.0.
        """
        norm = math.sqrt(q.w**2 + q.x**2 + q.y**2 + q.z**2)
        if norm == 0.0:
            # Degenerate (all-zero) quaternion: avoid ZeroDivisionError.
            return 0.0, 0.0, 0.0
        w = q.w / norm
        x = q.x / norm
        y = q.y / norm
        z = q.z / norm

        # Yaw (z-axis rotation) from the normalized quaternion.
        siny_cosp = 2 * (w * z + x * y)
        cosy_cosp = 1 - 2 * (y**2 + z**2)
        yaw = math.atan2(siny_cosp, cosy_cosp)

        return 0.0, 0.0, yaw

    def calculate_goal_pose(self, x_min, y_min, width, height):
        """Compute a map-frame goal pose from a person bounding box.

        Estimates the person's range from the box height via a tilted-camera
        geometric model, the bearing from the box centre's pixel offset, then
        projects a goal at (range + calibration distance) ahead of the robot.
        Returns None when the TF lookup fails.
        """
        theta = math.atan2(self.theta_h, self.theta_d)    # lower line-of-sight tilt
        alpha = math.atan2(self.person_h, self.distance)  # upper line-of-sight tilt
        gamma = alpha - theta                             # vertical field-of-view angle
        beta = (math.pi - gamma) / 2                      # image-plane angle
        h1 = self.distance * math.tan(theta)              # auxiliary height
        h2 = self.person_h - h1
        s1 = self.distance / math.cos(theta) + h2 * math.sin(theta) + h2 * math.cos(theta) / math.tan(beta)  # auxiliary distance
        k1 = height / self.pixel_h                        # box-height ratio
        k2 = math.sin(beta - theta) / math.sin(math.pi - beta)
        k3 = 2 * self.width / self.pixel_w

        # Target distance beyond the calibration point.
        dis = s1 * h2 * (1 - k1) / (k1 * k2 * h2 + math.tan(theta) * s1)

        # Target bearing from the horizontal pixel offset of the box centre.
        pixel_offset = self.pixel_w / 2 - x_min - width / 2
        e = pixel_offset * k3
        angle_rad = math.atan2(e, self.distance + dis)

        # Current robot pose in the map frame.
        now = rclpy.time.Time()
        try:
            trans = self.tf_buffer.lookup_transform('map', 'base_footprint', now)
        except Exception as exc:  # renamed: 'e' shadowed the offset variable above
            self.get_logger().warn(f"TF transform failed: {exc}")
            return None
        current_x = trans.transform.translation.x
        current_y = trans.transform.translation.y
        orientation_q = trans.transform.rotation

        _, _, yaw = self.quaternion_to_euler(
            Quaternion(x=orientation_q.x,
               y=orientation_q.y,
               z=orientation_q.z,
               w=orientation_q.w)
        )

        # Rotate the camera-relative bearing into the map frame.
        angle_rad = self.normalize_angle(angle_rad + yaw)

        target_x = current_x + (dis + self.distance) * math.cos(angle_rad)
        target_y = current_y + (dis + self.distance) * math.sin(angle_rad)

        goal_update_pose = PoseStamped()
        goal_update_pose.header.stamp = self.get_clock().now().to_msg()
        goal_update_pose.header.frame_id = 'map'
        goal_update_pose.pose.position.x = target_x
        goal_update_pose.pose.position.y = target_y
        goal_update_pose.pose.position.z = 0.0

        goal_update_pose.pose.orientation = self.euler_to_quaternion(0.0, 0.0, angle_rad)

        return goal_update_pose



class MyDetectionThread(threading.Thread):
    """Camera worker thread.

    Grabs frames from a V4L2 device, runs AGVDetection inference, and pushes
    the detection box closest to the image centre onto result_queue.
    """

    def __init__(self, result_queue, model_path, label_path, follow_control: GoalUpdateFollowPoint, publish_result_img, video_device):
        threading.Thread.__init__(self)
        self.result_queue = result_queue
        self.detector = AGVDetection(model_path, label_path)
        self.video_devices = video_device
        self.publish_result_img = publish_result_img

        # cv2.VideoCapture does not raise when a device cannot be opened -- it
        # returns a capture whose isOpened() is False.  The previous
        # try/except fallback was therefore dead code; test isOpened() instead.
        self.cap = cv2.VideoCapture(self.video_devices, cv2.CAP_V4L2)
        if not self.cap.isOpened():
            self.cap.release()
            self.cap = None
            # Probe /dev/video* for the first usable camera (sorted for a
            # deterministic scan order).
            candidates = sorted(f for f in os.listdir('/dev/') if f.startswith('video'))
            for device in candidates:
                device_path = os.path.join('/dev/', device)
                cap = cv2.VideoCapture(device_path, cv2.CAP_V4L2)
                if cap.isOpened():
                    self.cap = cap
                    print(f"成功打开设备: {device_path}")
                    self.video_devices = device_path
                    break
                cap.release()  # release unusable devices

            if self.cap is None:
                raise RuntimeError("未找到可用的摄像头设备")

        # Request 640x480 to match the node's calibration constants.
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

        self.running = True
        self.follow_control = follow_control

    def run(self):
        """Capture/inference loop; exits on stop() or a failed frame read."""
        while self.running:
            # Read the enable flag under the node's lock, shared with the
            # service-callback thread.
            with self.follow_control.lock:
                follow_me_flag = self.follow_control.ai_enabled
            if not follow_me_flag:
                time.sleep(0.03)
                continue

            if not self.cap.isOpened():
                # Reopen with the same V4L2 backend used at construction.
                self.cap.open(self.video_devices, cv2.CAP_V4L2)

            ret, frame = self.cap.read()
            if ret:
                detections = self.detector.infer(frame)
                if detections:
                    height, width, _ = frame.shape
                    center_x = width // 2
                    center_y = height // 2
                    # Keep only the detection nearest the image centre.
                    min_distance = float('inf')
                    closest_box = None
                    for det in detections:
                        x1, y1, x2, y2 = det
                        center = ((x1 + x2) // 2, (y1 + y2) // 2)
                        distance = ((center[0] - center_x) ** 2 + (center[1] - center_y) ** 2) ** 0.5
                        if distance < min_distance:
                            min_distance = distance
                            closest_box = det

                    if closest_box:
                        self.result_queue.put(closest_box)

                        if self.publish_result_img:
                            x1, y1, x2, y2 = closest_box
                            cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 5)

                if self.publish_result_img:
                    self.follow_control.publish_image(frame)
            else:
                # Frame read failed: leave the loop and let stop()/join clean
                # up.  NOTE(review): confirm whether retrying is preferable.
                break

    def stop(self):
        """Signal the loop to exit and release the camera."""
        self.running = False
        self.cap.release()

def run_executor(executor):
    """Spin the given ROS 2 executor, blocking the calling thread."""
    executor.spin()

def main():
    """Entry point.

    Spins the ROS node on a background MultiThreadedExecutor, runs the camera
    detection thread, and executes the follow control loop in the main thread.
    """
    rclpy.init()

    # Node hosting the follow toggle service, publishers and TF listener.
    follow_control = GoalUpdateFollowPoint()

    # ROS 2 callbacks run on a daemon thread so the main thread stays free
    # for the control loop.
    executor = MultiThreadedExecutor()
    executor.add_node(follow_control)

    executor_thread = threading.Thread(target=run_executor, args=(executor,), daemon=True)
    executor_thread.start()

    # Detection worker thread.
    result_queue = queue.Queue(maxsize=2)
    model_path = os.path.join(package_share_directory, 'spacemit_cv/model/yolov8n.q.onnx')
    label_path = os.path.join(package_share_directory, 'spacemit_cv/data/label.txt')

    # Runtime parameters resolved by the node.
    publish_result_img = follow_control.publish_result_img
    video_device = follow_control.video_device

    detection_thread = MyDetectionThread(result_queue, model_path, label_path,
                                         follow_control, publish_result_img, video_device)
    detection_thread.start()

    def stop_robot(repeats=1):
        # Stop the robot; tolerate a node build without publish_velocity so
        # shutdown never raises AttributeError (TODO confirm node provides it).
        publish = getattr(follow_control, 'publish_velocity', None)
        if publish is None:
            return
        for _ in range(repeats):
            publish(0.0, 0.0)
            time.sleep(0.01)

    # Main thread: follow control loop.
    try:
        closest_box = []
        cmd_x_z = [0.0, 0.0]
        last_detection_time = time.time()

        # Speed set-points; default when the node predates these attributes
        # (the original code crashed here with AttributeError).
        linear_x_set = getattr(follow_control, 'linear_x_set', 0.2)
        angular_z_set = getattr(follow_control, 'angular_z_set', 0.2)

        while rclpy.ok():

            with follow_control.lock:
                follow_me_flag = follow_control.ai_enabled

            # Pull the freshest detection box, if any.
            if not result_queue.empty():
                closest_box = copy.deepcopy(result_queue.get())
                last_detection_time = time.time()  # refresh detection timestamp

            # Tracking disabled: make sure the robot is stopped.
            if not follow_me_flag:
                if abs(cmd_x_z[0]) >= 0.01 or abs(cmd_x_z[1]) >= 0.01:
                    cmd_x_z = [0.0, 0.0]
                    stop_robot(repeats=3)
                time.sleep(0.02)
                continue

            # No detection for more than 4 seconds: stop and wait.
            if time.time() - last_detection_time > 4.0:
                if abs(cmd_x_z[0]) >= 0.01 or abs(cmd_x_z[1]) >= 0.01:
                    cmd_x_z = [0.0, 0.0]
                    stop_robot(repeats=3)
                time.sleep(0.02)
                continue

            # Track the last known box.
            cmd_x_z = [0.0, 0.0]
            if len(closest_box) == 4:
                x1, y1, x2, y2 = closest_box
                center_x = (x1 + x2) / 2

                # Velocity decision: rotate toward the box when it drifts more
                # than allowable_deviation pixels off the image centre.
                allowable_deviation = 100.0
                diff = center_x - 320.0
                if diff <= -allowable_deviation:
                    angular_z = angular_z_set
                    linear_x = 0.0
                elif diff >= allowable_deviation:
                    angular_z = -1.0 * angular_z_set
                    linear_x = 0.0
                else:
                    angular_z = 0.0
                    # Advance only while the box top is below the frame edge.
                    linear_x = linear_x_set if y1 > 10 else 0.0

                # NOTE(review): cmd_x_z is computed but never published; goal
                # following is driven via /goal_update instead -- confirm
                # whether direct velocity control is still intended.
                cmd_x_z = [linear_x, angular_z]

            time.sleep(0.003)

    except KeyboardInterrupt:
        print("停止检测线程...")
        detection_thread.stop()

    finally:
        # Always stop the robot and shut ROS down, whatever ended the loop.
        print("停止机器人...")
        stop_robot()
        time.sleep(0.2)
        rclpy.shutdown()
  


# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()