import cv2
import argparse
import numpy as np
import threading
import queue
import time
import copy
import os
import sys
import logging
from datetime import datetime

# ROS2
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from geometry_msgs.msg import Twist
from cv_bridge import CvBridge
from std_msgs.msg import Int32
from rclpy.executors import MultiThreadedExecutor
from std_srvs.srv import SetBool

from ament_index_python.packages import get_package_share_directory

# Resolve the installed package share directory; fall back to source-tree
# ("no install") mode when the package is not installed.
try:
    package_share_directory = get_package_share_directory('jobot_mono_follow')
except Exception:  # narrow from bare except: don't swallow SystemExit/KeyboardInterrupt
    package_share_directory = ''
    print('NO INSTALL MODE')

# Make modules that live next to this file importable.
current_file_path = os.path.abspath(__file__)
current_dir = os.path.dirname(current_file_path)
sys.path.append(current_dir)

# Create the log directory next to this file.
log_dir = os.path.join(current_dir, "operation_logs")
os.makedirs(log_dir, exist_ok=True)

# One timestamped log file per run.
log_filename = datetime.now().strftime("%Y%m%d_%H%M%S") + ".log"
log_filepath = os.path.join(log_dir, log_filename)

# Log to both the per-run file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(log_filepath),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger("IntegratedSystem")

# 军事目标检测类
class MilitaryTargetDetection:
    def __init__(self, model_path, labels_path, play_device, continuous_mode=True):
        # 加载标签
        self.labels = []
        with open(labels_path, 'r') as f:
            self.labels = [label.strip() for label in f.readlines()]
        
        # 设置推理会话选项
        session_options = onnxruntime.SessionOptions()
        session_options.intra_op_num_threads = 1
        
        # 创建ONNX Runtime会话
        self.session = onnxruntime.InferenceSession(
            model_path, 
            sess_options=session_options, 
            providers=["CPUExecutionProvider"]
        )
        self.input_name = self.session.get_inputs()[0].name
        self.output_name = self.session.get_outputs()[0].name
        
        # 状态变量
        self.is_running = True
        self.detection_thread = None
        self.frame_queue = queue.Queue(maxsize=1)
        self.result_queue = queue.Queue(maxsize=1)
        
        # 当前检测结果
        self.current_target = "无目标"
        self.current_confidence = 0.0
        
        logger.info("军事目标检测模块初始化完成")
        
        # 启动检测线程
        self.detection_thread = threading.Thread(
            target=self._detection_worker,
            daemon=True
        )
        self.detection_thread.start()

    def preprocess(self, img):
        """预处理图像函数"""
        img = img / 255.
        img = cv2.resize(img, (256, 256))
        h, w = img.shape[0], img.shape[1]
        y0 = (h - 224) // 2
        x0 = (w - 224) // 2
        img = img[y0:y0+224, x0:x0+224, :]
        img = (img - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]
        img = np.transpose(img, axes=[2, 0, 1])
        img = img.astype(np.float32)
        img = np.expand_dims(img, axis=0)
        return img

    def _detection_worker(self):
        """检测工作线程"""
        logger.info("军事目标检测线程启动")
        while self.is_running:
            try:
                # 获取帧
                frame = self.frame_queue.get(timeout=0.5)
                
                # 预处理图像
                img = self.preprocess(frame)
                
                # 执行推理
                result = self.session.run([self.output_name], {self.input_name: img})[0]
                
                # 处理结果
                result = np.squeeze(result)
                top_k = result.argsort()[-5:][::-1]
                top_class = self.labels[top_k[0]]
                confidence = result[top_k[0]]
                
                # 更新当前结果
                self.current_target = top_class
                self.current_confidence = confidence
                
                # 记录检测结果
                logger.info(f"检测到军事目标: {top_class}, 置信度: {confidence:.4f}")
                
            except queue.Empty:
                continue
            except Exception as e:
                logger.error(f"军事目标检测错误: {e}")
                continue
        
        logger.info("军事目标检测线程退出")

    def process_frame(self, frame):
        """将帧放入处理队列"""
        try:
            if self.frame_queue.full():
                self.frame_queue.get_nowait()
            self.frame_queue.put(frame.copy(), block=False)
        except:
            pass

    def get_latest_result(self):
        """获取最新检测结果"""
        return self.current_target, self.current_confidence

    def stop_detection(self):
        """停止目标检测"""
        self.is_running = False
        if self.detection_thread and self.detection_thread.is_alive():
            self.detection_thread.join(timeout=1.0)
        logger.info("军事目标检测已停止")

# Follow-control node
class FollowControl(Node):
    """ROS2 node exposing the 'toggle_follow' SetBool service plus the
    cmd_vel and detection_image publishers used by the main loop."""

    def __init__(self):
        super().__init__('follow_control_service')
        self.srv = self.create_service(SetBool, 'toggle_follow', self.callback)
        self.ai_enabled = False  # follow mode starts paused
        logger.info('AI 跟踪控制服务已启动, 当前状态为暂停')
        self.lock = threading.Lock()  # guards ai_enabled across threads

        self.declare_parameter('video_device', '/dev/video20')
        self.declare_parameter('publish_result_img', False)
        self.declare_parameter('linear_x', 0.4)
        self.declare_parameter('angular_z', 0.37)

        self.video_device = self.get_parameter('video_device').get_parameter_value().string_value
        self.publish_result_img = self.get_parameter('publish_result_img').get_parameter_value().bool_value
        self.linear_x_set = self.get_parameter('linear_x').get_parameter_value().double_value
        self.angular_z_set = self.get_parameter('angular_z').get_parameter_value().double_value

        self.velocity_publisher = self.create_publisher(Twist, 'cmd_vel', 30)
        self.image_publisher = self.create_publisher(Image, 'detection_image', 30)
        self.bridge = CvBridge()

    def callback(self, request, response):
        """SetBool handler: switch follow mode on/off and report the new state."""
        with self.lock:
            self.ai_enabled = request.data

        # Derive the reported state from request.data, not self.ai_enabled:
        # re-reading the shared flag after releasing the lock could observe a
        # value set by a concurrent call.
        action = "开启" if request.data else "关闭"
        logger.info(f"AI跟踪模块状态变更: {action}")

        response.success = True
        response.message = f"AI跟踪模块已{action}"
        return response

    def publish_velocity(self, linear_x, angular_z):
        """Publish a Twist with the given forward and angular velocity."""
        msg = Twist()
        msg.linear.x = linear_x
        msg.angular.z = angular_z
        self.velocity_publisher.publish(msg)
        logger.debug(f"发布速度命令: linear_x={linear_x}, angular_z={angular_z}")

    def publish_image(self, frame):
        """Publish a BGR OpenCV frame as a ROS2 Image message."""
        msg = self.bridge.cv2_to_imgmsg(frame, encoding='bgr8')
        self.image_publisher.publish(msg)

class FollowDetectionThread(threading.Thread):
    """Background thread that runs AGV detection on submitted frames and
    reports the detection closest to the frame center via result_queue."""

    def __init__(self, result_queue, model_path, label_path, follow_control, publish_result_img):
        threading.Thread.__init__(self)
        # Daemonize so a blocked worker can never keep the process alive.
        self.daemon = True
        self.result_queue = result_queue
        self.detector = AGVDetection(model_path, label_path)
        self.publish_result_img = publish_result_img
        self.running = True
        self.follow_control = follow_control
        self.frame_queue = queue.Queue(maxsize=1)
        logger.info("跟随检测线程初始化完成")

    def process_frame(self, frame):
        """Queue a frame for detection, replacing any stale pending frame."""
        try:
            if self.frame_queue.full():
                self.frame_queue.get_nowait()
            self.frame_queue.put(frame.copy(), block=False)
        except (queue.Empty, queue.Full):
            # Raced with run() on the single-slot queue; drop this frame.
            pass

    def run(self):
        logger.info("跟随检测线程启动")
        while self.running:
            # Read the shared flag under its lock, consistent with the main loop.
            with self.follow_control.lock:
                follow_me_flag = self.follow_control.ai_enabled
            if not follow_me_flag:
                time.sleep(0.03)
                continue

            try:
                frame = self.frame_queue.get(timeout=0.5)
                detections = self.detector.infer(frame)

                if detections:
                    height, width, _ = frame.shape
                    center_x = width // 2
                    center_y = height // 2
                    min_distance = float('inf')
                    closest_box = None

                    # Pick the detection whose center is nearest the frame center.
                    for det in detections:
                        x1, y1, x2, y2 = det
                        center = ((x1 + x2) // 2, (y1 + y2) // 2)
                        distance = ((center[0] - center_x) ** 2 + (center[1] - center_y) ** 2) ** 0.5
                        if distance < min_distance:
                            min_distance = distance
                            closest_box = det

                    if closest_box:
                        # Never block this thread on a full queue (the original
                        # blocking put() could hang the thread forever); drop
                        # the oldest pending result instead.
                        try:
                            self.result_queue.put_nowait(closest_box)
                        except queue.Full:
                            try:
                                self.result_queue.get_nowait()
                                self.result_queue.put_nowait(closest_box)
                            except (queue.Empty, queue.Full):
                                pass
                        logger.debug(f"检测到跟随目标: {closest_box}")

                        if self.publish_result_img:
                            x1, y1, x2, y2 = closest_box
                            cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 5)

                if self.publish_result_img:
                    self.follow_control.publish_image(frame)

            except queue.Empty:
                continue
            except Exception as e:
                logger.error(f"跟随检测错误: {e}")
                continue

    def stop(self):
        """Ask the thread to exit after its current iteration."""
        self.running = False
        logger.info("跟随检测线程停止")

def run_executor(executor):
    """Spin the ROS2 executor; meant to run on a dedicated background thread."""
    executor.spin()

def initialize_camera(video_device):
    """Open `video_device` over V4L2; on failure probe /dev/video* for a fallback.

    Returns an opened cv2.VideoCapture configured to 640x480, or None when no
    device could be opened.
    """
    logger.info(f"尝试初始化摄像头: {video_device}")
    cap = cv2.VideoCapture(video_device, cv2.CAP_V4L2)

    if not cap.isOpened():
        logger.warning(f"无法打开摄像头设备: {video_device}")
        # Release the failed handle before probing alternatives (the original
        # leaked every failed VideoCapture).
        cap.release()
        # sorted() makes the fallback choice deterministic.
        video_devices = sorted(f for f in os.listdir('/dev/') if f.startswith('video'))
        for device in video_devices:
            device_path = os.path.join('/dev/', device)
            cap = cv2.VideoCapture(device_path, cv2.CAP_V4L2)
            if cap.isOpened():
                logger.info(f"成功打开备用摄像头设备: {device_path}")
                break
            cap.release()  # failed probe: free it before trying the next device

    if cap.isOpened():
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        logger.info(f"摄像头初始化成功, 分辨率: 640x480")
        return cap
    else:
        logger.error("未找到可用的摄像头设备")
        return None

def main():
    """Entry point: start ROS2, both detector threads and the camera, then run
    the follow-control loop until shutdown, Ctrl-C, or permanent camera failure.
    """
    rclpy.init()

    # Service node owning the follow on/off switch and the publishers.
    follow_control = FollowControl()

    # Spin ROS2 callbacks on a background thread so this thread owns the camera.
    executor = MultiThreadedExecutor()
    executor.add_node(follow_control)

    executor_thread = threading.Thread(target=run_executor, args=(executor,), daemon=True)
    executor_thread.start()

    # Follow-target detector assets from the installed package share directory.
    result_queue = queue.Queue(maxsize=2)
    model_path = os.path.join(package_share_directory, 'spacemit_cv/model/yolov8n.q.onnx')
    label_path = os.path.join(package_share_directory, 'spacemit_cv/data/label.txt')

    # Military classifier assets: prefer the development-machine paths, fall
    # back to the copies shipped next to this file.  (The original passed these
    # absolute paths through os.path.join(share_dir, '/abs'), which always
    # returns the absolute part unchanged — so this is behavior-identical.)
    military_model_path = '/home/sy/jobot-ai-pipeline/models/resnet50.q.onnx'
    military_label_path = '/home/sy/jobot-ai-pipeline/data/labels.txt'

    if not os.path.exists(military_model_path):
        military_model_path = os.path.join(current_dir, "model/military_model.onnx")
        military_label_path = os.path.join(current_dir, "data/military_labels.txt")

    military_detector = MilitaryTargetDetection(
        military_model_path,
        military_label_path,
        play_device="default",
        continuous_mode=True
    )

    # Follow-detection worker thread.
    follow_detector = FollowDetectionThread(
        result_queue,
        model_path,
        label_path,
        follow_control,
        follow_control.publish_result_img
    )
    follow_detector.start()

    # Open the camera.
    video_device = follow_control.video_device
    cap = initialize_camera(video_device)

    if cap is None:
        logger.error("摄像头初始化失败，系统退出")
        # Tear down what was already started so the process can exit cleanly
        # (the original returned without stopping the threads or rclpy).
        follow_detector.stop()
        military_detector.stop_detection()
        rclpy.shutdown()
        return

    # Camera health tracking.
    camera_error_count = 0
    MAX_CAMERA_ERRORS = 10  # max consecutive camera failures before giving up
    REINIT_DELAY = 2.0      # seconds to wait before re-initializing
    last_camera_error_time = 0

    # Main loop.
    try:
        count = 0
        start_time = time.time()
        closest_box = []
        follow_me_flag = 0
        cmd_x_z = [0.0, 0.0]
        last_detection_time = time.time()

        linear_x_set = follow_control.linear_x_set
        angular_z_set = follow_control.angular_z_set

        logger.info("主循环启动")

        while rclpy.ok():
            # Attempt camera recovery once REINIT_DELAY has elapsed since the
            # last error.
            current_time = time.time()
            if camera_error_count > 0 and current_time - last_camera_error_time > REINIT_DELAY:
                logger.warning(f"尝试重新初始化摄像头 (错误计数: {camera_error_count}/{MAX_CAMERA_ERRORS})")
                if cap is not None:
                    cap.release()
                cap = initialize_camera(video_device)
                if cap is None:
                    camera_error_count += 1
                    last_camera_error_time = current_time
                    if camera_error_count >= MAX_CAMERA_ERRORS:
                        logger.critical("达到最大摄像头错误次数，系统停止")
                        break
                    # Wait a little before retrying.
                    time.sleep(1.0)
                    continue
                else:
                    camera_error_count = 0
                    logger.info("摄像头重新初始化成功")

            # Never call read() on a missing camera: the original could reach
            # cap.read() with cap=None after a failed re-init and crash out of
            # the loop via the broad exception handler.
            if cap is None:
                time.sleep(0.1)
                continue

            # Grab a camera frame.
            ret, frame = cap.read()
            if not ret:
                logger.warning("摄像头读取失败")
                camera_error_count += 1
                last_camera_error_time = time.time()

                # Publish stop commands for safety while the camera is blind.
                for i in range(3):
                    follow_control.publish_velocity(0.0, 0.0)
                    time.sleep(0.01)

                # Too many consecutive failures: re-initialize immediately.
                if camera_error_count > MAX_CAMERA_ERRORS:
                    logger.error("摄像头错误次数超过上限，尝试重新初始化")
                    cap.release()
                    cap = initialize_camera(video_device)
                    camera_error_count = 0
                    if cap is None:
                        logger.critical("摄像头重新初始化失败")
                        break
                    else:
                        logger.info("摄像头重新初始化成功")

                time.sleep(0.1)
                continue

            # Frame read succeeded: reset the error counter.
            camera_error_count = 0

            # Fan the frame out to both detection modules.
            follow_detector.process_frame(frame)
            military_detector.process_frame(frame)

            with follow_control.lock:
                follow_me_flag = follow_control.ai_enabled

            # Pull the newest follow box, if any.
            if not result_queue.empty():
                closest_box = copy.deepcopy(result_queue.get())
                last_detection_time = time.time()  # refresh the detection timestamp
                logger.debug(f"更新跟随目标框: {closest_box}")

            # Follow mode disabled: make sure the robot is stopped.
            if not follow_me_flag:
                if abs(cmd_x_z[0]-0.0) >= 0.01 or abs(cmd_x_z[1]-0.0) >= 0.01:
                    cmd_x_z = [0.0, 0.0]
                    for i in range(0, 3):
                        follow_control.publish_velocity(0.0, 0.0)  # stop the robot
                        time.sleep(0.01)
                    logger.info("AI跟踪关闭，停止机器人")

                time.sleep(0.02)
                continue

            # No detection for too long: stop and wait for the target to reappear.
            time_since_last_detection = time.time() - last_detection_time
            if time_since_last_detection > 4.0:
                if abs(cmd_x_z[0]-0.0) >= 0.01 or abs(cmd_x_z[1]-0.0) >= 0.01:
                    cmd_x_z = [0.0, 0.0]
                    for i in range(0, 3):
                        follow_control.publish_velocity(0.0, 0.0)
                        time.sleep(0.01)
                    logger.info("长时间未检测到目标，停止机器人")
                time.sleep(0.02)
                continue

            # Keep tracking the most recent box.
            cmd_x_z = [0.0, 0.0]
            if len(closest_box) == 4:
                x1, y1, x2, y2 = closest_box
                center_x = (x1 + x2) / 2
                logger.debug(f"目标中心位置: {center_x}, 顶部位置: {y1}")

                # Velocity decision: turn toward the target, drive when centered.
                # NOTE(review): the 320/160 constants assume the 640-wide frames
                # set in initialize_camera — keep them in sync.
                allowable_deviation = 160.0
                diff = center_x - 320.0
                if diff <= -allowable_deviation:
                    angular_z = angular_z_set
                    linear_x = 0.0
                    action = "左转"
                elif diff >= allowable_deviation:
                    angular_z = -1.0 * angular_z_set
                    linear_x = 0.0
                    action = "右转"
                else:
                    angular_z = 0.0
                    # Drive forward while the box top stays clear of the frame edge.
                    if y1 > 10:
                        linear_x = linear_x_set
                        action = "前进"
                    else:
                        linear_x = 0.0
                        action = "停止"

                # Update the velocity command.
                cmd_x_z = [linear_x, angular_z]
                logger.info(f"执行动作: {action}, linear_x={linear_x}, angular_z={angular_z}")

            # Publish the velocity command.
            follow_control.publish_velocity(cmd_x_z[0], cmd_x_z[1])

            count += 1
            elapsed_time = time.time() - start_time  # loop-rate bookkeeping

            if elapsed_time >= 1.0:  # report once per second
                frequency = count / elapsed_time
                logger.debug(f"循环频率: {frequency:.2f} Hz, 速度命令: linear_x={cmd_x_z[0]}, angular_z={cmd_x_z[1]}")
                count = 0
                start_time = time.time()

            time.sleep(0.003)

    except KeyboardInterrupt:
        logger.info("检测到键盘中断，停止系统...")
    except Exception as e:
        logger.error(f"主循环发生错误: {e}")
    finally:
        # Stop all modules and make sure the robot is halted.
        logger.info("正在停止所有模块...")
        follow_detector.stop()
        military_detector.stop_detection()
        for i in range(5):
            follow_control.publish_velocity(0.0, 0.0)
            time.sleep(0.01)
        if cap is not None and cap.isOpened():
            cap.release()
        rclpy.shutdown()
        logger.info("系统已安全停止")

if __name__ == '__main__':
    # AGVDetection (from the local spacemit_cv module) and onnxruntime are
    # imported here, at script-launch time, rather than at the top of the file.
    # NOTE(review): MilitaryTargetDetection and FollowDetectionThread reference
    # these names as module globals, so calling main() after importing this
    # module (e.g. via a ROS2 console-script entry point) would raise
    # NameError — confirm how this node is launched.
    from spacemit_cv import AGVDetection
    import onnxruntime
    
    main()