# -*- coding: utf-8 -*-
"""
高级目标跟踪检测系统 - 集成UKF+IMM算法
超快响应、多步预测、智能运动模式切换
"""

import argparse
import os
import platform
import sys
import time
import json
from pathlib import Path

import torch
import cv2
import numpy as np
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Twist

FILE = Path(__file__).resolve()  # absolute path of this script
ROOT = FILE.parents[0]   # YOLOv5 root directory (the directory containing this file)
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # make local YOLOv5 modules (models/, utils/) importable
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # re-express ROOT relative to the current working directory

from models.common import DetectMultiBackend
from utils.general import (LOGGER, Profile, check_img_size, colorstr, cv2,
                           non_max_suppression, scale_boxes, xyxy2xywh)
from utils.plots import Annotator, colors
from utils.torch_utils import select_device, smart_inference_mode

# 导入高级跟踪器
from advanced_tracker import AdvancedMultiTargetTracker, draw_advanced_tracking_results


class CrosshairController(Node):
    """ROS2 node managing a user-draggable crosshair used as the coordinate origin.

    The crosshair can be dragged with the mouse, its position persisted to a
    JSON config file, and the offset between the tracked target and the
    crosshair is converted into smoothed velocity commands published on
    ``/cmd_vel``.  Status flags (tracking / aligned) ride along in the unused
    ``linear.z`` / ``angular.x`` fields of the Twist message.
    """

    def __init__(self):
        super().__init__('crosshair_controller')

        # Publisher for velocity commands.
        self.vel_publisher = self.create_publisher(Twist, '/cmd_vel', 10)

        # Crosshair position (x, y) in image coordinates; defaults to the
        # center of a 640x480 frame.
        self.crosshair_pos = [320, 240]
        self.image_size = [640, 480]

        # Mouse-drag interaction state.
        self.dragging = False
        self.mouse_offset = [0, 0]

        # Velocity control parameters.
        self.max_velocity = 0.2             # maximum speed, m/s
        self.velocity_smooth_factor = 0.3   # exponential-smoothing coefficient
        self.current_velocity = [0.0, 0.0]  # smoothed velocity state [vx, vy]

        # Pixel-to-meter conversion ratio (tunable): 1 pixel == 0.0005 m.
        # (An earlier comment said 0.001 m — the value below is authoritative.)
        self.pixel_to_meter_ratio = 0.0005

        # Velocity components below this magnitude are published as zero (m/s).
        self.velocity_threshold = 0.001

        # Target tracking status flags.
        self.target_tracking_flag = 0    # 1 while a target is being tracked
        self.target_aligned_flag = 0     # 1 once the target is within threshold
        self.alignment_threshold = 30.0  # alignment threshold, pixels

        # Config file used to persist the crosshair position.
        self.config_file = "crosshair_config.json"

        # Restore any previously saved position.
        self.load_crosshair_position()

        self.get_logger().info('十字架控制器已初始化')

    def set_image_size(self, width, height):
        """Record the camera frame size and keep the crosshair in bounds."""
        self.image_size = [width, height]
        # Reset to the frame center if the saved position falls outside.
        if self.crosshair_pos[0] >= width or self.crosshair_pos[1] >= height:
            self.crosshair_pos = [width // 2, height // 2]

    def mouse_callback(self, event, x, y, flags, param):
        """OpenCV mouse callback implementing click-and-drag of the crosshair."""
        if event == cv2.EVENT_LBUTTONDOWN:
            # Start dragging only when the click lands near the crosshair.
            distance = np.sqrt((x - self.crosshair_pos[0])**2 + (y - self.crosshair_pos[1])**2)
            if distance < 20:  # draggable within a 20-pixel radius
                self.dragging = True
                self.mouse_offset = [
                    x - self.crosshair_pos[0], y - self.crosshair_pos[1]]

        elif event == cv2.EVENT_MOUSEMOVE and self.dragging:
            # Follow the cursor, clamped 20 px inside the frame borders.
            new_x = max(20, min(self.image_size[0] - 20, x - self.mouse_offset[0]))
            new_y = max(20, min(self.image_size[1] - 20, y - self.mouse_offset[1]))
            self.crosshair_pos = [new_x, new_y]

        elif event == cv2.EVENT_LBUTTONUP:
            self.dragging = False

    def draw_crosshair(self, frame):
        """Draw the crosshair and its coordinates onto ``frame``; return it."""
        x, y = self.crosshair_pos

        color = (0, 255, 255)  # yellow (BGR)
        thickness = 2
        length = 15

        # Horizontal and vertical lines.
        cv2.line(frame, (x - length, y), (x + length, y), color, thickness)
        cv2.line(frame, (x, y - length), (x, y + length), color, thickness)

        # Center dot.
        cv2.circle(frame, (x, y), 3, color, -1)

        # Coordinate readout next to the crosshair.
        coord_text = f"Origin: ({x}, {y})"
        cv2.putText(frame, coord_text, (x + 20, y - 10),
                   cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

        return frame

    def calculate_velocity(self, target_center):
        """Compute a smoothed velocity from the target's offset to the crosshair.

        Args:
            target_center: (x, y) pixel position of the target, or None.

        Returns:
            The updated ``self.current_velocity`` list [vx, vy] in m/s.
            Side effect: updates the tracking/alignment flags.
        """
        if target_center is None:
            # No target: return zero immediately instead of letting the
            # smoothing filter decay, and clear the status flags.
            self.current_velocity = [0.0, 0.0]
            self.target_tracking_flag = 0
            self.target_aligned_flag = 0
            return self.current_velocity

        # A target exists — raise the tracking flag.
        self.target_tracking_flag = 1

        # Pixel offset of the target relative to the crosshair.
        dx = target_center[0] - self.crosshair_pos[0]
        dy = target_center[1] - self.crosshair_pos[1]

        # Euclidean distance used for the alignment check.
        distance = np.sqrt(dx**2 + dy**2)
        if distance <= self.alignment_threshold:
            self.target_aligned_flag = 1
        else:
            self.target_aligned_flag = 0

        # Convert pixels to meters; the image y axis points down, so invert it.
        vel_x = dx * self.pixel_to_meter_ratio
        vel_y = -dy * self.pixel_to_meter_ratio

        # Clamp to the maximum speed while preserving direction.
        velocity_magnitude = np.sqrt(vel_x**2 + vel_y**2)
        if velocity_magnitude > self.max_velocity:
            scale = self.max_velocity / velocity_magnitude
            vel_x *= scale
            vel_y *= scale

        # Exponential smoothing toward the new target velocity.
        alpha = self.velocity_smooth_factor
        self.current_velocity[0] = alpha * vel_x + (1 - alpha) * self.current_velocity[0]
        self.current_velocity[1] = alpha * vel_y + (1 - alpha) * self.current_velocity[1]

        return self.current_velocity

    def publish_velocity(self, velocity):
        """Publish a Twist command, suppressing sub-threshold components.

        Nothing is published while no target is tracked, so downstream
        consumers see silence rather than explicit zeros.
        """
        if not self.target_tracking_flag:
            return

        msg = Twist()

        # Zero out components below the dead-band threshold.
        vel_x = velocity[0] if abs(velocity[0]) > self.velocity_threshold else 0.0
        vel_y = velocity[1] if abs(velocity[1]) > self.velocity_threshold else 0.0

        # Explicit float() casts: intermediate math may yield numpy scalars,
        # and ROS2 message fields require plain Python floats.
        msg.linear.x = float(vel_x)
        msg.linear.y = float(vel_y)
        msg.linear.z = float(self.target_aligned_flag)   # alignment flag piggy-backed here
        msg.angular.x = float(self.target_tracking_flag)  # tracking flag piggy-backed here
        msg.angular.y = 0.0
        msg.angular.z = 0.0

        self.vel_publisher.publish(msg)

    def save_crosshair_position(self):
        """Persist the crosshair position and control parameters to JSON."""
        config = {
            "crosshair_pos": self.crosshair_pos,
            "image_size": self.image_size,
            "pixel_to_meter_ratio": self.pixel_to_meter_ratio,
            "max_velocity": self.max_velocity,
            "alignment_threshold": self.alignment_threshold
        }

        try:
            with open(self.config_file, 'w') as f:
                json.dump(config, f, indent=2)
            self.get_logger().info(f'十字架位置已保存到 {self.config_file}')
        except Exception as e:
            self.get_logger().error(f'保存配置失败: {e}')

    def load_crosshair_position(self):
        """Load the crosshair position from JSON, keeping defaults on failure."""
        try:
            if os.path.exists(self.config_file):
                with open(self.config_file, 'r') as f:
                    config = json.load(f)

                self.crosshair_pos = config.get("crosshair_pos", [320, 240])
                self.image_size = config.get("image_size", [640, 480])
                self.pixel_to_meter_ratio = config.get("pixel_to_meter_ratio", 0.0005)
                self.max_velocity = config.get("max_velocity", 0.2)
                self.alignment_threshold = config.get("alignment_threshold", 30.0)

                self.get_logger().info(f'从 {self.config_file} 加载十字架位置: {self.crosshair_pos}')
        except Exception as e:
            self.get_logger().error(f'加载配置失败: {e}')

    def get_highest_confidence_target(self, tracked_targets):
        """Return the tracked target with the highest ``confidence``, or None."""
        if not tracked_targets:
            return None

        highest_conf_target = None
        max_confidence = 0.0

        for target in tracked_targets:
            # Targets without a confidence attribute are skipped.
            if hasattr(target, 'confidence') and target.confidence > max_confidence:
                max_confidence = target.confidence
                highest_conf_target = target

        return highest_conf_target

    def get_tracking_status(self):
        """Return the current tracking status flags as a dict."""
        return {
            'target_tracking_flag': self.target_tracking_flag,
            'target_aligned_flag': self.target_aligned_flag,
            'alignment_threshold': self.alignment_threshold
        }


@smart_inference_mode()
def run_advanced_detection(
        weights=ROOT / 'weights/yolo11n.engine',
        data=ROOT / 'data/colo.yaml',
        conf_thres=0.25,
        iou_thres=0.45,
        max_det=1000,
        device='',
        classes=None,
        agnostic_nms=False,
        augment=False,
        visualize=False,
        line_thickness=3,
        hide_labels=False,
        hide_conf=False,
        half=False,
        dnn=False,
        camera_id=0,
        imgsz=640,
        use_imm=True,  # use the IMM multi-model filter (otherwise a single UKF)
        show_trajectory=True,
        show_prediction=True,
        show_future_predictions=True,
        show_model_info=True,
        max_distance_threshold=60.0,  # lower association threshold for faster response
        enable_crosshair=True,  # enable the crosshair controller / velocity output
        
):
    """Run the advanced tracking detection system.

    Captures frames from a local camera, runs YOLOv5 detection, feeds the
    single highest-confidence detection into the UKF/IMM tracker, optionally
    drives a ROS2 crosshair controller that publishes velocity commands, and
    renders everything in an OpenCV window with keyboard controls.  Blocks
    until the user presses 'q' or the camera fails.

    NOTE(review): the `device` parameter is ignored — CUDA:0 is forced below.
    NOTE(review): `line_thickness`, `hide_labels` and `hide_conf` are accepted
    for CLI compatibility but are not used inside this function.
    """
    print("🚀 启动YOLOv5 + UKF/IMM 高级跟踪系统")
    print(f"🧠 算法配置: {'IMM多模型' if use_imm else 'UKF单模型'}")
    print(f"📊 检测参数: conf={conf_thres}, iou={iou_thres}")
    print(f"🎯 跟踪参数: 关联阈值={max_distance_threshold}")
    
    # Disable CuDNN to avoid CUDA version compatibility problems.
    torch.backends.cudnn.enabled = False
    
    # Initialization — force GPU (overrides the `device` parameter).
    device = select_device('0')  # force CUDA:0
    
    # Load the detection model.
    print("⏳ 加载YOLOv5模型...")
    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    stride, names, pt = model.stride, model.names, model.pt
    imgsz = check_img_size(imgsz, s=stride)
    
    # Warm up the model with a dummy forward pass.
    print("🔥 模型预热中...")
    model.warmup(imgsz=(1, 3, imgsz, imgsz))
    
    # Initialize the advanced multi-target tracker.
    tracker = AdvancedMultiTargetTracker(
        max_distance_threshold=max_distance_threshold,
        use_imm=use_imm
    )
    print(f"🎯 初始化{'IMM' if use_imm else 'UKF'}跟踪器")
    
    # Initialize the crosshair controller (ROS2); disable the feature on failure.
    crosshair_controller = None
    if enable_crosshair:
        try:
            rclpy.init()
            crosshair_controller = CrosshairController()
            print("🎯 十字架控制器已初始化")
        except Exception as e:
            print(f"❌ 十字架控制器初始化失败: {e}")
            enable_crosshair = False
    
    # Open the camera.
    print(f"📷 连接摄像头 {camera_id}...")
    cap = cv2.VideoCapture(camera_id)
    
    if not cap.isOpened():
        print(f"❌ 无法打开摄像头 {camera_id}")
        return
        
    # Configure the camera for low-latency capture.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640) #640
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) #480
    cap.set(cv2.CAP_PROP_FPS, 30)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # minimize buffering latency
    
    # Read back the parameters the camera actually accepted.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    
    print(f"📸 摄像头配置: {width}x{height} @ {fps}FPS")
    
    # Wire the crosshair controller into the display window.
    if enable_crosshair and crosshair_controller:
        crosshair_controller.set_image_size(width, height)
        cv2.namedWindow('Advanced YOLOv5 Tracking System')
        cv2.setMouseCallback('Advanced YOLOv5 Tracking System', crosshair_controller.mouse_callback)
    
    print("🎮 控制说明:")
    print("   - 按 'q' 键: 退出系统")
    print("   - 按 's' 键: 截图保存")
    print("   - 按 't' 键: 切换轨迹显示")
    print("   - 按 'p' 键: 切换预测显示")
    print("   - 按 'f' 键: 切换未来预测")
    print("   - 按 'm' 键: 切换模型信息")
    print("   - 按 'i' 键: 显示性能统计")
    print("   - 按 空格键: 暂停/继续")
    print("   - 按 'r' 键: 重置跟踪器")
    if enable_crosshair:
        print("   - 按 'c' 键: 保存十字架位置")
        print("   - 鼠标拖拽: 调整十字架位置")
    print("✨ 高级跟踪系统启动！\n")
    
    # Loop state.
    frame_count = 0
    fps_counter = 0
    fps_start_time = time.time()
    paused = False
    
    # Per-stage timing samples (milliseconds) for performance reporting.
    detection_times = []
    tracking_times = []
    total_times = []
    
    while True:
        if not paused:
            frame_start_time = time.time()
            
            ret, frame = cap.read()
            if not ret:
                print("❌ 摄像头读取失败")
                break
                
            frame_count += 1
            fps_counter += 1
            
            # --- YOLOv5 detection stage ---
            detection_start = time.time()
            
            # Preprocess: direct resize to (imgsz, imgsz), HWC->CHW, normalize.
            # NOTE(review): this is a plain resize, not a letterbox, while
            # scale_boxes below assumes letterbox-style scaling — boxes may be
            # slightly off for non-square frames; confirm this is intended.
            im = cv2.resize(frame, (imgsz, imgsz))
            im = im.transpose((2, 0, 1))
            im = np.ascontiguousarray(im)
            im = torch.from_numpy(im).to(device)
            im = im.half() if model.fp16 else im.float()
            im /= 255.0
            if len(im.shape) == 3:
                im = im[None]
                
            # Inference + NMS.
            pred = model(im, augment=augment, visualize=visualize)
            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
            
            # Keep only the single highest-confidence detection across all results.
            detections = []
            best_detection = None
            max_confidence = 0.0
            
            for det in pred:
                if len(det):
                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], frame.shape).round()
                    for *xyxy, conf, cls in reversed(det):
                        confidence = float(conf)
                        if confidence > max_confidence:
                            max_confidence = confidence
                            x1, y1, x2, y2 = map(int, xyxy)
                            class_name = names[int(cls)]
                            best_detection = ((x1, y1, x2, y2), class_name, confidence)
            
            # Feed at most one detection to the tracker.
            if best_detection:
                detections.append(best_detection)
            
            detection_time = (time.time() - detection_start) * 1000
            detection_times.append(detection_time)
            
            # --- Advanced tracking stage ---
            tracking_start = time.time()
            tracked_targets = tracker.update(detections)
            tracking_time = (time.time() - tracking_start) * 1000
            tracking_times.append(tracking_time)
            
            # Render the tracker's overlays onto the frame.
            frame = draw_advanced_tracking_results(
                frame, tracked_targets,
                show_trajectory=show_trajectory,
                show_prediction=show_prediction,
                show_future_predictions=show_future_predictions,
                show_model_info=show_model_info
            )
            
            # --- Crosshair control and velocity output ---
            if enable_crosshair and crosshair_controller:
                # Draw the crosshair overlay.
                frame = crosshair_controller.draw_crosshair(frame)
                
                # Pick the highest-confidence tracked target.
                highest_conf_target = crosshair_controller.get_highest_confidence_target(tracked_targets)
                
                # Derive the target center from the tracker's motion estimate.
                target_center = None
                if highest_conf_target:
                    motion_info = highest_conf_target.get_motion_info()
                    target_center = motion_info.get('position')
                
                velocity = crosshair_controller.calculate_velocity(target_center)
                crosshair_controller.publish_velocity(velocity)
                
                # On-screen velocity readout.
                vel_text = f"Velocity: ({velocity[0]:.3f}, {velocity[1]:.3f}) m/s"
                cv2.putText(frame, vel_text, (10, height - 30),
                           cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
                
                # On-screen tracking/alignment status flags.
                tracking_status = "跟踪中" if crosshair_controller.target_tracking_flag else "无目标"
                alignment_status = "已对准" if crosshair_controller.target_aligned_flag else "未对准"
                status_color = (0, 255, 0) if crosshair_controller.target_tracking_flag else (0, 0, 255)
                alignment_color = (0, 255, 0) if crosshair_controller.target_aligned_flag else (255, 0, 0)
                
                cv2.putText(frame, f"状态: {tracking_status}", (10, height - 60),
                           cv2.FONT_HERSHEY_SIMPLEX, 0.5, status_color, 2)
                cv2.putText(frame, f"对准: {alignment_status}", (10, height - 45),
                           cv2.FONT_HERSHEY_SIMPLEX, 0.5, alignment_color, 2)
                
                # When a target exists, show its distance to the crosshair and confidence.
                if highest_conf_target and target_center:
                    distance = np.sqrt((target_center[0] - crosshair_controller.crosshair_pos[0])**2 + 
                                     (target_center[1] - crosshair_controller.crosshair_pos[1])**2)
                    distance_text = f"Distance: {distance:.1f}px | Conf: {highest_conf_target.confidence:.2f}"
                    cv2.putText(frame, distance_text, (10, height - 10),
                               cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
                
                # Pump the ROS event loop briefly (non-blocking).
                rclpy.spin_once(crosshair_controller, timeout_sec=0.001)
            
            # Total per-frame processing time.
            total_time = (time.time() - frame_start_time) * 1000
            total_times.append(total_time)
            
            # Once per second: recompute FPS and draw the performance overlay.
            current_time = time.time()
            if current_time - fps_start_time >= 1.0:
                current_fps = fps_counter / (current_time - fps_start_time)
                fps_counter = 0
                fps_start_time = current_time
                
                # Trim old timing samples to bound memory.
                if len(detection_times) > 100:
                    detection_times = detection_times[-50:]
                    tracking_times = tracking_times[-50:]
                    total_times = total_times[-50:]
                
                # Average per-stage timings.
                avg_detection = np.mean(detection_times) if detection_times else 0
                avg_tracking = np.mean(tracking_times) if tracking_times else 0
                avg_total = np.mean(total_times) if total_times else 0
                
                # Performance overlay text lines.
                info_texts = [
                    f"FPS: {current_fps:.1f}",
                    f"检测: {avg_detection:.1f}ms",
                    f"跟踪: {avg_tracking:.1f}ms", 
                    f"总计: {avg_total:.1f}ms",
                    f"目标: {len(tracked_targets)}",
                    f"帧数: {frame_count}"
                ]
                
                y_offset = 25
                for i, text in enumerate(info_texts):
                    # First three (timing) lines in green, the rest in white.
                    color = (0, 255, 0) if i < 3 else (255, 255, 255)
                    cv2.putText(frame, text, (10, y_offset + i * 20),
                               cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            
            # Detailed per-target status panel.
            if tracked_targets:
                info_y = height - 150
                cv2.putText(frame, "=== 目标状态 ===", (10, info_y),
                           cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
                info_y += 20
                
                for i, target in enumerate(tracked_targets[:4]):  # show up to 4 targets
                    motion_info = target.get_motion_info()
                    pos = motion_info.get('position')
                    vel = motion_info.get('velocity')
                    
                    if pos and vel:
                        speed = np.sqrt(vel[0]**2 + vel[1]**2)
                        
                        # Basic info: ID, position, speed.
                        info_text = f"ID{target.id}: ({pos[0]:.0f},{pos[1]:.0f}) v={speed:.1f}"
                        cv2.putText(frame, info_text, (10, info_y),
                                   cv2.FONT_HERSHEY_SIMPLEX, 0.4, target.color, 2)
                        info_y += 15
                        
                        # IMM dominant model, when available.
                        if 'dominant_model' in motion_info:
                            model_text = f"   模型: {motion_info['dominant_model']}"
                            cv2.putText(frame, model_text, (10, info_y),
                                       cv2.FONT_HERSHEY_SIMPLEX, 0.3, target.color, 1)
                            info_y += 12
                        
                        # Prediction accuracy stats, when available.
                        accuracy = target.get_prediction_accuracy()
                        if accuracy:
                            acc_text = f"   误差: {accuracy['mean_error']:.1f}±{accuracy['std_error']:.1f}"
                            cv2.putText(frame, acc_text, (10, info_y),
                                       cv2.FONT_HERSHEY_SIMPLEX, 0.3, target.color, 1)
                            info_y += 12
        
        # Display the (possibly stale, when paused) frame.
        cv2.imshow('Advanced YOLOv5 Tracking System', frame)
        
        # Keyboard handling (runs even while paused).
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            print("👋 用户退出系统")
            break
        elif key == ord('s'):
            screenshot_name = f"advanced_tracking_{int(time.time())}.jpg"
            cv2.imwrite(screenshot_name, frame)
            print(f"📸 截图保存: {screenshot_name}")
        elif key == ord('t'):
            show_trajectory = not show_trajectory
            print(f"🌈 轨迹显示: {'开启' if show_trajectory else '关闭'}")
        elif key == ord('p'):
            show_prediction = not show_prediction
            print(f"🎯 预测显示: {'开启' if show_prediction else '关闭'}")
        elif key == ord('f'):
            show_future_predictions = not show_future_predictions
            print(f"🔮 未来预测: {'开启' if show_future_predictions else '关闭'}")
        elif key == ord('m'):
            show_model_info = not show_model_info
            print(f"🧠 模型信息: {'开启' if show_model_info else '关闭'}")
        elif key == ord('i'):
            # Dump detailed performance statistics to stdout.
            stats = tracker.get_performance_stats()
            print("\n📊 === 性能统计 ===")
            print(f"平均分配时间: {stats.get('avg_assignment_time', 0):.2f}ms")
            print(f"处理帧数: {stats.get('total_frames', 0)}")
            print(f"活跃目标: {stats.get('active_targets', 0)}")
            print(f"总目标数: {stats.get('next_id', 1) - 1}")
            if detection_times:
                print(f"平均检测时间: {np.mean(detection_times):.2f}ms")
                print(f"平均跟踪时间: {np.mean(tracking_times):.2f}ms")
                print(f"平均总时间: {np.mean(total_times):.2f}ms")
            print("==================\n")
        elif key == ord(' '):
            paused = not paused
            print(f"⏸️  {'暂停' if paused else '继续'}")
        elif key == ord('r'):
            # Replace the tracker with a fresh instance (drops all track state).
            tracker = AdvancedMultiTargetTracker(
                max_distance_threshold=max_distance_threshold,
                use_imm=use_imm
            )
            print("🔄 跟踪器已重置")
        elif key == ord('c') and enable_crosshair and crosshair_controller:
            crosshair_controller.save_crosshair_position()
            print("💾 十字架位置已保存")
    
    # Final performance report.
    print("\n📊 === 最终性能报告 ===")
    if detection_times and tracking_times and total_times:
        print(f"检测时间: {np.mean(detection_times):.2f}±{np.std(detection_times):.2f}ms")
        print(f"跟踪时间: {np.mean(tracking_times):.2f}±{np.std(tracking_times):.2f}ms")
        print(f"总处理时间: {np.mean(total_times):.2f}±{np.std(total_times):.2f}ms")
        print(f"理论FPS: {1000/np.mean(total_times):.1f}")
    
    stats = tracker.get_performance_stats()
    print(f"目标关联时间: {stats.get('avg_assignment_time', 0):.2f}ms")
    print(f"处理总帧数: {frame_count}")
    print("========================\n")
    
    # Release camera and window resources.
    cap.release()
    cv2.destroyAllWindows()
    
    # Tear down ROS resources.
    if enable_crosshair and crosshair_controller:
        crosshair_controller.destroy_node()
        rclpy.shutdown()
        print("🔄 ROS资源已清理")
    
    print("🎉 高级跟踪系统已关闭")


def parse_opt(argv=None):
    """Parse command-line arguments for the tracking system.

    Args:
        argv: Optional list of argument strings; defaults to ``sys.argv[1:]``.

    Returns:
        argparse.Namespace with all detection/tracking options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'weights/colo.pt', help='model path(s)')
    parser.add_argument('--data', type=str, default=ROOT / 'data/colo.yaml', help='dataset.yaml path')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--visualize', action='store_true', help='visualize features')
    parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    parser.add_argument('--camera-id', type=int, default=0, help='camera ID')
    parser.add_argument('--imgsz', type=int, default=640, help='inference size (pixels)')
    # BUG FIX: the options below previously used action='store_true' together
    # with default=True, which made the flags no-ops (they could never be
    # turned off from the command line).  Each now has a matching --no-*
    # switch; the positive flag is still accepted for backward compatibility.
    parser.add_argument('--use-imm', dest='use_imm', action='store_true', default=True, help='use IMM filter (vs UKF)')
    parser.add_argument('--no-use-imm', dest='use_imm', action='store_false', help='use plain UKF instead of IMM')
    parser.add_argument('--show-trajectory', dest='show_trajectory', action='store_true', default=True, help='show target trajectory')
    parser.add_argument('--no-show-trajectory', dest='show_trajectory', action='store_false', help='hide target trajectory')
    parser.add_argument('--show-prediction', dest='show_prediction', action='store_true', default=True, help='show position prediction')
    parser.add_argument('--no-show-prediction', dest='show_prediction', action='store_false', help='hide position prediction')
    parser.add_argument('--show-future-predictions', dest='show_future_predictions', action='store_true', default=True, help='show future predictions')
    parser.add_argument('--no-show-future-predictions', dest='show_future_predictions', action='store_false', help='hide future predictions')
    parser.add_argument('--show-model-info', dest='show_model_info', action='store_true', default=True, help='show motion model info')
    parser.add_argument('--no-show-model-info', dest='show_model_info', action='store_false', help='hide motion model info')
    parser.add_argument('--max-distance-threshold', type=float, default=60.0, help='association distance threshold')
    parser.add_argument('--enable-crosshair', dest='enable_crosshair', action='store_true', default=True, help='enable crosshair control for velocity output')
    parser.add_argument('--no-enable-crosshair', dest='enable_crosshair', action='store_false', help='disable crosshair control')
    return parser.parse_args(argv)


def main(opt):
    """Print a configuration banner, then launch the detection loop.

    Args:
        opt: argparse.Namespace produced by ``parse_opt``; its attributes are
            forwarded verbatim to ``run_advanced_detection``.
    """
    if opt.use_imm:
        algorithm_name = "IMM多模型滤波器"
    else:
        algorithm_name = "UKF无迹卡尔曼滤波器"

    banner = f"""
🚀 YOLOv5 + 高级跟踪系统
═══════════════════════════════════
🧠 跟踪算法: {algorithm_name}
📂 模型文件: {opt.weights}
📊 数据配置: {opt.data}
🎯 置信度阈值: {opt.conf_thres}
🔗 IOU阈值: {opt.iou_thres}
📷 摄像头: {opt.camera_id}
🖼️  图像尺寸: {opt.imgsz}
🎪 关联阈值: {opt.max_distance_threshold}
═══════════════════════════════════
"""
    print(banner)

    run_advanced_detection(**vars(opt))


if __name__ == '__main__':
    # Script entry point: parse CLI options, then launch the tracking system.
    main(parse_opt())
