import os
import cv2
import numpy as np
from datetime import datetime
import torch
from config import MARKED_VEHICLE_DIR, MARKED_PLATE_DIR, CROPPED_PLATE_DIR
from logger_config import setup_logger
import sys
from ocr_service import recognize_license_plate

# Add the model directories to the module search path
sys.path.append(os.path.join(os.getcwd(), 'VehicleIdentification'))
sys.path.append(os.path.join(os.getcwd(), 'LicensePlateRecognition'))

# Import the vehicle recognition and license plate recognition model frameworks
from ultralytics import YOLO
from detectron2.modeling import build_model
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.structures import Instances, Boxes
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from detectron2.config import get_cfg

# Create the module logger
logger = setup_logger('model_inference')

# Module-level caches so each model is loaded at most once
VEHICLE_MODEL = None
LICENSE_PLATE_MODEL = None
GREEN_LICENSE_PLATE_MODEL = None
MULTI_VEHICLE_MODEL = None

# Confidence threshold; detections scoring below this are treated as unreliable
CONFIDENCE_THRESHOLD = 0.7

def load_vehicle_model():
    """
    Load the vehicle recognition YOLO model, caching it in a module global.

    Returns:
        YOLO: the cached model instance.
    """
    global VEHICLE_MODEL
    if VEHICLE_MODEL is not None:
        return VEHICLE_MODEL

    logger.info("加载车辆识别模型...")
    try:
        weights_path = os.path.join('VehicleIdentification', 'best.pt')
        VEHICLE_MODEL = YOLO(weights_path)
        logger.info("车辆识别模型加载成功")
    except Exception as e:
        # Record the failure before propagating it to the caller.
        logger.error(f"加载车辆识别模型失败: {str(e)}")
        raise
    return VEHICLE_MODEL

def load_multi_vehicle_model():
    """
    Load the multi-vehicle detection YOLO model, caching it in a module global.

    Returns:
        YOLO: the cached model instance.
    """
    global MULTI_VEHICLE_MODEL
    if MULTI_VEHICLE_MODEL is not None:
        return MULTI_VEHICLE_MODEL

    logger.info("加载多车检测模型...")
    try:
        weights_path = os.path.join('VehicleIdentification', 'best_cars_detection.pt')
        MULTI_VEHICLE_MODEL = YOLO(weights_path)
        logger.info("多车检测模型加载成功")
    except Exception as e:
        # Record the failure before propagating it to the caller.
        logger.error(f"加载多车检测模型失败: {str(e)}")
        raise
    return MULTI_VEHICLE_MODEL

def load_license_plate_model():
    """
    Load the license plate detection model (detectron2 Mask R-CNN), cached globally.

    Returns:
        The detectron2 model instance, set to eval mode.

    Raises:
        Exception: re-raised after logging if config/weights cannot be loaded.
    """
    global LICENSE_PLATE_MODEL
    if LICENSE_PLATE_MODEL is None:
        logger.info("加载车牌检测模型...")
        try:
            model_dir = os.path.join('LicensePlateRecognition', 'model_output')
            pre_config = os.path.join('LicensePlateRecognition', 'configs', '1', 'mask_rcnn_R_50_FPN_3x.yaml')

            cfg = get_cfg()
            cfg.merge_from_file(pre_config)
            cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
            # Use detectron2's standard MODEL.WEIGHTS key. The previous code set
            # a custom MODEL.WEIGHT key, which detectron2 itself never reads.
            cfg.MODEL.WEIGHTS = os.path.join(model_dir, "model_car.pth")
            cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
            # BUG FIX: the correct config key is SCORE_THRESH_TEST. The earlier
            # SCORE_THRESHOLD_TEST spelling silently created an unused key, so
            # the intended 0.5 score threshold was never applied.
            cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
            cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
            cfg.DATASETS.TEST = ("ccpd_test",)

            # Build the model and switch to inference mode
            model = build_model(cfg)
            model.eval()

            # Load the model weights (weights_only=True avoids executing
            # arbitrary pickled code from the checkpoint file)
            with torch.no_grad():
                checkpoint = torch.load(cfg.MODEL.WEIGHTS, map_location=cfg.MODEL.DEVICE, weights_only=True)
                model.load_state_dict(checkpoint["model"])

            LICENSE_PLATE_MODEL = model
            logger.info("车牌检测模型加载成功")
        except Exception as e:
            error_msg = f"加载车牌检测模型失败: {str(e)}"
            logger.error(error_msg)
            raise
    return LICENSE_PLATE_MODEL

def load_green_license_plate_model():
    """
    Load the new-energy (green) plate detection model (detectron2 Mask R-CNN), cached globally.

    Returns:
        The detectron2 model instance, set to eval mode.

    Raises:
        Exception: re-raised after logging if config/weights cannot be loaded.
    """
    global GREEN_LICENSE_PLATE_MODEL
    if GREEN_LICENSE_PLATE_MODEL is None:
        logger.info("加载新能源车牌检测模型...")
        try:
            model_dir = os.path.join('LicensePlateRecognition', 'model_output')
            pre_config = os.path.join('LicensePlateRecognition', 'configs', '1', 'mask_rcnn_R_50_FPN_3x.yaml')

            cfg = get_cfg()
            cfg.merge_from_file(pre_config)
            cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
            # Use detectron2's standard MODEL.WEIGHTS key. The previous code set
            # a custom MODEL.WEIGHT key, which detectron2 itself never reads.
            cfg.MODEL.WEIGHTS = os.path.join(model_dir, "model_green_car.pth")
            cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
            # BUG FIX: the correct config key is SCORE_THRESH_TEST. The earlier
            # SCORE_THRESHOLD_TEST spelling silently created an unused key, so
            # the intended 0.5 score threshold was never applied.
            cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
            cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
            cfg.DATASETS.TEST = ("ccpd_test",)

            # Build the model and switch to inference mode
            model = build_model(cfg)
            model.eval()

            # Load the model weights (weights_only=True avoids executing
            # arbitrary pickled code from the checkpoint file)
            with torch.no_grad():
                checkpoint = torch.load(cfg.MODEL.WEIGHTS, map_location=cfg.MODEL.DEVICE, weights_only=True)
                model.load_state_dict(checkpoint["model"])

            GREEN_LICENSE_PLATE_MODEL = model
            logger.info("新能源车牌检测模型加载成功")
        except Exception as e:
            error_msg = f"加载新能源车牌检测模型失败: {str(e)}"
            logger.error(error_msg)
            raise
    return GREEN_LICENSE_PLATE_MODEL

def draw_detection_boxes(image_path, detections, output_path):
    """
    Draw bounding boxes and confidence labels for each detection onto an image.

    Args:
        image_path (str): path of the image to annotate
        detections (list): detection dicts with 'type', 'confidence', 'bbox' keys
        output_path (str): where the annotated image is written

    Returns:
        str: output_path of the saved image.

    Raises:
        ValueError: if the image cannot be read.
    """
    logger.info(f"开始绘制检测框: {image_path}")

    try:
        canvas = cv2.imread(image_path)
        if canvas is None:
            error_msg = f"无法读取图像: {image_path}"
            logger.error(error_msg)
            raise ValueError(error_msg)

        # Per-type box colors (BGR); unknown types fall back to yellow.
        palette = {
            '车辆': (0, 255, 0),    # green
            '车牌': (0, 0, 255)     # red
        }

        for det in detections:
            x1, y1, x2, y2 = (int(v) for v in det['bbox'])
            color = palette.get(det['type'], (255, 255, 0))

            # Bounding box
            cv2.rectangle(canvas, (x1, y1), (x2, y2), color, 2)

            # Label text with confidence
            caption = f"{det['type']} ({det['confidence']:.2f})"

            # Filled background rectangle behind the label for readability
            (text_w, text_h), _ = cv2.getTextSize(caption, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
            cv2.rectangle(canvas, (x1, y1 - text_h - 10), (x1 + text_w, y1), color, -1)

            # Label text in white
            cv2.putText(canvas, caption, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

        cv2.imwrite(output_path, canvas)
        logger.info(f"检测框绘制完成，已保存至: {output_path}")
        return output_path

    except Exception as e:
        logger.error(f"绘制检测框失败: {str(e)}")
        raise

def run_vehicle_detection(preprocessed_image_path, selected_model_name, output_dir):
    """
    Run vehicle detection on a preprocessed image and save an annotated copy.

    Args:
        preprocessed_image_path (str): path to the preprocessed image
        selected_model_name (str): selected model name (only logged here)
        output_dir (str): directory for the annotated output image

    Returns:
        tuple: (annotated image path, list of detection dicts)

    Raises:
        ValueError: if the image cannot be read.
    """
    logger.info(f"开始车辆检测: {preprocessed_image_path}, 使用模型: {selected_model_name}")

    try:
        model = load_vehicle_model()

        frame = cv2.imread(preprocessed_image_path)
        if frame is None:
            error_msg = f"无法读取图像: {preprocessed_image_path}"
            logger.error(error_msg)
            raise ValueError(error_msg)

        # Run inference
        result = model.predict(frame)[0]

        # Timestamped output name; the "_0" suffix marks the annotated image
        timestamp = datetime.now().strftime('%m%d%H%M%S')
        output_path = os.path.join(output_dir, f"{timestamp}_0.jpg")

        # Save the image with YOLO's own annotations drawn on it
        cv2.imwrite(output_path, result.plot())

        # Convert YOLO boxes into the unified detection format
        detections = []
        for box in result.boxes:
            detections.append({
                'type': '车辆',
                'class': result.names[int(box.cls[0].item())],
                'confidence': box.conf[0].item(),
                'bbox': box.xyxy[0].tolist()
            })

        logger.info(f"车辆检测完成，检测到 {len(detections)} 个目标")
        return output_path, detections

    except Exception as e:
        logger.error(f"车辆检测失败: {str(e)}")
        raise

def inference_plate(model, image_path):
    """
    Run license plate inference on a single image.

    Args:
        model: license plate detection model (detectron2, eval mode)
        image_path: path to the image file

    Returns:
        tuple: (detected instances moved to CPU, RGB image array).
        Note: the original docstring claimed only `instances` was returned;
        the function has always returned the image as well.

    Raises:
        ValueError: if the image cannot be read.
    """
    # Load the image; fail explicitly instead of letting cvtColor crash on None
    # (consistent with the ValueError raised by the other functions here)
    image = cv2.imread(image_path)
    if image is None:
        error_msg = f"无法读取图像: {image_path}"
        logger.error(error_msg)
        raise ValueError(error_msg)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Run inference; detectron2 expects a CHW float32 tensor per input dict
    with torch.no_grad():
        inputs = {"image": torch.as_tensor(image.astype("float32").transpose(2, 0, 1))}
        outputs = model([inputs])[0]

    instances = outputs["instances"].to("cpu")

    return instances, image

def run_plate_detection(preprocessed_image_path, selected_model_name, output_dir):
    """
    Run license plate detection on a preprocessed image.

    The model chosen via selected_model_name runs first. If it detects
    nothing, or its best score is below CONFIDENCE_THRESHOLD, the other
    plate model is also tried and the higher-scoring result is kept. The
    winning box is cropped, saved, and passed to OCR.

    Args:
        preprocessed_image_path (str): path to the preprocessed image
        selected_model_name (str): "green_plate_detection" selects the
            new-energy plate model; any other value selects the regular one
        output_dir (str): directory for the annotated output image

    Returns:
        tuple: (annotated image path,
                cropped plate image path or None,
                OCR result image path or None,
                recognized plate text ("" if none),
                list of detection dicts)

    Raises:
        ValueError: if the input image cannot be read.
    """
    logger.info(f"开始车牌检测: {preprocessed_image_path}, 使用模型: {selected_model_name}")
    
    try:
        # Pick the model matching the requested name
        if selected_model_name == "green_plate_detection":
            # New-energy (green) plate model
            model = load_green_license_plate_model()
            logger.info("使用新能源车牌识别模型")
        else:
            # Regular plate model
            model = load_license_plate_model()
            logger.info("使用普通车牌识别模型")
        
        # Read the image (BGR); used later for cropping and as fallback output
        img = cv2.imread(preprocessed_image_path)
        if img is None:
            error_msg = f"无法读取图像: {preprocessed_image_path}"
            logger.error(error_msg)
            raise ValueError(error_msg)
        
        # Run plate detection with the selected model
        instances, image = inference_plate(model, preprocessed_image_path)
        
        # Timestamp used for all output file names
        # NOTE(review): second-resolution timestamps can collide under
        # concurrent requests — confirm uniqueness is handled by the caller
        timestamp = datetime.now().strftime('%m%d%H%M%S')
        
        # Decide whether the other model should also be tried
        need_try_another_model = False
        
        if len(instances) == 0:
            # Nothing detected: try the other model
            need_try_another_model = True
        elif len(instances) > 0:
            # Something detected: check its confidence
            max_score_index = torch.argmax(instances.scores)
            max_score = instances.scores[max_score_index].item()
            
            # Low confidence also triggers the fallback model
            if max_score < CONFIDENCE_THRESHOLD:
                logger.warning(f"检测到车牌但置信度过低 ({max_score:.3f} < {CONFIDENCE_THRESHOLD})，尝试另一个模型")
                need_try_another_model = True
        
        # Fall back to the other model if needed
        if need_try_another_model:
            if selected_model_name != "green_plate_detection":
                logger.warning("普通车牌模型检测结果不理想，尝试使用新能源车牌模型...")
                # Load the new-energy plate model
                green_model = load_green_license_plate_model()
                # Run new-energy plate detection
                green_instances, green_image = inference_plate(green_model, preprocessed_image_path)
                
                # Keep the green model's result if it found a plate and scored higher
                if len(green_instances) > 0:
                    green_max_score_index = torch.argmax(green_instances.scores)
                    green_max_score = green_instances.scores[green_max_score_index].item()
                    
                    # Original model had no result, or the green model scored higher.
                    # Short-circuit keeps max_score unevaluated when it was never set.
                    if len(instances) == 0 or green_max_score > max_score:
                        logger.info(f"使用新能源车牌模型结果，置信度: {green_max_score:.3f}")
                        instances = green_instances
                        image = green_image
                        
            else:
                logger.warning("新能源车牌模型检测结果不理想，尝试使用普通车牌模型...")
                # Load the regular plate model
                normal_model = load_license_plate_model()
                # Run regular plate detection
                normal_instances, normal_image = inference_plate(normal_model, preprocessed_image_path)
                
                # Keep the regular model's result if it found a plate and scored higher
                if len(normal_instances) > 0:
                    normal_max_score_index = torch.argmax(normal_instances.scores)
                    normal_max_score = normal_instances.scores[normal_max_score_index].item()
                    
                    # Original model had no result, or the regular model scored higher
                    if len(instances) == 0 or normal_max_score > max_score:
                        logger.info(f"使用普通车牌模型结果，置信度: {normal_max_score:.3f}")
                        instances = normal_instances
                        image = normal_image
        
        # Neither model detected anything: save the input unchanged and return empty
        if len(instances) == 0:
            logger.warning("两种车牌模型均未检测到任何车牌")
            detections = []
            
            # Output file name ("_0" suffix marks the annotated image)
            output_filename = f"{timestamp}_0.jpg"
            output_path = os.path.join(output_dir, output_filename)
            
            # Save the original image as the result
            cv2.imwrite(output_path, img)
            
            return output_path, None, None, "", detections
        
        # Take the highest-confidence detection (recomputed: instances may
        # have been replaced by the fallback model's result above)
        max_score_index = torch.argmax(instances.scores)
        max_score = instances.scores[max_score_index].item()
        bbox = instances.pred_boxes.tensor[max_score_index].numpy().astype(int)
        x1, y1, x2, y2 = bbox
        
        # Build the unified detection result list
        detections = [
            {
                'type': '车牌',
                'confidence': max_score,
                'bbox': [float(x1), float(y1), float(x2), float(y2)]
            }
        ]
        
        # Ensure the dataset metadata has class names for visualization
        metadata = MetadataCatalog.get("ccpd_test")
        if not hasattr(metadata, "thing_classes"):
            metadata.thing_classes = ["license_plate"]
        
        # Draw the predictions (image is RGB at this point)
        v = Visualizer(image, metadata, scale=1.2)
        out = v.draw_instance_predictions(instances)
        result_image = out.get_image()
        
        # Convert back to BGR for saving with OpenCV
        result_image_bgr = cv2.cvtColor(result_image, cv2.COLOR_RGB2BGR)
        
        # Save the annotated image
        output_filename = f"{timestamp}_0.jpg"
        output_path = os.path.join(output_dir, output_filename)
        cv2.imwrite(output_path, result_image_bgr)
        
        # Crop the plate from the original BGR image and save it
        cropped_plate = img[y1:y2, x1:x2]
        cropped_filename = f"{timestamp}_1.jpg"
        cropped_path = os.path.join(CROPPED_PLATE_DIR, cropped_filename)
        cv2.imwrite(cropped_path, cropped_plate)
        logger.info(f"切割后的车牌图片已保存至: {cropped_path}")
        
        # Run OCR on the cropped plate; OCR failures are logged but not fatal
        ocr_res_img_path = None
        plate_text = ""
        try:
            ocr_res_img_path, plate_text, ocr_confidence = recognize_license_plate(cropped_path, os.path.dirname(cropped_path))
            logger.info(f"OCR识别结果: {plate_text}, 置信度: {ocr_confidence}")
            
            # Attach the OCR results to the detection entry
            detections[0]['plate_text'] = plate_text
            detections[0]['ocr_confidence'] = ocr_confidence
        except Exception as e:
            logger.error(f"OCR识别失败: {str(e)}")
        
        logger.info("车牌检测完成")
        
        return output_path, cropped_path, ocr_res_img_path, plate_text, detections
        
    except Exception as e:
        error_msg = f"车牌检测失败: {str(e)}"
        logger.error(error_msg)
        raise

if __name__ == "__main__":
    # Smoke test: run both detectors against a fixed sample image.
    # NOTE(review): this guard sits mid-file; at execution time the functions
    # defined below it do not exist yet, but it only calls ones defined above.
    test_image_path = os.path.join("uploads", "preprocessed_images", "test.jpg")
    
    try:
        # Test vehicle detection
        vehicle_result = run_vehicle_detection(test_image_path, "vehicle_detection", MARKED_VEHICLE_DIR)
        logger.info(f"车辆检测测试成功: {vehicle_result}")
        
        # Test license plate detection
        plate_result = run_plate_detection(test_image_path, "plate_detection", MARKED_PLATE_DIR)
        logger.info(f"车牌检测测试成功: {plate_result}")
    except Exception as e:
        logger.error(f"测试失败: {str(e)}")

def run_vehicle_plate_detection(preprocessed_image_path, output_dir):
    """
    Detect vehicles, then detect license plates inside each vehicle region.

    For every vehicle box found by the YOLO model, the region is cropped to a
    temporary file and the regular plate model is tried first; if its best
    score is below CONFIDENCE_THRESHOLD the new-energy model is tried too,
    and the higher-scoring result wins. Each winning plate is cropped, OCR'd,
    and all boxes (vehicles + plates) are drawn onto one combined image.

    Args:
        preprocessed_image_path (str): path to the preprocessed image
        output_dir (str): directory for the combined output image

    Returns:
        tuple: (combined annotated image path,
                list of all detection dicts (vehicles and plates),
                plate text — NOTE: holds the OCR text of the LAST plate
                processed when multiple vehicles are found; "" if none)

    Raises:
        ValueError: if the input image cannot be read.
    """
    logger.info(f"开始车辆-车牌联合检测: {preprocessed_image_path}")
    
    try:
        # 1. Load the vehicle model and both plate models up front
        vehicle_model = load_vehicle_model()
        plate_model = load_license_plate_model()
        green_plate_model = load_green_license_plate_model()
        
        # 2. Read the image (BGR)
        img = cv2.imread(preprocessed_image_path)
        if img is None:
            error_msg = f"无法读取图像: {preprocessed_image_path}"
            logger.error(error_msg)
            raise ValueError(error_msg)
        
        # 3. Run vehicle detection
        vehicle_results = vehicle_model.predict(img)
        
        # 4. Convert YOLO boxes into the unified detection format
        vehicle_detections = []
        for box in vehicle_results[0].boxes:
            x1, y1, x2, y2 = box.xyxy[0].tolist()
            confidence = box.conf[0].item()
            class_id = int(box.cls[0].item())
            class_name = vehicle_results[0].names[class_id]
            
            vehicle_detections.append({
                'type': '车辆',
                'class': class_name,
                'confidence': confidence,
                'bbox': [x1, y1, x2, y2]
            })
        
        # 5. Work on a copy so the original image stays usable for cropping
        result_image = img.copy()
        all_detections = vehicle_detections.copy()
        plate_text = ""
        
        # 6. Detect a license plate inside each detected vehicle region
        for vehicle in vehicle_detections:
            # Vehicle bounding box as integer pixel coordinates
            v_x1, v_y1, v_x2, v_y2 = map(int, vehicle['bbox'])
            
            # Clamp the box to the image bounds
            v_x1 = max(0, v_x1)
            v_y1 = max(0, v_y1)
            v_x2 = min(img.shape[1], v_x2)
            v_y2 = min(img.shape[0], v_y2)
            
            # Crop the vehicle region
            vehicle_roi = img[v_y1:v_y2, v_x1:v_x2]
            
            if vehicle_roi.size == 0:
                logger.warning(f"车辆区域无效: {v_x1},{v_y1},{v_x2},{v_y2}")
                continue
                
            # Save a temporary vehicle image for plate detection
            # NOTE(review): fixed filename — concurrent calls sharing output_dir
            # would overwrite each other's temp file; confirm single-threaded use
            temp_vehicle_path = os.path.join(output_dir, "temp_vehicle.jpg")
            cv2.imwrite(temp_vehicle_path, vehicle_roi)
            
            try:
                # Try the regular plate model first
                instances, _ = inference_plate(plate_model, temp_vehicle_path)
                
                # Evaluate the regular model's best score
                normal_plate_detected = False
                normal_plate_confidence = 0
                
                if len(instances) > 0:
                    normal_max_score_index = torch.argmax(instances.scores)
                    normal_plate_confidence = instances.scores[normal_max_score_index].item()
                    
                    # High enough confidence: accept the regular model's result
                    if normal_plate_confidence >= CONFIDENCE_THRESHOLD:
                        normal_plate_detected = True
                        logger.info(f"普通车牌模型检测到车牌，置信度: {normal_plate_confidence:.3f}")
                    else:
                        logger.warning(f"普通车牌模型置信度过低: {normal_plate_confidence:.3f} < {CONFIDENCE_THRESHOLD}")
                
                # No reliable result from the regular model: try the green model
                green_instances = None
                green_plate_confidence = 0
                
                if not normal_plate_detected:
                    logger.info("尝试使用新能源车牌模型...")
                    green_instances, _ = inference_plate(green_plate_model, temp_vehicle_path)
                    
                    if len(green_instances) > 0:
                        green_max_score_index = torch.argmax(green_instances.scores)
                        green_plate_confidence = green_instances.scores[green_max_score_index].item()
                        logger.info(f"新能源车牌模型检测到车牌，置信度: {green_plate_confidence:.3f}")
                
                # Keep whichever model scored higher
                final_instances = None
                if green_instances is not None and green_plate_confidence > normal_plate_confidence:
                    final_instances = green_instances
                    logger.info("选用新能源车牌模型结果")
                else:
                    final_instances = instances
                    logger.info("选用普通车牌模型结果")
                
                # Neither model found a plate in this vehicle: skip it
                if len(final_instances) == 0:
                    logger.warning("两种车牌模型均未检测到车牌")
                    continue
                
                # Take the highest-confidence plate box
                max_score_index = torch.argmax(final_instances.scores)
                max_score = final_instances.scores[max_score_index].item()
                bbox = final_instances.pred_boxes.tensor[max_score_index].numpy().astype(int)
                p_x1, p_y1, p_x2, p_y2 = bbox
                
                # Translate plate coordinates from ROI space back to the full image
                p_x1 += v_x1
                p_y1 += v_y1
                p_x2 += v_x1
                p_y2 += v_y1
                
                # Build the plate detection entry
                plate_detection = {
                    'type': '车牌',
                    'confidence': max_score,
                    'bbox': [float(p_x1), float(p_y1), float(p_x2), float(p_y2)]
                }
                
                # Crop and save the plate image from the original frame
                cropped_plate = img[p_y1:p_y2, p_x1:p_x2]
                timestamp = datetime.now().strftime('%m%d%H%M%S')
                cropped_filename = f"{timestamp}_plate.jpg"
                cropped_path = os.path.join(CROPPED_PLATE_DIR, cropped_filename)
                cv2.imwrite(cropped_path, cropped_plate)
                
                # Run OCR; failures are logged but do not abort the vehicle loop
                try:
                    ocr_res_img_path, plate_text, ocr_confidence = recognize_license_plate(
                        cropped_path, os.path.dirname(cropped_path)
                    )
                    logger.info(f"OCR识别结果: {plate_text}, 置信度: {ocr_confidence}")
                    
                    # Attach the OCR results to the plate entry
                    plate_detection['plate_text'] = plate_text
                    plate_detection['ocr_confidence'] = ocr_confidence
                except Exception as e:
                    logger.error(f"OCR识别失败: {str(e)}")
                
                # Add the plate to the combined detection list
                all_detections.append(plate_detection)
            except Exception as e:
                logger.error(f"车辆区域内车牌检测失败: {str(e)}")
            finally:
                # Always remove the temporary vehicle crop
                if os.path.exists(temp_vehicle_path):
                    os.remove(temp_vehicle_path)
        
        # 7. Draw every detection onto the combined result image
        timestamp = datetime.now().strftime('%m%d%H%M%S')
        output_filename = f"{timestamp}_combined.jpg"
        output_path = os.path.join(output_dir, output_filename)
        
        # Draw detection boxes
        for det in all_detections:
            bbox = det['bbox']
            x1, y1, x2, y2 = map(int, bbox)
            
            # Per-type colors (BGR)
            if det['type'] == '车辆':
                color = (0, 255, 0)  # green
            else:
                color = (0, 0, 255)  # red
            
            # Bounding box
            cv2.rectangle(result_image, (x1, y1), (x2, y2), color, 2)
            
            # Label text (English here so cv2.putText renders it correctly)
            if det['type'] == '车辆':
                label = f"Vehicle {det['class']} ({det['confidence']:.2f})"
            else:
                label = f"License Plate ({det['confidence']:.2f})"
                if 'plate_text' in det:
                    label = f"{label} - {det['plate_text']}"
            
            # Filled background rectangle behind the label
            (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
            cv2.rectangle(result_image, (x1, y1 - label_height - 10), (x1 + label_width, y1), color, -1)
            
            # Label text in white
            cv2.putText(result_image, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
        
        # 8. Save the combined result image
        cv2.imwrite(output_path, result_image)
        logger.info(f"联合检测结果已保存至: {output_path}")
        
        return output_path, all_detections, plate_text
        
    except Exception as e:
        error_msg = f"车辆-车牌联合检测失败: {str(e)}"
        logger.error(error_msg)
        raise

def run_multi_vehicle_detection(preprocessed_image_path, selected_model_name, output_dir):
    """
    Run multi-vehicle detection on a preprocessed image and save an annotated copy.

    Args:
        preprocessed_image_path (str): path to the preprocessed image
        selected_model_name (str): selected model name (only logged here)
        output_dir (str): directory for the annotated output image

    Returns:
        tuple: (annotated image path, list of detection dicts)

    Raises:
        ValueError: if the image cannot be read.
    """
    logger.info(f"开始多车检测: {preprocessed_image_path}, 使用模型: {selected_model_name}")

    try:
        model = load_multi_vehicle_model()

        frame = cv2.imread(preprocessed_image_path)
        if frame is None:
            error_msg = f"无法读取图像: {preprocessed_image_path}"
            logger.error(error_msg)
            raise ValueError(error_msg)

        # Run inference
        result = model.predict(frame)[0]

        # Timestamped output name; the "_0" suffix marks the annotated image
        timestamp = datetime.now().strftime('%m%d%H%M%S')
        output_path = os.path.join(output_dir, f"{timestamp}_0.jpg")

        # Save the image with YOLO's own annotations drawn on it
        cv2.imwrite(output_path, result.plot())

        # Convert YOLO boxes into the unified detection format
        detections = []
        for box in result.boxes:
            detections.append({
                'type': '车辆',
                'class': result.names[int(box.cls[0].item())],
                'confidence': box.conf[0].item(),
                'bbox': box.xyxy[0].tolist()
            })

        logger.info(f"多车检测完成，检测到 {len(detections)} 个目标")
        return output_path, detections

    except Exception as e:
        logger.error(f"多车检测失败: {str(e)}")
        raise