import cv2
import numpy as np
import onnxruntime as ort
import argparse
from pathlib import Path

# -------------------------------
# Configuration
# -------------------------------
INPUT_IMAGE = r"D:\20250906111554555_092240102.jpg"
# NOTE(review): filename suggests a license-plate model, but CLASSES below is
# COCO-80 — confirm this model really emits COCO class scores.
ONNX_MODEL = r"D:\CodeCNN\PlateRecognition\pd_yolov7\weights\yolov7plate.onnx"
CONF_THRESHOLD = 0.25  # minimum score to keep a detection (also passed to NMS)
IOU_THRESHOLD = 0.45   # IoU threshold for non-maximum suppression
INPUT_WIDTH = 640      # model input width (pixels)
INPUT_HEIGHT = 640     # model input height (pixels)

# Only this class is detected, cropped, and drawn.
TARGET_CLASS = "traffic light"

# COCO-80 class names; ordering is assumed to match the model's output
# channels (row[5:] in postprocess) — TODO confirm against the model.
CLASSES = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
    'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
    'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
    'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
    'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
    'hair drier', 'toothbrush'
]

# Per-class drawing colors (BGR), randomized once at import time.
COLORS = {i: [np.random.randint(0, 255) for _ in range(3)] for i in range(len(CLASSES))}
CLASS_ID_MAP = {name: idx for idx, name in enumerate(CLASSES)}  # class name -> class ID


# -------------------------------
# Image preprocessing
# -------------------------------
def preprocess(image):
    """Letterbox *image* to the model input size.

    The image is resized (aspect ratio preserved) to fit inside
    INPUT_WIDTH x INPUT_HEIGHT, then centered on a gray (114) canvas.
    Channel order is passed through unchanged (cv2.imread gives BGR;
    whether the model expects BGR or RGB is not verified here — TODO confirm).

    Returns:
        blob: float32 NCHW tensor in [0, 1], shape (1, 3, INPUT_HEIGHT, INPUT_WIDTH).
        scale: resize ratio applied to the original image.
        off_y, off_x: top/left padding offsets of the image inside the canvas.
    """
    src_h, src_w = image.shape[:2]
    scale = min(INPUT_HEIGHT / src_h, INPUT_WIDTH / src_w)
    scaled_h = int(src_h * scale)
    scaled_w = int(src_w * scale)

    # Gray canvas (YOLO's conventional 114 padding value), image centered on it.
    canvas = np.full((INPUT_HEIGHT, INPUT_WIDTH, 3), 114, dtype=np.uint8)
    off_y = (INPUT_HEIGHT - scaled_h) // 2
    off_x = (INPUT_WIDTH - scaled_w) // 2
    canvas[off_y:off_y + scaled_h, off_x:off_x + scaled_w] = cv2.resize(
        image, (scaled_w, scaled_h), interpolation=cv2.INTER_LINEAR
    )

    # HWC uint8 -> NCHW float32 in [0, 1].
    blob = canvas.transpose(2, 0, 1)[np.newaxis, ...].astype(np.float32) / 255.0
    return blob, scale, off_y, off_x


# -------------------------------
# Post-processing: decode output & NMS
# -------------------------------
def postprocess(outputs, ratio, dh, dw, original_shape):
    """Decode raw model output, keep only TARGET_CLASS hits, and apply NMS.

    Args:
        outputs: list of model outputs; outputs[0] is expected to flatten to
            (N, 85) rows of [cx, cy, w, h, objectness, 80 class scores].
        ratio: letterbox resize ratio from preprocess().
        dh, dw: letterbox top/left padding offsets from preprocess().
        original_shape: shape of the original image (h, w, ...), used to clamp boxes.

    Returns:
        (boxes, scores, class_ids) surviving NMS; boxes are [left, top, w, h]
        in original-image pixel coordinates. Empty lists when nothing is kept.

    Raises:
        ValueError: when outputs[0] has an unsupported number of dimensions.
    """
    preds = outputs[0]

    # Normalize the prediction tensor to shape (N, 85).
    rank = len(preds.shape)
    if rank == 4:
        preds = preds.squeeze(0).squeeze(0)
    elif rank == 3:
        preds = preds.squeeze(0)
    elif rank != 2:
        raise ValueError(f"Unexpected output shape: {preds.shape}")

    preds = np.array(preds)
    want_id = CLASS_ID_MAP[TARGET_CLASS]
    img_h, img_w = original_shape[0], original_shape[1]

    kept_boxes, kept_scores, kept_ids = [], [], []
    for det in preds:
        objectness = det[4]
        if objectness < CONF_THRESHOLD:
            continue

        cls_scores = det[5:]
        best_id = np.argmax(cls_scores)
        score = objectness * np.max(cls_scores)
        # Drop weak detections and everything that is not the target class.
        if score < CONF_THRESHOLD or best_id != want_id:
            continue

        # Undo the letterbox transform: remove padding, then unscale.
        cx, cy, bw, bh = det[:4]
        x1 = max(0, int((cx - bw / 2 - dw) / ratio))
        y1 = max(0, int((cy - bh / 2 - dh) / ratio))
        x2 = min(img_w, int((cx + bw / 2 - dw) / ratio))
        y2 = min(img_h, int((cy + bh / 2 - dh) / ratio))

        kept_boxes.append([x1, y1, x2 - x1, y2 - y1])
        kept_scores.append(score)
        kept_ids.append(best_id)

    if not kept_boxes:
        return [], [], []

    kept_boxes = np.array(kept_boxes, dtype=np.int32)
    kept_scores = np.array(kept_scores, dtype=np.float32)
    kept_ids = np.array(kept_ids, dtype=np.int32)

    # cv2 NMS expects [x, y, w, h] boxes; it returns indices of survivors.
    survivors = cv2.dnn.NMSBoxes(kept_boxes.tolist(), kept_scores.tolist(),
                                 CONF_THRESHOLD, IOU_THRESHOLD)

    final_boxes, final_scores, final_classes = [], [], []
    if len(survivors) > 0:
        for i in survivors.flatten():
            final_boxes.append(kept_boxes[i])
            final_scores.append(kept_scores[i])
            final_classes.append(kept_ids[i])

    return final_boxes, final_scores, final_classes


# -------------------------------
# Crop and save targets
# -------------------------------
def crop_and_save_targets(image, boxes, scores, output_dir="cropped"):
    """Crop each detected target from *image* and save it as a JPEG.

    Args:
        image: original BGR image (as loaded by cv2.imread).
        boxes: iterable of [left, top, width, height] boxes in pixel coordinates.
        scores: confidences aligned with *boxes*.
        output_dir: directory to write the crops into (created if missing).

    Returns:
        List of file paths (str) that were actually written.

    Files are named cropped_traffic_light_<idx>.jpg.
    """
    output_dir = Path(output_dir)
    # parents=True so nested output dirs (e.g. from a caller-supplied path) work.
    output_dir.mkdir(parents=True, exist_ok=True)

    cropped_paths = []
    for idx, (box, score) in enumerate(zip(boxes, scores)):
        left, top, width, height = box
        if width <= 0 or height <= 0:
            # Degenerate box (fully clipped at the image border) — nothing to crop.
            continue

        crop = image[top:top + height, left:left + width]
        filename = output_dir / f"cropped_{TARGET_CLASS.replace(' ', '_')}_{idx}.jpg"
        # BUG FIX: check imwrite's return value so the returned list only
        # contains files that were really written (bad path/codec -> False).
        if not cv2.imwrite(str(filename), crop):
            print(f"保存失败: {filename}")
            continue
        cropped_paths.append(str(filename))
        # BUG FIX: the original printed the literal placeholder "(unknown)"
        # instead of the saved file path.
        print(f"已保存: {filename} (置信度: {score:.2f})")

    return cropped_paths


# -------------------------------
# Draw detection results (optional visualization)
# -------------------------------
def draw_detections(image, boxes, scores, class_ids):
    """Return a copy of *image* with boxes and score labels drawn on it.

    All boxes use the TARGET_CLASS color/label; *class_ids* is accepted for
    interface compatibility but not consulted (upstream already filtered
    detections to the target class).
    """
    canvas = image.copy()
    color = COLORS[CLASS_ID_MAP[TARGET_CLASS]]

    for (x, y, w, h), score in zip(boxes, scores):
        caption = f"{TARGET_CLASS}: {score:.2f}"
        cv2.rectangle(canvas, (x, y), (x + w, y + h), color, 2)

        # Filled background behind the text so the label stays readable.
        (text_w, text_h), _ = cv2.getTextSize(caption, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1)
        cv2.rectangle(canvas, (x, y - text_h - 10), (x + text_w, y), color, -1)
        cv2.putText(canvas, caption, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)

    return canvas


# -------------------------------
# Main function
# -------------------------------
def main():
    """Entry point: parse CLI args, run detection, crop and visualize hits."""
    arg_parser = argparse.ArgumentParser(description="YOLOv7 ONNX 检测 traffic light 并裁剪")
    arg_parser.add_argument("--image", default=INPUT_IMAGE, help="输入图片路径")
    arg_parser.add_argument("--model", default=ONNX_MODEL, help="ONNX 模型路径")
    args = arg_parser.parse_args()

    image_path = Path(args.image)
    if not image_path.exists():
        raise FileNotFoundError(f"图像未找到: {image_path}")
    frame = cv2.imread(str(image_path))
    if frame is None:
        raise ValueError("图像加载失败，请检查路径或文件格式")

    weights_path = Path(args.model)
    if not weights_path.exists():
        raise FileNotFoundError(f"ONNX 模型未找到: {weights_path}")

    # Load the ONNX model, preferring the GPU provider when available.
    try:
        session = ort.InferenceSession(
            str(weights_path),
            providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
        )
        device = "GPU" if "CUDAExecutionProvider" in session.get_providers() else "CPU"
        print(f"✅ 模型加载成功，使用设备: {device}")
    except Exception as e:
        print("❌ 模型加载失败:", e)
        return

    # Preprocess, run inference, then decode + NMS.
    blob, ratio, dh, dw = preprocess(frame)
    feed_name = session.get_inputs()[0].name
    raw_outputs = session.run(None, {feed_name: blob})
    boxes, scores, class_ids = postprocess(raw_outputs, ratio, dh, dw, frame.shape)

    print(f"🔍 检测到 {len(boxes)} 个 '{TARGET_CLASS}' 目标")
    if len(boxes) == 0:
        print("⚠️ 未检测到任何 traffic light")
        return

    # Save one crop per detection into a per-image directory.
    saved = crop_and_save_targets(frame, boxes, scores, output_dir=f"cropped_{image_path.stem}")
    print(f"✅ 共裁剪并保存 {len(saved)} 个目标")

    # Render and save the annotated result image.
    annotated = draw_detections(frame, boxes, scores, class_ids)
    result_path = f"result_{image_path.stem}.jpg"
    cv2.imwrite(result_path, annotated)
    print(f"🎨 结果图像已保存至: {result_path}")

    # Display window (debugging only; requires a GUI environment).
    cv2.imshow("Detection Result", annotated)
    print("📌 按任意键关闭窗口...")
    cv2.waitKey(0)
    cv2.destroyAllWindows()


# Run as a script: python <file> [--image PATH] [--model PATH]
if __name__ == "__main__":
    main()