# yolov5_video_detection.py
import cv2
import numpy as np
import onnxruntime as ort
import os
import time
import argparse
from collections import defaultdict
import logging

# Logging goes to both the console and a log file next to the script.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler("video_detection.log")
    ]
)
logger = logging.getLogger("YOLOv5VideoDetection")

# COCO class names (80 classes), indexed by YOLOv5 class id.
CLASS_NAMES = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat',
    'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat',
    'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
    'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
    'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
    'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
    'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book',
    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]

# Detection tuning parameters.
CONF_THRESHOLD = 0.25   # default confidence threshold
IOU_THRESHOLD = 0.4     # NMS IoU threshold
MAX_DETECTIONS = 20     # cap on detections kept per frame
BOX_THICKNESS = 2       # bounding-box line width
FONT_SIZE = 0.7         # label font scale

# Display colors (BGR) chosen for person-focused footage.
PERSON_COLOR = (0, 165, 255)  # orange boxes for people
ACCESSORY_COLOR = (255, 100, 0)  # blue for carried accessories

def initialize_onnx_session(model_path):
    """Create a CPU ONNX Runtime session for the given model file.

    Returns:
        (session, input_name, (width, height)) on success, where the size
        comes from the model's first input tensor; (None, None, None) on
        any failure (the error is logged, not raised).
    """
    try:
        logger.info(f"[1/6] 正在初始化ONNX Runtime会话...")
        logger.info(f"模型路径: {model_path}")

        # CPU-only execution.
        session = ort.InferenceSession(model_path, providers=['CPUExecutionProvider'])

        # Read name and spatial size from the first model input (NCHW).
        first_input = session.get_inputs()[0]
        input_name = first_input.name
        input_height = int(first_input.shape[2])
        input_width = int(first_input.shape[3])

        logger.info(f"✅ ONNX Runtime模型加载成功! 输入尺寸: {input_width}x{input_height}")
        return session, input_name, (input_width, input_height)
    except Exception as e:
        logger.error(f"❌ 无法加载模型: {str(e)}")
        return None, None, None

def preprocess_frame(frame, input_size):
    """Letterbox-resize a BGR frame into an NCHW float32 tensor.

    The frame is scaled uniformly to fit inside input_size and padded with
    gray (114) at the bottom/right (content anchored top-left).

    Returns:
        (input_tensor, frame, (orig_w, orig_h), scale) on success, or
        (None, None, None, None) if preprocessing fails.
    """
    try:
        in_w, in_h = input_size
        src_h, src_w = frame.shape[:2]

        # Uniform scale factor that fits the frame inside the model input.
        ratio = min(in_w / src_w, in_h / src_h)
        dst_w = int(src_w * ratio)
        dst_h = int(src_h * ratio)

        # Gray canvas with the resized frame pasted at the top-left corner.
        canvas = np.full((in_h, in_w, 3), 114, dtype=np.uint8)
        canvas[:dst_h, :dst_w] = cv2.resize(frame, (dst_w, dst_h))

        # BGR -> RGB, scale to [0, 1], HWC -> CHW, then add batch dim.
        rgb = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
        tensor = np.transpose(rgb, (2, 0, 1))[np.newaxis, ...]

        return tensor, frame, (src_w, src_h), ratio
    except Exception as e:
        logger.error(f"❌ 帧处理错误: {str(e)}")
        return None, None, None, None

def run_inference(session, input_name, input_tensor):
    """Run one forward pass of the model.

    Returns:
        (outputs, elapsed_seconds) on success; (None, 0) on failure
        (the error is logged, not raised).
    """
    t0 = time.time()
    try:
        outputs = session.run(None, {input_name: input_tensor})
    except Exception as e:
        logger.error(f"❌ 推理失败: {str(e)}")
        return None, 0
    return outputs, time.time() - t0

def calculate_iou(box1, box2):
    """Compute the Intersection-over-Union of two axis-aligned boxes.

    Each box is (x, y, w, h) with (x, y) the top-left corner.

    Returns:
        IoU in [0, 1]; 0.0 when the boxes do not overlap, and also when
        the union has zero area (degenerate zero-size boxes), which the
        original code divided by zero on.
    """
    x1, y1, w1, h1 = box1
    x2, y2, w2, h2 = box2

    # Intersection rectangle corners.
    inter_x1 = max(x1, x2)
    inter_y1 = max(y1, y2)
    inter_x2 = min(x1 + w1, x2 + w2)
    inter_y2 = min(y1 + h1, y2 + h2)

    # No overlap at all.
    if inter_x2 < inter_x1 or inter_y2 < inter_y1:
        return 0.0

    inter_area = (inter_x2 - inter_x1) * (inter_y2 - inter_y1)
    union_area = w1 * h1 + w2 * h2 - inter_area

    # Guard against division by zero for zero-area boxes.
    if union_area <= 0:
        return 0.0

    return inter_area / union_area

def apply_nms(detections, iou_threshold=IOU_THRESHOLD):
    """Greedy, class-agnostic non-maximum suppression.

    Detections are visited in descending confidence order; a detection is
    kept only if its IoU with every already-kept detection is at or below
    iou_threshold. Results are identical to the previous pop-based loop,
    but without the O(n) list shifts of pop(0)/pop(i) on every step.

    Args:
        detections: list of dicts carrying 'confidence' and 'box' keys
            (the input list is not mutated).
        iou_threshold: overlap above which a detection is suppressed.

    Returns:
        Surviving detections, sorted by descending confidence.
    """
    ordered = sorted(detections, key=lambda d: d['confidence'], reverse=True)

    kept = []
    for candidate in ordered:
        # Keep unless it overlaps a higher-confidence kept detection.
        if all(calculate_iou(prev['box'], candidate['box']) <= iou_threshold
               for prev in kept):
            kept.append(candidate)

    return kept

def postprocess_output(outputs, orig_size, scale, conf_threshold=CONF_THRESHOLD):
    """Decode raw YOLOv5 output into filtered detections in frame coords.

    Args:
        outputs: model outputs; outputs[0] has shape [1, N, 85] with rows
            (cx, cy, w, h, objectness, 80 class scores). Coordinates are in
            letterboxed-input pixels, matching standard YOLOv5 ONNX exports
            — TODO confirm against the exported model.
        orig_size: (width, height) of the original frame.
        scale: resize factor applied by preprocess_frame.
        conf_threshold: minimum objectness and minimum final score.

    Returns:
        Up to MAX_DETECTIONS dicts with 'class_id', 'confidence',
        'box' ([x, y, w, h] in original-frame pixels) and 'class_name',
        after greedy NMS.

    Bug fixed: the previous version ignored `scale` entirely and treated
    the box coordinates as normalized [0, 1], multiplying them by the
    original frame size; input-pixel coordinates must instead be divided
    by the letterbox scale. Padding is bottom/right-only in
    preprocess_frame, so no offset subtraction is needed.
    """
    if outputs is None or len(outputs) == 0:
        return []

    predictions = outputs[0][0]  # drop the batch dimension

    orig_w, orig_h = orig_size
    detections = []

    for detection in predictions:
        objectness = detection[4]
        if objectness < conf_threshold:
            continue

        # Best class and combined score (objectness * class probability).
        scores = detection[5:85]
        class_id = int(np.argmax(scores))
        final_score = float(objectness * scores[class_id])

        if final_score < conf_threshold:
            continue

        # (center_x, center_y, width, height) in letterboxed-input pixels;
        # map back to the original frame by dividing by the resize scale.
        cx, cy, w, h = detection[:4]
        x = int((cx - w / 2) / scale)
        y = int((cy - h / 2) / scale)
        width = int(w / scale)
        height = int(h / scale)

        # Clamp the box to the frame bounds.
        x = max(0, min(x, orig_w - 1))
        y = max(0, min(y, orig_h - 1))
        width = min(width, orig_w - x)
        height = min(height, orig_h - y)

        detections.append({
            'class_id': class_id,
            'confidence': final_score,
            'box': [x, y, width, height],
            'class_name': CLASS_NAMES[class_id] if class_id < len(CLASS_NAMES) else f"类别{class_id}"
        })

    # Greedy NMS, then cap the number of reported boxes.
    filtered_detections = apply_nms(detections)
    return filtered_detections[:MAX_DETECTIONS]

def get_detection_color(class_name):
    """Return the BGR box color for a class: a dedicated color for people,
    another for carried accessories, green for everything else."""
    if class_name == 'person':
        return PERSON_COLOR
    accessory_classes = ('backpack', 'handbag', 'sports ball', 'suitcase')
    if class_name in accessory_classes:
        return ACCESSORY_COLOR
    return (0, 255, 0)

def draw_detection_boxes(frame, detections, fps, inference_time):
    """Draw boxes, labels and performance stats onto a copy of the frame.

    Args:
        frame: BGR image; left unmodified (a copy is annotated).
        detections: dicts with 'class_name', 'confidence' and
            'box' ([x, y, w, h] in frame pixels).
        fps: overall throughput figure to display.
        inference_time: per-frame model time in seconds.

    Returns:
        The annotated copy of the frame.

    Bug fixed: the label background was always drawn at y - label_height - 8,
    which placed it above the frame (invisible) for boxes touching the top
    edge; such labels are now drawn inside the box instead.
    """
    result = frame.copy()

    # Performance overlay in the top-left corner.
    cv2.putText(result, f"FPS: {fps:.1f}", 
               (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.putText(result, f"Inference: {inference_time*1000:.1f}ms", 
               (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    # Per-class tally for the on-screen summary below.
    class_counts = defaultdict(int)
    for det in detections:
        class_name = det['class_name']
        class_counts[class_name] += 1

        x, y, w, h = det['box']
        conf = det['confidence']
        color = get_detection_color(class_name)

        # Bounding box.
        cv2.rectangle(result, (x, y), (x + w, y + h), color, BOX_THICKNESS)

        # Label text and its rendered size.
        label = f"{class_name} {conf:.2f}"
        (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, FONT_SIZE, 2)

        # Place the label above the box unless that would fall off-frame
        # (box at the top edge); then draw it inside the box instead.
        if y - label_height - 8 >= 0:
            bg_top, bg_bottom = y - label_height - 8, y
            text_y = y - 6
        else:
            bg_top, bg_bottom = y, y + label_height + 8
            text_y = y + label_height + 2

        # Filled label background, then white label text on top.
        cv2.rectangle(result, (x, bg_top), (x + label_width, bg_bottom), color, -1)
        cv2.putText(result, label, 
                   (x + 3, text_y), 
                   cv2.FONT_HERSHEY_SIMPLEX, FONT_SIZE, 
                   (255, 255, 255), 2, cv2.LINE_AA)

    # Per-class detection counts under the performance overlay.
    summary_y = 100
    for class_name, count in class_counts.items():
        summary_text = f"{class_name}: {count}"
        cv2.putText(result, summary_text,
                   (10, summary_y),
                   cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                   (0, 0, 255), 2)
        summary_y += 30

    return result

def process_video(model_path, video_path, output_path, conf_threshold=CONF_THRESHOLD, skip_frames=0):
    """Run YOLOv5 detection over a video file and write an annotated copy.

    Args:
        model_path: path to the YOLOv5 ONNX model file.
        video_path: input video readable by OpenCV.
        output_path: destination for the mp4v-encoded annotated video.
        conf_threshold: detection confidence threshold.
        skip_frames: process only every (skip_frames + 1)-th frame.
            NOTE(review): skipped frames are not written to the output, so
            with skipping enabled the output is shorter than the input
            while keeping the source fps — confirm this is intended.

    Returns:
        True when processing finishes; None if the model or the video
        fails to open (errors are logged, not raised).
    """
    # Load the model; abort if the session could not be created.
    session, input_name, input_size = initialize_onnx_session(model_path)
    if session is None:
        return
    
    # Open the input video.
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        logger.error(f"❌ 无法打开视频文件: {video_path}")
        return
    
    # Source video properties, reused for the writer below.
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    
    logger.info(f"✅ 视频加载成功: {video_path}")
    logger.info(f"  尺寸: {frame_width}x{frame_height}")
    logger.info(f"  帧率: {fps:.2f} FPS")
    logger.info(f"  总帧数: {total_frames}")
    
    # Writer for the annotated output (same size/fps as the source).
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
    
    # Performance counters.
    frame_count = 0
    total_inference_time = 0
    start_time = time.time()
    
    # Main per-frame loop.
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        
        frame_count += 1
        
        # Frame skipping to speed up processing (skipped frames are also
        # dropped from the output).
        if skip_frames > 0 and frame_count % (skip_frames + 1) != 0:
            continue
        
        # Progress log every 10 frames (the counter includes skipped frames).
        if frame_count % 10 == 0:
            progress = frame_count / total_frames * 100
            logger.info(f"处理进度: {progress:.1f}% ({frame_count}/{total_frames} 帧)")
        
        # Letterbox + normalize the frame for the model.
        input_tensor, orig_frame, orig_size, scale = preprocess_frame(frame, input_size)
        if input_tensor is None:
            continue
        
        # Model forward pass.
        outputs, inference_time = run_inference(session, input_name, input_tensor)
        total_inference_time += inference_time
        
        # Decode, threshold and NMS the raw predictions.
        detections = postprocess_output(outputs, orig_size, scale, conf_threshold)
        
        # Overall throughput so far (includes read/draw/write time).
        current_time = time.time()
        elapsed_time = current_time - start_time
        current_fps = frame_count / elapsed_time if elapsed_time > 0 else 0
        
        # Draw boxes, labels and stats onto the frame.
        result_frame = draw_detection_boxes(orig_frame, detections, current_fps, inference_time)
        
        # Append the annotated frame to the output video.
        out.write(result_frame)
        
        # Optional live preview (disabled).
        # cv2.imshow('YOLOv5 视频检测', cv2.resize(result_frame, (1280, 720)))
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break
    
    # Release the capture and writer handles.
    cap.release()
    out.release()
    # cv2.destroyAllWindows()
    
    # Average performance over all frames read from the source.
    avg_inference_time = total_inference_time / frame_count * 1000 if frame_count > 0 else 0
    avg_fps = frame_count / (time.time() - start_time) if frame_count > 0 else 0
    
    logger.info(f"✅ 视频处理完成! 保存到: {output_path}")
    logger.info(f"  处理帧数: {frame_count}")
    logger.info(f"  平均推理时间: {avg_inference_time:.2f}ms")
    logger.info(f"  平均FPS: {avg_fps:.2f}")
    
    return True

def main():
    """CLI entry point: parse arguments, validate paths, run detection."""
    parser = argparse.ArgumentParser(description="YOLOv5 视频目标检测")
    parser.add_argument("-m", "--model", required=True, help="ONNX模型文件路径")
    parser.add_argument("-i", "--input", required=True, help="输入视频文件路径")
    parser.add_argument("-o", "--output", required=True, help="输出视频文件路径")
    parser.add_argument("-c", "--confidence", type=float, default=CONF_THRESHOLD, 
                        help=f"置信度阈值 (默认: {CONF_THRESHOLD})")
    parser.add_argument("-s", "--skip", type=int, default=0, 
                        help="跳帧数 (默认: 0, 不跳帧)")
    args = parser.parse_args()

    # Abort early when a required input file is missing.
    required_files = (
        (args.model, f"❌ 错误: 模型文件 {args.model} 不存在"),
        (args.input, f"❌ 错误: 输入视频文件 {args.input} 不存在"),
    )
    for path, missing_message in required_files:
        if not os.path.exists(path):
            logger.error(missing_message)
            exit(1)

    # Ensure the destination directory exists before writing.
    output_dir = os.path.dirname(args.output)
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Kick off the actual processing.
    process_video(
        model_path=args.model,
        video_path=args.input,
        output_path=args.output,
        conf_threshold=args.confidence,
        skip_frames=args.skip
    )

if __name__ == "__main__":
    main()