# -*- coding: utf-8 -*-
"""
@author: Trae
@contact: traetai@gmail.com
@software: TraeAI
@file: main.py
@time: 2024/7/29 17:00
@desc: Main program for the video object-detection system
"""
import cv2
import time
from pathlib import Path

from src.utils.config_loader import load_config
from src.core.detector import Detector
from src.utils.visualization import draw_detections

def main():
    """Run the video object-detection pipeline.

    Loads configuration, initializes the detector, reads frames from the
    configured video source, draws detections (plus a live FPS counter) on
    each frame, optionally displays and/or saves the annotated video, and
    always releases capture/writer/window resources on exit.
    """
    # 1. Load configuration.
    try:
        config = load_config()
    except FileNotFoundError as e:
        print(f"Error: {e}")
        return

    # Extract configuration sections.
    model_cfg = config['model']
    detection_cfg = config['detection']
    video_cfg = config['video']
    output_cfg = config['output']

    # 2. Initialize the detector.
    detector = Detector(model_path=model_cfg['path'], device=detection_cfg['device'])

    # 3. Open the video source.
    video_source = video_cfg['source']
    cap = cv2.VideoCapture(video_source)
    if not cap.isOpened():
        print(f"Error: Could not open video source: {video_source}")
        return

    # Query stream properties for the output writer.
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    if fps <= 0:
        # Some sources (e.g. webcams) report 0 for CAP_PROP_FPS; fall back to
        # a sane default so cv2.VideoWriter does not produce a broken file.
        fps = 30

    # 4. Configure video output.
    writer = None
    if output_cfg['save']:
        output_path = Path(output_cfg['filepath'])
        output_path.parent.mkdir(parents=True, exist_ok=True)  # ensure output dir exists
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        writer = cv2.VideoWriter(str(output_path), fourcc, fps, (frame_width, frame_height))

    # 5. Process the stream frame by frame. The try/finally guarantees that
    # the capture, writer, and any windows are released even if detection or
    # drawing raises mid-stream.
    frame_count = 0
    start_time = time.time()
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Run detection on the raw frame.
            detections = detector.detect(
                frame,
                confidence_threshold=detection_cfg['confidence_threshold'],
                target_classes=detection_cfg['target_classes']
            )

            # Draw boxes on a copy so the original frame stays untouched.
            annotated_frame = draw_detections(frame.copy(), detections)

            # Overlay the live end-to-end processing rate.
            frame_count += 1
            elapsed_time = time.time() - start_time
            current_fps = frame_count / elapsed_time if elapsed_time > 0 else 0
            cv2.putText(annotated_frame, f'FPS: {current_fps:.2f}', (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            # Display the result; pressing 'q' quits early.
            if output_cfg['show_video']:
                cv2.imshow('YOLOv8 Detections', annotated_frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            # Persist the annotated frame.
            if writer:
                writer.write(annotated_frame)
    finally:
        # 6. Release resources unconditionally.
        cap.release()
        if writer:
            writer.release()
        if output_cfg['show_video']:
            cv2.destroyAllWindows()

    print("Processing finished.")

# Script entry point: run the pipeline only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()