import time

import cv2
import h5py
import numpy as np
from tqdm import tqdm
from ultralytics import YOLO

# ========== Configuration parameters ==========
HDF5_PATH = 'E:/EFY/003无人机数据/DJI_0813_4.h5'  # HDF5 file holding per-frame image patches (groups named frame_<idx>)
INPUT_VIDEO_PATH = 'E:/EFY/002无人机视频/DJI_0813.MP4'  # source video whose frames get annotated
OUTPUT_VIDEO_PATH = 'E:/EFY/003无人机数据/DJI_0813_HDF5_300_1280,720.MP4'  # rendered 1280x720 output video
MODEL_PATH = 'weights/visdrone+mydata100+crowdhuman200.pt'  # YOLO weights file
CONF_THRESHOLD = 0.4  # minimum detection confidence to keep a box

# ========== Main processing function ==========
def infer_and_render(hdf5_path, input_video_path, output_video_path, model_path):
    """Run YOLO inference on pre-cut patches from an HDF5 file and render
    the detections onto the corresponding video frames.

    The HDF5 file is expected to contain one group per frame, named
    ``frame_<idx>``. Each dataset inside a group is an image patch whose
    ``origin_x`` / ``origin_y`` attributes give the patch's top-left corner
    within the (resized) 1280x720 frame, so patch-local box coordinates can
    be translated back into full-frame coordinates.

    Args:
        hdf5_path: Path to the HDF5 file with per-frame patch groups.
        input_video_path: Source video to read frames from.
        output_video_path: Destination MP4 with boxes and labels drawn.
        model_path: Path to the YOLO weights file.
    """
    model = YOLO(model_path)
    cap = cv2.VideoCapture(input_video_path)
    writer = None
    try:
        # Context manager guarantees the HDF5 file is closed even on error.
        with h5py.File(hdf5_path, 'r') as h5_file:
            # Patches were generated from frames resized to 1280x720, so the
            # output video must use the same fixed resolution.
            width = 1280
            height = 720
            fps = cap.get(cv2.CAP_PROP_FPS)
            writer = cv2.VideoWriter(output_video_path,
                                     cv2.VideoWriter_fourcc(*'mp4v'),
                                     fps, (width, height))

            frame_idx = 0
            total_frames = len(h5_file.keys())

            with tqdm(total=total_frames, desc="推理中") as pbar:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break

                    # Keep frame size consistent with the patch coordinates.
                    frame = cv2.resize(frame, (width, height))
                    frame_key = f'frame_{frame_idx}'
                    if frame_key not in h5_file:
                        # Video has more frames than the HDF5 file covers.
                        break

                    patches = []
                    positions = []
                    for patch_name in h5_file[frame_key]:
                        dset = h5_file[frame_key][patch_name]
                        patches.append(np.array(dset))
                        positions.append((dset.attrs['origin_x'],
                                          dset.attrs['origin_y']))

                    # Guard against frame groups with no patches: calling
                    # model.predict([]) would be wasted (and may error).
                    if patches:
                        results = model.predict(patches, conf=CONF_THRESHOLD,
                                                verbose=False)
                        for det, (ox, oy) in zip(results, positions):
                            for box in det.boxes.data.cpu().numpy():
                                x1, y1, x2, y2, conf, cls = box
                                # Translate patch-local coords to frame coords.
                                x1 = int(x1 + ox)
                                y1 = int(y1 + oy)
                                x2 = int(x2 + ox)
                                y2 = int(y2 + oy)
                                label = f'{model.names[int(cls)]} {conf:.2f}'
                                cv2.rectangle(frame, (x1, y1), (x2, y2),
                                              (0, 255, 0), 2)
                                # Clamp label y so text stays inside the frame.
                                cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                            (255, 255, 255), 1)

                    writer.write(frame)
                    frame_idx += 1
                    pbar.update(1)
    finally:
        # Release video handles even if inference fails part-way through,
        # so the partially written output file is finalized properly.
        cap.release()
        if writer is not None:
            writer.release()
    print(f"[✅ DONE] 推理完成，输出视频保存在 {output_video_path}")

# ========== Script entry point ==========
if __name__ == '__main__':
    start_time = time.time()

    infer_and_render(HDF5_PATH, INPUT_VIDEO_PATH, OUTPUT_VIDEO_PATH, MODEL_PATH)

    # BUG FIX: `end_time` was referenced below but never assigned, which
    # raised a NameError after the whole (long) inference run had finished.
    end_time = time.time()

    # Print formatted start/end wall-clock timestamps of the run.
    print(f"\n程序开始时间：{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))}")
    print(f"程序结束时间：{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))}")