import cv2
from ultralytics import YOLO
import time
import subprocess
import json
import os
from typing import List, Tuple


# ==============================
# 🛠️ Utility functions (all defined at the top of the file)
# ==============================

def format_time(seconds: float) -> str:
    """Render a duration in seconds as an ``HH:MM:SS.mmm`` timestamp string."""
    whole_minutes, secs = divmod(seconds, 60.0)
    hours, minutes = divmod(int(whole_minutes), 60)
    return f"{hours:02}:{minutes:02}:{secs:06.3f}"


def merge_intervals(intervals: List[Tuple[float, float]]) -> List[Tuple[float, float]]:
    """Merge overlapping or touching ``(start, end)`` intervals.

    Args:
        intervals: List of ``(start, end)`` pairs in any order.

    Returns:
        A new sorted list where any intervals with ``curr_start <= prev_end``
        have been coalesced. Returns ``[]`` for empty input.

    Note:
        Uses ``sorted()`` instead of in-place ``.sort()`` so the caller's
        list is never mutated (the original implementation sorted it in place).
    """
    if not intervals:
        return []
    ordered = sorted(intervals)
    merged = [ordered[0]]
    for curr_start, curr_end in ordered[1:]:
        prev_start, prev_end = merged[-1]
        if curr_start <= prev_end:
            # Overlapping or touching: extend the last merged interval.
            merged[-1] = (prev_start, max(prev_end, curr_end))
        else:
            merged.append((curr_start, curr_end))
    return merged


def get_video_duration(video_path: str) -> float:
    """Return the duration of *video_path* in seconds, as reported by ffprobe.

    Raises:
        RuntimeError: if ffprobe cannot be run, fails, or returns
            unparseable/missing duration metadata.
    """
    probe_cmd = [
        'ffprobe',
        '-v', 'quiet',
        '-of', 'json',
        '-show_entries', 'format=duration',
        video_path,
    ]
    try:
        probe = subprocess.run(probe_cmd, capture_output=True, text=True, check=True)
        duration_field = json.loads(probe.stdout)['format']['duration']
        return float(duration_field)
    except Exception as e:
        # Any failure (launch, exit status, JSON shape) surfaces uniformly.
        raise RuntimeError(f"无法获取视频时长: {e}")


def extract_clip(input_video: str, start: float, end: float, output_file: str) -> bool:
    """Cut the span ``[start, end)`` out of *input_video* into *output_file*.

    The video stream is copied without re-encoding; audio is re-encoded to
    128k AAC (the MP4-standard codec). Returns ``True`` when ffmpeg exits 0.
    """
    clip_cmd = [
        'ffmpeg',
        '-y',
        '-i', input_video,
        '-ss', str(start),
        '-t', str(end - start),
        '-c:v', 'copy',                     # copy the video stream untouched
        '-c:a', 'aac',                      # ✅ re-encode audio to AAC (MP4 standard)
        '-b:a', '128k',                     # audio bitrate
        '-avoid_negative_ts', 'make_zero',
        '-fflags', '+genpts',
        output_file,
    ]
    completed = subprocess.run(clip_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return completed.returncode == 0


def concat_clips(file_list: List[str], output_video: str) -> bool:
    """Concatenate already-encoded clips into *output_video*.

    Uses ffmpeg's concat demuxer with stream copy (no re-encode), driven by a
    temporary list file. The list file is always removed — even if ffmpeg is
    missing and ``subprocess.run`` raises (the original implementation leaked
    it in that case).

    Args:
        file_list: Paths of the clip files, in playback order.
        output_video: Destination path.

    Returns:
        ``True`` when ffmpeg exits with status 0.
    """
    list_file = "temp_clip_list.txt"
    with open(list_file, "w") as f:
        for file in file_list:
            # concat demuxer syntax: one "file '<path>'" line per clip
            f.write(f"file '{file}'\n")

    cmd = [
        'ffmpeg',
        '-y',
        '-f', 'concat',
        '-safe', '0',
        '-i', list_file,
        '-c:v', 'copy',
        '-c:a', 'copy',
        output_video
    ]
    try:
        result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return result.returncode == 0
    finally:
        # Clean up the list file on every exit path.
        if os.path.exists(list_file):
            os.remove(list_file)


def cleanup_temp_files(temp_files: List[str]) -> None:
    """Best-effort deletion of temporary clip files.

    Missing files and deletion failures are ignored, but unlike the original
    bare ``except:`` this no longer swallows KeyboardInterrupt/SystemExit.

    Args:
        temp_files: Paths to remove; each may or may not exist.
    """
    for file in temp_files:
        try:
            # EAFP: attempting the remove avoids the exists()/remove() race.
            os.remove(file)
        except OSError:
            # Already gone or not removable — cleanup is best-effort only.
            pass


# ==============================
# 🎯 Main program
# ==============================

# -------------------------------
# Configuration
# -------------------------------
VIDEO_PATH = ("merge.mp4")
OUTPUT_VIDEO = "output_human_clips.mp4"
MODEL_NAME = "yolov8n.pt"  # smallest/fastest YOLOv8 checkpoint
CONFIDENCE_THRESHOLD = 0.4  # minimum YOLO confidence for a detection to count
CLASSES = [0]  # detect people only (class 0 — presumably COCO "person"; verify against the model)
BUFFER_SECONDS = 10  # keep 10 s of context before and after each detection
SKIP_FRAMES = 3  # run detection on every SKIP_FRAMES-th frame only
# -------------------------------

if not os.path.exists(VIDEO_PATH):
    raise FileNotFoundError(f"视频文件不存在: {VIDEO_PATH}")

print("🚀 开始处理视频...")
total_start_time = time.time()

# ==============================
# 1. Load the YOLO model
# ==============================
print("🔧 加载 YOLO 模型...")
model_load_start = time.time()
model = YOLO(MODEL_NAME)
model_load_time = time.time() - model_load_start
print(f"✅ 模型加载耗时: {model_load_time:.2f} 秒")

# ==============================
# 2. Probe video metadata
# ==============================
try:
    video_duration = get_video_duration(VIDEO_PATH)
except Exception as e:
    print(f"❌ 无法获取视频时长: {e}")
    exit(1)

print(f"📽️  视频总时长: {video_duration:.1f} 秒")

cap = cv2.VideoCapture(VIDEO_PATH)
if not cap.isOpened():
    raise ValueError(f"无法打开视频: {VIDEO_PATH}")

# NOTE(review): OpenCV can report fps == 0 for broken metadata, which would
# divide by zero in the scan loop — consider guarding.
fps = cap.get(cv2.CAP_PROP_FPS)
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # read but not used downstream

# ==============================
# 3. Scan the video for people (the most time-consuming step)
# ==============================
print("🔍 正在扫描视频中的人...")
detection_start = time.time()
detection_times = []  # timestamps (seconds) of frames where a person was seen
frame_idx = 0
last_print_time = 0

while True:
    ret, frame = cap.read()
    if not ret:
        break

    current_time = frame_idx / fps
    # Print a progress line at most once per 10 s of video time.
    if current_time - last_print_time >= 10.0:
        print(f"   扫描进度: {current_time:.1f}s / {video_duration:.1f}s")
        last_print_time = current_time
    # ✅ Frame skipping: only run detection on every SKIP_FRAMES-th frame.
    if frame_idx % SKIP_FRAMES != 0:
        frame_idx += 1
        continue  # skip this frame
    results = model(frame, classes=CLASSES, conf=CONFIDENCE_THRESHOLD, verbose=False)
    # results[0].boxes is non-empty when at least one person was detected.
    if len(results[0].boxes) > 0:
        detection_times.append(current_time)

    frame_idx += 1

cap.release()
detection_time = time.time() - detection_start
print(f"✅ 检测完成！共 {len(detection_times)} 帧检测到人，耗时: {detection_time:.2f} 秒")

if len(detection_times) == 0:
    print("⚠️ 未检测到任何人，生成空视频")
    # Optional: write a plain-text placeholder in place of an output video.
    with open(OUTPUT_VIDEO, "w") as f:
        f.write("No human detected.\n")
    print(f"✅ 已生成提示文件: {OUTPUT_VIDEO}")
    print(f"⏱️  总处理耗时: {time.time() - total_start_time:.2f} 秒")
    print("✅ 处理速度: ∞x (无内容)")
    exit(0)

# ==============================
# 4. Merge detection timestamps into time intervals
# ==============================
interval_start = time.time()
intervals = []
for t in detection_times:
    # Pad each detection by BUFFER_SECONDS on both sides, clamped to the video.
    start = max(0, t - BUFFER_SECONDS)
    end = min(video_duration, t + BUFFER_SECONDS)
    intervals.append((start, end))

merged_intervals = merge_intervals(intervals)
interval_time = time.time() - interval_start
print(f"🎬 生成 {len(merged_intervals)} 个片段（合并耗时: {interval_time:.2f} 秒）")

# ==============================
# 5. Extract the clips
# ==============================
extract_start = time.time()
temp_files = []  # successfully extracted clip files, in order
success_count = 0

for i, (start, end) in enumerate(merged_intervals):
    temp_output = f"temp_clip_{i:03d}.mp4"
    print(f"✂️  提取片段 {i+1}/{len(merged_intervals)}: {format_time(start)} → {format_time(end)}")
    if extract_clip(VIDEO_PATH, start, end, temp_output):
        temp_files.append(temp_output)
        success_count += 1
    else:
        # Failed clips are skipped; the remaining clips are still concatenated.
        print(f"❌ 提取失败: {temp_output}")

extract_time = time.time() - extract_start
print(f"✅ 片段提取完成，成功 {success_count} 个，耗时: {extract_time:.2f} 秒")

if success_count == 0:
    print("❌ 所有片段提取失败")
    cleanup_temp_files(temp_files)
    exit(1)

# ==============================
# 6. Concatenate the clips
# ==============================
concat_start = time.time()
print("🔄 正在合并所有片段...")
if concat_clips(temp_files, OUTPUT_VIDEO):
    concat_time = time.time() - concat_start
    print(f"✅ 合并完成，耗时: {concat_time:.2f} 秒")
else:
    print("❌ 合并失败")
    cleanup_temp_files(temp_files)
    exit(1)

# ==============================
# 7. Remove temporary clip files
# ==============================
cleanup_start = time.time()
cleanup_temp_files(temp_files)
cleanup_time = time.time() - cleanup_start
print(f"🧹 清理临时文件耗时: {cleanup_time:.2f} 秒")

# ==============================
# 📊 Final performance report
# ==============================
total_elapsed = time.time() - total_start_time

print("\n" + "="*50)
print("📊 性能统计报告")
print("="*50)
print(f"⏱️  视频总时长      : {video_duration:.2f} 秒")
print(f"⚙️  模型加载耗时     : {model_load_time:.2f} 秒")
print(f"🔍 人像检测耗时     : {detection_time:.2f} 秒")
print(f"🧩 时间区间合并耗时 : {interval_time:.2f} 秒")
print(f"✂️  片段提取耗时     : {extract_time:.2f} 秒")
print(f"🔗 片段合并耗时     : {concat_time:.2f} 秒")
print(f"🧹 清理耗时         : {cleanup_time:.2f} 秒")
print(f"⚡ 总处理耗时       : {total_elapsed:.2f} 秒")

# ✅ Key metric: did we process faster than real time?
speed_ratio = video_duration / total_elapsed
print(f"🚀 处理速度倍率     : {speed_ratio:.2f}x 实时速度")

if total_elapsed < video_duration:
    print(f"✅ 成功！处理速度 **快于实时** ({speed_ratio:.2f}x)")
else:
    print(f"❌ 未达标！处理速度 **慢于实时** ({speed_ratio:.2f}x)")
    print("💡 建议：使用更轻量模型（如 yolov8n），或降低分辨率")

print(f"✅ 最终视频已保存: {OUTPUT_VIDEO}")
print("="*50)