"""RTMP server push/pull streaming: pull a raw RTMP stream, run YOLO detection, push the annotated stream back out."""
import time

import cv2
import numpy as np
import subprocess
from ultralytics import YOLO

# 1. RTMP stream endpoints.
# RTMP_INPUT = "rtmp://ns8.indexforce.com/home/mystream"  # alternative source
# RTMP_INPUT = "rtmp://video.godouav.com/live/1816437285179953152181735212?appId=zkytApp001"  # alternative source
RTMP_INPUT = "rtmp://10.1.38.245:1935/live/raw_stream3"  # raw (unprocessed) input stream
RTMP_OUTPUT = "rtmp://localhost/live/stream"  # annotated AI-detection output stream

# 2. Load the YOLO detection model onto the GPU (requires CUDA).
# Alternative weight files (accident / face / abnormal-behavior detectors):
# model = YOLO("D:/code_work/git/police_ai/weight/yolo_accident.pt").to("cuda")
model = YOLO("D:/code_work/git/police_ai/weight/yolo11l.pt").to("cuda")
# model = YOLO("D:/code_work/git/police_ai/weight/yolo_face.pt").to("cuda")
# model = YOLO("D:/code_work/git/police_ai/weight/yolo_abnormal.pt").to("cuda")

def process_stream():
    """Pull frames from RTMP_INPUT, run YOLO detection, push annotated frames to RTMP_OUTPUT.

    Frames are read with OpenCV, annotated with the module-level ``model``,
    and piped as raw BGR bytes into an FFmpeg subprocess that re-encodes
    them to FLV/RTMP.

    Returns:
        bool: True when the stream ended normally (read failure / EOF after a
        successful open); False when the stream could not be opened, FFmpeg
        died, or an unexpected error occurred.
    """
    # 3. Open the incoming RTMP stream.
    cap = cv2.VideoCapture(RTMP_INPUT)
    if not cap.isOpened():
        print("❌ 无法打开 RTMP 流")
        return False

    # 4. Probe stream geometry and frame rate; some RTMP streams report
    # 0 FPS, so fall back to a default of 25.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 25

    # 5. FFmpeg reads raw BGR24 frames on stdin and re-encodes them as FLV/RTMP.
    ffmpeg_cmd = [
        "ffmpeg", "-y", "-f", "rawvideo", "-vcodec", "rawvideo",
        "-pix_fmt", "bgr24", "-s", f"{width}x{height}", "-r", str(fps),
        "-i", "-", "-c:v", "libx264", "-preset", "ultrafast", "-f", "flv",
        RTMP_OUTPUT
    ]

    # 6. Start the FFmpeg push process.
    process = subprocess.Popen(ffmpeg_cmd, stdin=subprocess.PIPE)

    try:
        # 7. Main per-frame loop.
        while cap.isOpened():
            frame_start = time.time()
            ret, frame = cap.read()
            if not ret:
                print("❌ 视频读取失败")
                break

            # Detection restricted to class id 7 (truck in the COCO label set
            # — TODO confirm against the loaded weights' class list).
            # FIX: ultralytics expects imgsz as (height, width); the original
            # passed (width, height), transposing the inference size.
            results = model.predict(frame, imgsz=(height, width), classes=[7])

            # Draw detection boxes onto a copy of the frame.
            annotated_frame = results[0].plot()

            # Push the annotated frame to FFmpeg. A dead FFmpeg process
            # surfaces here as BrokenPipeError — report it distinctly instead
            # of letting the generic handler mask the cause.
            try:
                process.stdin.write(annotated_frame.tobytes())
            except BrokenPipeError:
                print("❌ FFmpeg 推流进程已退出")
                return False
            print(time.time() - frame_start)  # per-frame latency in seconds
    except Exception as e:
        print(f"❌ 处理过程中发生错误: {str(e)}")
        return False
    finally:
        # 8. Always release the capture and shut FFmpeg down. Guard the pipe
        # close: closing an already-broken pipe raises OSError, which would
        # otherwise skip process.wait() and leak a zombie FFmpeg process.
        cap.release()
        if process.stdin is not None and not process.stdin.closed:
            try:
                process.stdin.close()
            except OSError:
                pass
        process.wait()
    return True

# Supervisor loop: reconnect forever, pausing 5 seconds between attempts.
while True:
    print("正在尝试连接视频流...")
    completed = process_stream()
    # A normal end-of-stream and a failure both lead to a retry; only the
    # message differs.
    message = (
        "视频流处理完成，准备重新连接..."
        if completed
        else "视频流处理失败，5秒后重试..."
    )
    print(message)
    time.sleep(5)  # back off before the next connection attempt
