from flask import Flask, Response
import cv2
import time
import os
from datetime import datetime

import sys

# RTSP source URL: first CLI argument if given, otherwise a hard-coded
# LAN camera default (presumably the dev-environment camera — confirm).
rtsp_url = sys.argv[1] if len(sys.argv) > 1 else "rtsp://192.168.31.24:554/1"

app = Flask(__name__)

# Read an RTSP stream, detect motion, save snapshots, and yield MJPEG parts.
def generate_mjpeg(rtsp_url, frame_interval=1, threshold=5, snapshot_interval=10,
                   snapshot_dir="D:/work_station/fullstack/axcbd/front_end/demo/public/motion_frames"):
    """Generate an MJPEG multipart stream from an RTSP source with motion detection.

    Frames are downscaled to 1/4 size for processing and streaming. Every
    ``frame_interval``-th frame is diffed against the previous reference
    frame; when the count of changed pixels exceeds ``threshold``, a
    "Motion Detected!" overlay is drawn and — at most once every
    ``snapshot_interval`` seconds — the full-resolution frame is saved to
    ``snapshot_dir``.

    Args:
        rtsp_url: RTSP stream address passed to ``cv2.VideoCapture``.
        frame_interval: Compare every N-th frame against the reference.
        threshold: Minimum number of changed (non-zero) pixels in the
            binarized diff to count as motion.
        snapshot_interval: Minimum seconds between saved snapshots.
        snapshot_dir: Directory where snapshot JPEGs are written.

    Yields:
        bytes: ``multipart/x-mixed-replace`` chunks, one JPEG per frame.
    """
    cap = cv2.VideoCapture(rtsp_url)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # keep the buffer small to reduce latency

    if not cap.isOpened():
        print("Cannot open RTSP stream")
        return

    # Create the snapshot directory once, not on every motion event.
    os.makedirs(snapshot_dir, exist_ok=True)

    frame_count = 0
    prev_frame = None       # clean (un-annotated) reference frame for diffing
    last_snapshot_time = 0  # time of the last saved snapshot

    try:
        while cap.isOpened():
            start_time = time.time()

            ret, frame = cap.read()
            if not ret:
                break

            # Downscale to 1/4 of the original size to cut processing time.
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            # Keep a clean copy BEFORE any overlay is drawn, so the text
            # pixels don't pollute the next diff (bug fix: the original
            # stored the annotated frame as the reference).
            clean_frame = small_frame.copy()

            # Compare against the reference every frame_interval frames.
            if frame_count % frame_interval == 0:
                if prev_frame is not None:
                    # Difference between current frame and the reference frame.
                    diff = cv2.absdiff(clean_frame, prev_frame)

                    # Grayscale + binarize to highlight changed regions.
                    gray_diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
                    _, thresh = cv2.threshold(gray_diff, 25, 255, cv2.THRESH_BINARY)

                    # Count changed pixels.
                    non_zero = cv2.countNonZero(thresh)

                    if non_zero > threshold:
                        # Annotate the streamed frame with motion info.
                        cv2.putText(small_frame, "Motion Detected!", (50, 50),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                        current_time = time.time()
                        # Rate-limit snapshots to one per snapshot_interval.
                        if current_time - last_snapshot_time > snapshot_interval:
                            # Save the original full-resolution frame.
                            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
                            filename = os.path.join(snapshot_dir, f"{timestamp}.jpg")
                            cv2.imwrite(filename, frame)
                            absolute_path = os.path.abspath(filename)
                            print(f"Motion Detected! Frame saved to {absolute_path}", flush=True)
                            last_snapshot_time = current_time

                # Update the reference with the clean (overlay-free) frame.
                prev_frame = clean_frame

            # Encode at JPEG quality 90 (trade-off between size and latency).
            ret, jpeg = cv2.imencode('.jpg', small_frame, [cv2.IMWRITE_JPEG_QUALITY, 90])
            if ret:
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n\r\n')

            frame_count += 1
            elapsed_time = time.time() - start_time
            # Pace output to ~30ms per frame to avoid accumulating latency.
            if elapsed_time < 0.03:
                time.sleep(0.03 - elapsed_time)

    finally:
        cap.release()

# Route that serves the camera as a continuous MJPEG stream.
@app.route('/video_feed')
def video_feed():
    """Return the RTSP camera feed as a multipart MJPEG HTTP response."""
    stream = generate_mjpeg(
        rtsp_url,
        frame_interval=1,
        threshold=4000,
        snapshot_interval=10,
    )
    return Response(stream, mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    # Listen on all interfaces so LAN clients can reach the stream.
    # NOTE(review): debug=True enables the Werkzeug interactive debugger,
    # which allows remote code execution if this port is reachable by
    # untrusted hosts — confirm this only runs on a trusted network.
    app.run(host='0.0.0.0', port=5000, debug=True, threaded=True)

