import io
import os
from collections import deque

import cv2
import numpy as np
from ultralytics import YOLO

from src.detector import Detector
from src.ms_server_api import MsServerApi

# Shared service clients, instantiated once at import time and reused by every
# helper below: the upload/OSS API client and the YOLO-based detector wrapper.
ms_server_api = MsServerApi()
detector = Detector()


def image_to_bytes(img):
    """Encode an image (ndarray) as JPEG and return it as a BytesIO stream.

    Returns None when JPEG encoding fails.
    """
    ok, encoded = cv2.imencode(".jpg", img)
    if not ok:
        return None
    return io.BytesIO(encoded)


def video_to_bytes(video_path):
    """Read a video file from disk and return its full contents as a BytesIO stream."""
    with open(video_path, "rb") as fh:
        payload = fh.read()
    return io.BytesIO(payload)


def predict_image(img_path, predict_mark):
    """Run target detection on a single image and upload the annotated result.

    :param img_path: path of the source image
    :param predict_mark: target identifier passed to the detector (e.g. head/person/helmet)
    :return: OSS object of the annotated image, or None when nothing was
             detected or JPEG encoding failed
    """
    results, detected = detector.predict_target(img_path, predict_mark)
    if not detected:
        return None
    img_bytes = image_to_bytes(results[0].plot())
    # image_to_bytes returns None when cv2.imencode fails; previously this
    # crashed with AttributeError on .getvalue().
    if img_bytes is None:
        return None
    return ms_server_api.upload_bytes(img_bytes.getvalue(), "fire_predict_file" + ".jpg")


def process_hat_video2(video_path, filename):
    """Detect helmet targets in a video and export the first contiguous
    detected segment as an annotated clip.

    Buffers every decoded frame (with its inference results) in memory, finds
    the first run of consecutive frames with a detection, then renders just
    that run into `filename`.

    :param video_path: path of the source video
    :param filename: output path for the annotated clip
    :return: (first detected frame as BytesIO JPEG or None,
              annotated clip as BytesIO); (None, None) when nothing detected
    """
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)

    start_frame, end_frame = None, None
    frame_idx = 0
    first_frame_img = None
    frames_buffer = []  # (frame index, raw frame, inference results)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        results, detected = detector.predict_helmet(frame)
        frames_buffer.append((frame_idx, frame, results))

        if detected:
            if start_frame is None:
                start_frame = frame_idx
                if results:
                    first_frame_img = results[0].plot().copy()
            end_frame = frame_idx
        elif start_frame is not None:
            # First non-detected frame after the run: the segment is complete.
            break

        frame_idx += 1

    cap.release()

    if start_frame is None or end_frame is None:
        return None, None

    # Use one frame of the segment to determine the output resolution.
    # Buffer positions coincide with frame indices because every decoded frame
    # is appended exactly once.
    _, sample_frame, sample_results = frames_buffer[start_frame]
    # Guard: fall back to the raw frame if inference produced no result
    # objects (previously results[0] crashed on an empty list).
    sample_annotated = sample_results[0].plot() if sample_results else sample_frame
    h, w = sample_annotated.shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(filename, fourcc, fps, (w, h))

    for idx, frame, results in frames_buffer:
        if start_frame <= idx <= end_frame:
            out.write(results[0].plot() if results else frame)
    out.release()

    # first_frame_img can be None when the triggering frame had no result
    # objects; previously image_to_bytes(None) crashed inside cv2.imencode.
    img_stream = image_to_bytes(first_frame_img) if first_frame_img is not None else None
    return img_stream, video_to_bytes(filename)


def process_helmet_video_fragment(sourcefile_tmp_path, detected_filename, predict_mark):
    """Extract an alert clip around the first detected target in a video.

    Takes the frame where a target is first detected and extends the clip by
    5 seconds on each side (~10 s alert clip). A rolling buffer of the most
    recent raw frames is kept so the leading context can actually be written
    out — previously those frames had already been consumed when detection
    fired, so the clip started at the detection frame despite the stated
    intent.

    :param sourcefile_tmp_path: temp path where the source video is saved
    :param detected_filename: output path for the alert clip
    :param predict_mark: target identifier: head, person, helmet
    :return: (OSS object of the first detected frame image, OSS object of the
             alert clip), or (None, None) when the video is unreadable or no
             target was found
    """
    cap = cv2.VideoCapture(sourcefile_tmp_path)
    fps = cap.get(cv2.CAP_PROP_FPS)

    if not cap.isOpened() or fps <= 0:
        return None, None

    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    buffer = int(5 * fps)  # 5 s of context on each side -> ~10 s alert clip

    start_frame, end_frame = None, None
    frame_idx = 0
    first_frame_img = None

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    video_writer = None
    # Rolling buffer of the most recent raw frames, flushed into the clip the
    # moment the first detection fires.
    pre_buffer = deque(maxlen=buffer)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        results, detected = detector.predict_target(frame, predict_mark)

        if detected and start_frame is None:
            # First detection: fix the clip window.
            start_frame = max(0, frame_idx - buffer)
            end_frame = min(total_frames - 1, frame_idx + buffer)
            if results:
                first_frame_img = results[0].plot().copy()

            h, w = frame.shape[:2]
            video_writer = cv2.VideoWriter(detected_filename, fourcc, fps, (w, h))
            # Write the buffered pre-detection context (raw frames) first.
            for buffered in pre_buffer:
                video_writer.write(buffered)
            pre_buffer.clear()

        if start_frame is not None and start_frame <= frame_idx <= end_frame:
            annotated = results[0].plot() if results else frame
            video_writer.write(annotated)
        else:
            pre_buffer.append(frame)

        # Everything past end_frame is irrelevant; stop decoding early.
        if end_frame is not None and frame_idx > end_frame:
            break

        frame_idx += 1

    cap.release()
    if video_writer:
        video_writer.release()

    if start_frame is None or end_frame is None:
        return None, None

    # Convert results to byte streams and upload.
    img_bytes = image_to_bytes(first_frame_img)
    video_bytes = video_to_bytes(detected_filename)

    img_oss = ms_server_api.upload_bytes(img_bytes.getvalue(), "detected_first_img.jpg")
    video_oss = ms_server_api.upload_bytes(video_bytes.getvalue(), detected_filename.split("/")[-1])

    return img_oss, video_oss

# NOTE(review): mid-file imports — `os` and `cv2` duplicate the top-of-file
# imports, and `deque` is used by process_helmet_video below. These should
# live at the top of the module; kept here to avoid reordering side effects.
import os
from collections import deque
import cv2

def process_helmet_video(sourcefile_tmp_path, detected_filename, predict_mark, pre_seconds=3):
    """
    Take the first frame where a target is detected, extend it by pre_seconds
    before/after as the alert clip, and also render an annotated version of
    the entire video.
    :param sourcefile_tmp_path: temp path where the source video is saved
    :param detected_filename: output path for the alert clip (with directory)
    :param predict_mark: target identifier: head, person, helmet
    :param pre_seconds: seconds of context on each side, default 3
    :return: (first-alert-frame image OSS or None, alert clip OSS or None,
              full annotated video OSS or None)
    """
    cap = cv2.VideoCapture(sourcefile_tmp_path)
    fps = cap.get(cv2.CAP_PROP_FPS)

    if not cap.isOpened() or fps <= 0:
        return None, None, None

    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # may be 0 or -1 (e.g. for streams)
    buffer_frames = max(1, int(pre_seconds * fps))

    start_frame, end_frame = None, None
    frame_idx = 0
    first_frame_img = None

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")

    # Alert-clip writer (lazily initialized on first detection)
    warning_writer = None
    warning_active = False

    # Full-video writer: "<base>_full<ext>" next to the alert clip
    base, ext = os.path.splitext(detected_filename)
    full_detected_filename = base + "_full" + (ext if ext else ".mp4")
    full_writer = None

    # deque caching the last buffer_frames annotated frames (used to back-fill
    # the alert clip with pre-detection context)
    pre_buffer = deque(maxlen=buffer_frames)

    # Width/height captured from the first frame for writer initialization
    w = h = None

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Capture frame dimensions once
            if h is None or w is None:
                h, w = frame.shape[:2]

            # 1) Inference
            results, detected = detector.predict_target(frame, predict_mark)

            # 2) Produce the annotated frame (copy so later operations cannot
            #    overwrite the buffer contents)
            if results and len(results) > 0:
                annotated = results[0].plot().copy()
            else:
                annotated = frame.copy()

            # 3) Initialize the full-video writer on demand and write every frame
            if full_writer is None:
                full_writer = cv2.VideoWriter(full_detected_filename, fourcc, fps, (w, h))
            full_writer.write(annotated)

            # 4) On the FIRST detection: fix the alert window, flush the
            #    pre-buffer into the alert clip, then write the current frame
            if detected and start_frame is None:
                # Compute start/end (fall back to streaming arithmetic when
                # total_frames is unknown).
                # NOTE(review): both branches compute start_frame identically;
                # only end_frame differs.
                if total_frames > 0:
                    start_frame = max(0, frame_idx - buffer_frames)
                    end_frame = min(total_frames - 1, frame_idx + buffer_frames)
                else:
                    start_frame = max(0, frame_idx - buffer_frames)
                    end_frame = frame_idx + buffer_frames  # streaming: may exceed the real frame count, which is fine

                # First-alert image (use the current annotated frame so it is
                # never None)
                first_frame_img = annotated.copy()

                # Initialize the alert-clip writer
                warning_writer = cv2.VideoWriter(detected_filename, fourcc, fps, (w, h))

                # Write the pre-buffer in order (the most recent frames seen
                # before the detection)
                for buf_frame in pre_buffer:
                    warning_writer.write(buf_frame)

                # Write the current frame too (pre_buffer never contains the
                # current frame because it is appended only at loop end)
                warning_writer.write(annotated)

                warning_active = True

            else:
                # 5) While the alert window is active and end_frame has not
                #    been reached, keep writing into the alert clip
                if warning_active:
                    if frame_idx <= end_frame:
                        # The triggering frame was already written in the
                        # branch above; this branch only runs for later frames,
                        # so no frame is written twice.
                        warning_writer.write(annotated)
                    else:
                        # Alert window is over: close the writer but keep
                        # looping (the full video still needs every frame)
                        warning_active = False
                        if warning_writer:
                            warning_writer.release()
                            warning_writer = None

            # Append the current annotated frame to the pre-buffer for any
            # future detection
            pre_buffer.append(annotated.copy())

            frame_idx += 1

    finally:
        cap.release()
        if warning_writer:
            warning_writer.release()
        if full_writer:
            full_writer.release()

    # Upload: the full annotated video is uploaded regardless of detections
    full_video_oss = None
    try:
        full_video_bytes = video_to_bytes(full_detected_filename)
        full_video_oss = ms_server_api.upload_bytes(full_video_bytes.getvalue(), os.path.basename(full_detected_filename))
    except Exception as e:
        # Upload failed; degrade to None (NOTE(review): the exception is
        # swallowed silently — consider logging it)
        full_video_oss = None

    # Without a detection there is no alert image/clip to upload
    if start_frame is None:
        return None, None, full_video_oss

    # Detection occurred: upload the first alert frame and the alert clip
    img_oss = None
    video_oss = None
    try:
        img_bytes = image_to_bytes(first_frame_img)
        img_oss = ms_server_api.upload_bytes(img_bytes.getvalue(), "detected_first_img.jpg")
    except Exception:
        img_oss = None

    try:
        video_bytes = video_to_bytes(detected_filename)
        video_oss = ms_server_api.upload_bytes(video_bytes.getvalue(), os.path.basename(detected_filename))
    except Exception:
        video_oss = None
    return img_oss, video_oss, full_video_oss


def check_blur(frame1, frame2, area_threshold=900):
    """
    Occlusion detection based on frame differencing (background subtraction).
    :param frame1: previous frame
    :param frame2: current frame
    :param area_threshold: minimum changed-region area; smaller regions are ignored
    :return: (is_obscure, changed_areas)
    """
    # Pipeline: absolute difference -> grayscale -> smooth -> binarize -> dilate
    delta = cv2.absdiff(frame1, frame2)
    grayscale = cv2.cvtColor(delta, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.GaussianBlur(grayscale, (5, 5), 0)
    _, binary = cv2.threshold(smoothed, 20, 255, cv2.THRESH_BINARY)
    expanded = cv2.dilate(binary, None, iterations=3)

    # Collect bounding boxes of every sufficiently large changed region
    contours, _ = cv2.findContours(expanded, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    changed_areas = [
        cv2.boundingRect(contour)
        for contour in contours
        if cv2.contourArea(contour) >= area_threshold
    ]

    # Any large changed region suggests possible occlusion
    return bool(changed_areas), changed_areas


# -------- 图片检测 --------
def detect_image_blur(image_path, threshold=100.0):
    """Detect whether an image is blurry using the variance of its Laplacian.

    BUGFIX: this previously delegated to check_blur(image, threshold), but
    check_blur() compares TWO frames and returns (flag, area_list), so the
    call signature was wrong and formatting `score` as a float crashed.
    The sharpness score is now computed directly (Laplacian variance); a
    score below `threshold` means blurry.

    :param image_path: path of the image to check
    :param threshold: sharpness score below which the image counts as blurry
    :return: OSS object of the uploaded image when blurry, otherwise False
    """
    image = cv2.imread(image_path)
    if image is None:  # unreadable path or corrupt file
        return False
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    score = cv2.Laplacian(gray, cv2.CV_64F).var()
    is_blur = score < threshold
    if is_blur:
        img_bytes = image_to_bytes(image)
        img_oss = ms_server_api.upload_bytes(img_bytes.getvalue(), "detected_blur_img.jpg")
        print(f"模糊检测结果: {'模糊/遮挡' if is_blur else '清晰'}, 清晰度分数={score:.2f}")
        return img_oss
    return is_blur


# -------- 视频检测 --------
def detect_video_blur(video_path):
    """Scan a video for occlusion using consecutive-frame differencing.

    Compares each consecutive frame pair with check_blur(); on the first
    frame flagged as occluded, uploads that frame as a JPEG and returns the
    OSS object. Returns an empty list when no occlusion is found or the
    video cannot be read (the original per-frame result accumulation was
    commented out, so the list was always empty — removed as dead code).

    :param video_path: path of the video to scan
    :return: OSS object of the first occluded frame, or [] if none
    """
    cap = cv2.VideoCapture(video_path)
    ret, frame1 = cap.read()
    ret, frame2 = cap.read()

    # `ret` reflects the most recent read: the loop stops as soon as the
    # stream is exhausted, so frame1/frame2 are always valid inside it.
    while cap.isOpened() and ret:
        is_obscure, _areas = check_blur(frame1, frame2)
        if is_obscure:
            img_bytes = image_to_bytes(frame2)
            cap.release()
            return ms_server_api.upload_bytes(img_bytes.getvalue(), "detected_blur_img.jpg")

        # Slide the window forward by one frame
        frame1 = frame2
        ret, frame2 = cap.read()

    cap.release()
    return []
