import argparse
import datetime
import json
import math
import os
import sys
import uuid
from typing import List, Tuple

import cv2
from ultralytics import YOLO  # 确保这与您的YOLO模型实现相匹配


class PredictInfo:
    """Holds one recognition result: class name, confidence, and timestamp."""

    def __init__(self, recog_name: str, recog_similarity: float, detection_time: str):
        # NOTE: start_time / end_time probably do not belong in PredictInfo.
        self.recog_name = recog_name
        self.recog_similarity = recog_similarity
        self.detection_time = detection_time

    def to_dict(self) -> dict:
        """Return a JSON-serializable mapping of this prediction."""
        fields = ("recog_name", "recog_similarity", "detection_time")
        return {field: getattr(self, field) for field in fields}


class SaveResult:
    """Accumulates per-clip detection records and serializes them to JSON."""

    def __init__(self):
        # One dict per saved clip, in the order clips were added.
        self.results = []

    def add_result(self, cover_image_path: str, video_path: str, start_time: datetime.datetime,
                   end_time: datetime.datetime,
                   recog_result: List[PredictInfo]):
        """Record one saved clip together with its predictions."""
        time_fmt = "%Y-%m-%d %H:%M:%S"
        record = {
            "cover_image_path": cover_image_path,
            "video_path": video_path,
            "start_time": start_time.strftime(time_fmt),
            "end_time": end_time.strftime(time_fmt),
            "recog_result": [prediction.to_dict() for prediction in recog_result],
        }
        self.results.append(record)

    def serialize(self) -> str:
        """Dump every accumulated record as pretty-printed JSON."""
        return json.dumps(self.results, indent=4)


def process_video_segment(start_time, config, start: int, model,
                          cap: cv2.VideoCapture,
                          fps: int, ) -> Tuple[
    bool, List[cv2.Mat], List[PredictInfo], bool, datetime.datetime, datetime.datetime, str]:
    """Run detection over one clip of the video.

    Starting at frame index ``start``, reads frames from ``cap`` until enough
    frames for ``config.clip_duration`` seconds have been buffered, running
    ``model`` on every ``config.skip_frame``-th frame.

    Returns a tuple:
        is_save          -- whether the clip passed the save threshold
        temp_frames      -- buffered frames (annotated when detections occurred)
        predictions      -- one PredictInfo per detected box
        end_reached      -- True when the video ended during this clip
        save_start_time  -- wall-clock start of the saved clip
        save_end_time    -- wall-clock end of the saved clip
        cover_image      -- absolute path of the first annotated frame, or None
    """
    specified_clip_duration = config.clip_duration  # requested clip length (seconds)
    end_time = start_time + datetime.timedelta(seconds=specified_clip_duration)
    if end_time > config.end_time:
        end_time = config.end_time  # clamp the clip to the configured overall end
    skip_frame = config.skip_frame  # run inference on every skip_frame-th frame
    temp_frames = []  # frames buffered for this clip
    predictions = []  # prediction metadata for every detection
    detected = False  # whether any object was detected in this clip
    total_frames_processed = 0  # frames read, whether or not inference ran
    # BUG FIX: was math.ceil(x // skip_frame) — flooring before the ceil made
    # the ceil a no-op and could undercount by one frame. Use true division.
    frames_to_save = math.ceil(
        (end_time - start_time).total_seconds() * fps / skip_frame)  # frames this clip should hold
    saved_detection_frames = 0  # frames appended to temp_frames so far
    detection_target = 0  # number of processed frames that contained detections
    is_save = True  # whether the clip should be written out
    cover_image = None  # path of the first annotated frame, set on first detection
    save_start_time = start_time  # adjusted once the first detection happens
    undetection_frames = 0  # processed frames without any detection

    cap.set(cv2.CAP_PROP_POS_FRAMES, start)
    while len(temp_frames) < frames_to_save:
        success, frame = cap.read()
        if not success:
            # End of video (or read failure): return what was collected so far.
            save_end_time = start_time + datetime.timedelta(seconds=(saved_detection_frames / fps))
            return detected, temp_frames, predictions, True, save_start_time, save_end_time, cover_image
        # The read counter advances even for frames that skip inference.
        total_frames_processed += 1
        if total_frames_processed % skip_frame == 0:
            result = model.predict(source=frame, conf=config.conf, iou=config.iou, show=False,
                                   verbose=False,
                                   vid_stride=skip_frame)
            detections = result[0].boxes
            if detections:
                # First detection of the clip: pin the saved clip's start time.
                if not detected:
                    detected = True
                    save_start_time = start_time + datetime.timedelta(seconds=(saved_detection_frames / fps))

                detection_target += 1
                annotated_frame = result[0].plot()
                temp_frames.append(annotated_frame)  # keep the annotated frame
                class_dict = result[0].names
                frame_predictions = [PredictInfo(class_dict[int(detection.cls)], float(detection.conf),
                                                 datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) for detection in
                                     detections]
                predictions.extend(frame_predictions)
                # Persist the first annotated frame as the clip's cover image.
                if len(temp_frames) == 1:
                    file_name = f'first_frame_{uuid.uuid4()}.jpg'
                    result[0].save(filename=file_name)
                    cover_image = os.path.abspath(file_name)
            else:
                undetection_frames += 1
                # Keep undetected frames too so the output video stays continuous.
                temp_frames.append(frame)
            # Count every processed frame that was buffered.
            saved_detection_frames += 1
            # Abort early once too large a share of the clip had no detections.
            ratio = undetection_frames / frames_to_save
            if ratio > 1 - config.save_threshold:
                save_end_time = save_start_time + datetime.timedelta(seconds=(saved_detection_frames / fps))
                is_save = False
                if cover_image is not None:
                    os.remove(cover_image)  # clip is discarded, so drop its cover image
                return is_save, temp_frames, predictions, False, save_start_time, save_end_time, cover_image
    # BUG FIX: was integer division (saved_detection_frames // fps), which
    # truncated the clip end to whole seconds; use true division to match the
    # other end-time computations above.
    save_end_time = save_start_time + datetime.timedelta(seconds=(saved_detection_frames / fps))
    return is_save, temp_frames, predictions, False, save_start_time, save_end_time, cover_image


def format_video_time(time: datetime.datetime) -> str:
    """Format a timestamp for use in a filename (colons replaced by dashes)."""
    return f"{time:%Y-%m-%d %H-%M-%S}"


class VideoProcessingConfig:
    """Bundles every tunable parameter of the video-processing pipeline."""

    # strptime pattern shared by the CLI time arguments.
    _TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
    # Hard-coded fallbacks used when the CLI omits --start_time / --end_time.
    _DEFAULT_START = "2023-04-01 12:00:00"
    _DEFAULT_END = "2023-04-01 12:05:00"

    def __init__(self, video_path, model_path=r"../../model/bird/yolov8n.pt", save_threshold=0.5,
                 clip_duration=5, start_time=None, end_time=None, skip_frame=1, conf=0.5, iou=0.5):
        self.video_path = video_path
        self.model_path = model_path
        self.save_threshold = save_threshold
        self.clip_duration = clip_duration
        self.start_time = start_time
        self.end_time = end_time
        self.skip_frame = skip_frame
        self.conf = conf
        self.iou = iou

    @classmethod
    def from_args(cls, args):
        """Build a config from parsed CLI args, using hard-coded default times
        when --start_time / --end_time were not supplied."""
        raw_start = args.start_time if args.start_time is not None else cls._DEFAULT_START
        raw_end = args.end_time if args.end_time is not None else cls._DEFAULT_END
        return cls(
            video_path=args.video,
            model_path=args.model,
            save_threshold=args.save_threshold,
            clip_duration=args.clip_duration,
            start_time=datetime.datetime.strptime(raw_start, cls._TIME_FORMAT),
            end_time=datetime.datetime.strptime(raw_end, cls._TIME_FORMAT),
            skip_frame=args.skip_frame,
            conf=args.conf,
            iou=args.iou,
        )


def main():
    """Parse CLI arguments, then process the video clip by clip, writing out
    clips whose detection ratio passes the save threshold."""
    parser = argparse.ArgumentParser(description='处理视频进行对象检测。')
    parser.add_argument('--video', help='视频文件的路径。')
    parser.add_argument('--model', default=r"../../model/bird/yolov8n.pt", help='YOLO模型文件的路径。')
    parser.add_argument('--save_threshold', type=float, default=0.5, help='保存视频的检测比率阈值。')
    parser.add_argument('--clip_duration', type=int, default=5, help='要处理的视频时间长度（秒），默认为5秒。')
    parser.add_argument('--start_time', help='视频的开始时间，格式为YYYY-MM-DD HH:MM:SS。')
    # BUG FIX: help text previously said "start time" (copy-paste error).
    parser.add_argument('--end_time', help='视频的结束时间，格式为YYYY-MM-DD HH:MM:SS。')
    parser.add_argument('--skip_frame', type=int, default=1, help='跳过帧数')
    parser.add_argument('--conf', type=float, default=0.5, help='置信度')
    parser.add_argument('--iou', type=float, default=0.5, help='iou')
    args = parser.parse_args()

    # Bundle all parameters into a single config object.
    config = VideoProcessingConfig.from_args(args)
    # BUG FIX: removed leftover debug line that hard-coded config.video_path
    # to a local file, silently overriding the --video CLI argument.
    video_path = config.video_path
    model_path = config.model_path
    specified_clip_duration = config.clip_duration
    skip_frame = config.skip_frame
    start_time = config.start_time
    cap = cv2.VideoCapture(video_path)
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    if fps == 0:
        # fps == 0 means the capture could not be opened or has no usable
        # stream; exit with a message instead of silently (exit code 1).
        cap.release()
        sys.exit(f"Could not read FPS from video: {video_path}")
    original_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    original_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    save_result = SaveResult()
    start_frame = 0

    model = YOLO(model_path)
    while True:
        is_save, save_frames, predictions, end_reached, save_start_time, save_end_time, first_frame = process_video_segment(
            start_time, config, start_frame, model, cap, fps)
        if end_reached:
            break
        if is_save:
            # The output filename encodes the actual saved time span.
            output_filename = f"{format_video_time(save_start_time)}_to_{format_video_time(save_end_time)}.mp4"
            # Guard against skip_frame > fps, which would yield an invalid 0 fps writer.
            writer_fps = max(1, fps // skip_frame)
            video_writer = cv2.VideoWriter(output_filename, cv2.VideoWriter.fourcc(*'avc1'), writer_fps,
                                           (original_width, original_height))
            for annotated_frame in save_frames:
                video_writer.write(annotated_frame)
            video_writer.release()
            save_video_path = os.path.join(os.getcwd(), output_filename)
            save_result.add_result(first_frame, save_video_path, save_start_time, save_end_time, predictions)
        # Advance to the start of the next clip.
        start_frame += specified_clip_duration * fps
        start_time = save_end_time
    cap.release()
    print(save_result.serialize())


if __name__ == '__main__':
    main()
