'''

保存视频切片
    根据动作发生时间截取视频切片保存
'''
from collections import deque
from datetime import timedelta
import cv2
import os

import numpy as np


def get_save_video_clip(**kwargs):
    """
    创建一个视频剪辑保存处理器。

    参数:
        save_dir (str): 保存视频剪辑的目录路径
        roi_data (dict): 区域兴趣(ROI)数据，用于定位登机桥等区域
        skip_frames (int): 视频处理时跳过的帧数
        window_length (int, optional): 时间窗口长度，默认值为120
        video_clip_length (int, optional): 视频剪辑长度（秒），默认值为6秒
        post_processor (object): 后处理对象，用于判断飞机位置，具有area_id属性(left/right/outside)

    返回:
        function: 返回save_video_clip函数，用于保存视频剪辑
    """
    _save_dir = kwargs.get('save_dir')
    _roi_data = kwargs.get('roi_data')
    _skip_frames = kwargs.get('skip_frames')
    _window_length = kwargs.get('window_length', 120)
    _video_clip_length = kwargs.get('video_clip_length', 12)  # s

    _post_processor = kwargs.get('post_processor')  # 用于判断飞机位置  _post_processor.area_id  left/right/outside

    _frame_queue = deque(maxlen=_window_length)  # [current_timestamp, frame,det_results]

    _action_time_dict = {}  # action:[start,end]

    _colors = []
    """
    生成一组视觉上可区分的颜色，用于目标检测结果的可视化。
    颜色通过随机生成，并确保颜色之间的差异度大于阈值（5*3=15），
    以保证在视频中可以清晰地区分不同的检测对象。
    """
    for i in range(100):
        dif = 0
        while dif < 5 * 3:
            new_color = [np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)]
            # 找最小dif
            dif = 255 * 3
            for sc in _colors:
                t = abs(sc[0] - new_color[0]) + abs(sc[1] - new_color[1]) + abs(sc[2] - new_color[2])
                dif = min(dif, t)
        _colors.append(new_color)
    if not os.path.exists(_save_dir):
        os.makedirs(_save_dir)

    def _timestampstr2timestamp(timestamp_str):
        """
        将时间戳字符串转换为总秒数。

        参数:
            timestamp_str (str): 时间戳字符串，格式为'小时:分钟:秒.毫秒'（例如'0:00:07.567000'）

        返回:
            float: 表示时间戳的总秒数
        """

        h, m, s = map(float, timestamp_str.split(':'))
        total_seconds = h * 3600 + m * 60 + s
        return total_seconds

    def _timestamp2timestampstr(timestamp):
        """
        将总秒数转换为时间戳字符串。

        参数:
            timestamp (float): 表示时间戳的总秒数

        返回:
            str: 时间戳字符串，格式为'小时:分钟:秒.毫秒'
        """

        timestamp_str = str(timedelta(seconds=timestamp))

        return timestamp_str

    def _frames2video(save_frames, save_timestamps, video_save_path):
        """
        Encode a sequence of frames into an mp4 video file.

        Each frame gets its timestamp burned in near the top-left corner before
        being written (frames are modified in place).

        Args:
            save_frames (list): video frames to write (BGR np.ndarray)
            save_timestamps (list): timestamp in seconds for each frame
            video_save_path (str): path of the output video file
        """

        fps = 25 / _skip_frames  # NOTE(review): old comment said "min 1" but no clamp is applied; fps < 1 when _skip_frames > 25 — confirm intent
        # w, h = 2160, 3840
        w, h = 3840, 2160  # NOTE(review): output size hard-coded to 4K; assumes input frames are 3840x2160 — confirm against the camera feed
        mp4 = cv2.VideoWriter_fourcc(*'mp4v')  # alternative codecs: MJPG, XVID
        res = cv2.VideoWriter(video_save_path, mp4, fps, (w, h), True)  # size is (width, height)

        print(f'fps: {fps}, total: {len(save_frames)}, w: {w}, h: {h}')
        for save_timestamp, save_frame in zip(save_timestamps, save_frames):
            # print(f'{save_timestamp}')
            save_timestamp_str = _timestamp2timestampstr(save_timestamp)
            cv2.putText(save_frame, save_timestamp_str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            res.write(save_frame)
        res.release()
        print(f'video clip saved to {video_save_path}')

    def _get_roi_by_action_name(action_name):
        """
        Get the region of interest (ROI) associated with an action.

        Only the four jetway docking/retraction actions have an ROI (the
        first/second bridge-wheel zone on the current side); every other
        action — and any action while the aircraft is 'outside' — has none.

        Args:
            action_name (str): name of the action

        Returns:
            list or None: ROI as [x1, y1, x2, y2], or None when no ROI applies
        """

        # No side selected -> no ROI to draw.
        if _post_processor.area_id == 'outside':  # otherwise 'left' or 'right'
            return None

        # Map jetway actions to the zone key holding their polygon; all other
        # actions intentionally have no ROI (the old elif chain returned None
        # for each of them explicitly).
        zone_by_action = {
            "first_jetway_docking": 'first_bridge_wheel_zone',
            "first_jetway_retracted": 'first_bridge_wheel_zone',
            "second_jetway_docking": 'second_bridge_wheel_zone',
            "second_jetway_retracted": 'second_bridge_wheel_zone',
        }
        zone_key = zone_by_action.get(action_name)
        if zone_key is None:
            return None

        # Axis-aligned bounding box of the zone polygon.
        coordinates = _roi_data[f'{_post_processor.area_id}_area'][zone_key]['coordinates']
        xs = [coordinate[0] for coordinate in coordinates]
        ys = [coordinate[1] for coordinate in coordinates]
        return [min(xs), min(ys), max(xs), max(ys)]

    def _draw_det_results(show_frame, results):
        """
        Draw object-detection boxes and labels onto a frame.

        Args:
            show_frame (np.ndarray): frame to draw on (modified in place)
            results (list): detector output; results[0] exposes `.boxes`
                (with .xyxy / .conf / .cls, movable to CPU numpy) and `.names`
                — ultralytics-style, per the calls below

        Returns:
            np.ndarray: the frame with detections drawn
        """

        boxes = results[0].boxes.cpu().numpy()
        names = results[0].names

        # Single pass: the old code built an intermediate list (with an unused
        # enumerate index) and then looped over it again to draw.
        for i in range(boxes.shape[0]):
            xyxy = boxes.xyxy[i, :].tolist()
            conf = boxes.conf[i]
            cls_ind = int(boxes.cls[i])
            cls_name = names[cls_ind]
            # one pre-generated, visually distinct color per class id
            track_color = _colors[cls_ind % len(_colors)]
            px1, py1, px2, py2 = int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])
            cv2.putText(show_frame, f'{cls_ind}_{str(np.round(conf, 2))}_{cls_name}', (px1, py1),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, fontScale=2,
                        color=track_color,
                        thickness=2)
            cv2.rectangle(show_frame, (px1, py1), (px2, py2), track_color, 2)
        return show_frame

    def save_video_clip(**kwargs):
        """
        Buffer the current frame and, once a recorded action's clip window has
        fully elapsed, write the surrounding buffered frames out as an mp4 clip.

        Keyword Args:
            current_timestamp (float): current timestamp in seconds
            frame (np.ndarray): current video frame
            det_results (list): detection results for this frame
            processed_results (list): post-processed events; each dict carries
                'relative_time' ('H:MM:SS.ffffff') and 'actions' (action name)
        """

        current_timestamp = kwargs.get('current_timestamp')
        frame = kwargs.get('frame')
        det_results = kwargs.get('det_results')
        processed_results = kwargs.get('processed_results')

        _frame_queue.append([current_timestamp, frame, det_results])

        # Remember when each action happened; the clip spans _video_clip_length
        # seconds centered on that instant.
        for processed_result in processed_results:
            timestamp_str = processed_result.get('relative_time')
            timestamp = _timestampstr2timestamp(timestamp_str)
            action_name = processed_result.get('actions')
            _action_time_dict[action_name] = timestamp

        # Write out every action whose window [start, end] is fully in the past.
        remove_keys = []
        for action_name, action_timestamp in _action_time_dict.items():
            start_timestamp = max(0, action_timestamp - _video_clip_length // 2)
            end_timestamp = action_timestamp + _video_clip_length // 2
            if current_timestamp > end_timestamp:
                # File name is keyed 3 s before the clip end.
                timestampstr = _timestamp2timestampstr(end_timestamp - 3)
                save_path = f"{_save_dir}/{timestampstr.replace(':', '_')}_{int(_timestampstr2timestamp(timestampstr))}_{action_name}.mp4"

                # The ROI depends only on the action, not on the frame, so
                # compute it once instead of once per buffered frame.
                roi_xyxy = _get_roi_by_action_name(action_name)
                save_frames = []
                save_timestamps = []
                # Loop variables renamed so they no longer shadow the outer
                # `timestamp` / `frame` bindings above.
                for buf_timestamp, buf_frame, buf_dets in _frame_queue:
                    if start_timestamp <= buf_timestamp <= end_timestamp:
                        show_frame = buf_frame.copy()
                        if roi_xyxy:
                            # red within 1 s of the action instant, green otherwise
                            color = (0, 0, 255) if abs(buf_timestamp - action_timestamp) < 1 else (0, 255, 0)
                            show_frame = cv2.rectangle(show_frame, (roi_xyxy[0], roi_xyxy[1]),
                                                       (roi_xyxy[2], roi_xyxy[3]), color, 4)
                        # overlay detection results
                        show_frame = _draw_det_results(show_frame, buf_dets)
                        save_frames.append(show_frame)
                        save_timestamps.append(buf_timestamp)
                _frames2video(save_frames, save_timestamps, save_path)
                remove_keys.append(action_name)
        for remove_key in remove_keys:  # drop actions that have been written out
            _action_time_dict.pop(remove_key)

    return save_video_clip


def test_save_video_clip():
    """Placeholder test for get_save_video_clip.  TODO: implement."""
    pass


if __name__ == '__main__':
    # Module is meant to be imported; no standalone entry point yet.
    pass
