#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project : aimix
@File    : single_video_process_method.py
@IDE     : PyCharm
@Author  : admin
@Date    : 2025/4/21 13:41
"""
import os
import re
import tempfile
import concurrent.futures
from queue import Queue
from typing import List

import cv2
import numpy as np
import pysrt
from moviepy.editor import *
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.tools.subtitles import SubtitlesClip
from moviepy.video.fx import all as vfx
from PIL import Image, ImageDraw, ImageFont
from pydub import AudioSegment

from utils.tools import retry
from log import log

# Dedicated temp directory so MoviePy intermediates are easy to find and clean up.
moviepy_temp_dir = os.path.join(tempfile.gettempdir(), "moviepy_temp")
os.makedirs(moviepy_temp_dir, exist_ok=True)
import moviepy.config as mpconf
# NOTE(review): hard-coded, machine-specific Windows path to the ImageMagick
# binary (required by TextClip); this breaks on any other machine — consider
# moving it to configuration / an environment variable. TODO confirm.
mpconf.change_settings({
    "IMAGEMAGICK_BINARY": r"C:\Users\admin\workspace\aimix\libs\ImageMagick-7.1.1-Q16-HDRI\magick.exe",
    "TEMP_DIR": moviepy_temp_dir,
})



@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def set_srt_to_video(video_path: str, srt_path: str, output_path: str, bottom_margin: int = 50, font: str = "Arial", font_size: int = 36, font_color: str = "white", horizontal_margin: int = 20):
    """
    Burn an SRT subtitle file into a video and export the result.

    Args:
        video_path (str): input video path
        srt_path (str): SRT subtitle file path
        output_path (str): output video path
        bottom_margin (int): distance of the subtitles from the bottom edge (px)
        font (str): font name (e.g. 'Arial'; a font file path also works)
        font_size (int): font size
        font_color (str): font color (e.g. 'white', '#FF0000')
        horizontal_margin (int): left/right margin threshold.
            NOTE(review): currently unused by the implementation; kept for
            interface compatibility.
    """

    def parse_srt(path):
        """Parse an SRT file into a list of (start, end, text) tuples."""
        with open(path, 'r', encoding='utf-8') as file:
            content = file.read()

        # Index line, "HH:MM:SS,mmm --> HH:MM:SS,mmm", then text up to the next index.
        pattern = re.compile(r'(\d+)\n(\d{2}:\d{2}:\d{2},\d{3}) --> (\d{2}:\d{2}:\d{2},\d{3})\n(.*?)(?=\n\d+|\Z)', re.DOTALL)
        entries = pattern.findall(content)

        parsed = []
        for _idx, start, end, text in entries:
            # SRT uses ',' as the millisecond separator; normalize to '.'.
            parsed.append((start.replace(',', '.'), end.replace(',', '.'), text.strip()))

        # Use the module logger instead of print() for consistency with the
        # rest of this file.
        if not parsed:
            log.debug("SRT 文件解析结果为空，请检查文件内容和格式！")
        else:
            log.debug(f"解析到 {len(parsed)} 条字幕：")
            for sub in parsed:
                log.debug(sub)

        return parsed

    def srt_to_seconds(time_str):
        """Convert a normalized 'HH:MM:SS.mmm' timestamp to seconds."""
        h, m, s = time_str.split(':')
        s, ms = s.split('.')
        return int(h) * 3600 + int(m) * 60 + int(s) + int(ms) / 1000

    # Parse BEFORE opening the video so an early return cannot leak the reader.
    subtitle_list = parse_srt(srt_path)
    if not subtitle_list:
        log.debug("解析后的字幕列表为空，请检查 SRT 文件内容和格式！")
        return

    def generator(txt):
        """Build a TextClip for one subtitle line (requires ImageMagick)."""
        return TextClip(txt, font=font, fontsize=font_size, color=font_color, align='Center')

    video = VideoFileClip(video_path)
    video_with_subtitles = None
    try:
        # SubtitlesClip expects ((start_seconds, end_seconds), text) pairs.
        subtitles = [((srt_to_seconds(start), srt_to_seconds(end)), txt) for start, end, txt in subtitle_list]
        subtitle_clips = SubtitlesClip(subtitles, generator)

        # Anchor the subtitle band above the bottom margin.
        subtitle_clips = subtitle_clips.set_position(('center', video.size[1] - bottom_margin - font_size))

        video_with_subtitles = CompositeVideoClip([video, subtitle_clips])
        video_with_subtitles.write_videofile(output_path, remove_temp=True, codec="libx264", audio_codec="aac")
    finally:
        # Release decoder resources even when writing fails (the @retry
        # wrapper may re-enter this function).
        video.close()
        if video_with_subtitles is not None:
            video_with_subtitles.close()


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def set_subtitle_to_video(video_path: str, output_path: str, subtitle_text: str, bottom_margin: int = 50, font_path: str = r"C:\Windows\Fonts\simsun.ttc", font_size: int = 36, font_color: str = "red", subtitle_duration: float = 1.0):
    """
    Render a single line of subtitle text onto a video.

    Args:
        video_path (str): input video path
        output_path (str): output video path
        subtitle_text (str): text to display
        bottom_margin (int): distance from the bottom edge (px)
        font_path (str): path to a TrueType font file (loaded with
            ImageFont.truetype; the old docstring wrongly said "font name")
        font_size (int): font size
        font_color (str): font color (e.g. 'white', '#FF0000')
        subtitle_duration (float): on-screen duration in seconds; values <= 0
            mean "for the whole video"

    Returns:
        str: the output path (added for consistency with the other helpers).
    """
    video = VideoFileClip(video_path)
    result = None
    try:
        video_width, video_height = video.size

        # Render the text onto a transparent strip spanning the video width.
        subtitle_img_height = font_size + 20
        img = Image.new("RGBA", (video_width, subtitle_img_height), (0, 0, 0, 0))
        draw = ImageDraw.Draw(img)
        font = ImageFont.truetype(font_path, font_size)

        bbox = draw.textbbox((0, 0), subtitle_text, font=font)
        text_width = bbox[2] - bbox[0]
        text_height = bbox[3] - bbox[1]

        # Center the text inside the strip.
        x = (video_width - text_width) // 2
        y = (subtitle_img_height - text_height) // 2
        draw.text((x, y), subtitle_text, font=font, fill=font_color)

        subtitle_clip = ImageClip(np.array(img)).set_duration(subtitle_duration if subtitle_duration > 0 else video.duration)
        subtitle_clip = subtitle_clip.set_position(("center", video_height - bottom_margin - subtitle_img_height))
        result = CompositeVideoClip([video, subtitle_clip])
        result.write_videofile(output_path, codec="libx264", remove_temp=True, audio_codec="aac", threads=4, logger=None)
        log.debug(f"视频添加标题完成, 视频保存到: {output_path}")
        return output_path
    finally:
        # Release resources even on failure so @retry can reopen the file.
        video.close()
        if result is not None:
            result.close()


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def set_background_music_to_video(video_path: str, audio_path: str, output_path: str, volume: float = 1.0, keep_original_audio: bool = True, video_priority: bool = True):
    """
    Add background music to a video, with volume control and optional mixing
    with the original soundtrack.

    Args:
        video_path (str): input video path
        audio_path (str): background music path
        output_path (str): output video path
        volume (float): music gain; 1.0 keeps the original level
        keep_original_audio (bool): mix the video's own soundtrack in
        video_priority (bool): if True, the music is cut/looped to the video
            length; otherwise the video is cut/looped to the music length

    Returns:
        str: the output path.
    """
    # Probe the durations first so we know which side has to be adapted.
    video = VideoFileClip(video_path)
    audio = AudioFileClip(audio_path)
    video_duration = video.duration
    audio_duration = audio.duration
    video.close()
    audio.close()

    # Track intermediate files so they can be removed after the final render.
    temp_paths = []

    def _new_temp(suffix):
        # mkstemp instead of the race-prone, deprecated tempfile.mktemp.
        fd, path = tempfile.mkstemp(suffix=suffix, dir=os.path.dirname(output_path))
        os.close(fd)
        temp_paths.append(path)
        return path

    if video_priority:
        audio_path = process_audio(audio_path, video_duration, _new_temp(".mp3"))
    else:
        video_path = process_video(video_path, audio_duration, _new_temp(".mp4"))

    video = VideoFileClip(video_path)
    audio = AudioFileClip(audio_path)
    try:
        # Apply the requested music volume.
        audio = audio.volumex(volume)
        if keep_original_audio and video.audio is not None:
            # Mix the original soundtrack with the background music.
            # (Guarding on video.audio avoids CompositeAudioClip([None, ...])
            # when the input video has no audio track.)
            final_audio = CompositeAudioClip([video.audio, audio])
        else:
            final_audio = audio
        video_with_audio = video.set_audio(final_audio)
        video_with_audio.write_videofile(output_path, codec="libx264", remove_temp=True, audio_codec="aac", audio_bitrate="192k", threads=4, logger=None)
        log.debug(f"添加背景音乐完成, 视频保存到: {output_path}")
        return output_path
    finally:
        video.close()
        audio.close()
        # Best-effort cleanup of the intermediate media files.
        for path in temp_paths:
            try:
                os.remove(path)
            except OSError:
                pass


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def set_image_overlay_to_video(video_path: str, image_path: str, output_path: str, overlay_duration: float):
    """
    Overlay an image on a video; the image is resized to the video resolution
    and shown from t=0 for `overlay_duration` seconds.

    Args:
        video_path (str): input video path
        image_path (str): overlay image path
        output_path (str): output video path
        overlay_duration (float): how long the overlay stays visible (seconds)

    Returns:
        str: the output path (added for consistency with the other helpers).
    """
    video = VideoFileClip(video_path)
    final_video = None
    try:
        video_width, video_height = video.size
        # Stretch the image to cover the whole frame.
        overlay = ImageClip(image_path).resize((video_width, video_height))
        # Show the overlay from the very start of the video, fully opaque.
        overlay = overlay.set_duration(overlay_duration).set_start(0).set_opacity(1)
        final_video = CompositeVideoClip([video, overlay])
        final_video.write_videofile(output_path, codec="libx264", remove_temp=True, audio_codec="aac", threads=4, logger=None)
        log.debug(f"贴片完成, 视频保存到: {output_path}")
        return output_path
    finally:
        # Close the readers even if rendering fails.
        video.close()
        if final_video is not None:
            final_video.close()


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def set_video_cover(video_path: str, cover_path: str, output_path: str):
    """
    Prepend a 3-second cover image to a video.

    :param video_path: input video file path
    :param cover_path: cover image file path
    :param output_path: output video file path
    :return: the output path (added for consistency with the other helpers)
    """
    video = VideoFileClip(video_path)
    final_clip = None
    try:
        # The cover is shown for 3 seconds at the video's own resolution.
        cover = ImageClip(cover_path).set_duration(3)
        cover = cover.resize(video.size)
        final_clip = concatenate_videoclips([cover, video], method="compose")
        final_clip.write_videofile(output_path, codec="libx264", remove_temp=True, audio_codec="aac", threads=4, logger=None)
        log.debug(f"设置封面完成，视频已保存到 {output_path}")
        return output_path
    finally:
        # Close the readers even if rendering fails (so @retry can reopen them).
        video.close()
        if final_clip is not None:
            final_clip.close()


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def set_text_watermark_to_video(video_path: str, output_path: str, watermark_text: str, x: int, y: int, font_path: str = r"C:\Windows\Fonts\simsun.ttc", font_size: int = 36, font_color: str = "white"):
    """
    Add a text watermark at a fixed pixel position for the whole video.

    Args:
        video_path (str): input video path
        output_path (str): output video path
        watermark_text (str): watermark text
        x (int): watermark x coordinate
        y (int): watermark y coordinate
        font_path (str): path to a TrueType font file
        font_size (int): font size
        font_color (str): font color (e.g. 'white', '#FF0000')

    Returns:
        str: the output path.
    """
    video = VideoFileClip(video_path)
    final = None
    try:
        # Measure the exact pixel bounds of the rendered text.
        font = ImageFont.truetype(font_path, font_size)
        dummy_draw = ImageDraw.Draw(Image.new("RGBA", (1, 1), (0, 0, 0, 0)))
        bbox = dummy_draw.textbbox((0, 0), watermark_text, font=font)
        text_width = bbox[2] - bbox[0]
        text_height = bbox[3] - bbox[1]

        img = Image.new("RGBA", (text_width, text_height), (0, 0, 0, 0))
        draw = ImageDraw.Draw(img)
        # Shift by the bbox origin: drawing at (0, 0) clips glyphs whenever the
        # text's bounding box does not start at the origin (common for CJK
        # fonts and descenders).
        draw.text((-bbox[0], -bbox[1]), watermark_text, font=font, fill=font_color)

        watermark_clip = ImageClip(np.array(img)).set_duration(video.duration)
        watermark_clip = watermark_clip.set_position((x, y))

        final = CompositeVideoClip([video, watermark_clip])
        final.write_videofile(output_path, codec="libx264", audio_codec="aac", remove_temp=True, threads=4, logger=None)
        log.debug(f"添加水印完成，视频保存到：{output_path}")
        return output_path
    finally:
        video.close()
        if final is not None:
            final.close()


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def clip_video_by_moviepy(input_file, output_file, start_time, end_time):
    """
    Cut the [start_time, end_time] range out of a video and save it.

    start_time / end_time may be seconds (int/float) or 'HH:MM:SS' strings.

    :return: the output file path
    :raises ValueError: if a time cannot be interpreted
    """
    def time_to_seconds(time_str):
        # 'HH:MM:SS' -> total seconds
        h, m, s = map(int, time_str.split(":"))
        return h * 3600 + m * 60 + s

    output_file = os.path.normpath(output_file)
    # Raw strings for the regex patterns (avoids invalid-escape warnings).
    if isinstance(start_time, str) and re.search(r"\d+:\d+:", start_time):
        start_time = time_to_seconds(start_time)
    if isinstance(end_time, str) and re.search(r"\d+:\d+:", end_time):
        end_time = time_to_seconds(end_time)
    if not isinstance(start_time, (int, float)) or not isinstance(end_time, (int, float)):
        # Single formatted message: the previous log.debug(a, b, c, d) call
        # passed multiple positional args, which standard loggers treat as
        # %-format arguments and mangle.
        log.debug(f"{start_time} {end_time} {type(start_time)} {type(end_time)}")
        raise ValueError("start_time and end_time must be int or str in format 'HH:MM:SS'")

    video_item = VideoFileClip(input_file)
    try:
        # Clamp the end to the actual clip duration.
        video = video_item.subclip(start_time, min(end_time, video_item.duration))
        video.write_videofile(output_file, codec="libx264", audio_codec="aac", remove_temp=True, threads=4, logger=None)
    finally:
        video_item.close()
    # The original returned from inside `finally`, which silently swallowed any
    # exception raised in `try` (and defeated @retry); return afterwards instead.
    log.debug(f"剪切后的视频已保存到：{output_file}")
    return output_file


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def speed_up_video_with_audio(input_video_path: str, output_video_path: str, factor: float, keep_original_audio: bool = True):
    """
    Speed up a video; the audio is sped up with pydub so its pitch (timbre)
    is preserved.

    Args:
        input_video_path (str): input video file path
        output_video_path (str): output video file path
        factor (float): speed factor; > 1 speeds up
        keep_original_audio (bool): keep (and speed up) the original audio

    Returns:
        str: the output path.
    """
    video = VideoFileClip(input_video_path)
    temp_paths = []
    try:
        sped_up_video = video.fx(vfx.speedx, factor)

        if keep_original_audio and video.audio is not None:
            def _new_temp():
                # mkstemp instead of the race-prone tempfile.mktemp; remember
                # the path so it can be removed after the render.
                fd, path = tempfile.mkstemp(suffix='.wav', dir=os.path.dirname(output_video_path))
                os.close(fd)
                temp_paths.append(path)
                return path

            # Dump the original soundtrack to WAV for pydub.
            audio_path = _new_temp()
            video.audio.write_audiofile(audio_path, codec='pcm_s16le', logger=None)

            # Pitch-preserving speed-up via pydub.
            audio_segment = AudioSegment.from_wav(audio_path)
            sped_up_audio = audio_segment.speedup(playback_speed=factor, chunk_size=150, crossfade=25)

            sped_up_audio_path = _new_temp()
            sped_up_audio.export(sped_up_audio_path, format='wav')

            # Attach the processed audio to the sped-up video.
            sped_up_video = sped_up_video.set_audio(AudioFileClip(sped_up_audio_path))
        else:
            sped_up_video = sped_up_video.without_audio()

        sped_up_video.write_videofile(output_video_path, codec="libx264", remove_temp=True, audio_codec="aac", threads=4, logger=None)
        log.debug(f"调整速度之后的视频已保存到：{output_video_path}")
        return output_video_path
    finally:
        video.close()
        # Remove the intermediate WAVs only AFTER the final render: the earlier
        # commented-out os.remove calls ran before write_videofile, but
        # AudioFileClip reads its file lazily during the render.
        for path in temp_paths:
            try:
                os.remove(path)
            except OSError:
                pass

@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def load_video(path: str):
    """
    Open a video file, release its reader, and return the clip's attribute
    dict (metadata such as duration, size, fps).
    """
    video_clip = VideoFileClip(path)
    try:
        return video_clip.__dict__
    finally:
        video_clip.close()


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def resize_video(input_path: str, output_path: str, resolution: tuple):
    """Re-render a video at the given (width, height) resolution."""
    source = VideoFileClip(input_path)
    scaled = source.resize(resolution)
    scaled.write_videofile(output_path, codec='libx264', audio_codec='aac', remove_temp=True, threads=4, logger=None)
    source.close()
    scaled.close()
    log.debug(f"调整分辨率之后的视频已保存到：{output_path}")


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def merge_videos(video_paths: List[str], output_path: str, keep_audio: bool = True, with_transition: bool = False, transition_duration: float = 0.4):
    """
    Concatenate several video files into one.

    :param video_paths: list of video file paths (missing files are skipped)
    :param output_path: output video file path
    :param keep_audio: keep the audio tracks
    :param with_transition: add fade-in/fade-out between clips
    :param transition_duration: fade duration in seconds
    :return: the output path, or None when no input file exists
    """
    clips = []
    final_clip = None
    log.debug(f"开始合并视频: {video_paths}")
    try:
        for path in video_paths:
            if not os.path.exists(path):
                log.debug(f"文件不存在: {path}")
                continue
            clip = VideoFileClip(path)
            if not keep_audio:
                clip = clip.without_audio()
            if with_transition:
                # Cross-fade each clip in and out.
                clip = clip.fx(vfx.fadein, transition_duration).fx(vfx.fadeout, transition_duration)
            clips.append(clip)

        if not clips:
            log.debug("没有可用的视频进行拼接")
            return

        # Negative padding overlaps consecutive clips so the fades cross.
        final_clip = concatenate_videoclips(clips, method="compose",
                                            padding=-transition_duration if with_transition else 0)
        final_clip.write_videofile(output_path, codec='libx264', audio_codec='aac', remove_temp=True, threads=4, logger=None)
        log.debug(f"合并后的视频已保存到: {output_path}")
        return output_path
    finally:
        # Close every opened clip even when loading/concatenating/writing
        # raises part-way through (previously those readers leaked).
        for clip in clips:
            clip.close()
        if final_clip is not None:
            final_clip.close()


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def process_audio(input_audio_path: str, target_duration: int, output_audio_path: str):
    """
    Fit an audio file to a target duration: truncate if longer, loop if shorter.

    Args:
        input_audio_path (str): input audio file path
        target_duration (int): target duration in seconds
        output_audio_path (str): output audio file path (exported as MP3)

    Returns:
        str: the output path.

    Raises:
        ValueError: if the input audio has zero length (cannot be looped).
    """
    audio = AudioSegment.from_file(input_audio_path)

    # pydub durations are in milliseconds.
    audio_duration = len(audio) / 1000

    if audio_duration == 0:
        # Guard: looping an empty clip would divide by zero below.
        raise ValueError(f"输入音频时长为 0: {input_audio_path}")

    if audio_duration > target_duration:
        # Too long: keep only the first target_duration seconds.
        audio = audio[:target_duration * 1000]
    elif audio_duration < target_duration:
        # Too short: repeat whole copies, then append the fractional remainder.
        repeat_count = int(target_duration // audio_duration)
        remainder_duration = target_duration - (repeat_count * audio_duration)
        audio = audio * repeat_count + audio[:remainder_duration * 1000]

    audio.export(output_audio_path, format="mp3")
    log.debug(f"处理后的音频已保存至 {output_audio_path}")
    return output_audio_path


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def process_video(input_video_path: str, target_duration: int, output_video_path: str):
    """
    Fit a video to a target duration: truncate it when it is longer, loop it
    when it is shorter.

    Args:
        input_video_path (str): input video file path
        target_duration (int): target duration in seconds
        output_video_path (str): output video file path

    Returns:
        str: the output path.
    """
    source = None
    try:
        source = VideoFileClip(input_video_path)
        duration = source.duration

        if duration >= target_duration:
            # Long enough: just take the leading target_duration seconds.
            final_clip = source.subclip(0, target_duration)
        else:
            # Too short: tile enough copies, then trim to the exact length.
            repeats = int(target_duration / duration) + 1
            looped = concatenate_videoclips([source] * repeats)
            final_clip = looped.subclip(0, target_duration)
            looped.close()

        final_clip.write_videofile(output_video_path, codec="libx264", audio_codec="aac", remove_temp=True, threads=4, logger=None)
        final_clip.close()
        log.debug(f"处理后的视频已保存至 {output_video_path}")
        return output_video_path
    finally:
        # Always release the source reader.
        if source is not None:
            source.close()


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def flip_video_vertically(input_path: str, output_path: str):
    """
    Flip a video upside down.

    :param input_path: input video path
    :param output_path: output video path
    :return: the output path (added for consistency with
        flip_video_horizontally, which already returns it)
    """
    video = VideoFileClip(input_path)
    # Reverse the rows of every frame (vertical flip).
    flipped_video = video.fl(lambda gf, t: gf(t)[::-1, :, :])
    flipped_video.write_videofile(output_path, codec="libx264", audio_codec="aac", remove_temp=True, threads=4, logger=None)
    log.debug(f"视频已上下翻转，处理后的视频已保存至 {output_path}")
    video.close()
    flipped_video.close()
    return output_path


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def flip_video_horizontally(input_path: str, output_path: str):
    """
    Mirror a video left-to-right and save it.

    :param input_path: input video path
    :param output_path: output video path
    :return: the output path
    """
    source = VideoFileClip(input_path)
    # Reverse the columns of every frame (horizontal mirror).
    mirrored = source.fl(lambda get_frame, t: get_frame(t)[:, ::-1, :])
    mirrored.write_videofile(output_path, codec="libx264", audio_codec="aac", remove_temp=True, threads=4, logger=None)
    log.debug(f"视频已左右翻转，处理后的视频已保存至 {output_path}")
    source.close()
    mirrored.close()
    return output_path

@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def add_image_overlay_to_video(video_path, overlay_image_path, output_path, opacity=0.5, position=("center", "center")):
    """
    Overlay a semi-transparent image on top of a video.

    :param video_path: input video path
    :param overlay_image_path: overlay image path
    :param output_path: output video path
    :param opacity: overlay transparency (0.0–1.0; 1.0 is fully opaque)
    :param position: overlay position, e.g. ("center", "center") or ("left", "top")
    :return: the output path
    """
    video = VideoFileClip(video_path)
    final_video = None
    try:
        # The overlay spans the whole video at the requested opacity.
        overlay_image = ImageClip(overlay_image_path).set_duration(video.duration).set_opacity(opacity)
        overlay_image = overlay_image.set_position(position)
        final_video = CompositeVideoClip([video, overlay_image])
        # audio_codec="aac" added for consistency with every other export call
        # in this module (the codec the writer picks by default may not suit
        # .mp4 output).
        final_video.write_videofile(output_path, codec="libx264", audio_codec="aac", fps=video.fps, threads=4, remove_temp=True, logger=None)
        log.debug(f"视频处理完成，已保存到 {output_path}")
        return output_path
    finally:
        video.close()
        if final_video is not None:
            final_video.close()


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def add_video_overlay_to_video(background_video_path, overlay_video_path, output_path, opacity=0.5, position=("center", "center")):
    """
    Overlay one video on top of another at a given opacity and position.

    :param background_video_path: background video path
    :param overlay_video_path: overlay video path
    :param output_path: output video path
    :param opacity: overlay transparency (0.0–1.0; 1.0 is fully opaque)
    :param position: overlay position, e.g. ("center", "center") or ("left", "top")
    :return: the output path
    """
    background_video = VideoFileClip(background_video_path)
    overlay_video = VideoFileClip(overlay_video_path).set_opacity(opacity)
    final_video = None
    try:
        # Loop a shorter overlay so it covers the whole background.
        if overlay_video.duration < background_video.duration:
            overlay_video = overlay_video.loop(duration=background_video.duration)

        overlay_video = overlay_video.set_position(position)
        final_video = CompositeVideoClip([background_video, overlay_video])
        # audio_codec="aac" added for consistency with every other export call
        # in this module.
        final_video.write_videofile(output_path, codec="libx264", audio_codec="aac", fps=background_video.fps, remove_temp=True, threads=4, logger=None)
        log.debug(f"视频处理完成，已保存到 {output_path}")
        return output_path
    finally:
        background_video.close()
        if final_video is not None:
            final_video.close()


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def random_frame_extraction(input_video_path, output_video_path, frame_interval=20):
    """
    Re-cut a video by sampling short segments at a regular frame stride,
    keeping the audio in sync with the kept video.

    :param input_video_path: input video path
    :param output_video_path: output video path
    :param frame_interval: sampling stride in FRAMES, not seconds — the range()
        below steps through frame indices in increments of ``frame_interval``.
        (The original docstring said "seconds"; the code uses frame counts.)
    """
    # Load the video; the audio track may be absent.
    video_clip = VideoFileClip(input_video_path)
    audio_clip = video_clip.audio if video_clip.audio is not None else None

    fps = video_clip.fps

    # Start times (in seconds) of each sampled segment: every
    # frame_interval-th frame, converted to seconds via fps.
    frame_times = [i / fps for i in range(0, int(video_clip.duration * fps), frame_interval)]

    # Collected video/audio pieces, concatenated at the end.
    video_segments = []
    audio_segments = []

    for start_time in frame_times:
        # Each segment spans frame_interval - 1 frames, clamped to clip length.
        end_time = min(start_time + (frame_interval - 1) / fps, video_clip.duration)

        video_segment = video_clip.subclip(start_time, end_time)
        video_segments.append(video_segment)

        # Extract the matching audio span so A/V stay aligned.
        if audio_clip:
            audio_segment = audio_clip.subclip(start_time, end_time)
            audio_segments.append(audio_segment)

    # Join the kept video pieces.
    final_video = concatenate_videoclips(video_segments, method="compose")

    # Join the matching audio pieces, if any.
    if audio_clip:
        final_audio = concatenate_audioclips(audio_segments)
        final_video = final_video.set_audio(final_audio)
    else:
        print("No audio found in the input video.")

    # Render the result.
    final_video.write_videofile(output_video_path, codec="libx264", audio_codec="aac", remove_temp=True, fps=fps, threads=4, logger=None)

    log.debug(f"视频处理完成，已保存到 {output_video_path}")
    return output_video_path


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def adjust_video(video_path, output_path, brightness=1.0, contrast=1.0, saturation=1.0, gamma=1.0):
    """
    Adjust a video's brightness, contrast, saturation and gamma.

    :param video_path: input video path
    :param output_path: output video path
    :param brightness: brightness factor (default 1.0 = unchanged)
    :param contrast: contrast factor (default 1.0 = unchanged)
    :param saturation: saturation factor (default 1.0 = unchanged)
    :param gamma: gamma value (default 1.0 = unchanged)
    :return: the output path
    """
    # Load the video.
    video = VideoFileClip(video_path)

    # Per-frame adjustment, applied via fl_image below.
    # NOTE(review): the cv2 conversions are labelled BGR, but moviepy hands
    # fl_image frames in RGB order. It looks harmless for these particular
    # operations (S and V are max/min based, so channel order doesn't matter,
    # and hue is converted back with the same labelling) — TODO confirm.
    def process_frame(frame):
        # To HSV so brightness can be scaled on the V channel.
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Brightness: scale V, clamped to [0, 255].
        if brightness != 1.0:
            hsv[..., 2] = np.clip(hsv[..., 2] * brightness, 0, 255)

        # Back to the original colour space.
        frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        # Contrast: linear remap around mid-grey (128).
        if contrast != 1.0:
            frame = np.clip(contrast * frame + (1 - contrast) * 128, 0, 255).astype(np.uint8)

        # Saturation: scale S in HSV space.
        if saturation != 1.0:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            hsv[..., 1] = np.clip(hsv[..., 1] * saturation, 0, 255)
            frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        # Gamma: power-law remap of normalized intensities.
        if gamma != 1.0:
            frame = np.clip(255 * (frame / 255) ** gamma, 0, 255).astype(np.uint8)

        return frame

    # Apply the per-frame transform to the whole clip.
    processed_video = video.fl_image(process_frame)

    # Render the result.
    processed_video.write_videofile(output_path, codec="libx264", fps=video.fps, remove_temp=True, threads=4, logger=None)
    log.debug(f"视频处理完成，已保存到 {output_path}")
    return output_path


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def adjust_hsl(video_path, output_path, hue_shift=0, saturation_factor=1.0, lightness_factor=1.0):
    """
    Adjust a video's hue, saturation and lightness.

    :param video_path: input video path
    :param output_path: output video path
    :param hue_shift: hue control. NOTE(review): the original docstring claimed
        a -180..180 range, but the conversion below maps an input of 0..10 onto
        -180..180 (0 -> -180, 5 -> 0, 10 -> +180), and the result is added to
        OpenCV's H channel, which is in half-degree units (0..179) — so the
        effective rotation is half of the stated degrees. TODO confirm the
        intended input scale before changing callers.
    :param saturation_factor: saturation factor (0.0 to 2.0)
    :param lightness_factor: lightness factor (0.0 to 2.0)
    :return: the output path
    """
    # Load the video.
    video = VideoFileClip(video_path)

    # Map the 0..10 input onto -180..180 (see the NOTE above).
    hue_shift_converted = (hue_shift / 10) * 360 - 180

    # Per-frame transform, applied via fl_image below.
    def process_frame(frame):
        # To HSV. NOTE(review): labelled BGR but moviepy frames are RGB; the
        # roundtrip uses the same labelling, so S/V behave as expected.
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Hue: rotate H (wraps modulo 180, OpenCV's hue range).
        if hue_shift_converted:
            hsv[..., 0] = (hsv[..., 0].astype(np.int16) + hue_shift_converted) % 180

        # Saturation: scale S, clamped to [0, 255].
        if saturation_factor != 1.0:
            hsv[..., 1] = np.clip(hsv[..., 1] * saturation_factor, 0, 255).astype(np.uint8)

        # Lightness: scale V, clamped to [0, 255].
        if lightness_factor != 1.0:
            hsv[..., 2] = np.clip(hsv[..., 2] * lightness_factor, 0, 255).astype(np.uint8)

        # Back to the original colour space.
        frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        return frame

    # Apply the per-frame transform to the whole clip.
    processed_video = video.fl_image(process_frame)

    # Render the result.
    processed_video.write_videofile(output_path, codec="libx264", fps=video.fps, remove_temp=True, threads=4, logger=None)
    log.debug(f"视频处理完成，已保存到 {output_path}")
    return output_path


def process_frame(frame, shadow_red, shadow_green, shadow_blue, midtones_red, midtones_green, midtones_blue, highlights_red, highlights_green, highlights_blue):
    """
    Shift the colour balance of one frame separately in shadows, midtones and
    highlights, working on the chroma planes of a YUV conversion.

    NOTE(review): only two of the three per-band factors are actually applied —
    the U plane is shifted by the *_red factors and the V plane by the *_green
    factors, while shadow_blue / midtones_blue / highlights_blue are unused.
    (In YUV, U is the blue-difference and V the red-difference chroma, so the
    naming looks swapped as well.) TODO confirm the intended mapping.

    :param frame: input frame (documented as BGR)
    :param shadow_red: shadow-band factor, applied to U
    :param shadow_green: shadow-band factor, applied to V
    :param shadow_blue: UNUSED (see NOTE)
    :param midtones_red: midtone-band factor, applied to U
    :param midtones_green: midtone-band factor, applied to V
    :param midtones_blue: UNUSED (see NOTE)
    :param highlights_red: highlight-band factor, applied to U
    :param highlights_green: highlight-band factor, applied to V
    :param highlights_blue: UNUSED (see NOTE)
    :return: the adjusted frame (BGR)
    """
    # Split into luma (Y) and chroma (U, V).
    yuv = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV)
    y, u, v = cv2.split(yuv)

    # Band masks on luma: shadows < 85, midtones 85-169, highlights >= 170.
    shadow_mask = y < 85
    midtones_mask = (y >= 85) & (y < 170)
    highlights_mask = y >= 170

    # Shadows: shift chroma (factors are fractions of full scale, hence * 255).
    u[shadow_mask] = np.clip(u[shadow_mask] + shadow_red * 255, 0, 255)
    v[shadow_mask] = np.clip(v[shadow_mask] + shadow_green * 255, 0, 255)

    # Midtones.
    u[midtones_mask] = np.clip(u[midtones_mask] + midtones_red * 255, 0, 255)
    v[midtones_mask] = np.clip(v[midtones_mask] + midtones_green * 255, 0, 255)

    # Highlights.
    u[highlights_mask] = np.clip(u[highlights_mask] + highlights_red * 255, 0, 255)
    v[highlights_mask] = np.clip(v[highlights_mask] + highlights_green * 255, 0, 255)

    # Reassemble and convert back.
    yuv = cv2.merge([y, u, v])
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)


def worker(frame_queue, result_queue, shadow_red, shadow_green, shadow_blue, midtones_red, midtones_green, midtones_blue, highlights_red, highlights_green, highlights_blue):
    """
    Worker-thread loop: pull frames from frame_queue, run process_frame on
    each, and push the result onto result_queue. A None item is the shutdown
    sentinel.
    """
    while True:
        item = frame_queue.get()
        if item is None:
            # Sentinel received — stop this worker.
            break
        result_queue.put(process_frame(
            item,
            shadow_red=shadow_red,
            shadow_green=shadow_green,
            shadow_blue=shadow_blue,
            midtones_red=midtones_red,
            midtones_green=midtones_green,
            midtones_blue=midtones_blue,
            highlights_red=highlights_red,
            highlights_green=highlights_green,
            highlights_blue=highlights_blue,
        ))
        frame_queue.task_done()


@retry(exceptions=Exception, tries=3, delay=1, backoff=2)
def adjust_color_balance(video_path, output_path, shadow_red=0, shadow_green=0, shadow_blue=0, midtones_red=0, midtones_green=0, midtones_blue=0, highlights_red=0, highlights_green=0, highlights_blue=0):
    """
    Adjust a video's colour balance per tonal band (shadows / midtones /
    highlights), one RGB-style channel at a time.

    NOTE(review): the channel indexing below assumes BGR frames (index 2 is
    treated as red, index 0 as blue), but moviepy's fl_image supplies RGB
    frames — the red/blue factors may effectively be swapped. TODO confirm
    before changing, since callers may already compensate.

    :param video_path: input video path
    :param output_path: output video path
    :param shadow_red: shadow-band red adjustment (0 to 1, fraction of 255)
    :param shadow_green: shadow-band green adjustment (0 to 1)
    :param shadow_blue: shadow-band blue adjustment (0 to 1)
    :param midtones_red: midtone-band red adjustment (0 to 1)
    :param midtones_green: midtone-band green adjustment (0 to 1)
    :param midtones_blue: midtone-band blue adjustment (0 to 1)
    :param highlights_red: highlight-band red adjustment (0 to 1)
    :param highlights_green: highlight-band green adjustment (0 to 1)
    :param highlights_blue: highlight-band blue adjustment (0 to 1)
    :return: the output path
    """
    # Load the video.

    video = VideoFileClip(video_path)

    # Per-frame adjustment, applied via fl_image below.
    def process_frame(frame):
        # Copy so the masked in-place writes never touch a read-only buffer.
        frame = np.copy(frame)

        # HSV is used only to derive a brightness (V) value per pixel.
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(hsv)

        # Normalize brightness to [0, 1] for the band thresholds.
        v_norm = v / 255.0

        # Tonal-band masks: shadows < 0.3, midtones 0.3-0.7, highlights >= 0.7.
        shadow_mask = v_norm < 0.3
        midtones_mask = (v_norm >= 0.3) & (v_norm < 0.7)
        highlights_mask = v_norm >= 0.7

        # Shadows (factors are fractions of full scale, hence * 255).
        if shadow_red != 0:
            frame[shadow_mask, 2] = np.clip(frame[shadow_mask, 2] + shadow_red * 255, 0, 255)
        if shadow_green!= 0:
            frame[shadow_mask, 1] = np.clip(frame[shadow_mask, 1] + shadow_green * 255, 0, 255)
        if shadow_blue != 0:
            frame[shadow_mask, 0] = np.clip(frame[shadow_mask, 0] + shadow_blue * 255, 0, 255)

        # Midtones.
        if midtones_red != 0:
            frame[midtones_mask, 2] = np.clip(frame[midtones_mask, 2] + midtones_red * 255, 0, 255)
        if midtones_green != 0:
            frame[midtones_mask, 1] = np.clip(frame[midtones_mask, 1] + midtones_green * 255, 0, 255)
        if midtones_blue != 0:
            frame[midtones_mask, 0] = np.clip(frame[midtones_mask, 0] + midtones_blue * 255, 0, 255)

        # Highlights.
        if highlights_red != 0:
            frame[highlights_mask, 2] = np.clip(frame[highlights_mask, 2] + highlights_red * 255, 0, 255)
        if highlights_green != 0:
            frame[highlights_mask, 1] = np.clip(frame[highlights_mask, 1] + highlights_green * 255, 0, 255)
        if highlights_blue != 0:
            frame[highlights_mask, 0] = np.clip(frame[highlights_mask, 0] + highlights_blue * 255, 0, 255)
        return frame
    # Apply the per-frame transform to the whole clip.
    processed_video = video.fl_image(process_frame)
    # Render the result.
    processed_video.write_videofile(output_path, codec="libx264", remove_temp=True, fps=video.fps, threads=4, logger=None)
    return output_path