import os
import time

from loguru import logger
import pysrt
from moviepy.editor import *
import uuid
from tqdm import tqdm
import requests
import random
import dashscope
from hashlib import md5
import edge_tts
import asyncio
from http import HTTPStatus
import math
import cv2
import numpy as np
import json
import config
import tools

# Root working directory; the 原始/ (source) and 改写/ (rewritten) trees hang off it.
base_path = f'{os.getcwd()}/'
# Tool version string shown in the interactive menu.
v = '3.4.2-e'
# Available edge-tts voices for dubbing:
#   'Name'  — edge-tts voice identifier,
#   'dec'   — human-readable menu label (Chinese),
#   'speed' — default speaking-rate offset passed as the edge-tts `rate`.
voices_role_list = ({'Name': 'zh-CN-XiaoxiaoNeural', 'dec': '晓晓-女', 'speed': '+20%'},
                    {'Name': 'zh-CN-XiaoyiNeural', 'dec': '晓怡-女', 'speed': '+20%'},
                    {'Name': 'zh-CN-YunjianNeural', 'dec': '云健-男', 'speed': '+20%'},
                    {'Name': 'zh-CN-YunxiNeural', 'dec': '云希-男', 'speed': '+20%'},
                    {'Name': 'zh-CN-YunxiaNeural', 'dec': '云夏-男', 'speed': '+20%'},
                    {'Name': 'zh-CN-YunyangNeural', 'dec': '云阳-男', 'speed': '+20%'},
                    # {'Name': 'zh-CN-shaanxi-XiaoniNeural', 'dec': '掌柜-陕西-女', 'speed': '+20%'},
                    {'Name': 'en-US-RogerNeural', 'dec': '美国-年轻男性', 'speed': '+10%'},
                    {'Name': 'en-US-GuyNeural', 'dec': '美国-中青年男性', 'speed': '+10%'},
                    {'Name': 'en-US-ChristopherNeural', 'dec': '英语-平和口音-正剧-男-1.1', 'speed': '+10%'},
                    {'Name': 'en-US-EricNeural', 'dec': '美国-中年男性2', 'speed': '+10%'},
                    # {'Name': 'en-US-JennyMultilingualNeural', 'dec': '英语-剧情解说-女-1.1(暂无)', 'speed': '+10%'},
                    # {'Name': 'en-US-AIGenerate1Neural', 'dec': '英语-记录片正剧-男-1.0', 'speed': '+0%'},
                    {'Name': 'en-US-AriaNeural', 'dec': '英语-紧凑剧情-女-1.2', 'speed': '+20%'},
                    {'Name': 'en-US-AriaNeural', 'dec': '英语-年轻剧科幻-女-1.1', 'speed': '+10%'},
                    )


def text_op(op=1, ssml=None, speed='+0%'):
    """Operate on the rewritten SRT under 改写/.

    op == 1: synthesize a '<index>-t.wav' for every narrator line
             ('说话人 1: ' prefix) using voice *ssml* at rate *speed*.
    op == 2: return the parsed subtitle item list without side effects.
    """
    file_name = get_file_name(f'{base_path}改写', 'srt')
    srt_data = pysrt.open(f'{base_path}改写/{file_name}.srt')
    if op == 2:
        return srt_data.data
    if op == 1:
        wav_dir = f'{base_path}改写/wav'
        if not os.path.exists(wav_dir):
            os.mkdir(wav_dir)
        for sub in tqdm(srt_data.data):
            line = sub.text
            if not line.startswith('说话人 1: '):
                continue
            line = line.replace('说话人 1: ', '')
            asyncio.run(get_voice_edge(text=line, file_name=f'{sub.index}-t.wav', role=ssml, speed=speed))
        logger.info('--------生成语音 ok！---------')


async def get_voice_edge(text, file_name, role, speed):
    """Synthesize *text* with edge-tts voice *role* and save it as 改写/wav/<file_name>."""
    destination = f'{base_path}改写/wav/{file_name}'
    tts = edge_tts.Communicate(text, role, rate=speed, volume='+50%', receive_timeout=30)
    await tts.save(destination)


def get_role_list():
    """Print the available dubbing voices as a 1-based numbered menu."""
    menu_number = 1
    for role in voices_role_list:
        logger.warning(f'{menu_number}: {role["dec"]}')
        menu_number += 1


def choose_role(index_role):
    """Map a 1-based menu index (see get_role_list) to its edge-tts voice name."""
    selected = voices_role_list[index_role - 1]
    return selected['Name']


def change_text_with_ali(text):
    """Rewrite *text* with Alibaba DashScope (qwen-turbo), keeping roughly the same length.

    Returns the rewritten sentence, or the original *text* unchanged when the
    API call does not return HTTP 200.
    """
    # SECURITY: an API key is hardcoded below as a fallback. It should be
    # rotated and supplied via the DASHSCOPE_API_KEY environment variable.
    api_key = os.environ.get('DASHSCOPE_API_KEY', 'sk-9df3420709994cd99f3670727a23b98b')
    # model alternatives: qwen-plus (better quality), qwen-turbo (cheaper/faster)
    resp = dashscope.Generation.call(model='qwen-turbo',
                                     prompt=f'改写：“{text}” 这句话。要求改写后必须字数差不多。直接给出改写后的句子',
                                     api_key=api_key)

    if resp.status_code == HTTPStatus.OK:
        return resp.output['text']
    # best-effort: fall back to the untouched input on any API failure
    return text


def trs_to_e(text):
    """Translate *text* from Chinese to English via the Baidu Fanyi API.

    Returns the translated string, or *text* unchanged if the request fails
    or the response does not contain a translation (e.g. an API error body).
    """
    # SECURITY: credentials are hardcoded — move them to environment/config.
    appid = '20240403002013842'
    appkey = 'uuFg51ktnuJWzGIQ01Q5'
    salt = random.randint(32768, 65536)
    # Baidu's required request signature: MD5(appid + query + salt + appkey)
    sign = md5(f'{appid}{text}{salt}{appkey}'.encode('utf-8')).hexdigest()
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    payload = {'appid': appid, 'q': text, 'from': 'zh', 'to': 'en', 'salt': salt, 'sign': sign}
    try:
        r = requests.post('http://api.fanyi.baidu.com/api/trans/vip/translate',
                          params=payload, headers=headers, timeout=30)
        result = r.json()
        # error responses carry 'error_code' instead of 'trans_result'
        return result['trans_result'][0]['dst']
    except (requests.RequestException, KeyError, IndexError, ValueError):
        # best-effort: keep pipeline running on network/API failure
        return text


def change_srt():
    """Rewrite every narrator line ('说话人 1: ') of the source SRT via the LLM.

    Reads 原始/原srt/<name>.srt, rewrites narrator lines with
    change_text_with_ali, copies all other cues verbatim, and saves the
    result as 改写/<name>_gpt.srt with the original timings.
    """
    file_name = get_file_name(f'{base_path}原始/原srt', 'srt')
    out_path = f'{base_path}改写/{file_name}_gpt.srt'
    srt_data = pysrt.open(f'{base_path}原始/原srt/{file_name}.srt')
    subtitle = pysrt.SubRipFile()
    for item in tqdm(srt_data.data):
        content = item.text
        if content.startswith('说话人 1: '):
            rewritten = change_text_with_ali(content.replace('说话人 1: ', ''))
            new_text = f'说话人 1: {rewritten}'
        else:
            new_text = content
        subtitle.append(pysrt.SubRipItem(
            index=item.index,
            start=item.start.ordinal,  # start time in milliseconds
            end=item.end.ordinal,  # end time in milliseconds
            text=new_text
        ))
    subtitle.save(out_path)
    logger.info('-----------改写 完成!----------')


def change_role(is_s):
    """Normalize speaker labels in the raw SRT so '说话人 1' is the narrator.

    is_s == 1 (video opens with original audio): every '说话人 2: ' cue is
    relabelled '说话人 1: ' (the first cue is forced to start at 0 ms); any
    other cue gets a fresh '说话人 2: ' prefix replacing its first 7 chars.
    Otherwise (video opens with narration): '说话人 1: ' cues are kept
    verbatim and all others are relabelled '说话人 2: '.

    Saves <name>_c.srt, removes the original, and is a no-op when the file
    name already ends with '_c' (already converted).
    """
    file_name = get_file_name(f'{base_path}原始/原srt', 'srt')
    if file_name.endswith('_c'):
        logger.info('------- 完成角色调整la ----------')
        return
    srt_data = pysrt.open(f'{base_path}原始/原srt/{file_name}.srt')
    subtitle = pysrt.SubRipFile()
    for item in tqdm(srt_data.data):
        content = item.text
        if is_s == 1 and content.startswith('说话人 2: '):
            # swap label to narrator; pin the very first cue to 0 ms
            start_ms = 0 if item.index == 1 else item.start.ordinal
            subtitle.append(pysrt.SubRipItem(
                index=item.index,
                start=start_ms,  # start time in milliseconds
                end=item.end.ordinal,  # end time in milliseconds
                text='说话人 1: ' + content.replace('说话人 2: ', '')
            ))
        elif is_s != 1 and content.startswith('说话人 1: '):
            # narrator cue already labelled correctly — copy verbatim
            subtitle.append(pysrt.SubRipItem(
                index=item.index,
                start=item.start.ordinal,  # start time in milliseconds
                end=item.end.ordinal,  # end time in milliseconds
                text=item.text
            ))
        else:
            # relabel as original-audio speaker, dropping the old 7-char prefix
            subtitle.append(pysrt.SubRipItem(
                index=item.index,
                start=item.start.ordinal,  # start time in milliseconds
                end=item.end.ordinal,  # end time in milliseconds
                text='说话人 2: ' + content[7:]
            ))
    subtitle.save(f'{base_path}原始/原srt/{file_name}_c.srt')
    os.remove(f'{base_path}原始/原srt/{file_name}.srt')
    logger.info('------- 完成角色调整 ---------- ')


def get_mp3_from_video():
    """Extract the audio track of the source MP4 to an MP3 next to it.

    Looks up the single .mp4 under 原始/原视频/ and writes <name>.mp3 there.
    The clip is closed afterwards — the original leaked the underlying
    ffmpeg reader process / file handles.
    """
    in_path = f'{base_path}原始/原视频/'
    file_name = get_file_name(in_path, 'mp4')
    video = VideoFileClip(filename=f'{in_path}{file_name}.mp4')
    try:
        video.audio.write_audiofile(filename=f'{in_path}{file_name}.mp3')
    finally:
        video.close()


def get_file_name(path, file_type):
    """Return the stem of the last file in *path* with extension *file_type*.

    *file_type* is given without the dot (e.g. 'mp4'). Returns None when no
    file matches. When several match, the last one in os.listdir order wins.
    """
    chosen = None
    for entry in os.listdir(f'{path}'):
        stem, ext = os.path.splitext(entry)
        if ext[1:] == file_type:
            chosen = stem
    return chosen


def init_dir():
    """Ensure the working-directory layout exists and a source video is present.

    Creates 改写/wav and 原始/{原视频,原srt} under base_path (idempotent via
    os.makedirs(exist_ok=True), replacing the original nested mkdir checks),
    then raises when 原始/原视频 contains no .mp4 file.
    """
    os.makedirs(f'{base_path}改写/wav', exist_ok=True)
    os.makedirs(f'{base_path}原始/原视频', exist_ok=True)
    os.makedirs(f'{base_path}原始/原srt', exist_ok=True)

    video_name = get_file_name(f'{base_path}原始/原视频', 'mp4')
    srt_name = get_file_name(f'{base_path}原始/原srt', 'srt')
    # BUG FIX: the original bound this to a local named `config`, shadowing
    # the `config` module imported at the top of the file.
    config_name = get_file_name(base_path, 'text')

    # if not os.path.exists(f'{base_path}/配音角色'):
    #     raise Exception('文件夹下面没有‘配音角色’文件夹！')
    if not video_name:
        raise Exception('文件夹下面没有MP4格式的视频！')
    # if not srt_name:
    #     raise Exception('文件夹下面没有srt文件！')
    # if not config_name:
    #     raise Exception('文件夹下面没有config配置文件！')
    # os.rename(f'{base_path}{video_name}.mp4',f'{base_path}原始/原视频/{video_name}.mp4')
    # os.rename(f'{base_path}{srt_name}.srt', f'{base_path}原始/原srt/{srt_name}.srt')


def cute_mute():
    """Trim the trailing silence edge-tts appends to each generated wav.

    For every narrator cue, scans <index>-t.wav backwards for the last
    non-silent sample, writes the trimmed audio to <index>.wav and deletes
    the temp file. Clips of 1.5 s or less are renamed untouched.

    Fixes over the original:
    - compares sample *magnitude* (abs) against the threshold; the signed
      comparison missed loud negative-phase samples,
    - if no sample exceeds the threshold, keeps the clip as-is instead of
      writing an empty subclip(0, 0),
    - closes the clip on the short-duration path (resource leak).
    """
    file_name = get_file_name(f'{base_path}改写', 'srt')
    srt_data = pysrt.open(f'{base_path}改写/{file_name}.srt')
    for item in tqdm(srt_data.data):
        if not item.text.startswith('说话人 1: '):
            continue
        tmp_path = f'{base_path}改写/wav/{item.index}-t.wav'
        out_path = f'{base_path}改写/wav/{item.index}.wav'
        if not os.path.exists(tmp_path):
            continue
        audio_clip = AudioFileClip(filename=tmp_path)
        if audio_clip.duration <= 1.5:
            # too short to carry a meaningful tail — keep as-is
            audio_clip.close()
            os.rename(tmp_path, out_path)
            continue
        samples = audio_clip.to_soundarray()
        end = 0
        # scan backwards in 10-sample steps for the last non-silent sample;
        # assumes stereo samples (left, right) — TODO confirm for mono input
        for i in range(len(samples) - 1, 0, -10):
            left, _right = samples[i]
            if abs(left) > 0.00015:
                end = round(i / audio_clip.fps, 2)
                break
        if end <= 0:
            # nothing above the threshold — don't produce an empty clip
            audio_clip.close()
            os.rename(tmp_path, out_path)
            continue
        trimmed = audio_clip.subclip(0, end)
        trimmed.write_audiofile(filename=out_path, logger=None)
        audio_clip.close()
        trimmed.close()
        os.remove(tmp_path)


def video_cut():
    """Assemble the dubbed video from the source MP4 and the generated wavs.

    For each subtitle cue of the rewritten SRT, cuts the matching footage.
    Narrator cues ('说话人 1: ') get the synthesized wav of the same index;
    when the narration outlasts the footage, one of three filler strategies
    is chosen at random: freeze-frame fadeout (1-3), slow-down (4-8), or a
    sine-eased zoom on the last frame (9-10). The result is written to
    改写/youtubo_<name>.mp4.

    NOTE(review): non-narrator segments are never appended to any list, so
    the output contains only narrated segments — confirm this is intended.
    """
    video_name = get_file_name(f'{base_path}原始/原视频', 'mp4')
    clip = VideoFileClip(f'{base_path}原始/原视频/{video_name}.mp4')
    # op=2 returns the parsed subtitle items of the rewritten SRT
    sp2 = text_op(2, None)
    clips = []  # NOTE(review): unused — belongs to the commented-out pipeline below
    clips_youtube = []
    for item in tqdm(sp2):
        index_item = item.index
        content = item.text
        # cut the footage matching this cue's span (.ordinal is milliseconds)
        subclip_t = clip.subclip(item.start.ordinal / 1000, item.end.ordinal / 1000)
        if content.startswith('说话人 1: '):
            # narrator cue: load the wav generated for the same cue index
            audio_clip = AudioFileClip(filename=f'{base_path}/改写/wav/{index_item}.wav')
            if audio_clip.duration > item.duration.ordinal / 1000:
                # narration longer than footage — pick a filler strategy (1..10)
                random_number = random.randint(1, 10)
                if random_number < 4:
                    # logger.info(f"{random_number} - fadeout :{item.index}")
                    # 1-3: extend with a fading freeze-frame of the last frame
                    t_ = audio_clip.duration - item.duration.ordinal / 1000
                    new_clip = ImageClip(subclip_t.get_frame(t=subclip_t.duration), duration=t_).fadeout(t_)
                    subclip = concatenate_videoclips([subclip_t, new_clip])
                if random_number > 8:
                    # logger.info(f"{random_number} -Zoom-in effect :{item.index}")
                    # 9-10: extend with a sine-eased zoom on the last frame
                    size = subclip_t.size
                    t_ = audio_clip.duration - item.duration.ordinal / 1000
                    slide = ImageClip(subclip_t.get_frame(t=subclip_t.duration)).set_fps(subclip_t.fps).set_duration(t_)

                    def re(t):
                        # scale follows half a sine wave, peaking at 1.1, floored at 1
                        a = math.radians(t * 180 / t_)
                        num = math.sin(a)
                        num = num * 0.1 + 1
                        if num < 1:
                            num = 1
                        return num

                    slide = slide.resize(re)  # Zoom-in effect
                    slide = slide.set_position(('center', 'center'))
                    slide = CompositeVideoClip([slide], size=size)
                    subclip = concatenate_videoclips([subclip_t, slide])
                if 3 < random_number < 9:
                    # logger.info(f"{random_number} -speedx :{item.index}")
                    # 4-8: slow the footage so it lasts as long as the narration
                    subclip = subclip_t.fx(vfx.speedx, round((item.duration.ordinal / 1000) / audio_clip.duration, 2))
            else:
                subclip = subclip_t
            f_clip = subclip.set_audio(audio_clip)
            clips_youtube.append(f_clip)
        else:
            # original-audio segment — currently dropped (not appended anywhere)
            f_clip = subclip_t
        # clips.append(f_clip)
    # clips_t = concatenate_videoclips(clips)
    clip_youtube = concatenate_videoclips(clips_youtube)
    # clips_t.write_videofile(f'{base_path}改写/改写_{video_name}.mp4')
    logger.info('5.1 ---->  开始合成没有原音的视频 ----- ')
    clip_youtube.write_videofile(f'{base_path}改写/youtubo_{video_name}.mp4')
    logger.info(f'!!!!恭喜合成成功  两个影片 一个有电影原音 一个没有。')


def get_last_frame(video_path):
    """Cut the first 6 s of *video_path*, append a 5 s fading freeze of its
    last frame, and write the result to 't-2.mp4' in the current directory."""
    source = VideoFileClip(video_path)
    head = source.subclip(0, 6)
    freeze = ImageClip(head.get_frame(t=head.duration), duration=5).fadeout(5)
    combined = concatenate_videoclips([head, freeze])
    combined.write_videofile('t-2.mp4')


def test_add_zoom_e():
    """Experiment: append a sine-eased OpenCV zoom of the last frame to a subclip.

    Saves the last frame of a fixed subclip to temp.jpg, renders a short
    zoom-in video of it frame-by-frame with cv2.VideoWriter, concatenates it
    after the subclip, and writes 't-2.mp4'. Temp files are cleaned up.
    """
    video_name = get_file_name(f'{base_path}原始/原视频', 'mp4')
    clip = VideoFileClip(f'{base_path}原始/原视频/{video_name}.mp4')
    subclip_t = clip.subclip(20.09, 25.78)
    subclip_t.save_frame(f'temp.jpg', t=subclip_t.duration)
    # slide = ImageClip(subclip_t.get_frame(t=subclip_t.duration)).set_duration(5)
    image = cv2.imread(f'temp.jpg')
    fps = subclip_t.fps
    duration_seconds = 1.35
    total_frames = int(fps * duration_seconds) + 1
    WIDTH = image.shape[1]
    HEIGHT = image.shape[0]
    fourcc = cv2.VideoWriter.fourcc(*'mp4v')
    videoWriter = cv2.VideoWriter(f'{base_path}改写/temp/temp.mp4', fourcc, fps, (WIDTH, HEIGHT), isColor=True)
    # black canvas reused as the output frame buffer
    background = np.full((HEIGHT, WIDTH, 3), 0, np.uint8)

    for i in tqdm(range(total_frames)):

        # scale factor follows half a sine wave over the clip, peaking at 1.1
        a = math.radians(i * 180 / total_frames)
        num = math.sin(a)
        num = num * 0.1 + 1
        if num < 1:
            num = 1
        scale_factor = num
        new_w = int(WIDTH * scale_factor)
        new_h = int(HEIGHT * scale_factor)
        scaleImg = cv2.resize(src=image, dsize=(new_w, new_h))
        # crop the enlarged image back to the original size, centered
        offset_w = int((new_w - WIDTH) / 2)
        offset_h = int((new_h - HEIGHT) / 2)
        background[:HEIGHT, :WIDTH] = scaleImg[offset_h:offset_h + HEIGHT, offset_w:offset_w + WIDTH]
        videoWriter.write(background)
    # crop pattern reference: img[y:y+h, x:x+w]
    videoWriter.release()
    os.remove('temp.jpg')
    clip_f = VideoFileClip(f'{base_path}改写/temp/temp.mp4')
    clips_t = concatenate_videoclips([subclip_t, clip_f])
    clips_t.write_videofile('t-2.mp4')
    os.remove(f'{base_path}改写/temp/temp.mp4')


def test_mask_subtitle():
    """Experiment: blur the bottom 120 px band (hard-coded subtitle area) of a clip.

    Streams 原始/原视频/t_1.mp4 frame-by-frame with OpenCV, Gaussian-blurs the
    bottom strip of each frame, re-attaches the original audio with moviepy,
    and writes 打码_t_1.mp4. The intermediate silent file is removed.
    """
    # video_name = get_file_name(f'{base_path}原始/原视频', 'mp4')
    # clip = VideoFileClip(f'{base_path}原始/原视频/{video_name}.mp4')
    # subclip_t = clip.subclip(20.09, 25.78)
    # subclip_t.write_videofile(f'{base_path}原始/原视频/t_1.mp4')

    video_clip = VideoFileClip(f'{base_path}原始/原视频/t_1.mp4')  # kept for its audio track
    video_capture = cv2.VideoCapture(f'{base_path}原始/原视频/t_1.mp4')
    fps = int(video_capture.get(cv2.CAP_PROP_FPS))
    video_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    video_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    total_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
    duration_seconds = total_frames / fps
    logger.info(f'原视频opencv读出的时长：{duration_seconds}')
    # check that the video opened successfully (logs only; does not abort)
    if not video_capture.isOpened():
        logger.info('"Error: Unable to open video file."')

    fourcc = cv2.VideoWriter.fourcc(*'mp4v')
    videoWriter = cv2.VideoWriter(f'{base_path}原始/原视频/打码_temp_t_1.mp4', fourcc, fps, (video_width, video_height),
                                  isColor=True)
    # region of interest: full-width strip of the bottom 120 pixels
    x = 0
    y = video_height - 120
    w = video_width
    h = 120
    pbar = tqdm(total=total_frames, desc='字幕打码进度')
    while True:
        ret, frame = video_capture.read()
        if frame is not None:
            pbar.update(1)
            roi = frame[y:y + h, x:x + w]
            blurred_roi = cv2.GaussianBlur(roi, (151, 151), 0)  # Gaussian blur example
            frame[y:y + h, x:x + w] = blurred_roi
            videoWriter.write(frame)
        else:
            # end of stream — finalize the writer before post-processing
            videoWriter.release()
            break
    output_video = VideoFileClip(f'{base_path}原始/原视频/打码_temp_t_1.mp4')
    output_video = output_video.set_audio(video_clip.audio)
    output_video.write_videofile(f'{base_path}原始/原视频/打码_t_1.mp4')
    os.remove(f'{base_path}原始/原视频/打码_temp_t_1.mp4')


def test_image_xs():
    """Darken temp.jpg by subtracting 50 from each BGR channel (clamped at 0)
    and display the result in an OpenCV window until a key is pressed.

    Vectorized: the original looped over every pixel in Python (O(W*H)
    interpreter iterations); a single NumPy expression computes the same
    max(0, channel - 50) for the whole image at native speed.
    """
    image = cv2.imread('temp.jpg')
    # widen to a signed dtype so the subtraction cannot wrap around uint8,
    # clamp to [0, 255], then narrow back for display
    image = np.clip(image.astype(np.int16) - 50, 0, 255).astype(np.uint8)

    # show the modified image
    cv2.imshow('Modified Image', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


def test1():
    """Experiment: render each subtitle cue of 布偶 to its own temp mp4.

    Like video_cut, but deterministic: the filler strategy is chosen by the
    narration overhang t_ (zoom > 2 s, fadeout 1.3-2 s, speedx < 1.3 s), and
    every segment is written as an individual file under 改写/temp2/
    (narrated as <index>.mp4, original-audio as <index>_y.mp4).
    """
    logger.info('------- test start1 ------')
    tools.delete_files_in_folder(f'{base_path}改写/temp')
    tools.delete_files_in_folder(f'{base_path}改写/temp2')
    clip = VideoFileClip(filename=f'{base_path}原始/原视频/布偶.mp4')
    logger.info('------- test start3 ------')
    sp2 = pysrt.open(f'{base_path}改写/srt_gpt/布偶_gpt.srt')
    for item in tqdm(sp2):
        index_item = item.index
        content = item.text
        logger.info('------- test 1111 ------')
        # cut the footage matching this cue's span (.ordinal is milliseconds)
        subclip_t = clip.subclip(item.start.ordinal / 1000, item.end.ordinal / 1000)
        if content.startswith('说话人 1: '):
            audio_clip = AudioFileClip(filename=f'{base_path}改写/wav/{index_item}.wav')
            if audio_clip.duration > item.duration.ordinal / 1000:
                # t_ = how much longer the narration runs than the footage
                t_ = round(audio_clip.duration - item.duration.ordinal / 1000, 2)
                if t_ > 2:
                    # logger.info(f"Zoom-in  :{item.index}")
                    subclip = tools.add_zoom_effect(subclip_t, t_, index_item)
                if 1.3 <= t_ <= 2:
                    # logger.info(f"fadeout  :{item.index}")
                    new_clip = ImageClip(subclip_t.get_frame(t=subclip_t.duration), duration=t_).fadeout(t_)
                    subclip = concatenate_videoclips([subclip_t, new_clip])
                if t_ < 1.3:
                    # logger.info(f"speedx  :{item.index}")
                    subclip = subclip_t.fx(vfx.speedx, round((item.duration.ordinal / 1000) / audio_clip.duration, 2))
            else:
                subclip = subclip_t
            f_clip = subclip.set_audio(audio_clip)
            logger.warning(f'33333')
            f_clip.write_videofile(f'{base_path}改写/temp2/{index_item}.mp4', logger=None)
            logger.warning(f'44444')
        else:
            # original-audio cue: written with a _y suffix, footage untouched
            f_clip = subclip_t
            f_clip.write_videofile(f'{base_path}改写/temp2/{index_item}_y.mp4', logger=None)

    logger.info(f'------- test end ------')


def test11():
    """Experiment: losslessly concatenate 改写/temp/1.mp4 .. 10.mp4.

    Writes an ffmpeg concat-demuxer list file, then shells out to ffmpeg with
    '-c copy' (no re-encode) to produce 改写/mp4/f.mp4.
    """
    list_file = 'concat_videos.txt'
    entries = [f"file '{base_path}改写/temp/{n}.mp4'\n" for n in range(1, 11)]
    with open(list_file, 'w', encoding='utf-8') as f:
        f.writelines(entries)
    cmd = f'ffmpeg -f concat -safe 0 -y -i {list_file} -c copy {base_path}改写/mp4/f.mp4'
    tools.run_cmd(cmd)
    logger.info('------- test start1 ------')
    logger.info('------- test end ------')


def test():
    """Placeholder hook for ad-hoc experiments (menu option 8)."""
    pass




# Interactive entry point: prompts for a pipeline step and runs it.
# Steps 1-6 run one stage; 0 runs the whole pipeline end-to-end.
if __name__ == '__main__':

    try:
        init_dir()  # ensure folder layout exists and a source MP4 is present
        # menu (Chinese): 0 full-auto, 1 fix speaker roles, 2 rewrite script,
        # 3 synthesize audio, 4 trim audio tails, 5 assemble video, 6 extract audio
        index = int(
            input(
                f" ---- v:{v} ----\n 0 全自动\n 1 处理srt角色 \n 2 改写文案\n 3 生成新的音频片段\n 4 处理音频小尾巴\n 5 合成新的视频\n 6 提取原片音频\n"))
        if index == 1:
            logger.info('-----------开始处理角色----------')
            # ask whether the video opens with original audio (1) or narration (2)
            is_s = int(input('请输入序号：（视频开头是原音还是解说）\n 1 开头是原音 \n 2 开头是解说\n '))
            change_role(is_s)
        if index == 2:
            logger.info('-----------开始改写文案----------')
            change_srt()
        if index == 3:
            get_role_list()
            index_2 = int(input('请输入配音角色序号:'))
            role_name = choose_role(index_2)
            logger.info('-----------开始生成新的音频片段----------')
            text_op(1, role_name)
        if index == 4:
            logger.info('-----------开始处理音频小尾巴----------')
            cute_mute()
        if index == 5:
            logger.info('-----------开始合成新的视频----------')
            video_cut()
        if index == 6:
            logger.info('-----------开始提取音频----------')
            get_mp3_from_video()
        if index == 7:
            # hidden option: freeze-frame/fadeout experiment on the source video
            video_name = get_file_name(f'{base_path}原始/原视频', 'mp4')
            get_last_frame(f'{base_path}原始/原视频/{video_name}.mp4')
        if index == 8:
            # hidden option: ad-hoc test hook
            test()

        if index == 0:
            # full pipeline: roles -> rewrite -> TTS -> tail trim -> assemble
            get_role_list()
            index_2 = int(input('请输入配音角色序号:\n'))
            role_name = choose_role(index_2)
            logger.info('1 ---> 开始处理角色----------')
            is_s = int(input('请输入序号：（视频开头是原音还是解说）\n 1 开头是原音 \n 2 开头是解说\n '))
            change_role(is_s)
            logger.info('2 ---> 开始改写文案----------')
            change_srt()
            logger.info('3 ---> 开始生成语音----------')
            text_op(1, role_name)
            logger.info('4 ---> 开始去除小尾巴----------')
            cute_mute()
            logger.info('5 ---> 开始合成视频 ----------')
            video_cut()
    except Exception as e:
        # surface the error to the console user and keep the window open
        input(f'发生以下错误：\n  {e}')
    finally:
        input('回车退出')
