import asyncio
import json
import math
import os
import random
import shlex
import subprocess as sp
import uuid
from hashlib import md5
from http import HTTPStatus

import cv2
import dashscope
import edge_tts
import numpy as np
import pysrt
import requests
from better_ffmpeg_progress import FfmpegProcess
from loguru import logger
from moviepy.editor import *
from tqdm import tqdm

import config
import run


def get_file_name(path, file_type):
    """Return ``(stem, stem.ext)`` for the last file in *path* whose extension is *file_type*.

    Args:
        path: directory to scan (non-recursive).
        file_type: extension without the leading dot, e.g. ``'mp4'``.

    Returns:
        Tuple of (name without extension, full file name), or (None, None)
        when no entry matches.
    """
    stem = None
    full_name = None
    for entry in os.listdir(f'{path}'):
        base, ext = os.path.splitext(entry)
        if ext[1:] == file_type:
            # keep scanning: the last matching entry wins, as callers expect
            stem = base
            full_name = f'{base}.{file_type}'
    return stem, full_name


def get_version_num():
    """Fetch the remote version list and return its first record, or None when empty."""
    payload = requests.get(config.url_version).json()
    records = payload['data']
    if len(records) > 0:
        return records[0]
    return None


def save_config():
    """Load ``key=value`` pairs from the local ``config`` file into the config module.

    Prints every loaded pair, waits for the operator to confirm, then fetches
    the JianYing effect list from the remote endpoint into
    ``config.jy_effect_list``.

    Raises:
        KeyError: when a required key is missing from the config file.
        FileNotFoundError: when the ``config`` file does not exist.
    """
    config_dict = {}
    with open('config', 'r', encoding='utf-8') as f:
        for line in f:
            line = line.rstrip('\n')
            if not line:
                continue
            # split only on the FIRST '=' so values that themselves contain
            # '=' (e.g. URLs with query strings) are not truncated
            key, _, value = line.partition('=')
            config_dict[key] = value
    config.ali_key = config_dict['ali_key']
    config.ali_model = config_dict['ali_model']
    config.baidu_id = config_dict['baidu_id']
    config.baidu_key = config_dict['baidu_key']
    # 1: english round-trip  2: Ali LLM  3: Baidu double translation
    config.change_type = int(config_dict['change_type'])
    # 1: Microsoft edge-tts  2: local TTS service
    config.voice_type = int(config_dict['voice_type'])
    config.voice_local_url = config_dict['voice_local_url']
    for k, v in config_dict.items():
        logger.debug(f'{k}:{v}')

    input('\n确认配置以上配置，回车继续！')

    response = requests.get(config.url_jy_effect)
    res_json = response.json()
    config.jy_effect_list = res_json['data']


def save_voices_role_list():
    """Fetch the dubbing-role list and store it on ``config.voices_role_list``.

    Raises:
        Exception: when the endpoint returns no roles.
    """
    response = requests.get(config.url_dubbing)
    res_json = response.json()
    # validate the actual role list: the previous check measured the size of
    # the top-level dict, which always has keys even when 'data' is empty
    if not res_json.get('data'):
        raise Exception('获取配音列表失败，请联系管理员！')
    config.voices_role_list = res_json['data']


def run_with_index():
    """Show the top-level menu, read the chosen step number, and dispatch it."""
    menu = f" ---- {config.version_s} ----\n 0 全自动\n 1 处理srt角色 \n 2 改写文案&改写语音\n 3 合成新的视频\n 6 提取原片音频\n "
    chosen = int(input(menu))
    run.choose_index(chosen)


def choose_role_list():
    """List the available dubbing roles and store the operator's pick in config.role_dict."""
    for position, role in enumerate(config.voices_role_list, start=1):
        logger.debug(f'{position}: {role["des"]} 语速:{role["speed"]}')
    chosen = int(input('请输入配音角色序号:'))
    config.role_dict = config.voices_role_list[chosen - 1]


def get_srt_content():
    """Open the GPT-rewritten SRT file and return its list of subtitle items."""
    srt_path = f'{config.base_path}改写/srt_gpt/{config.original_srt_name}_gpt.srt'
    return pysrt.open(srt_path).data


def change_text_with_ali(text):
    """Ask the Ali dashscope model to rephrase *text*.

    Returns the rewritten sentence, or *text* unchanged when the call fails.
    """
    prompt = f'改写：“{text}” 这句话。要求改写后必须字数差不多。直接给出改写后的句子。'
    resp = dashscope.Generation.call(model=config.ali_model,
                                     prompt=prompt,
                                     api_key=config.ali_key)
    if resp.status_code != HTTPStatus.OK:
        return text
    return resp.output['text']


async def get_voice_edge(text, file_name, role, speed):
    """Synthesize *text* with edge-tts and save the audio under 改写/wav_t/."""
    tts = edge_tts.Communicate(text, role, rate=speed, volume='+50%', receive_timeout=30)
    await tts.save(f'{config.base_path}改写/wav_t/{file_name}')


def delete_files_in_folder(folder_path):
    """Remove every regular file under *folder_path* recursively, keeping the directory tree."""
    for dirpath, _dirnames, filenames in os.walk(folder_path):
        for name in filenames:
            os.remove(os.path.join(dirpath, name))


def trs_to_e(text):
    """Translate *text* from Chinese to English via the Baidu translate API."""
    app_id = config.baidu_id
    app_key = config.baidu_key
    # the salt randomizes the request signature, per Baidu's signing scheme
    salt = random.randint(32768, 65536)
    sign = md5(f'{app_id}{text}{salt}{app_key}'.encode('utf-8')).hexdigest()
    payload = {'appid': app_id, 'q': text, 'from': 'zh', 'to': 'en', 'salt': salt, 'sign': sign}
    resp = requests.post('http://api.fanyi.baidu.com/api/trans/vip/translate',
                         params=payload,
                         headers={'Content-Type': 'application/x-www-form-urlencoded'})
    return resp.json()['trans_result'][0]['dst']


def trs_to_c(text):
    """Translate *text* from English to Chinese via the Baidu translate API."""
    app_id = config.baidu_id
    app_key = config.baidu_key
    # the salt randomizes the request signature, per Baidu's signing scheme
    salt = random.randint(32768, 65536)
    sign = md5(f'{app_id}{text}{salt}{app_key}'.encode('utf-8')).hexdigest()
    payload = {'appid': app_id, 'q': text, 'from': 'en', 'to': 'zh', 'salt': salt, 'sign': sign}
    resp = requests.post('http://api.fanyi.baidu.com/api/trans/vip/translate',
                         params=payload,
                         headers={'Content-Type': 'application/x-www-form-urlencoded'})
    return resp.json()['trans_result'][0]['dst']


def get_voice_local(text, file_name, role, speed):
    """Request speech from the local TTS service and write the audio under 改写/wav_t/.

    Raises:
        Exception: when the service responds with a non-200 status.
    """
    query = {'text': text,
             'character': role,
             'speed': speed}
    response = requests.get(url=config.voice_local_url, params=query, stream=True)
    if response.status_code != 200:
        raise Exception("语音服务出错")
    with open(f'{config.base_path}改写/wav_t/{file_name}', 'wb') as f:
        f.write(response.content)


def add_zoom_effect(subclip, duration, index):
    """Append a *duration*-second zoom-pulse freeze frame to *subclip*.

    The clip's last frame is saved to disk, then re-scaled frame by frame with
    a sine-shaped zoom (rises to 1.1x mid-way, falls back to 1x), written to
    改写/temp/{index}_temp.mp4 and concatenated after the original clip.

    Returns:
        The concatenated moviepy clip.
    """
    # NOTE(review): 'temp.jpg' is a fixed path in the CWD — concurrent calls
    # would clobber each other's frame; confirm this is only called serially.
    subclip.save_frame(f'temp.jpg', t=subclip.duration)
    image = cv2.imread(f'temp.jpg')
    fps = subclip.fps
    total_frames = int(fps * duration) + 1
    width = image.shape[1]
    height = image.shape[0]
    fourcc = cv2.VideoWriter.fourcc(*'mp4v')
    videoWriter = cv2.VideoWriter(f'{config.base_path}改写/temp/{index}_temp.mp4', fourcc, fps, (width, height),
                                  isColor=True)
    background = np.full((height, width, 3), 0, np.uint8)
    for i in range(total_frames):
        # map frame index onto 0..pi so sin() ramps the zoom up then back down
        a = math.radians(i * 180 / total_frames)
        num = math.sin(a)
        num = num * 0.1 + 1
        if num < 1:
            num = 1
        scale_factor = num
        new_w = int(width * scale_factor)
        new_h = int(height * scale_factor)
        scaleImg = cv2.resize(src=image, dsize=(new_w, new_h))
        # center-crop the scaled image back to the original frame size
        offset_w = int((new_w - width) / 2)
        offset_h = int((new_h - height) / 2)
        background[:height, :width] = scaleImg[offset_h:offset_h + height, offset_w:offset_w + width]
        videoWriter.write(background)
    # img[y:y+h, x:x+w]
    videoWriter.release()
    # os.remove('temp.jpg')
    clip_f = VideoFileClip(f'{config.base_path}改写/temp/{index}_temp.mp4')
    clips_t = concatenate_videoclips([subclip, clip_f])
    # os.remove(f'{config.base_path}改写/temp/temp.mp4')
    return clips_t


def mask_subtitles():
    """Interactively blur a horizontal band (burned-in subtitles) of a video.

    Prompts for a file name under 改写/mp4/, the band's offset measured from
    the bottom edge, and its height. Every frame has that region Gaussian-
    blurred, the original audio track is restored with moviepy, and the result
    is written as 打码_{name}.mp4 next to the input.
    """
    file_name = input('请输入要处理的文件名字（不带.mp4）：')
    y = int(input('请输入要遮住的区域开始的像素值（整数）：'))
    h = int(input('请输入要遮住的区域的高度（整数）：'))
    video_clip = VideoFileClip(f'{config.base_path}改写/mp4/{file_name}.mp4')
    video_capture = cv2.VideoCapture(f'{config.base_path}改写/mp4/{file_name}.mp4')
    fps = int(video_capture.get(cv2.CAP_PROP_FPS))
    video_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    video_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    total_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
    # verify the video opened successfully before writing anything
    if not video_capture.isOpened():
        logger.warning('"Error: Unable to open video file."')
        raise Exception(f'有原音_{config.original_video_name}.mp4 这个文件不能被打开')

    fourcc = cv2.VideoWriter.fourcc(*'mp4v')
    videoWriter = cv2.VideoWriter(f'{config.base_path}改写/mp4/temp_.mp4', fourcc, fps, (video_width, video_height),
                                  isColor=True)
    x = 0
    w = video_width
    # the operator gives the offset from the bottom edge; convert to a top-based row
    y = video_height - y
    pbar = tqdm(total=total_frames, desc='打码进度')
    while True:
        ret, frame = video_capture.read()
        if frame is not None:
            pbar.update(1)
            roi = frame[y:y + h, x:x + w]
            blurred_roi = cv2.GaussianBlur(roi, (151, 151), 0)  # Gaussian blur with a large 151x151 kernel
            frame[y:y + h, x:x + w] = blurred_roi
            videoWriter.write(frame)
        else:
            videoWriter.release()
            break
    output_video = VideoFileClip(f'{config.base_path}改写/mp4/temp_.mp4')
    output_video = output_video.set_audio(video_clip.audio)
    output_video.write_videofile(f'{config.base_path}改写/mp4/打码_{file_name}.mp4')
    os.remove(f'{config.base_path}改写/mp4/temp_.mp4')
    logger.warning('成功！')


def run_cmd(cmd, show_banner=True):
    """Run an ffmpeg command line.

    Args:
        cmd: full command string; paths inside may be double-quoted.
        show_banner: when True run through FfmpegProcess (progress bar);
            otherwise run quietly through the shell with -loglevel quiet.
    """
    if show_banner:
        # shlex.split keeps quoted arguments (paths containing spaces) as a
        # single argv entry; the previous cmd.split(' ') broke them apart
        process = FfmpegProcess(shlex.split(cmd))
        process.run()
    else:
        sp.call(f'{cmd} -loglevel quiet', shell=True)


def cut_video_and_effect(cap, start_time, end_time, index, duration, content):
    """Cut [start_time, end_time] out of *cap* and fit it to the new dubbed audio.

    Writes 改写/temp/{index}.mp4 with 改写/wav/{index}.wav muxed in. For
    speaker-1 segments, the overrun t_ (new audio length minus original
    segment length, seconds) picks the strategy:
      * t_ < 0.05  — mux audio directly, no stretching;
      * t_ < 1.3   — slow the video down to match the audio;
      * otherwise  — pad with an ffmpeg xfade transition built from the
                     segment's last frame, then mux.
    Other segments are muxed unchanged.

    Args:
        cap: open cv2.VideoCapture positioned on the source video.
        start_time: segment start in seconds.
        end_time: segment end in seconds.
        index: subtitle index used for the temp file names.
        duration: original segment duration in seconds.
        content: subtitle text; a '说话人 1: ' prefix marks a rewritten segment.
    """
    video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    start_frame = math.floor(start_time * fps)
    end_frame = math.floor(end_time * fps)
    frame_total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # clamp to the reported frame count so the copy loop can terminate
    if end_frame >= frame_total:
        end_frame = frame_total - 1
    bit_rate = int(cap.get(cv2.CAP_PROP_BITRATE))
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

    fourcc = cv2.VideoWriter.fourcc(*'mp4v')  # mp4v
    videoWriter = cv2.VideoWriter(f'{config.base_path}改写/temp/{index}_y.mp4',
                                  fourcc,
                                  fps,
                                  (video_width, video_height),
                                  isColor=True)
    current_frame = start_frame
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            logger.warning(f'{index}-- start_frame {start_frame} ---end_frame {end_frame} - frame_total {frame_total}')
            break
        if current_frame == end_frame:
            # snapshot the segment's last frame twice: the xfade branch below
            # dissolves {index}_l.png into {index}_f.png
            cv2.imencode('.png', frame)[1].tofile(f'{config.base_path}改写/temp/{index}_l.png')
            cv2.imencode('.png', frame)[1].tofile(f'{config.base_path}改写/temp/{index}_f.png')
        current_frame += 1
        if current_frame <= end_frame:
            videoWriter.write(frame)
        else:
            break
    videoWriter.release()
    # -----
    if content.startswith('说话人 1: '):
        audio_clip = AudioFileClip(filename=f'{config.base_path}/改写/wav/{index}.wav')
        t_ = round(audio_clip.duration - duration, 2)

        if t_ < 0.05:
            logger.info(f'{index} - - 无处理1')
            set_audio(f'{config.base_path}改写/temp/{index}_y.mp4',
                      f'{config.base_path}/改写/wav/{index}.wav',
                      f'{config.base_path}改写/temp/{index}.mp4',
                      bit_rate)
        else:
            if t_ < 1.3:
                # small overrun: stretch the video by the audio/video ratio
                slow_videos(f'{config.base_path}改写/temp/{index}_y.mp4',
                            f'{config.base_path}改写/temp/{index}_.mp4',
                            round(audio_clip.duration / duration, 2))
                logger.warning(f'{index} - - slow_videos -{t_}')
            else:
                # large overrun: append an xfade transition clip of length t_
                logger.warning(f'{index} - - effect_str -{t_}')
                effect_str = random.sample(config.effect_list, 1)[0]
                out_put = f'{config.base_path}改写/temp/{index}_e.mp4'
                cmd = f'ffmpeg -loop 1 -t {t_} -i "{config.base_path}改写/temp/{index}_l.png" -loop 1 -t {t_} -i "{config.base_path}改写/temp/{index}_f.png" -filter_complex "[0][1]xfade=transition={effect_str}:duration={t_ - 0.01}:offset=0.01,format=yuv420p" -r {fps} -y "{out_put}"'
                run_cmd(cmd, show_banner=False)
                merge_videos(f'{config.base_path}改写/temp/{index}_y.mp4',
                             f'{config.base_path}改写/temp/{index}_e.mp4',
                             f'{config.base_path}改写/temp/{index}_.mp4', )

            set_audio(f'{config.base_path}改写/temp/{index}_.mp4',
                      f'{config.base_path}/改写/wav/{index}.wav',
                      f'{config.base_path}改写/temp/{index}.mp4',
                      bit_rate)

    else:
        logger.info(f'{index} - - 无处理2')
        set_audio(f'{config.base_path}改写/temp/{index}_y.mp4',
                  f'{config.base_path}/改写/wav/{index}.wav',
                  f'{config.base_path}改写/temp/{index}.mp4',
                  bit_rate)
    os.remove(f'{config.base_path}改写/temp/{index}_f.png')
    os.remove(f'{config.base_path}改写/temp/{index}_l.png')


def cut_video_and_freeze(cap, start_time, end_time, index, duration, content):
    """Cut [start_time, end_time] out of *cap*, padding long overruns with a freeze frame.

    Writes 改写/temp/{index}.mp4 with 改写/wav/{index}.wav muxed in. The
    overrun t_ (new audio minus original segment length, seconds) selects:
      * t_ <= 0   — mux audio directly;
      * t_ < 3.5  — slow the video down to match;
      * otherwise — repeat the segment's last frame for t_ seconds.

    Args:
        cap: open cv2.VideoCapture positioned on the source video.
        start_time: segment start in seconds.
        end_time: segment end in seconds.
        index: subtitle index used for temp file names.
        duration: original segment duration in seconds.
        content: unused; kept for signature parity with cut_video_and_effect.
    """
    video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    start_frame = math.floor(start_time * fps)
    end_frame = math.floor(end_time * fps)
    frame_total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    if end_frame >= frame_total:
        end_frame = frame_total - 2
    bit_rate = int(cap.get(cv2.CAP_PROP_BITRATE))
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

    fourcc = cv2.VideoWriter.fourcc(*'mp4v')  # mp4v
    videoWriter = cv2.VideoWriter(f'{config.base_path}改写/temp/{index}_y.mp4',
                                  fourcc,
                                  fps,
                                  (video_width, video_height),
                                  isColor=True)

    audio_clip = AudioFileClip(filename=f'{config.base_path}/改写/wav/{index}.wav')
    t_ = round(audio_clip.duration - duration, 2)

    if t_ < 3.5:
        current_frame = start_frame
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                logger.warning(
                    f'{index}-- start_frame {start_frame} ---end_frame {end_frame} - frame_total {frame_total}')
                break
            current_frame += 1
            if current_frame <= end_frame:
                videoWriter.write(frame)
            else:
                break
        videoWriter.release()
        if t_ <= 0:
            set_audio(f'{config.base_path}改写/temp/{index}_y.mp4',
                      f'{config.base_path}/改写/wav/{index}.wav',
                      f'{config.base_path}改写/temp/{index}.mp4',
                      bit_rate)
        else:
            slow_videos(f'{config.base_path}改写/temp/{index}_y.mp4',
                        f'{config.base_path}改写/temp/{index}_.mp4',
                        round(audio_clip.duration / duration, 2))
            set_audio(f'{config.base_path}改写/temp/{index}_.mp4',
                      f'{config.base_path}/改写/wav/{index}.wav',
                      f'{config.base_path}改写/temp/{index}.mp4',
                      bit_rate)

    else:
        current_frame = start_frame
        freeze_frame_count = int(fps * t_)
        last_frame = None
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                logger.warning(
                    f'{index}-- start_frame {start_frame} ---end_frame {end_frame} - frame_total {frame_total}')
                break
            current_frame += 1
            # NOTE(review): if reading stops before reaching end_frame,
            # last_frame stays None and writing it would fail — confirm the
            # clamped end_frame guarantees this branch is always reached.
            if current_frame == end_frame:
                last_frame = frame
            if current_frame <= end_frame:
                videoWriter.write(frame)
            elif current_frame <= freeze_frame_count + end_frame:
                videoWriter.write(last_frame)
            else:
                break
        videoWriter.release()
        set_audio(f'{config.base_path}改写/temp/{index}_y.mp4',
                  f'{config.base_path}/改写/wav/{index}.wav',
                  f'{config.base_path}改写/temp/{index}.mp4',
                  bit_rate)


def merge_videos(video1_path, video2_path, output_path):
    """Concatenate two videos frame-by-frame with OpenCV, then delete the inputs.

    The output inherits width/height/fps from the first video; the second
    video is assumed to share those dimensions — TODO confirm with callers.

    Args:
        video1_path: first input, removed after merging.
        video2_path: second input, removed after merging.
        output_path: destination mp4 path.
    """
    video1 = cv2.VideoCapture(video1_path)
    frame_width = int(video1.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(video1.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(video1.get(cv2.CAP_PROP_FPS))

    # the writer adopts the first video's properties
    output_video = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps,
                                   (frame_width, frame_height))

    # Read until exhaustion instead of trusting CAP_PROP_FRAME_COUNT, which can
    # over-report and previously caused None frames to be written.
    while True:
        ret, frame = video1.read()
        if not ret:
            break
        output_video.write(frame)
    video1.release()

    # append every frame of the second video to the output
    video2 = cv2.VideoCapture(video2_path)
    while True:
        ret, frame = video2.read()
        if not ret:
            break
        output_video.write(frame)
    video2.release()

    output_video.release()
    os.remove(video1_path)
    os.remove(video2_path)


def set_audio(video_path, audio_path, output_path, bit_rate):
    """Mux *audio_path* onto *video_path* at *bit_rate* kbps, then delete the source video.

    ``-af apad -shortest`` pads the audio with silence and trims the result to
    the shorter stream so audio and video end together.
    """
    # -b:v {bit_rate}k
    ffmpeg_cmd = (f'ffmpeg -i "{video_path}" -i "{audio_path}" '
                  f'-shortest -af apad -b:v {bit_rate}k -c:a aac "{output_path}"')
    run_cmd(cmd=ffmpeg_cmd, show_banner=False)
    os.remove(video_path)


def slow_videos(video_path, output_path, pts):
    """Re-time *video_path* by the factor *pts* (>1 slows it down), dropping audio,
    then delete the source file."""
    ffmpeg_cmd = f'ffmpeg -i "{video_path}" -an -filter:v "setpts={pts}*PTS" "{output_path}"'
    run_cmd(ffmpeg_cmd, show_banner=False)
    os.remove(video_path)


def add_to_jy(sp2):
    """Write two JianYing (剪映) draft projects for the processed clips in *sp2*.

    Locates the local JianYing draft folder (com.lveditor.draft) by scanning
    every drive, then builds:
      * 原音_{name}   — a draft containing every clip;
      * 无原音_{name} — a second draft that skips speaker-2 clips.
    Clips whose dubbed audio overruns the original segment by >= 3.5 s get a
    random video effect and a fixed particle transition attached. All
    durations below are in microseconds (JianYing's time unit).

    Args:
        sp2: iterable of pysrt items whose .index names the clip files under
            改写/temp/ and 改写/wav/.
    """
    logger.info('开始查找剪映草稿箱的地址.........')
    disk = ['c:/', 'd:/', 'e:/', 'f:/', 'g:/', 'h:/', 'i:/']
    draft_path = None
    target_folder_name = 'com.lveditor.draft'
    for disk_c in disk:
        # NOTE(review): this break only leaves the os.walk loop; the outer
        # disk loop keeps scanning the remaining drives — confirm intended.
        for root, dirs, files in os.walk(disk_c):
            if target_folder_name in dirs:
                draft_path = os.path.join(root, target_folder_name)
                break

    if not draft_path:
        draft_path = input('没有找到剪映草稿箱子地址，请手动输入：')
    logger.info(f'剪映草稿箱的地址：{draft_path}')

    os.makedirs(f'{draft_path}/原音_{config.original_video_name}', exist_ok=True)
    config.draft_content_data['tracks'].append(
        {
            "attribute": 0,
            "flag": 0,
            "id": str(uuid.uuid1()),
            "is_default_name": True,
            "name": "",
            "segments": [],
            "type": "video"
        }
    )

    for item in tqdm(sp2):
        index_item = item.index
        video_path = f'{config.base_path}改写/temp/{index_item}.mp4'
        video_name = f'{index_item}.mp4'
        cap = cv2.VideoCapture(video_path)
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_total = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        duration = int(frame_total / fps * 1000000)
        audio_clip = AudioFileClip(filename=f'{config.base_path}/改写/wav/{index_item}.wav')
        duration_audio = int(audio_clip.duration * 1000000)
        # t_ = dubbed-audio length minus original srt segment length (microseconds)
        t_ = duration_audio - int(item.duration.ordinal / 1000 * 1000000)
        cap.release()
        config.draft_meta_data["draft_materials"][0]["value"].append(
            {
                "extra_info": video_name,
                "file_Path": video_path,
                "metetype": 'video',
                "id": str(uuid.uuid1()),
                'duration': duration,
                'width': frame_width,
                'height': frame_height
            }
        )

        # uuid3 makes ids deterministic per clip name, so re-runs are stable
        video_material_id = str(uuid.uuid3(namespace=uuid.NAMESPACE_DNS, name=video_name + "_material"))
        video_track_id = str(uuid.uuid3(namespace=uuid.NAMESPACE_DNS, name=video_name + "_track"))

        effect_ = random.sample(config.jy_effect_list, 1)[0]
        effect_name = effect_['name']
        effect_resource_id = effect_['resource_id']
        effect_id = effect_['effect_id']
        effect_material_id = str(uuid.uuid3(namespace=uuid.NAMESPACE_DNS, name=video_name + "effect_material_id"))

        transitions_name = '粒子'
        transitions_resource_id = '6855565313715474952'
        transitions_id = '4212632'
        transitions_material_id = str(
            uuid.uuid3(namespace=uuid.NAMESPACE_DNS, name=video_name + "transitions_material_id"))

        config.draft_content_data['materials']['videos'].append(
            {
                "category_name": "local",
                "extra_type_option": 0,
                "has_audio": True,
                "id": video_material_id,
                "material_name": video_name,
                "path": video_path,
                "type": "video"
            }
        )
        config.draft_content_data['tracks'][0]['segments'].append(
            {
                "id": video_track_id,
                "material_id": video_material_id,
                "visible": True,
                "volume": 1,
                "source_timerange": {
                    "duration": duration,
                    "start": 0
                },
                "target_timerange": {
                    "duration": duration,
                    "start": 0
                },
                'extra_material_refs': [effect_material_id, transitions_material_id]
            }
        )

        # only clips with a large audio overrun get effect/transition materials
        if t_ >= 3.5 * 1000000:
            logger.info(
                f'{index_item} --- {audio_clip.duration} - {item.duration.ordinal / 1000} = {t_ / 1000000} {effect_name}')
            if t_ / 1000000 / 2 > 1.1:
                config.draft_content_data['materials']['video_effects'].append(
                    {
                        "effect_id": effect_id,
                        "id": effect_material_id,
                        "name": effect_name,
                        "render_index": 11000,
                        "resource_id": effect_resource_id,
                        "time_range": {
                            "duration": t_ + 800000,
                            "start": duration - t_ - 800000
                        },
                        "track_render_index": 0,
                        "type": "video_effect",
                        "value": 1.0,
                        "version": ""
                    }
                )

            config.draft_content_data['materials']['transitions'].append(
                {
                    "effect_id": transitions_id,
                    "id": transitions_material_id,
                    "name": transitions_name,
                    "is_overlap": True,
                    "resource_id": transitions_resource_id,
                    'duration': t_ * 2,
                    "type": "transition",
                }
            )
    with open(f'{draft_path}/原音_{config.original_video_name}/draft_meta_info.json', 'w', encoding='utf-8') as f:
        json.dump(config.draft_meta_data, f, ensure_ascii=False)
    with open(f'{draft_path}/原音_{config.original_video_name}/draft_content.json', 'w', encoding='utf-8') as f:
        json.dump(config.draft_content_data, f, ensure_ascii=False)

    # -------------------------- second draft, without original-audio clips --------------------------
    os.makedirs(f'{draft_path}/无原音_{config.original_video_name}', exist_ok=True)
    config.draft_content_data_n['tracks'].append(
        {
            "attribute": 0,
            "flag": 0,
            "id": str(uuid.uuid1()),
            "is_default_name": True,
            "name": "",
            "segments": [],
            "type": "video"
        }
    )

    for item in tqdm(sp2):
        # speaker-2 clips carry the original audio — excluded from this draft
        if item.text.startswith('说话人 2: '):
            continue
        index_item = item.index
        video_path = f'{config.base_path}改写/temp/{index_item}.mp4'
        video_name = f'{index_item}.mp4'
        cap = cv2.VideoCapture(video_path)
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_total = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        duration = int(frame_total / fps * 1000000)
        audio_clip = AudioFileClip(filename=f'{config.base_path}/改写/wav/{index_item}.wav')
        duration_audio = int(audio_clip.duration * 1000000)
        t_ = duration_audio - int(item.duration.ordinal / 1000 * 1000000)
        cap.release()
        config.draft_meta_data_n["draft_materials"][0]["value"].append(
            {
                "extra_info": video_name,
                "file_Path": video_path,
                "metetype": 'video',
                "id": str(uuid.uuid1()),
                'duration': duration,
                'width': frame_width,
                'height': frame_height
            }
        )

        video_material_id = str(uuid.uuid3(namespace=uuid.NAMESPACE_DNS, name=video_name + "n_material"))
        video_track_id = str(uuid.uuid3(namespace=uuid.NAMESPACE_DNS, name=video_name + "n_track"))

        effect_ = random.sample(config.jy_effect_list, 1)[0]
        effect_name = effect_['name']
        effect_resource_id = effect_['resource_id']
        effect_id = effect_['effect_id']
        effect_material_id = str(uuid.uuid3(namespace=uuid.NAMESPACE_DNS, name=video_name + "n_effect_material_id"))

        transitions_name = '粒子'
        transitions_resource_id = '6855565313715474952'
        transitions_id = '4212632'
        transitions_material_id = str(
            uuid.uuid3(namespace=uuid.NAMESPACE_DNS, name=video_name + "n_transitions_material_id"))

        config.draft_content_data_n['materials']['videos'].append(
            {
                "category_name": "local",
                "extra_type_option": 0,
                "has_audio": True,
                "id": video_material_id,
                "material_name": video_name,
                "path": video_path,
                "type": "video"
            }
        )
        config.draft_content_data_n['tracks'][0]['segments'].append(
            {
                "id": video_track_id,
                "material_id": video_material_id,
                "visible": True,
                "volume": 1,
                "source_timerange": {
                    "duration": duration,
                    "start": 0
                },
                "target_timerange": {
                    "duration": duration,
                    "start": 0
                },
                'extra_material_refs': [effect_material_id, transitions_material_id]
            }
        )

        if t_ >= 3.5 * 1000000:
            logger.info(
                f'{index_item} --- {audio_clip.duration} - {item.duration.ordinal / 1000} = {t_ / 1000000} {effect_name}')
            if t_ / 1000000 / 2 > 1.1:
                config.draft_content_data_n['materials']['video_effects'].append(
                    {
                        "effect_id": effect_id,
                        "id": effect_material_id,
                        "name": effect_name,
                        "render_index": 11000,
                        "resource_id": effect_resource_id,
                        "time_range": {
                            "duration": t_ + 800000,
                            "start": duration - t_ - 800000
                        },
                        "track_render_index": 0,
                        "type": "video_effect",
                        "value": 1.0,
                        "version": ""
                    }
                )

            config.draft_content_data_n['materials']['transitions'].append(
                {
                    "effect_id": transitions_id,
                    "id": transitions_material_id,
                    "name": transitions_name,
                    "is_overlap": True,
                    "resource_id": transitions_resource_id,
                    'duration': t_ * 2,
                    "type": "transition",
                }
            )
    with open(f'{draft_path}/无原音_{config.original_video_name}/draft_meta_info.json', 'w', encoding='utf-8') as f:
        json.dump(config.draft_meta_data_n, f, ensure_ascii=False)
    with open(f'{draft_path}/无原音_{config.original_video_name}/draft_content.json', 'w', encoding='utf-8') as f:
        json.dump(config.draft_content_data_n, f, ensure_ascii=False)

    logger.info(f'已写入剪映草稿，打开或重启剪映可以开始编辑！！')


def cut_mute_t(in_path, out_path):
    """Copy the audio at *in_path* to *out_path*, trimming trailing silence.

    Clips of 1.5 s or less are copied unchanged. Longer clips are scanned
    backwards (every 10th sample) for the last left-channel amplitude above a
    small threshold, and the clip is cut there.

    Args:
        in_path: source audio file path.
        out_path: destination audio file path.
    """
    audio_clip = AudioFileClip(filename=in_path)
    if audio_clip.duration > 1.5:
        # assumes a 2-channel sound array (a=left, b=right) — TODO confirm
        samples = audio_clip.to_soundarray()
        end = 0
        for i in range(len(samples) - 1, 0, -10):
            a, b = samples[i]
            if a > 0.00015:
                end = round(i / audio_clip.fps, 2)
                break
        if end > 0:
            audio_clip.subclip(0, end).write_audiofile(filename=out_path, logger=None)
        else:
            # no sample above the threshold: keep the clip untouched instead
            # of writing a zero-length subclip(0, 0)
            audio_clip.write_audiofile(filename=out_path, logger=None)
    else:
        audio_clip.write_audiofile(filename=out_path, logger=None)


def _rewrite_content(content):
    """Rewrite *content* with the configured engine.

    change_type 1: zh->en translation; 3: zh->en->zh round trip;
    2 and any other value: Ali LLM rewrite.
    """
    if config.change_type == 1:
        return trs_to_e(content)
    if config.change_type == 3:
        return trs_to_c(trs_to_e(content))
    return change_text_with_ali(content)


def _make_voice(content_c, index):
    """Synthesize *content_c* into 改写/wav_t/{index}_t.wav (local service when
    voice_type==2, edge-tts when voice_type==1), then strip trailing silence
    into 改写/wav/{index}.wav."""
    if config.voice_type == 2:
        get_voice_local(text=content_c, file_name=f'{index}_t.wav', role=config.role_dict['name'],
                        speed=config.role_dict['speed'])
    if config.voice_type == 1:
        asyncio.run(
            get_voice_edge(text=content_c, file_name=f'{index}_t.wav', role=config.role_dict['name'],
                           speed=config.role_dict['speed']))
    cut_mute_t(f'{config.base_path}改写/wav_t/{index}_t.wav',
               f'{config.base_path}改写/wav/{index}.wav')


def change_srt_audio(mask, subtitle, audio, item, item2):
    """Rewrite one subtitle item and produce its matching audio clip.

    Speaker-1 items get rewritten text plus synthesized speech; speaker-2
    items keep the original audio, sliced out of the full track. The appended
    subtitle timings absorb the dubbed audio's extra length into the gap
    before the next item.

    Args:
        mask: '<speaker of item>-<speaker of item2>' tag, or '<speaker>-0'
            when *item* is the last subtitle.
        subtitle: output list of pysrt.SubRipItem to append to.
        audio: AudioFileClip of the full original track (speaker-2 slices).
        item: current subtitle item.
        item2: following subtitle item (unused for the '-0' masks).
    """
    if mask == '说话人 1: -说话人 1: ' or mask == '说话人 1: -说话人 2: ':
        content = item.text.replace('说话人 1: ', '')
        content_c = _rewrite_content(content)
        _make_voice(content_c, item.index)
        audio_clip = AudioFileClip(filename=f'{config.base_path}改写/wav/{item.index}.wav')

        _x = (item2.start.ordinal - item.end.ordinal) / 1000  # gap between the two srt items (s)
        _y = audio_clip.duration - item.duration.ordinal / 1000  # new audio minus original segment (s)

        if _y < 0:
            # new audio is shorter: keep the original timing
            subtitle.append(pysrt.SubRipItem(
                index=item.index,
                start=item.start.ordinal,  # start time (ms)
                end=item.end.ordinal,  # end time (ms)
                text=f'说话人 1: {content_c}'
            ))
            return
        if _y < _x:
            # overrun fits into the gap: extend the item by the overrun
            subtitle.append(pysrt.SubRipItem(
                index=item.index,
                start=item.start.ordinal,  # start time (ms)
                end=int(item.end.ordinal + _y * 1000),  # end time (ms)
                text=f'说话人 1: {content_c}'
            ))

            if _x - _y > 1:
                # more than 1 s of gap left: fill it with original-track audio
                subtitle.append(pysrt.SubRipItem(
                    index=f'{item.index}_a',
                    start=int(item.end.ordinal + _y * 1000),  # start time (ms)
                    end=item2.start.ordinal,  # end time (ms)
                    text=f'说话人 2: System fills in automatically'
                ))
                audio.subclip((item.end.ordinal + _y * 1000) / 1000, item2.start.ordinal / 1000).write_audiofile(
                    filename=f'{config.base_path}改写/wav/{item.index}_a.wav', logger=None)
        else:
            # overrun exceeds the gap: stretch the item up to the next one
            subtitle.append(pysrt.SubRipItem(
                index=item.index,
                start=item.start.ordinal,  # start time (ms)
                end=item2.start.ordinal,  # end time (ms)
                text=f'说话人 1: {content_c}'
            ))
    if mask == '说话人 2: -说话人 2: ' or mask == '说话人 2: -说话人 1: ':
        subtitle.append(pysrt.SubRipItem(
            index=item.index,
            start=item.start.ordinal,  # start time (ms)
            end=item2.start.ordinal,  # end time (ms)
            text=f'说话人 2: System fills in automatically'
        ))
        audio.subclip(item.start.ordinal / 1000, item2.start.ordinal / 1000).write_audiofile(
            filename=f'{config.base_path}改写/wav/{item.index}.wav', logger=None)
    if mask == '说话人 1: -0':
        content = item.text.replace('说话人 1: ', '')
        content_c = _rewrite_content(content)
        _make_voice(content_c, item.index)
        subtitle.append(pysrt.SubRipItem(
            index=item.index,
            start=item.start.ordinal,  # start time (ms)
            end=item.end.ordinal,  # end time (ms)
            text=f'说话人 1: {content_c}'
        ))
    if mask == '说话人 2: -0':
        subtitle.append(pysrt.SubRipItem(
            index=item.index,
            start=item.start.ordinal,  # start time (ms)
            # bug fix: previously item.start.ordinal, which produced a
            # zero-length subtitle while its audio spans start -> end
            end=item.end.ordinal,  # end time (ms)
            text=f'说话人 2: System fills in automatically'
        ))
        audio.subclip(item.start.ordinal / 1000, item.end.ordinal / 1000).write_audiofile(
            filename=f'{config.base_path}改写/wav/{item.index}.wav', logger=None)
