import os
import random
import time

from loguru import logger
from moviepy.editor import *
import pysrt
import check
import config
from tqdm import tqdm
import cv2
import tools
import asyncio
import uuid
import json


def choose_index(index):
    """Dispatch one pipeline action by its menu number.

    Semantics visible from the branches below:
      6    -> extract the mp3 track from the original video, then stop.
      7    -> mask burned-in subtitles, then stop.
      8    -> run the ad-hoc `test` voice-generation pass, then stop.
      1..3 -> run the staged pipeline (role fix -> srt+audio -> video cut);
              each stage internally no-ops when `index` is past it.

    All actions except 6 first validate the original srt file.
    """
    if index == 6:
        get_mp3_from_video()
        return
    check.check_original_srt()

    if index == 7:
        tools.mask_subtitles()
        return
    if index == 8:
        test()
        # FIX: previously fell through into the pipeline stages below.
        # They all early-return for index > 3, so no harm was done, but the
        # fall-through was accidental; stop explicitly like the other
        # terminal branches.
        return

    change_role(index)
    change_srt_and_audio(index)
    video_cut(index)


def get_mp3_from_video():
    """Extract the audio track of the original video into an mp3 via ffmpeg.

    Writes 原始/原视频/<name>.mp3 under the configured base path,
    overwriting any previous file (-y).
    """
    target = f'{config.base_path}原始/原视频/{config.original_video_name}.mp3'
    command = (
        f'ffmpeg -i "{config.original_video_file}" '
        f'-vn -codec:a libmp3lame -q:a 0 -y "{target}"'
    )
    tools.run_cmd(command, show_banner=False)
    logger.info(f'分离音频成功！ {target}')


def change_role(index):
    """Step 1: normalize speaker labels in the original srt.

    Only runs for index <= 1. Asks interactively whether the video opens
    with the original audio (1) or with commentary (2), then rewrites every
    cue so the labels are consistent ('说话人 1' = commentary voice,
    '说话人 2' = original audio), pinning the first cue's start to 0 ms.
    Saves the result to 改写/srt_c/<name>_c.srt.
    """
    if index > 1:
        return
    logger.info('第1步！ -----------开始处理srt角色----------')

    is_s = int(input('请输入序号：（视频开头是原音还是解说）\n 1 开头是原音 \n 2 开头是解说\n '))

    srt_data = pysrt.open(config.original_srt_file)
    subtitle = pysrt.SubRipFile()
    for item in tqdm(srt_data.data):
        content = item.text
        # Force the first cue to start at t=0 so the rewritten track
        # begins at the top of the video.
        start = 0 if item.index == 1 else item.start.ordinal
        end = item.end.ordinal

        if is_s == 1:
            # Opening is original audio: swap the two speaker labels.
            if content.startswith('说话人 2: '):
                text = f"说话人 1: {content.replace('说话人 2: ', '')}"
            else:
                # content[7:] strips the 7-char '说话人 X: ' prefix.
                text = '说话人 2: ' + content[7:]
        else:
            # Opening is commentary: keep speaker 1, relabel everything else.
            if content.startswith('说话人 1: '):
                text = content
            else:
                text = '说话人 2: ' + content[7:]

        # FIX: the original is_s == 1 path ignored the normalized start/end
        # for cues not starting with '说话人 2: ' (it reused
        # item.start.ordinal), so a first cue of that kind kept a non-zero
        # start — inconsistent with the is_s == 2 branch. Use the
        # normalized values for every cue.
        subtitle.append(pysrt.SubRipItem(
            index=item.index,
            start=start,  # start time (ms)
            end=end,  # end time (ms)
            text=text,
        ))

    subtitle.save(f'{config.base_path}改写/srt_c/{config.original_srt_name}_c.srt')
    logger.warning('第1步！ -----------开始处理srt角色   完成！')


# def change_srt(index):
#     if index > 2:
#         return
#     logger.info('第2步！  -----------开始改写文案----------')
#     srt_data = pysrt.open(f'{config.base_path}改写/srt_c/{config.original_srt_name}_c.srt')
#     subtitle = pysrt.SubRipFile()
#     for item in tqdm(srt_data.data):
#         content = item.text
#         if content.startswith('说话人 1: '):
#             content = content.replace('说话人 1: ', '')
#             if config.change_type == 1:
#                 content_gpt = tools.trs_to_e(content)
#             elif config.change_type == 2:
#                 content_gpt = tools.change_text_with_ali(content)
#             elif config.change_type == 3:
#                 content_gpt = tools.trs_to_c(tools.trs_to_e(content))
#             else:
#                 content_gpt = tools.change_text_with_ali(content)
#
#             subtitle.append(pysrt.SubRipItem(
#                 index=item.index,
#                 start=item.start.ordinal,  # 开始时间（毫秒）
#                 end=item.end.ordinal,  # 结束时间（毫秒）
#                 text=f'说话人 1: {content_gpt}'
#             ))
#         else:
#             subtitle.append(pysrt.SubRipItem(
#                 index=item.index,
#                 start=item.start.ordinal,  # 开始时间（毫秒）
#                 end=item.end.ordinal,  # 结束时间（毫秒）
#                 text=content
#             ))
#     subtitle.save(f'{config.base_path}改写/srt_gpt/{config.original_srt_name}_gpt.srt')
#     logger.warning('第2步！  -----------开始改写文案  完成！')
#
#
# def generate_audio(index):
#     if index > 3:
#         return
#     tools.delete_files_in_folder(f'{config.base_path}改写/wav')
#     tools.delete_files_in_folder(f'{config.base_path}改写/wav_t')
#     logger.info('第3步！ -----------开始生成新的音频片段----------')
#     srt_data = tools.get_srt_content()
#     for item in tqdm(srt_data):
#         content = item.text
#         if content.startswith('说话人 1: '):
#             content = content.replace('说话人 1: ', '')
#             if config.voice_type == 2:
#                 tools.get_voice_local(text=content, file_name=f'{item.index}_t.wav', role=config.role_dict['name'],
#                                       speed=config.role_dict['speed'])
#             if config.voice_type == 1:
#                 asyncio.run(
#                     tools.get_voice_edge(text=content, file_name=f'{item.index}_t.wav', role=config.role_dict['name'],
#                                          speed=config.role_dict['speed']))
#
#     logger.warning('第3步！ -----------开始生成新的音频片段  完成！')

def change_srt_and_audio(index):
    """Step 2: rewrite the srt and generate speech for each cue.

    Only runs for index <= 2. Lets the user pick a voice role, extracts the
    original audio, clears the wav work folders, then walks the
    role-corrected srt pairwise: each cue is handed to
    tools.change_srt_audio together with its successor (None for the last
    cue) and a mask of the two 7-char speaker prefixes ('说话人 X: ').
    Saves the rebuilt srt to 改写/srt_gpt/<name>_gpt.srt.
    """
    if index > 2:
        return
    tools.choose_role_list()
    logger.info('第2步！ -----------开始处理srt 并生成语音----------')
    get_mp3_from_video()
    srt_data = pysrt.open(f'{config.base_path}改写/srt_c/{config.original_srt_name}_c.srt')
    subtitle = pysrt.SubRipFile()
    audio = AudioFileClip(filename=f'{config.base_path}原始/原视频/{config.original_video_name}.mp3')
    tools.delete_files_in_folder(f'{config.base_path}改写/wav')
    tools.delete_files_in_folder(f'{config.base_path}改写/wav_t')
    # FIX: the loop variable used to shadow the `index` parameter; renamed
    # to `pos` (harmless today — the parameter is only read before the
    # loop — but a trap for future edits).
    for pos, item in enumerate(tqdm(srt_data)):
        speaker = item.text[:7]  # 7-char '说话人 X: ' prefix
        if pos + 1 == len(srt_data):
            # Last cue has no successor; the mask encodes that as '0'.
            tools.change_srt_audio(mask=f'{speaker}-0', audio=audio,
                                   subtitle=subtitle, item=item, item2=None)
            break
        nxt = srt_data[pos + 1]
        tools.change_srt_audio(mask=f'{speaker}-{nxt.text[:7]}', audio=audio,
                               subtitle=subtitle, item=item, item2=nxt)
    subtitle.save(f'{config.base_path}改写/srt_gpt/{config.original_srt_name}_gpt.srt')


# def cute_mute(index):
#     if index > 4:
#         return
#     get_mp3_from_video()
#     logger.info('第4步！  -----------开始检查音频----------')
#     srt_data = tools.get_srt_content()
#     audio = AudioFileClip(filename=f'{config.base_path}原始/原视频/{config.original_video_name}.mp3')
#     for item in tqdm(srt_data):
#         content = item.text
#         if content.startswith('说话人 1: '):
#             if os.path.exists(f'{config.base_path}改写/wav_t/{item.index}_t.wav'):
#                 audio_clip = AudioFileClip(filename=f'{config.base_path}改写/wav_t/{item.index}_t.wav')
#                 if audio_clip.duration > 1.5:
#                     f = audio_clip.to_soundarray()
#                     f_l = len(f)
#                     end = 0
#                     for i in range(f_l - 1, 0, -10):
#                         a, b = f[i]
#                         if a > 0.00015:
#                             end = round(i / audio_clip.fps, 2)
#                             break
#                     audio_clip_2 = audio_clip.subclip(0, end)
#                     audio_clip_2.write_audiofile(filename=f'{config.base_path}改写/wav/{item.index}.wav', logger=None)
#                 else:
#                     audio_clip.write_audiofile(filename=f'{config.base_path}改写/wav/{item.index}.wav', logger=None)
#         else:
#             audio.subclip(item.start.ordinal / 1000, item.end.ordinal / 1000).write_audiofile(
#                 filename=f'{config.base_path}改写/wav/{item.index}.wav', logger=None)
#
#     logger.warning('第4步！  -----------检查音频  完成！')


def video_cut_old(index):
    """Legacy step 5 (moviepy path), superseded by video_cut.

    For every srt cue, cuts the matching subclip of the original video.
    Commentary cues ('说话人 1: ') are re-voiced with the generated wav; when
    the new audio outruns the cue the clip is padded by zoom (> 3 s overrun),
    freeze-frame fade-out (1.3–3 s) or slow-down (< 1.3 s). Writes one video
    with the original-audio cues interleaved and one with them removed.
    """
    if index > 5:
        return
    logger.info('第5步！ -----------开始合成新的视频----------')
    tools.delete_files_in_folder(f'{config.base_path}改写/temp')
    clip = VideoFileClip(config.original_video_file)
    sp2 = tools.get_srt_content()
    clips = []
    for item in tqdm(sp2):
        index_item = item.index
        content = item.text
        # .ordinal is milliseconds; moviepy wants seconds.
        subclip_t = clip.subclip(item.start.ordinal / 1000, item.end.ordinal / 1000)
        if content.startswith('说话人 1: '):

            audio_clip = AudioFileClip(filename=f'{config.base_path}/改写/wav/{index_item}.wav')
            if audio_clip.duration > item.duration.ordinal / 1000:
                # t_ = seconds by which the generated voice outruns the cue.
                t_ = round(audio_clip.duration - item.duration.ordinal / 1000, 2)
                # subclip = tools.add_zoom_effect(subclip_t, t_, index_item)
                if t_ > 3:
                    # Large overrun: extend with a zoom effect.
                    # logger.info(f"Zoom-in  :{item.index}")
                    subclip = tools.add_zoom_effect(subclip_t, t_, index_item)
                if 1.3 <= t_ <= 3:
                    # Medium overrun: append a fading freeze-frame of the last frame.
                    # logger.info(f"fadeout  :{item.index}")
                    new_clip = ImageClip(subclip_t.get_frame(t=subclip_t.duration), duration=t_).fadeout(t_)
                    subclip = concatenate_videoclips([subclip_t, new_clip])
                if t_ < 1.3:
                    # Small overrun: slow the clip down to match the audio length.
                    # logger.info(f"speedx  :{item.index}")
                    subclip = subclip_t.fx(vfx.speedx, round((item.duration.ordinal / 1000) / audio_clip.duration, 2))
            else:
                subclip = subclip_t
            f_clip = subclip.set_audio(audio_clip)
        else:
            # Original-audio cue: keep the subclip untouched.
            f_clip = subclip_t
        clips.append(f_clip)
    clips_t = concatenate_videoclips(clips)
    clips_t.write_videofile(f'{config.base_path}改写/mp4/有原音_{config.original_video_name}.mp4', threads=4, fps=24)
    # clips_t.close()
    logger.info(f' ------有原音_{config.original_video_name}.mp4 完成')
    # Second pass: drop every original-audio ('说话人 2') clip and render the
    # commentary-only video.
    # NOTE(review): item.index - 1 assumes srt indices are 1-based and
    # contiguous, matching positions in `clips` — verify for gapped srt files.
    indices_to_delete = []
    for item in sp2:
        if item.text.startswith('说话人 2: '):
            indices_to_delete.append(item.index - 1)
    # Delete from the back so earlier positions stay valid.
    indices_to_delete.sort(reverse=True)

    for item in indices_to_delete:
        del clips[item]
    clips_y = concatenate_videoclips(clips)
    clips_y.write_videofile(f'{config.base_path}改写/mp4/无原音_{config.original_video_name}.mp4', threads=4, fps=24)
    logger.info(f' ------无原音_{config.original_video_name}.mp4 完成')
    tools.delete_files_in_folder(f'{config.base_path}改写/temp')
    logger.warning('第5步！ -----------开始合成新的视频 完成')


def video_cut(index):
    """Step 3: cut the source video into one segment per srt cue.

    Only runs for index <= 3. Clears the temp folder, then for each cue
    hands the OpenCV capture plus the cue's timing (seconds) and text to
    tools.cut_video_and_freeze, and finally passes the cue list to
    tools.add_to_jy for assembly.
    """
    if index > 3:
        return
    logger.info('第3步！ -----------开始合成新的视频----------')

    tools.delete_files_in_folder(f'{config.base_path}改写/temp')
    capture = cv2.VideoCapture(f'{config.original_video_file}')
    cues = tools.get_srt_content()
    for cue in tqdm(cues):
        # .ordinal is milliseconds; the cutter expects seconds.
        tools.cut_video_and_freeze(
            cap=capture,
            start_time=cue.start.ordinal / 1000,
            end_time=cue.end.ordinal / 1000,
            index=cue.index,
            duration=cue.duration.ordinal / 1000,
            content=cue.text,
        )
    capture.release()

    tools.add_to_jy(cues)


def test():
    """Ad-hoc TTS pass: synthesize every srt cue with an edge-tts voice.

    Cues whose speaker label (text before the first ':') is '梁朝伟' get the
    Xiaoyi voice; everything else gets Yunxi, both at +10% speed. Each
    generated wav in 改写/wav_t/ is then silence-trimmed into 改写/wav/.
    Both work folders are cleared first.
    """
    tools.delete_files_in_folder(f'{config.base_path}改写/wav_t/')
    tools.delete_files_in_folder(f'{config.base_path}改写/wav/')
    srt_data = pysrt.open(f'{config.original_srt_file}')
    for item in tqdm(srt_data):
        # FIX: the unbounded split(':') truncated the spoken line at the
        # second colon (only content[1] was kept); partition keeps the
        # full remainder of the text.
        speaker, _, speech = item.text.partition(':')
        if speaker == '梁朝伟':
            role = 'zh-CN-XiaoyiNeural'
        else:
            role = 'zh-CN-YunxiNeural'
        asyncio.run(
            tools.get_voice_edge(text=speech, file_name=f'{item.index}_t.wav',
                                 role=role, speed='+10%'))
        tools.cut_mute_t(f'{config.base_path}改写/wav_t/{item.index}_t.wav',
                         f'{config.base_path}改写/wav/{item.index}.wav')