# This is a sample Python script.
import os

# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.


from loguru import logger
import pysrt
import azure.cognitiveservices.speech as speechsdk
import subprocess as sp
from moviepy.editor import *
from openai import OpenAI
from tqdm import tqdm
import requests
import random
import dashscope
from hashlib import md5
import edge_tts
import asyncio
from http import HTTPStatus

# Working directory of the tool; the project folders (原始/, 改写/) live under it.
base_path = f'{os.getcwd()}/'
# Version string shown in the interactive menu.
v = '3.0'
# Selectable edge-tts voices: 'Name' is the edge-tts voice id,
# 'dec' is the human-readable menu label (Chinese).
voices_role_list = ({'Name': 'zh-CN-XiaoxiaoNeural', 'dec': '晓晓-女'},
                    {'Name': 'zh-CN-XiaoyiNeural', 'dec': '晓怡-女'},
                    {'Name': 'zh-CN-YunjianNeural', 'dec': '云健-男'},
                    {'Name': 'zh-CN-YunxiNeural', 'dec': '云希-男'},
                    {'Name': 'zh-CN-YunxiaNeural', 'dec': '云夏-男'},
                    {'Name': 'zh-CN-YunyangNeural', 'dec': '云阳-男'},
                    {'Name': 'es-US-AlonsoNeural', 'dec': 'AlonsoNeural-Male（勿选）'},
                    )


def text_op(op=1, ssml=None):
    """Operate on the rewritten subtitle file under {base_path}改写.

    op=1: for every item spoken by '说话人 1', synthesize a speech clip via
          edge-tts (voice name in *ssml*) and save it as 改写/wav/<index>-t.wav.
    op=2: return the parsed subtitle items without side effects.
    """
    file_name = get_file_name(f'{base_path}改写', 'srt')
    srt_data = pysrt.open(f'{base_path}改写/{file_name}.srt')
    if op == 1:
        # makedirs(exist_ok=True) replaces the racy exists()+mkdir() pair.
        os.makedirs(f'{base_path}改写/wav', exist_ok=True)
        for item in tqdm(srt_data.data):
            content = item.text
            if content.startswith('说话人 1: '):
                content = content.replace('说话人 1: ', '')
                asyncio.run(get_voice_edge(text=content, file_name=f'{item.index}-t.wav', role=ssml))

        logger.info('--------生成语音 ok！---------')

    if op == 2:
        return srt_data.data


def get_voice(text, file_name, ssml):
    """Synthesize *text* through Azure Speech using an SSML template.

    ssml: SSML string containing the placeholder 'xxxxx', which is replaced
        by *text* before synthesis.
    The audio is saved to {base_path}改写/wav/{file_name} as 24 kHz 16-bit
    mono RIFF WAV. Raises Exception when the service cancels synthesis.
    """
    # Creates an instance of a speech config with specified subscription key and service region.
    # SECURITY: hard-coded subscription key — move to an environment variable / config.
    speech_key = "5dcf3bcf6329444291ede5a68cec1791"
    service_region = "eastus"

    speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)

    # Note: the voice setting will not overwrite the voice element in input SSML.
    speech_config.speech_synthesis_voice_name = "zh-CN-YunhaoNeural"
    speech_config.set_speech_synthesis_output_format(speechsdk.SpeechSynthesisOutputFormat.Riff24Khz16BitMonoPcm)

    # use the default speaker as audio output.
    speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=None)
    # Inject the caller's text into the SSML template and synthesize it.
    ssml_s = ssml.replace('xxxxx', text)
    result = speech_synthesizer.speak_ssml_async(ssml_s).get()

    # Check result
    if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
        stream = speechsdk.AudioDataStream(result)
        stream.save_to_wav_file(f'{base_path}改写/wav/{file_name}')

    elif result.reason == speechsdk.ResultReason.Canceled:
        cancellation_details = result.cancellation_details
        logger.warning("Speech synthesis canceled: {}".format(cancellation_details.reason))
        if cancellation_details.reason == speechsdk.CancellationReason.Error:
            logger.warning("Speech synthesis canceled: {}".format(cancellation_details.error_details))
            raise Exception(cancellation_details.error_details)

        # Reached only for non-error cancellations (the error path raised above).
        raise Exception(f'{cancellation_details.reason} - {cancellation_details.error_details}')


async def get_voice_edge(text, file_name, role):
    """Synthesize *text* with edge-tts (voice *role*, rate +20%) into 改写/wav/<file_name>."""
    target = f'{base_path}改写/wav/{file_name}'
    tts = edge_tts.Communicate(text, role, rate='+20%')
    await tts.save(target)


def t2s(t):
    """Parse a 'HH:MM:SS,mmm -> HH:MM:SS,mmm' range into (start, end) whole seconds.

    Milliseconds are truncated (everything after the comma is discarded).
    """
    def to_seconds(stamp):
        hours, minutes, seconds = stamp.strip().split(':')
        return int(hours) * 3600 + int(minutes) * 60 + int(seconds.strip().split(",")[0])

    start_raw, end_raw = t.strip().split('->')
    return to_seconds(start_raw), to_seconds(end_raw)


def volume0():
    """Mute the narrator ('说话人 1') intervals of the extracted audio track.

    Builds an ffmpeg "between(t,a,b)+..." enable-expression from the speaker
    transitions in the subtitle data, then runs ffmpeg with volume=0 over
    those intervals. Output: 改写/消音_<name>.mp3.
    """
    get_mp3_from_video()
    logger.warning('获取原音 ok!')
    sp2 = text_op(2, None)
    between = ''
    # start_s / end_s are leftovers from an earlier implementation; unused.
    start_s = None
    end_s = None
    # Current speaker prefix; every change of speaker marks an interval edge.
    state = '说话人 2: '
    ss = []
    for item in tqdm(sp2):
        index_item = item.index
        content = item.text
        # First 7 characters are exactly the '说话人 N: ' prefix.
        content1 = content[:7]

        if not content1 == state:
            # Speaker changed: record the boundary timestamp in seconds.
            state = content1
            ss.append(f'{item.start.ordinal / 1000}')
        if len(ss) == 2:
            # Two boundaries complete one narrator interval to mute.
            str1 = f'between(t,{ss[0]},{ss[1]})'
            between = between + '+' + str1
            ss.clear()
        if index_item == len(sp2) and state == '说话人 1: ':
            # Subtitles end while the narrator is speaking: close the final
            # interval at the full video duration.
            video_name = get_file_name(f'{base_path}原始/原视频', 'mp4')
            str1 = f'between(t,{ss[0]},{VideoFileClip(f"{base_path}原始/原视频/{video_name}.mp4").duration})'
            between = between + '+' + str1
    # Drop the leading '+' added by the first interval.
    between = between[1:]
    audio_name = get_file_name(f'{base_path}原始/原视频', 'mp3')
    current_path = os.getcwd()
    # NOTE(review): backslash paths — this command assumes Windows; confirm.
    cmd = f'''ffmpeg -i "{current_path}\\原始\\原视频\\{audio_name}.mp3" -af "volume=enable='{between}':volume=0" -y "{current_path}\\改写\\消音_{audio_name}.mp3" '''
    sp.call(cmd, shell=True)
    logger.warning('------消音完成-------')


def volume1():
    """Mix the speed-adjusted narration clips over the muted base track.

    Writes the composite to 改写/改写_<name>.wav.
    """
    change_speed()
    volume0_audio_name = get_file_name(f'{base_path}改写', 'mp3')

    sp2 = text_op(2, None)
    base_track = AudioFileClip(filename=f'{base_path}改写/{volume0_audio_name}.mp3')
    clips = [base_track]
    for item in tqdm(sp2):
        if not item.text.startswith('说话人 1: '):
            continue
        narration = AudioFileClip(filename=f'{base_path}改写/wav/{item.index}-a.wav').set_start(
            item.start.ordinal / 1000)
        clips.append(narration)
    composite = CompositeAudioClip(clips=clips)
    composite.write_audiofile(f'{base_path}改写/改写_{volume0_audio_name}.wav')
    logger.warning('-------修改音频完成 -------')


def add_audio():
    """Attach the rewritten audio track to the original video and export the final MP4."""
    video_name = get_file_name(f'{base_path}原始/原视频/', 'mp4')
    audio_name = get_file_name(f'{base_path}改写', 'wav')

    source_video = VideoFileClip(f'{base_path}原始/原视频/{video_name}.mp4')
    new_audio = AudioFileClip(f'{base_path}改写/{audio_name}.wav')
    result = source_video.set_audio(new_audio)
    result.write_videofile(filename=f'{base_path}改写/改写_{video_name}.mp4')
    logger.warning('-------恭喜 完成！！！！ -------')


def change_speed():
    """Time-stretch each narration clip so it fits its subtitle time slot.

    For every '说话人 1' item, the ratio clip_duration / slot_duration is
    computed and clamped to a usable range, the clip is resampled by that
    factor, and the result saved as <index>-a.wav. A zero-length slot just
    gets the clip renamed.
    """
    for item in text_op(2, None):
        if not item.text.startswith('说话人 1: '):
            continue
        src = f'{base_path}改写/wav/{item.index}.wav'
        dst = f'{base_path}改写/wav/{item.index}-a.wav'
        if item.duration.ordinal == 0:
            os.rename(src, dst)
            continue
        clip = AudioFileClip(filename=src)
        speed = round(clip.duration / (item.duration.ordinal / 1000), 2)
        # Clamp extreme factors so the voice stays intelligible.
        if speed > 1.9:
            speed = 1.5
        if speed < 0.6:
            speed = 0.8
        stretched = clip.fl_time(lambda t: speed * t, apply_to=['mask', 'audio'])
        stretched = stretched.set_duration(clip.duration / speed)
        stretched.write_audiofile(filename=dst)


def get_role_list():
    """Print the selectable TTS voices as a 1-based numbered menu."""
    for position, role in enumerate(voices_role_list, start=1):
        logger.warning(f'{position}: {role["dec"]}')


def choose_role(index_role):
    """Map a 1-based menu selection to the corresponding edge-tts voice name."""
    selected = voices_role_list[index_role - 1]
    return selected['Name']


def change_text_with_gpt(text, _retries=3):
    """Query the OpenAI-compatible endpoint and return the first reply text.

    Retries up to *_retries* times on any error; returns None when every
    attempt fails. NOTE(review): the prompt below is hard-coded and ignores
    *text* — it looks like leftover debug code; confirm the intended prompt.
    """
    try:
        # SECURITY: hard-coded API key — move to an environment variable.
        key = 'sk-IpPCbO4zyxEkVL88E8Bc577f68F5421b939e677081C53208'
        OpenAI.api_timeout = 60
        client = OpenAI(base_url='https://api.chatgptid.net/v1',
                        api_key=key)
        message = f'用python写一段代码，要求：使用Moviepy实现淡入淡出效果。'
        completion = client.chat.completions.create(model='gpt-3.5-turbo',
                                                    messages=[{"role": "user", "content": message}])
        all_content = [choice.message.content for choice in completion.choices]
        logger.info(all_content[0])
        return all_content[0]
    except Exception as e:
        logger.warning(e)
        # Bug fix: the original recursed without returning the result (always
        # yielding None) and without any bound, risking infinite recursion.
        if _retries > 0:
            return change_text_with_gpt(text, _retries - 1)
        return None


def change_text_with_ali(text):
    """Rewrite *text* via DashScope qwen-turbo (same character count requested).

    Returns the rewritten text, or '' when the API call does not succeed.
    """
    response = dashscope.Generation.call(model='qwen-turbo',
                                         prompt=f'你好，改写：“{text}” 这句话。要求改写后必须字数一样。',
                                         api_key='sk-efd474b127814c80989073bc1e2adf2b')
    if response.status_code != HTTPStatus.OK:
        return ''
    return response.output['text']


def trs_to_e(text):
    """Translate *text* zh→en via the Baidu translate API.

    Returns the first translated string. Raises requests exceptions on
    network failure/timeouts and KeyError if the API returns an error payload.
    """
    # SECURITY: hard-coded API credentials — move to env vars / config.
    appid = '20240403002013842'
    appkey = 'uuFg51ktnuJWzGIQ01Q5'
    salt = random.randint(32768, 65536)
    # Baidu signature: MD5 of appid + query + salt + appkey.
    sign = md5(f'{appid}{text}{salt}{appkey}'.encode('utf-8')).hexdigest()
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    payload = {'appid': appid, 'q': text, 'from': 'zh', 'to': 'en', 'salt': salt, 'sign': sign}
    # Bug fix: requests has no default timeout — a stalled connection would
    # hang the whole pipeline forever.
    r = requests.post('http://api.fanyi.baidu.com/api/trans/vip/translate',
                      params=payload, headers=headers, timeout=30)
    result = r.json()
    return result['trans_result'][0]['dst']


def change_srt():
    """Rewrite every narrator ('说话人 1') line of the source SRT via the Ali LLM.

    Reads 原始/原srt/<name>.srt, replaces each narrator line's text with the
    rewritten version (other speakers pass through unchanged), and writes
    the result to 改写/<name>_gpt.srt.
    """
    file_name = get_file_name(f'{base_path}原始/原srt', 'srt')
    out_path = f'{base_path}改写/{file_name}_gpt.srt'
    srt_data = pysrt.open(f'{base_path}原始/原srt/{file_name}.srt')
    subtitle = pysrt.SubRipFile()
    for item in tqdm(srt_data.data):
        content = item.text
        if content.startswith('说话人 1: '):
            rewritten = change_text_with_ali(content.replace('说话人 1: ', ''))
            new_text = f'说话人 1: {rewritten}'
        else:
            new_text = content
        subtitle.append(pysrt.SubRipItem(
            index=item.index,
            start=item.start.ordinal,  # start time (ms)
            end=item.end.ordinal,  # end time (ms)
            text=new_text
        ))
    subtitle.save(out_path)
    logger.info('-----------改写 完成!----------')


def change_role(is_s):
    """Normalize speaker labels in the source SRT so the narrator is '说话人 1'.

    is_s == 1 (clip opens with original audio): roles are swapped —
    '说话人 2' lines become '说话人 1' and everything else becomes '说话人 2'.
    Any other value: '说话人 1' lines are kept as-is and everything else is
    forced to '说话人 2'. The result is saved as <name>_c.srt and the
    original file is removed; a file already ending in '_c' is considered
    done and the function returns immediately.
    """
    file_name = get_file_name(f'{base_path}原始/原srt', 'srt')
    if file_name.endswith('_c'):
        logger.info('------- 完成角色调整la ----------')
        return
    srt_data = pysrt.open(f'{base_path}原始/原srt/{file_name}.srt')
    subtitle = pysrt.SubRipFile()
    if is_s == 1:
        for item in tqdm(srt_data.data):
            content = item.text
            index_role = item.index
            # Force the very first item to start at 0 ms.
            if index_role == 1:
                start = 0
            else:
                start = item.start.ordinal

            end = item.end.ordinal
            if content.startswith('说话人 2: '):
                content = content.replace('说话人 2: ', '')
                subtitle.append(pysrt.SubRipItem(
                    index=index_role,
                    start=start,  # start time (ms)
                    end=end,  # end time (ms)
                    text=f'说话人 1: {content}'
                ))
            else:
                # NOTE(review): this branch ignores the adjusted `start`
                # above and uses the raw timestamps — confirm intended.
                subtitle.append(pysrt.SubRipItem(
                    index=item.index,
                    start=item.start.ordinal,  # start time (ms)
                    end=item.end.ordinal,  # end time (ms)
                    text='说话人 2: ' + content[7:]
                ))
        subtitle.save(f'{base_path}原始/原srt/{file_name}_c.srt')
        os.remove(f'{base_path}原始/原srt/{file_name}.srt')
    else:
        for item in tqdm(srt_data.data):
            content = item.text
            index_role = item.index
            # NOTE(review): `start`/`end` computed here are never used in
            # this branch — leftover from the branch above.
            if index_role == 1:
                start = 0
            else:
                start = item.start.ordinal
            end = item.end.ordinal
            if content.startswith('说话人 1: '):
                subtitle.append(pysrt.SubRipItem(
                    index=item.index,
                    start=item.start.ordinal,  # start time (ms)
                    end=item.end.ordinal,  # end time (ms)
                    text=item.text
                ))
            else:
                subtitle.append(pysrt.SubRipItem(
                    index=item.index,
                    start=item.start.ordinal,  # start time (ms)
                    end=item.end.ordinal,  # end time (ms)
                    text='说话人 2: ' + content[7:]
                ))

        subtitle.save(f'{base_path}原始/原srt/{file_name}_c.srt')
        os.remove(f'{base_path}原始/原srt/{file_name}.srt')
    logger.info('------- 完成角色调整 ---------- ')


def get_mp3_from_video():
    """Extract the audio track of the source MP4 into an MP3 next to it."""
    in_path = f'{base_path}原始/原视频/'
    file_name = get_file_name(in_path, 'mp4')
    source = VideoFileClip(filename=f'{in_path}{file_name}.mp4')
    source.audio.write_audiofile(filename=f'{in_path}{file_name}.mp3')


def get_file_name(path, file_type):
    """Return the stem (name without extension) of the last file in *path*
    whose extension equals *file_type*, or None when no such file exists.

    "Last" follows os.listdir order, matching the original behavior.
    """
    found = None
    for entry in os.listdir(path):
        stem, ext = os.path.splitext(entry)
        if ext[1:] == file_type:
            found = stem
    return found


def init_dir():
    """Create the working directory layout and verify a source MP4 exists.

    Ensures 改写/wav, 原始/原视频 and 原始/原srt all exist under base_path,
    then raises Exception if no MP4 file has been placed in 原始/原视频.
    """
    # makedirs(exist_ok=True) replaces the original exists()/listdir()
    # checks: it creates any missing parents and is race-free.
    os.makedirs(f'{base_path}改写/wav', exist_ok=True)
    os.makedirs(f'{base_path}原始/原视频', exist_ok=True)
    os.makedirs(f'{base_path}原始/原srt', exist_ok=True)

    video_name = get_file_name(f'{base_path}原始/原视频', 'mp4')
    if not video_name:
        raise Exception('文件夹下面没有MP4格式的视频！')


def cute_mute():
    """Trim trailing silence from each synthesized '-t.wav' narration clip.

    Scans each clip's waveform backwards for the last sample above a small
    amplitude threshold, cuts the clip there, saves it as <index>.wav and
    deletes the temporary '-t.wav' file.
    """
    file_name = get_file_name(f'{base_path}改写', 'srt')
    srt_data = pysrt.open(f'{base_path}改写/{file_name}.srt')
    for item in tqdm(srt_data.data):
        content = item.text
        if content.startswith('说话人 1: '):
            if os.path.exists(f'{base_path}改写/wav/{item.index}-t.wav'):
                audio_clip = AudioFileClip(filename=f'{base_path}改写/wav/{item.index}-t.wav')
                f = audio_clip.to_soundarray()
                f_l = len(f)
                end = 0
                # Walk backwards in steps of 10 samples; stop at the first
                # sample whose amplitude exceeds the silence threshold.
                # NOTE(review): `a, b = f[i]` assumes a 2-channel (stereo)
                # sound array and only channel `a` is tested — confirm.
                for i in range(f_l - 1, 0, -10):
                    a, b = f[i]
                    if a > 0.00015:
                        # Convert sample index to seconds via the sample rate.
                        end = round(i / audio_clip.fps, 2)

                        break
                audio_clip_2 = audio_clip.subclip(0, end)
                audio_clip_2.write_audiofile(filename=f'{base_path}改写/wav/{item.index}.wav')
                audio_clip.close()
                audio_clip_2.close()
                os.remove(f'{base_path}改写/wav/{item.index}-t.wav')


def video_cut():
    """Rebuild the video from subtitle-aligned subclips, slowing narrator
    segments whose synthesized audio runs longer than the original slot.

    NOTE(review): the output name '改写_111.mp4' looks like a leftover
    hard-coded test name — confirm before relying on it.
    """
    video_name = get_file_name(f'{base_path}原始/原视频', 'mp4')
    clip = VideoFileClip(f'{base_path}原始/原视频/{video_name}.mp4')
    sp2 = text_op(2, None)
    clips = []
    for item in tqdm(sp2):
        index_item = item.index
        content = item.text
        if content.startswith('说话人 1: '):
            audio_clip = AudioFileClip(filename=f'{base_path}/改写/wav/{index_item}.wav')
            if audio_clip.duration > item.duration.ordinal / 1000:
                # Audio is longer than its slot: slow the video subclip so
                # the two durations match (factor < 1 slows playback).
                subclip = clip.subclip(item.start.ordinal / 1000, item.end.ordinal / 1000).fx(vfx.speedx, round(
                    (item.duration.ordinal / 1000) / audio_clip.duration, 2))
            else:
                subclip = clip.subclip(item.start.ordinal / 1000, item.end.ordinal / 1000)
            f_clip = subclip.set_audio(audio_clip)
        else:
            # Non-narrator segment: keep the original video and audio.
            f_clip = clip.subclip(item.start.ordinal / 1000, item.end.ordinal / 1000)

        clips.append(f_clip)
    clips_t = concatenate_videoclips(clips)
    clips_t.write_videofile(f'{base_path}改写/改写_111.mp4')


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    try:
        init_dir()
        # Interactive menu: each number runs one stage of the pipeline;
        # 0 runs the whole pipeline end to end.
        index = int(
            input(
                f" ---- v:{v} ----\n 0 全自动\n 1 改写文案\n 2 生成新的语言片段\n 3 原视频消音\n 4 合成新的配音\n 5 合成新的视频\n 6 提取原片音频\n"))

        if index == 1:
            # Stage 1: normalize speaker roles, then rewrite the narration text.
            logger.info('-----------开始处理角色----------')
            is_s = int(input('请输入序号：（视频开头是原音还是解说）\n 1 开头是原音 \n 2 开头是解说\n '))
            change_role(is_s)
            logger.info('-----------开始改写文案----------')
            change_srt()
        if index == 2:
            # Stage 2: synthesize speech clips with the chosen voice.
            get_role_list()
            index_2 = int(input('请输入配音角色序号:'))
            ssml = choose_role(index_2)
            logger.info('-----------开始生成语音----------')
            text_op(1, ssml)
        if index == 3:
            # Stage 3: mute the narrator intervals of the original audio.
            logger.info('-----------开始原视频消音----------')
            volume0()
        if index == 4:
            # Stage 4: mix the new narration onto the muted track.
            logger.info('-----------开始合成新配音----------')
            volume1()
        if index == 5:
            # Stage 5: attach the new audio track to the video.
            logger.info('-----------开始合成新的视频----------')
            add_audio()
        if index == 6:
            # Utility: just extract the original audio as MP3.
            logger.info('-----------开始提取音频----------')
            get_mp3_from_video()
        if index == 7:
            # Unlisted option: redo role normalization only.
            logger.info('-----------开始处理角色----------')
            is_s = int(input('请输入序号：（视频开头是原音还是解说）\n 1 开头是原音 \n 2 开头是解说\n '))
            change_role(is_s)
        if index == 8:
            # Unlisted option: NOTE(review) passes 1 as the text argument —
            # looks like a debug hook for the GPT endpoint; confirm.
            change_text_with_gpt(1)

        if index == 0:
            # Full pipeline: roles -> rewrite -> TTS -> mute -> mix -> mux.
            get_role_list()
            index_2 = int(input('请输入配音角色序号:\n'))
            role_index = choose_role(index_2)
            logger.info('-----------开始处理角色----------')
            is_s = int(input('请输入序号：（视频开头是原音还是解说）\n 1 开头是原音 \n 2 开头是解说\n '))
            change_role(is_s)
            logger.info('1 ---> 开始改写文案----------')
            change_srt()
            logger.info('2 ---> 开始生成语音----------')
            text_op(1, role_index)
            logger.info('3 ---> 开始原视频消音----------')
            volume0()
            logger.info('4 ---> 开始合成新配音----------')
            volume1()
            logger.info('5 ---> 开始合成新的视频----------')
            add_audio()
    except Exception as e:
        # Keep the console window open so the user can read the error.
        input(f'发生以下错误：\n  {e}')
    finally:
        input('回车退出')

# Build command (Windows), with the Azure Speech native DLL bundled:
# pyinstaller -F -c --onefile app.py --add-binary "C:\Users\Administrator\PycharmProjects\modife_v\venv\Lib\site-packages\azure\cognitiveservices\speech\Microsoft.CognitiveServices.Speech.core.dll;."

