import random

import edge_tts
import re, math
import asyncio

from moviepy.audio.io.AudioFileClip import AudioFileClip

from modelscope.pipelines import pipeline
from modelscope import Tasks

# Spoken-language identification pipeline (CAM++ five-language LRE; the model
# id suggests 8 kHz audio — confirm against modelscope docs).
lre_pipeline = pipeline(
    task='speech-language-recognition',
    model='damo/speech_campplus_five_lre_8k',
    model_revision='v1.0.0'
)

# Chinese ASR pipeline (Paraformer-large; model id suggests 16 kHz input).
# Decoder artifacts are written to ./decode_dir.
# NOTE(review): both pipelines download/load their models at import time,
# making importing this module slow and network-dependent — consider lazy init.
inference_pipeline_zh = pipeline(
    task=Tasks.auto_speech_recognition,
    model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
    output_dir='./decode_dir'
)


def mktimestamp(time_unit: float) -> str:
    """
    mktimestamp returns the timecode of the subtitle.

    Args:
        time_unit: Offset in 100-nanosecond units (the unit edge-tts
            reports for WordBoundary events).

    Returns:
        str: The timecode in the format 00:00:00.000.
    """
    # Convert to whole milliseconds first, then carve out hour/minute with
    # divmod. Rounding up-front prevents the seconds field from formatting
    # as "60.000": the old per-field float math could emit the invalid
    # timecode "00:00:60.000" just below a minute boundary.
    total_ms = round(time_unit / 10_000)
    hour, remainder = divmod(total_ms, 3_600_000)
    minute, remainder = divmod(remainder, 60_000)
    seconds = remainder / 1000
    return f"{hour:02d}:{minute:02d}:{seconds:06.3f}"


def kykgpt_asr(file_name):
    """
    Transcribe an audio file with the module-level ASR pipeline.

    Args:
        file_name: Path to the audio file to transcribe.

    Returns:
        tuple: ``(True, text)`` on success, or ``(False, "error")`` on any
        failure. Callers are expected to check the boolean flag; this is a
        deliberate best-effort API that never raises.
    """
    try:
        # Language-recognition pass. Its result is currently unused
        # (previously bound to a dead local); the call is kept so any
        # side effects / warm-up behave as before — TODO confirm whether
        # it can be dropped entirely.
        lre_pipeline(file_name)
        rec_result = inference_pipeline_zh(audio_in=file_name)
        return True, rec_result["text"]
    except Exception:
        # Map every pipeline failure to the sentinel the callers test for.
        return False, "error"


def cut_sent2(para):
    """
    Split a paragraph into clauses at Chinese or ASCII commas.

    Both comma variants are first normalized to newlines, then every
    resulting clause has trailing commas stripped.

    Args:
        para: The paragraph to split.

    Returns:
        list[str]: The comma-delimited clauses, in original order.
    """
    normalized = re.sub('([,，])', r'\n', para).rstrip()
    return [clause.rstrip('，,') for clause in normalized.split("\n")]


def cut_sent(text):
    """
    Split text into sentences at Chinese sentence-final punctuation.

    Each non-blank input line is broken after 。！？? (also after runs of
    six dots, double ellipses, and terminators followed by a closing
    quote), and trailing terminal punctuation/spaces are stripped from
    every sentence.

    Args:
        text: Raw text, possibly spanning multiple lines.

    Returns:
        list[str]: The extracted sentences, in document order.
    """
    sentences = []
    for paragraph in text.split("\n"):
        if not paragraph.strip():
            continue  # blank lines contribute nothing
        # Insert a newline after every sentence terminator so the
        # paragraph can simply be split on "\n" afterwards.
        marked = re.sub(r'([。！？\?])([^”’])', r"\1\n\2", paragraph)
        marked = re.sub(r'(\.{6})([^”’])', r"\1\n\2", marked)
        marked = re.sub(r'(\…{2})([^”’])', r"\1\n\2", marked)
        marked = re.sub(r'([。！？\?][”’])([^，。！？\?])', r'\1\n\2', marked)
        sentences.extend(
            piece.rstrip(' 。！？.!?') for piece in marked.rstrip().split("\n")
        )
    return sentences


def gen_subs(text, max_words=30):
    """
    Build subtitle lines from text, keeping each line readable.

    Sentences longer than *max_words* characters are re-split at commas;
    any clause still over the limit is folded into two display lines at
    the 60% mark.

    Args:
        text: The source text to cut into subtitle lines.
        max_words: Maximum characters per subtitle line (default 30).

    Returns:
        list[str]: Subtitle lines (a line may contain one embedded "\\n").
    """
    lines = []
    for sentence in cut_sent(text):
        if len(sentence) <= max_words:
            lines.append(sentence)
            continue
        for clause in cut_sent2(sentence):
            if len(clause) > max_words:
                split_at = int(len(clause) * 0.6)
                clause = clause[:split_at] + '\n' + clause[split_at:]
            lines.append(clause)
    return lines


async def do_trans(text, mp3_file, srt_file, line_words, speaker_id=None) -> None:
    # Sample input kept for manual testing (Chinese marketing copy):
    # TEXT="客一客自研营销科技平台软件产品，为泛金融类ToB场景方提供一站式的SaaS化服务方案。包括提供全国性银行/持牌机构金融产品申请通道供给、一站式的移动互联网金融服务工具。2020~2021年间累计交易信贷业务规模超过222亿，截至2023年3月累计交易信贷业务规模已超454亿。依靠S2B2C的平台模式，累计帮助全国超过100万中小微企业获得融资贷款，解决资金与经营生产难题，助力金融服务最后一公里。"

    """
    Synthesize *text* to speech with edge-tts and write an SRT subtitle file.

    Args:
        text: The (Chinese) text to synthesize.
        mp3_file: Output path for the generated MP3 audio.
        srt_file: Output path for the generated SRT subtitles.
        line_words: Maximum characters per subtitle line (forwarded to
            gen_subs).
        speaker_id: Optional index into the built-in voice table; when
            falsy a random voice is chosen.

    Available zh-CN voices (see the ``speakers`` table below):
    Xiaoxiao/Xiaoyi (F), Yunjian/Yunxi/Yunxia/Yunyang (M),
    liaoning-Xiaobei (F), shaanxi-Xiaoni (F).

    NOTE(review): despite the ``-> None`` annotation, this returns the
    string "OK" — confirm which contract callers rely on.
    """
    speaker = None
    speakers = [
        {"Name": "zh-CN-XiaoxiaoNeural", "Gender": "Female"},
        {"Name": "zh-CN-XiaoyiNeural", "Gender": "Female"},
        {"Name": "zh-CN-YunjianNeural", "Gender": "Male"},
        {"Name": "zh-CN-YunxiNeural", "Gender": "Male"},
        {"Name": "zh-CN-YunxiaNeural", "Gender": "Male"},
        {"Name": "zh-CN-YunyangNeural", "Gender": "Male"},
        {"Name": "zh-CN-liaoning-XiaobeiNeural", "Gender": "Female"},
        {"Name": "zh-CN-shaanxi-XiaoniNeural", "Gender": "Female"}
    ]
    print(speaker_id)
    # NOTE(review): the truthiness test means speaker_id=0 falls through to
    # a random voice (index 0 can never be selected explicitly), and an
    # out-of-range id raises IndexError — confirm intended.
    if speaker_id:
        speaker = speakers[speaker_id]
        print(speaker)
    if not speaker:
        speaker = random.choice(speakers)
    VOICE = speaker.get("Name")
    print("VOICE", VOICE)
    # VOICE = 'zh-CN-YunjianNeural'
    communicate = edge_tts.Communicate(text, VOICE)

    submaker = edge_tts.SubMaker()
    # Stream the synthesis: audio chunks are appended to the MP3 file,
    # WordBoundary events (offset/duration in 100-ns units) feed the
    # SubMaker so we know when each word is spoken.
    with open(mp3_file, "wb") as file:
        async for chunk in communicate.stream():
            if chunk["type"] == "audio":
                file.write(chunk["data"])
            if chunk["type"] == "WordBoundary":
                submaker.create_sub((chunk["offset"], chunk["duration"]), chunk["text"])
    subs = gen_subs(text, line_words)
    # Align each subtitle line with the word boundaries: walk submaker.subs,
    # trimming each matched boundary word from the front of the current line
    # (s) until the line is consumed; the spanned boundaries give the cue's
    # start/end offsets.
    offset = []
    index = 0
    start_time = None
    end_time = None
    for s in subs:
        # Flush the previous cue's time range before starting the next one.
        # NOTE(review): a genuine start_time of 0 is falsy and would be
        # skipped here — verify against real edge-tts offsets.
        if start_time and end_time:
            offset.append((mktimestamp(start_time), mktimestamp(end_time)))
        start_time = submaker.offset[index][0]
        while index < len(submaker.subs) and len(s) > 1:
            # NOTE(review): the boundary word is used directly as a regex
            # pattern; words containing metacharacters (e.g. "?") should be
            # re.escape()d — confirm upstream text can never contain them.
            r = re.search(submaker.subs[index], s)
            if r:
                # print(r.span()[1])
                s = s[r.span()[1]:]
                # print(s)
            index += 1
        end_time = submaker.offset[index - 1][1]
    # Final cue: pad by 3 s (3 * 10^7 hundred-ns units) so the last line
    # stays on screen to the end of the audio.
    offset.append((mktimestamp(start_time), mktimestamp(end_time + 3 * (10 ** 7))))
    srt_text = ''
    # Assemble the SRT body; SRT timecodes use a comma decimal separator.
    for i in range(len(subs)):
        srt_text += (f"{i + 1}\n"
                     f"{offset[i][0].replace('.', ',')} --> {offset[i][1].replace('.', ',')}\n"
                     f"{subs[i]}\n\n")
    with open(srt_file, "w", encoding="utf-8") as file:
        file.write(srt_text)
    return "OK"


def text2audio(text, mp3_file, srt_file, line_words, speaker_id=None):
    """
    Synchronous wrapper around :func:`do_trans`.

    Args:
        text: The text to synthesize.
        mp3_file: Output path for the MP3 audio.
        srt_file: Output path for the SRT subtitles.
        line_words: Maximum characters per subtitle line.
        speaker_id: Optional voice index (see do_trans).

    Returns:
        str: "OK" on success (propagated from do_trans).
    """
    # asyncio.run creates, runs and *closes* an event loop. The previous
    # new_event_loop()/run_until_complete pair never closed the loop
    # (leaking it on every call) and never installed it as current.
    return asyncio.run(do_trans(text, mp3_file, srt_file, line_words, speaker_id))


def testAudio(src_path=r"E:\project\AI\gpt_auto_video\web_app\upload\recorder.mp3",
              dst_path=r"new_audio.mp3"):
    """
    Re-encode an audio file via moviepy (manual smoke test).

    Args:
        src_path: Input audio file; defaults to the original hard-coded
            development path, so existing callers are unaffected.
        dst_path: Output MP3 path.
    """
    audio_clip = AudioFileClip(src_path)
    try:
        audio_clip.write_audiofile(dst_path)
    finally:
        # Release the underlying ffmpeg reader; the old code leaked it.
        audio_clip.close()


if __name__ == '__main__':
    # Manual smoke test: re-encode a local recording through moviepy.
    testAudio()

# text = "客一客自研营销科技平台软件产品，为泛金融类ToB场景方提供一站式的SaaS化服务方案。包括提供全国性银行 / 持牌机构金融产品申请通道供给、一站式的移动互联网金融服务工具。2020~2021年间累计交易信贷业务规模超过222亿，截至2023年3月累计交易信贷业务规模已超454亿。依靠S2B2C的平台模式，累计帮助全国超过100万中小微企业获得融资贷款。解决资金与经营生产难题，助力金融服务最后一公里。"
# file_name = get_file_name( r"E:\workspace\kyk.com\gpt_auto_video\backend\static","")
# mp3_file_path = file_name + ".mp3"
# srt_file_path = file_name + ".srt"
# print(mp3_file_path,srt_file_path)
