import sys, os
import logging

# Quiet down chatty third-party loggers before configuring the root logger.
for _noisy in ("numba", "markdown_it", "urllib3", "matplotlib"):
    logging.getLogger(_noisy).setLevel(logging.WARNING)

logging.basicConfig(
    level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
)
logger = logging.getLogger(__name__)


import torch
import argparse
import commons
import utils
from models import SynthesizerTrn
from text.symbols import symbols
from text import cleaned_text_to_sequence, get_bert
from text.cleaner import clean_text
import gradio as gr
import webbrowser
from moviepy.editor import *
import json
import random

# Global synthesizer instance: constructed in the __main__ block and
# re-loaded with a different checkpoint by btn_speaker_fn.
net_g = None


def get_text(text, language_str, hps):
    """Turn raw text into BERT features plus phone/tone/language id tensors.

    Returns a tuple (bert, phone, tone, language) where the last three are
    LongTensors of equal length and bert's last dimension matches it.
    """
    norm_text, phone, tone, word2ph = clean_text(text, language_str)
    phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)

    if hps.data.add_blank:
        # Interleave a blank token (id 0) between every symbol.
        phone = commons.intersperse(phone, 0)
        tone = commons.intersperse(tone, 0)
        language = commons.intersperse(language, 0)
        # Each word now covers twice as many phones; the extra leading
        # blank is attributed to the first word.
        for idx, count in enumerate(word2ph):
            word2ph[idx] = count * 2
        word2ph[0] += 1
    bert = get_bert(norm_text, word2ph, language_str)
    del word2ph

    assert bert.shape[-1] == len(phone)

    return (
        bert,
        torch.LongTensor(phone),
        torch.LongTensor(tone),
        torch.LongTensor(language),
    )


def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid):
    """Synthesize Chinese speech for `text` as speaker `sid`.

    Returns the waveform as a float numpy array on the CPU.
    """
    global net_g
    bert, phones, tones, lang_ids = get_text(text, "ZH", hps)
    with torch.no_grad():
        seq = phones.to(device).unsqueeze(0)
        seq_lengths = torch.LongTensor([phones.size(0)]).to(device)
        tone_ids = tones.to(device).unsqueeze(0)
        language_ids = lang_ids.to(device).unsqueeze(0)
        bert_feat = bert.to(device).unsqueeze(0)
        del phones
        spk_ids = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
        out = net_g.infer(
            seq,
            seq_lengths,
            spk_ids,
            tone_ids,
            language_ids,
            bert_feat,
            sdp_ratio=sdp_ratio,
            noise_scale=noise_scale,
            noise_scale_w=noise_scale_w,
            length_scale=length_scale,
        )
        audio = out[0][0, 0].data.cpu().float().numpy()
        # Drop device tensors eagerly to release GPU memory between calls.
        del seq, seq_lengths, tone_ids, language_ids, bert_feat, spk_ids
        return audio


def tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale):
    """Gradio callback: synthesize audio and return (status, (rate, samples))."""
    with torch.no_grad():
        waveform = infer(
            text,
            sdp_ratio=sdp_ratio,
            noise_scale=noise_scale,
            noise_scale_w=noise_scale_w,
            length_scale=length_scale,
            sid=speaker,
        )
    return "生成成功！", (hps.data.sampling_rate, waveform)


def vc_fn(input_video, input_audio, wait_time_before):
    """Mux `input_audio` onto `input_video`, delaying the audio track by
    `wait_time_before` seconds, and write the merged video to disk.

    Returns (status message, path of the written video).
    """
    wait_time_before = int(wait_time_before)
    # Open the video (with its own audio track) and the new audio track.
    video_clip = VideoFileClip(input_video, audio=True)
    audio_clip = AudioFileClip(input_audio)
    try:
        # Trim the video to just cover the delayed audio, but never past the
        # end of the source video — subclip rejects an end time beyond the
        # clip's duration.
        end_time = min(audio_clip.duration + wait_time_before, video_clip.duration)
        video_clip = video_clip.subclip(0, end_time)

        # Mix the original video audio with the delayed new track.
        mix_audio = CompositeAudioClip(
            [video_clip.audio, audio_clip.set_start(wait_time_before)]
        )
        video_clip = video_clip.set_audio(mix_audio)

        # Write the merged result.
        output_video_path = "./video&audio/output_video.mp4"
        video_clip.write_videofile(output_video_path, codec="libx264")
    finally:
        # Release file handles even if trimming/writing fails.
        video_clip.close()
        audio_clip.close()
    return "合并成功，文件保存到当前目录video&audio/output_video.mp4。", output_video_path


def btn_speaker_fn(speaker):
    """Load the checkpoint for `speaker` into the global synthesizer."""
    checkpoint_path = f"./logs/{speaker}/1.pth"
    _ = utils.load_checkpoint(checkpoint_path, net_g, None, skip_optimizer=True)


def train_btn(
    dataset_path, dataset_name, continue_train=False, epochs=200, batch_size=5
):
    """Gradio callback: preprocess the dataset, patch the training config,
    and launch training.

    NOTE(review): `dataset_path` and `dataset_name` are currently unused —
    the preprocessing scripts presumably read their own config; confirm
    whether these values should be forwarded to them.
    """
    # Preprocessing pipeline: transcription, text normalization, BERT features.
    # Fixed command strings — no untrusted input reaches the shell here.
    os.system("python transcribe_genshin.py")
    os.system("python preprocess_text.py")
    os.system("python bert_gen.py")

    # Patch epoch/batch settings into the training config. Sliders may hand
    # us floats, so coerce to int before writing.
    with open("./configs/train.json", "r") as file:
        data = json.load(file)
    data["train"]["epochs"] = int(epochs)
    data["train"]["batch_size"] = int(batch_size)
    with open("./configs/train.json", "w") as file:
        json.dump(data, file)

    # BUG FIX: the UI radio delivers the strings "是"/"否" (是 = retrain from
    # scratch, 否 = continue from the saved checkpoint). Both are non-empty
    # and therefore truthy, so the old `if continue_train:` always passed
    # --cont. Map the strings explicitly; booleans keep their old meaning.
    if isinstance(continue_train, str):
        resume = continue_train == "否"
    else:
        resume = bool(continue_train)

    if resume:
        os.system("python train_ms.py --cont")
    else:
        os.system("python train_ms.py")

    return "训练完成！"


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_dir", default="./logs/otto/1.pth", help="path of your model"
    )
    parser.add_argument(
        "--config_dir", default="./configs/config.json", help="path of your config file"
    )
    parser.add_argument("--share", default=False, help="make link public")
    parser.add_argument(
        "-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log"
    )

    args = parser.parse_args()
    if args.debug:
        logger.info("Enable DEBUG-LEVEL log")
        # BUG FIX: basicConfig is a no-op once handlers are installed (done
        # at module import); adjust the root logger's level directly.
        logging.getLogger().setLevel(logging.DEBUG)
    hps = utils.get_hparams_from_file(args.config_dir)
    device = "cuda:0" if torch.cuda.is_available() else "cpu"

    # Build the synthesizer and switch it to inference mode.
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model,
    ).to(device)
    _ = net_g.eval()

    # BUG FIX: --model_dir was parsed but never used, leaving net_g with
    # random weights until the "加载模型" button was clicked. Load the
    # default checkpoint up front so the TTS tab works immediately.
    # NOTE(review): confirm ./logs/otto/1.pth exists in deployments.
    _ = utils.load_checkpoint(args.model_dir, net_g, None, skip_optimizer=True)

    speaker_ids = hps.data.spk2id
    speakers = list(speaker_ids.keys())

    my_theme_list = hps.theme
    with gr.Blocks(theme=random.choice(my_theme_list)) as app:
        gr.Markdown("# 基于语音合成的配音系统")
        # --- Tab 1: text-to-speech ---
        with gr.Tab("文本转语音"):
            with gr.Row():
                with gr.Column():
                    text = gr.TextArea(
                        label="文本框", placeholder="Input Text Here", value="请输入想要配音的文字。"
                    )
                    with gr.Row():
                        speaker = gr.Dropdown(
                            choices=speakers, value=speakers[0], label="说话人", scale=3
                        )
                        speaker_btn = gr.Button("加载模型", variant="primary", scale=1)
                        speaker_btn.click(btn_speaker_fn, inputs=speaker)
                    sdp_ratio = gr.Slider(
                        minimum=0.1, maximum=1, value=0.2, step=0.01, label="SDP/DP混合比"
                    )
                    noise_scale = gr.Slider(
                        minimum=0.1, maximum=1, value=0.1, step=0.01, label="感情调节"
                    )
                    noise_scale_w = gr.Slider(
                        minimum=0.1, maximum=1, value=0.8, step=0.01, label="音素长度"
                    )
                    length_scale = gr.Slider(
                        minimum=0.1, maximum=2, value=1.1, step=0.01, label="生成长度"
                    )
                    btn = gr.Button("点击生成", variant="primary")
                with gr.Column():
                    text_output = gr.Textbox(label="提示信息")
                    audio_output = gr.Audio(label="音频输出")
                    gr.Markdown("## ")
                    gr.Markdown("## 本系统基于Bert-VITS2模型。")
                    gr.Markdown("## 在左侧文本框输入想要转换的文字和说话人，调节参数即可生成声音。")
                    gr.Markdown("## 生成后下载上方音频输出进入下一步配音。")
        btn.click(
            tts_fn,
            inputs=[text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale],
            outputs=[text_output, audio_output],
        )

        # --- Tab 2: merge an audio track into a video ---
        with gr.Tab("视频结合音轨"):
            with gr.Column():
                with gr.Row():
                    with gr.Column(scale=1):
                        file_input_video = gr.Video(
                            value="./video&audio/video.mp4", label="导入相关视频"
                        )
                        file_input_video_path = gr.Textbox(
                            value="./video&audio/video.mp4",
                            label="相关视频路径",
                            info="建议填写绝对路径",
                        )
                    file_input_audio = gr.Audio(
                        type="filepath",
                        value="./video&audio/audio.wav",
                        label="导入相关音频",
                        scale=1,
                    )
            with gr.Column():
                wait_time = gr.Textbox(value=3, label="开始说话前等待秒数", info="请输入小于视频长度的整数")
                message_box = gr.Textbox(label="生成结果提示")
                file_output_video = gr.Video(label="输出视频")
                btn = gr.Button("生成", variant="primary")
        btn.click(
            vc_fn,
            inputs=[file_input_video_path, file_input_audio, wait_time],
            outputs=[message_box, file_output_video],
        )

        # --- Tab 3: train a custom voice ---
        with gr.Tab("自定义训练音色"):
            with gr.Column():
                dataset_path = gr.Textbox(
                    label="训练数据地址",
                    info="建议填写绝对路径",
                    lines=1,
                    placeholder="D:\otto-Bert-VITS2\genshin_dataset",
                )
                dataset_name = gr.Textbox(
                    label="模型名（角色名）",
                    info="声音模型训练保存的名字",
                    lines=1,
                    placeholder="ayaka",
                )
                continue_train = gr.Radio(
                    ["是", "否"],
                    value="是",
                    label="是否重新训练",
                    info="重新训练选择是，接着已经保存的模型继续训练选择否",
                )
                max_epochs = gr.Slider(
                    2, 1000, value=200, label="训练epochs次数", info="迭代训练的轮次，默认200"
                )
                batch_size = gr.Slider(
                    2, 256, step=1, value=5, label="batch_size大小", info="越大训练越快，显存消耗越大"
                )
                text_output = gr.TextArea(label="输出结果")
                btn = gr.Button("开始训练", variant="primary")
                btn.click(
                    train_btn,
                    inputs=[
                        dataset_path,
                        dataset_name,
                        continue_train,
                        max_epochs,
                        batch_size,
                    ],
                    outputs=text_output,
                )

    webbrowser.open("http://127.0.0.1:7860")
    app.queue()
    # BUG FIX: share was hard-coded to True, ignoring the parsed --share
    # flag; forward the flag instead.
    # NOTE(review): a non-empty string passed as `--share False` is still
    # truthy; consider switching the option to action="store_true".
    app.launch(server_port=7860, show_error=True, share=args.share)
