from os import path
import os, sys

# Absolute directory of this script; also used as the root for models/filelists.
current_path = os.path.dirname(os.path.abspath(__file__))
# One-shot guard so spawned child processes (which re-import this module) do not
# keep appending the same entry.
if 'PYTHONPATH_CHECK' not in os.environ:
    # Bundled pyopenjtalk binary wheel shipped with the project (Windows cp310 build).
    sys.path.append(os.path.join(current_path, ".lib/pyopenjtalk-0.3.2-cp310-cp310-win_amd64/lib"))
    os.environ['PYTHONPATH_CHECK'] = "True"
    # The original indexed os.environ["PYTHONPATH"] directly, which raises
    # KeyError whenever the variable is unset; default to "" and use os.pathsep
    # instead of a hard-coded ';'.
    _lib_dir = sys.path[-1]
    _existing = os.environ.get("PYTHONPATH", "")
    os.environ["PYTHONPATH"] = f"{_existing}{os.pathsep}{_lib_dir}" if _existing else _lib_dir

import gradio as gr
import logging
import json
import torchaudio

# Default to WARNING so chatty third-party libraries (urllib3 in particular)
# do not flood the console while the UI is running.
logging.basicConfig(level=logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
import shutil
import torch
import commons
import utils
import datetime
from models import SynthesizerTrn
from text import text_to_sequence_with_hps as text_to_sequence
from scipy.io.wavfile import write
import subprocess
from finetune import main as train_main


def tab_infer():
    """Build the inference ("推理") tab of the web UI.

    Keeps the active config (``hps``), the generator network (``net_g``) and
    the config/checkpoint lookup tables as closure state shared by all of the
    tab's event handlers.

    Returns:
        The constructed ``gr.Tab``.
    """
    device = torch.device("cpu")  # cpu  mps
    hps = None                 # hyper-parameters of the selected config.json
    net_g = None               # SynthesizerTrn generator for the selected checkpoint
    config = None              # {config name -> config.json path}
    current_config_model = {}  # {checkpoint file name -> .pth path} for the active config

    def begin_infer(input_mode, text, speaker, noise_scale, noise_scale_w, length_scale):
        """Synthesize `text` with the loaded model and return the wav file path.

        noise_scale: degree of emotional variation
        noise_scale_w: phoneme duration
        length_scale: speaking speed; smaller is faster
        """
        input_mode = str(input_mode)
        if input_mode == '0':
            text_cleaners = ['my_infer_ce_cleaners']
        elif input_mode == '1':
            text = f'[JA]{text}[JA]'
            text_cleaners = ['cjke_cleaners2']
        else:
            # Use whatever cleaners the config file itself declares.
            text_cleaners = hps.data.text_cleaners

        def get_text(text, hps):
            # Convert raw text into a LongTensor of symbol ids, optionally
            # interspersed with blank tokens as the config requires.
            text_norm = text_to_sequence(text, hps.symbols, text_cleaners)
            if hps.data.add_blank:
                text_norm = commons.intersperse(text_norm, 0)
            return torch.LongTensor(text_norm)

        speaker_id = int(hps.speakers[speaker])
        with torch.no_grad():
            stn_tst = get_text(text, hps)
            x_tst = stn_tst.to(device).unsqueeze(0)
            x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).to(device)
            sid = torch.LongTensor([speaker_id]).to(device)  # speaker id
            audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale,
                                noise_scale_w=noise_scale_w,
                                length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
            wav_name = f"./outputs/tts/{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.wav"
            # exist_ok avoids the race between an exists() check and makedirs().
            os.makedirs(path.dirname(wav_name), exist_ok=True)
            write(wav_name, hps.data.sampling_rate, audio)
        return wav_name

    def load_config(config_name):
        """Load hyper-parameters for `config_name` into the shared `hps`."""
        nonlocal hps
        hps = utils.get_hparams_from_file(config[config_name])
        if isinstance(hps.speakers, list):  # adapt MoeGoe-style configs (speaker list -> {name: id})
            hps.speakers = {item: str(index) for index, item in enumerate(hps.speakers)}

    def load_model(model_name):
        """Instantiate the generator for `model_name` and load its checkpoint."""
        model_path = current_config_model[model_name]
        symbols = hps.symbols
        nonlocal net_g
        net_g = SynthesizerTrn(
            len(symbols),
            hps.data.filter_length // 2 + 1,
            hps.train.segment_size // hps.data.hop_length,
            n_speakers=hps.data.n_speakers,
            **hps.model).to(device)
        _ = net_g.eval()
        _ = utils.load_checkpoint(model_path, net_g, None)

    def get_config():
        """Scan models/ for subdirectories containing a config.json.

        Refreshes the shared `config` mapping and returns the config names.
        """
        models_dir = path.join(current_path, 'models')
        nonlocal config
        # A fresh checkout may not have a models directory yet; the original
        # os.listdir() call crashed with FileNotFoundError in that case.
        if not path.isdir(models_dir):
            config = {}
            return []
        config = {subdir: path.join(models_dir, subdir, "config.json")
                  for subdir in os.listdir(models_dir)
                  if path.isdir(path.join(models_dir, subdir))
                  and path.exists(path.join(models_dir, subdir, "config.json"))}
        return list(config.keys())

    def get_model(config_path):
        """Collect the .pth checkpoints that live next to `config_path`'s config.json.

        Loads the first checkpoint found (if any) and returns the checkpoint names.
        """
        dir_path = path.dirname(config[config_path])
        nonlocal current_config_model
        current_config_model = {f: os.path.join(dir_path, f) for f in os.listdir(dir_path) if f.endswith('.pth')}
        model_list = list(current_config_model.keys())
        if len(model_list) != 0:
            load_model(model_list[0])
        return model_list

    def config_changed(config_path, p, drop_sid):
        """Handler for config selection: refresh the model and speaker dropdowns."""
        if not config_path:
            return [p.update(choices=[], value=None), drop_sid.update(choices=[], value=None)]
        load_config(config_path)
        model_list = get_model(config_path)
        if len(model_list) != 0:
            drop_sid_choices = list(hps.speakers.keys())
            return [p.update(choices=model_list, value=model_list[0]),
                    drop_sid.update(choices=drop_sid_choices, value=drop_sid_choices[0])]
        return [p.update(choices=[], value=None), drop_sid.update(choices=[], value=None)]

    def refresh_config(drop_config, drop_model, drop_sid):
        """Handler for the refresh button: rescan configs, models and speakers."""
        config_list = get_config()
        if len(config_list) != 0:
            load_config(config_list[0])
            model_list = get_model(config_list[0])
            drop_sid_choices = list(hps.speakers.keys())
            return [drop_config.update(choices=config_list, value=config_list[0]),
                    drop_model.update(choices=model_list, value=model_list[0]),
                    drop_sid.update(choices=drop_sid_choices, value=drop_sid_choices[0])]
        return [drop_config.update(choices=config_list), drop_model.update(choices=[]), drop_sid.update(choices=[])]

    def open_output():
        """Open the TTS output directory in Windows Explorer, creating it if needed."""
        out_dir = './outputs/tts'
        if not os.path.exists(out_dir):
            out_dir = os.path.dirname(out_dir)
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
        out_dir = os.path.abspath(out_dir.replace("/", "\\"))
        # Argument list instead of a formatted command string so that paths
        # containing spaces are passed intact (and no shell is involved).
        subprocess.Popen(['explorer', out_dir])

    with gr.Tab("推理") as tab:
        gr.Markdown()
        gr.Markdown(
            "# <center> FBI Warning:\n"
            "# <center> 严禁将模型用于任何商业项目，否则后果自负\n"
            '<div align="center"><a><font color="#dd0000">结果有随机性，可多次生成取最佳效果</font></a></div>'
            '<div align="center"><a><font color="#dd0000">标点符号会影响生成的结果</font></a></div>'
        )
        with gr.Tab("推理"):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        btn_refresh = gr.Button("刷新").style(size='sm')
                    with gr.Row():
                        with gr.Column():
                            drop_config = gr.Dropdown(label="选择配置文件", choices=get_config(), type="value")
                            # Pre-select and load the first config found, if any.
                            if len(drop_config.choices) != 0:
                                drop_config.value = drop_config.choices[0]
                                load_config(drop_config.value)
                        with gr.Column():
                            drop_model = gr.Dropdown(label="选择模型", type="value", interactive=True)  # value  index
                            if len(drop_config.choices) != 0:
                                drop_model.choices = get_model(drop_config.value)
                                if len(drop_model.choices) != 0:
                                    drop_model.value = drop_model.choices[0]
                    input_mode = gr.Dropdown(label="语言选择",
                                             info='语言标签有:[ZH]中文内容[ZH], [EN]english content[EN], [JA]こんにちは[JA]. 除非选项中明确说明需要自行使用语言标记包裹，否则不用自己加语言标记。',
                                             type='index',
                                             choices=['中英，英文（可选项）需要使用[EN][EN]包裹', '日语',
                                                      '与配置文件一致，每种语言需要自行使用相应的标签包裹'],
                                             value='中英，英文（可选项）需要使用[EN][EN]包裹')

                    input_text = gr.Textbox(label="输入文本", lines=5)
                    drop_sid = gr.Dropdown(label="选择说话人", interactive=True)
                    btn_refresh.click(lambda: refresh_config(drop_config, drop_model, drop_sid), inputs=[],
                                      outputs=[drop_config, drop_model, drop_sid])
                    # Guard against hps still being None: with no configs on
                    # disk the original crashed here with AttributeError while
                    # building the UI.
                    if hps is not None and len(hps.speakers) != 0:
                        drop_sid.choices = list(hps.speakers.keys())
                        drop_sid.value = drop_sid.choices[0]
                    drop_config.change(fn=lambda param: config_changed(param, drop_model, drop_sid), inputs=drop_config,
                                       outputs=[drop_model, drop_sid])
                    drop_model.change(fn=load_model, inputs=drop_model, outputs=[])
                    with gr.Row():
                        with gr.Column():
                            btn_submit = gr.Button(value="开始合成", variant='primary')
                    with gr.Row():
                        ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1,
                                       value=0.666, interactive=True)  # 0.667  0.6
                        nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1,
                                        value=0.668, interactive=True)  # 0.8  0.668
                        ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1,
                                       value=1.2, interactive=True)
                with gr.Column():
                    output = gr.Audio(type='filepath')
                    btn_open = gr.Button(value="打开输出目录")
                btn_submit.click(fn=begin_infer, inputs=[input_mode, input_text, drop_sid, ns, nsw, ls],
                                 outputs=[output])
                btn_open.click(fn=open_output, inputs=[], outputs=[])
        with gr.Tab("导入模型"):
            def import_model(model_file, config_file):
                """Copy a .pth checkpoint plus its config.json into a new models/ subdir."""
                model_file = os.path.abspath(model_file)
                if not path.exists(model_file) or not model_file.endswith('.pth'):
                    return "模型文件不存在，或者不是以.pth结尾！请重新输入！"
                model_name = path.basename(model_file)
                new_model_dir = path.join(current_path, f"models/{model_name.split('.')[0]}")
                if path.exists(new_model_dir):
                    # Suffix _1, _2, ... until the directory name is free.
                    i = 1
                    while path.exists(new_model_dir + '_' + str(i)):
                        i += 1
                    new_model_dir = new_model_dir + '_' + str(i)
                os.makedirs(new_model_dir)
                shutil.copy(model_file, path.join(new_model_dir, model_name))
                shutil.copy(config_file.name, path.join(new_model_dir, 'config.json'))
                return f"导入成功，模型名称为：{path.basename(new_model_dir) + '.' + model_name.split('.')[-1]}, 请到推理中刷新后查看！"

            with gr.Row():
                with gr.Column():
                    input_model = gr.Textbox(label="输入模型文件路径 .pth结尾！")
                with gr.Column():
                    input_config = gr.File(file_types=['.json'], type="file", label="选择配置文件")
            with gr.Row():
                btn_import_model = gr.Button(value="开始导入")
            with gr.Row():
                output_model_tip = gr.Textbox()
            btn_import_model.click(fn=import_model, inputs=[input_model, input_config], outputs=[output_model_tip])

    return tab


def tab_train():
    """Build the "训练（微调）" (fine-tuning) tab of the web UI.

    The three variables below are closure state shared by the event handlers.
    Returns the constructed gr.Tab.

    NOTE(review): the gradio event wiring and post-creation widget mutation
    below depend on gradio 3.x behaviour (``Dropdown.update``, assigning
    ``.choices``/``.value`` after construction); kept byte-identical on purpose.
    """
    file_list_obj = None       # {filelist name -> .cleaned path}
    config = None              # {config name -> config.json path}
    current_config_model = {}  # {checkpoint name -> .pth path}

    def begin_train(train_data, eval_interval, batch_size, max_epoch, is_continue, config_name, model_name):
        """Assemble an hps dict and run finetune.main; blocks until training finishes."""

        def get_speakers(train_data_path):
            # Derive the speaker map from the filelist ("wav|speaker|text" lines).
            speakers = {}
            with open(train_data_path, 'r', encoding='utf-8') as file_obj:
                lines = file_obj.readlines()
            for line in lines:
                line = line.split('|')
                # NOTE(review): assert is stripped under `python -O`; an explicit
                # check raising ValueError with a message would be safer.
                assert len(line) == 3
                speakers[f"speaker_{line[1]}"] = line[1]
            return speakers

        train_data = file_list_obj[train_data]
        if is_continue:
            # Continue training: reuse the selected checkpoint's config.json.
            config_name = config[config_name]
            model_name = current_config_model[model_name]
            with open(config_name, 'r', encoding='utf-8') as f:
                hps = json.load(f)
            hps['data']['training_files'] = train_data
            hps['train']['batch_size'] = int(batch_size)
            hps['train']['eval_interval'] = int(eval_interval)
            hps['train']['log_interval'] = 10
            hps['train']['epochs'] = int(max_epoch)
            hps['model_dir'] = path.join(current_path, 'models')
            hps['speakers'] = get_speakers(train_data)
            hps['data']['n_speakers'] = len(hps['speakers'])
            hps['drop_speaker_embed'] = False
        else:
            # Fresh fine-tune: start from the bundled template config.
            with open(path.join(current_path, "configs/finetune_speaker.json"), 'r', encoding='utf-8') as f:
                hps = json.load(f)
            hps['data']['training_files'] = train_data
            hps['train']['batch_size'] = int(batch_size)
            hps['train']['eval_interval'] = int(eval_interval)
            hps['train']['log_interval'] = 10
            hps['train']['epochs'] = int(max_epoch)
            hps['model_dir'] = path.join(current_path, 'models')
            hps['speakers'] = get_speakers(train_data)
            hps['data']['n_speakers'] = len(hps['speakers'])
            # hps['data']['n_speakers'] = 999
            hps['drop_speaker_embed'] = True
            model_name = None  # no checkpoint to resume from
        train_main(hps, model_name)
        return "训练完成"

    def get_train_data():
        """List the .cleaned filelists under filelists/, creating the dir if missing."""
        filelist_dir = path.join(current_path, 'filelists')
        if not path.exists(filelist_dir):
            os.makedirs(filelist_dir)
        nonlocal file_list_obj
        file_list_obj = {f: os.path.join(filelist_dir, f) for f in os.listdir(filelist_dir) if f.endswith('.cleaned')}
        return list(file_list_obj.keys())

    def refresh_train_data(drop_train_data):
        """Handler for the refresh button: rescan the available filelists."""
        file_list = get_train_data()
        if len(file_list) != 0:
            return drop_train_data.update(choices=file_list, value=file_list[0])
        return drop_train_data.update(choices=file_list)

    def get_config():
        """Scan models/ for subdirectories containing a config.json.

        NOTE(review): crashes with FileNotFoundError if models/ does not exist
        yet (fresh checkout) — confirm and guard if that is a supported state.
        """
        models_dir = path.join(current_path, 'models')
        nonlocal config
        config = {subdir: path.join(models_dir, path.join(subdir, "config.json")) for subdir in
                  os.listdir(models_dir)
                  if path.isdir(path.join(models_dir, subdir)) and path.exists(
                path.join(models_dir, path.join(subdir, "config.json")))}
        return list(config.keys())

    def get_model(config_path):
        '''
        Collect the .pth checkpoints next to `config_path`'s config.json.
        '''
        dir_path = path.dirname(config[config_path])
        nonlocal current_config_model
        current_config_model = {f: os.path.join(dir_path, f) for f in os.listdir(dir_path) if f.endswith('.pth')}
        return list(current_config_model.keys())

    def config_changed(config_path, p):
        """Handler for config selection: refresh the model dropdown `p`."""
        if not config_path:
            return p.update(choices=[], value=None)
        model_list = get_model(config_path)
        if len(model_list) != 0:
            return p.update(choices=model_list, value=model_list[0])
        return p.update(choices=[], value=None)

    def refresh_config(drop_config):
        """Rescan the available configs and reset the dropdown selection."""
        config_list = get_config()
        if len(config_list) != 0:
            return drop_config.update(choices=config_list, value=config_list[0])
        return drop_config.update(choices=config_list)

    def ck_continue_changed(is_checked, drop_config):
        # Second element is a raw gradio update dict toggling the row's visibility.
        return [refresh_config(drop_config), {"visible": is_checked, "__type__": "update"}]

    with gr.Tab("训练（微调）") as tab:
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    btn_refresh = gr.Button("刷新").style(size='sm')
                with gr.Row():
                    drop_train_data = gr.Dropdown(label="选择训练数据", choices=get_train_data(), type="value")
                    if len(drop_train_data.choices) != 0:
                        drop_train_data.value = drop_train_data.choices[0]
                    btn_refresh.click(lambda: refresh_train_data(drop_train_data), inputs=[], outputs=drop_train_data)
                with gr.Row():
                    ck_continue = gr.Checkbox(label='继续训练')
                # Config/model pickers are hidden until "继续训练" is checked.
                with gr.Row(visible=False) as continue_train:
                    with gr.Column():
                        drop_config = gr.Dropdown(label="选择配置文件", choices=get_config(), type="value")
                        if len(drop_config.choices) != 0:
                            drop_config.value = drop_config.choices[0]
                    with gr.Column():
                        drop_model = gr.Dropdown(label="选择模型", type="value", interactive=True)  # value  index
                        if len(drop_config.choices) != 0:
                            drop_model.choices = get_model(drop_config.value)
                            if len(drop_model.choices) != 0:
                                drop_model.value = drop_model.choices[0]
                ck_continue.change(fn=lambda p1: ck_continue_changed(p1, drop_config),
                                   inputs=[ck_continue], outputs=[drop_config, continue_train])
                drop_config.change(fn=lambda param: config_changed(param, drop_model), inputs=drop_config,
                                   outputs=drop_model)
                with gr.Row():
                    eval_interval = gr.Number(value=200, label='保存间隔', info='每迭代多少步就保存一次')
                    batch_size = gr.Number(value=8, label='批量大小', info='越大占用显存越高，训练越快')
                with gr.Row():
                    max_epoch = gr.Number(value=35, label='最大迭代次数',
                                          info='一个角色1000条训练数据时迭代35次的效果可观, 不足1000条则按相应比例增大迭代次数。在给定的数据样本下，最大迭代次数对最终效果的提升是有上限的，并非越大越好！')
                with gr.Row():
                    btn_submit = gr.Button(value="开始训练", variant='primary')
            with gr.Column():
                output = gr.Textbox(label='输出')
            btn_submit.click(fn=begin_train,
                             inputs=[drop_train_data, eval_interval, batch_size, max_epoch, ck_continue, drop_config,
                                     drop_model],
                             outputs=[output])
    return tab


def tab_process_data():
    """Build the "制作训练数据" (training-data preparation) tab.

    Returns the constructed gr.Tab.
    """
    from process_audio import process_data
    from process_audio import text_clean
    # Maps the UI language choice to the tag wrapped around transcribed text.
    language_tag = {
        "中文": "[ZH]",
        "日文": "[JA]",
        "英文": "[EN]"
    }

    def start_by_audio(audio_dir, audio_language, whisper_size):
        """Transcribe audio/video under `audio_dir` with Whisper and build filelists."""
        audio_language = language_tag[audio_language]
        if not path.exists(audio_dir):
            return "文件或目录不存在！"
        size_map = {'小': 'base', '中': 'medium', '大': 'large'}
        process_data(audio_dir, audio_language, size_map[whisper_size])
        return "训练数据制作完成，请到训练面板中刷新！"

    def start_by_file(txt_file, audio_language):
        """Validate every audio path in the uploaded filelist, then clean its text."""
        audio_language = language_tag[audio_language]
        with open(txt_file.name, 'r', encoding='utf-8') as f:
            lines = f.readlines()
            # `wav_path` rather than `path`: the original local shadowed the
            # os.path module imported at file level.
            for li in lines:
                wav_path = li.split('|')[0].strip()
                if not os.path.isfile(wav_path):
                    return f"音频文件{wav_path}不存在，请检查后重试！"
        text_clean(txt_file.name, audio_language)
        return "训练数据制作完成，请到训练面板中刷新！"

    with gr.Tab("制作训练数据") as tab:
        with gr.Tab("使用音频制作"):
            gr.Markdown(
                "###  输入格式可以是：各种视频，或者音频\n"
            )
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        audio_dir = gr.Textbox(label='音频',
                                               info='可以是文件或者文件夹路径，选择文件夹时会读取该目录下的所有可用数据')
                    with gr.Row():
                        audio_language = gr.Dropdown(label="音频语言", type="value", interactive=True,
                                                     info="选择音频的所属语言",
                                                     choices=list(language_tag.keys()), value="中文")  # value  index
                    with gr.Row():
                        whisper_size = gr.Radio(["小", "中", "大"], label="预处理模型大小", value="中",
                                                info="模型越大，显存占用越高，效果越好！")
                    with gr.Row():
                        btn_submit_1 = gr.Button(value="开始制作", variant='primary')
                with gr.Column():
                    output = gr.Textbox(label='输出')
                btn_submit_1.click(fn=start_by_audio, inputs=[audio_dir, audio_language, whisper_size],
                                   outputs=[output])
        with gr.Tab("使用音频+文本内容制作"):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        gr.File(label='示例数据', value=path.join(current_path, 'filelist_example.txt'))
                    with gr.Row():
                        txt_file = gr.File(label='选择满足示例数据格式要求的txt文件', file_types=['.txt'], type='file')
                    with gr.Row():
                        audio_language2 = gr.Dropdown(label="音频语言", type="value", interactive=True,
                                                      info="选择音频的所属语言",
                                                      choices=list(language_tag.keys()), value="中文")  # value  index
                    with gr.Row():
                        btn_submit_2 = gr.Button(value="开始制作", variant='primary')
                with gr.Column():
                    output2 = gr.Textbox(label='输出')
                btn_submit_2.click(fn=start_by_file, inputs=[txt_file, audio_language2], outputs=[output2])
    return tab


def main():
    """Assemble the full web UI: inference, fine-tuning and data-prep tabs.

    Returns the gr.Blocks demo ready to be launched.
    """
    with gr.Blocks() as demo:
        for build_tab in (tab_infer, tab_train, tab_process_data):
            build_tab()
    return demo


# Launch on import unless this module is being re-imported by a multiprocessing
# child: Windows "spawn" workers import the main script under the name
# "__mp_main__", and this guard keeps every training worker from starting its
# own web server. (The original also had a dead `if __name__ == "__main__":
# pass` branch, removed here.)
if __name__ != "__mp_main__":
    main().launch(debug=False)
