import gradio as gr

def tts_fn():
    """Placeholder text-to-speech handler for the 文本转语音 tab.

    Not implemented yet; performs no work and returns ``None``.
    """
    return None

def vc_fn(video=None, audio=None):
    """Placeholder handler for the 视频结合 (video + audio merge) tab.

    The UI wires this callback with two inputs (a video file and an
    audio file) and two outputs (a status textbox and a result video).
    The original zero-argument stub raised ``TypeError`` when clicked
    and returned a single ``None`` for two output components; this
    version accepts the inputs (defaults keep zero-argument calls
    working) and returns one value per output component.

    Args:
        video: path/handle of the uploaded video, or None.
        audio: path/handle of the uploaded audio, or None.

    Returns:
        tuple: (status message for the textbox, result video or None).
    """
    # TODO: implement the actual audio/video merge; report status for now.
    return "功能尚未实现", None

if __name__ == "__main__":
    # Speaker presets and synthesis languages offered by the UI.
    # (languages is currently defined but not attached to any widget.)
    speakers = ["cyh", "aaa"]
    languages = ["ZH", "JP", "EN", "mix"]

    with gr.Blocks() as app:
        gr.Markdown("### 基于语音合成的配音系统原型")

        # --- Tab 1: text-to-speech -------------------------------------
        with gr.Tab("文本转语音"):
            with gr.Row():
                with gr.Column():
                    text = gr.TextArea(
                        label="输入文本内容"
                    )
                    speaker = gr.Dropdown(
                        choices=speakers, label="选择说话人"
                    )
                    # NOTE(review): these sliders previously used value=0,
                    # which lies below minimum=0.1 (invalid default). The
                    # step-aligned defaults below follow common VITS settings
                    # (noise_scale≈0.7, noise_scale_w≈0.8, length_scale=1.0)
                    # — confirm against the actual inference code.
                    noise_scale = gr.Slider(
                        minimum=0.1, maximum=2, value=0.7, step=0.1, label="感情"
                    )
                    noise_scale_w = gr.Slider(
                        minimum=0.1, maximum=2, value=0.8, step=0.1, label="断句长度"
                    )
                    length_scale = gr.Slider(
                        minimum=0.1, maximum=2, value=1.0, step=0.1, label="语速"
                    )
                    btn = gr.Button("生成音频！")
                with gr.Column():
                    text_output = gr.Textbox(label="状态信息")
                    audio_output = gr.Audio(label="输出音频")
            # TODO(review): tts_fn is wired without inputs/outputs, so the
            # widgets above are never passed and text_output/audio_output
            # are never filled — presumably this should be
            # btn.click(tts_fn, inputs=[...], outputs=[text_output,
            # audio_output]); confirm against tts_fn's implementation.
            btn.click(tts_fn)

        # --- Tab 2: merge generated audio with a video -----------------
        with gr.Tab("视频结合"):
            with gr.Column():
                with gr.Row():
                    file_input_video = gr.Video(label="导入相关视频")
                    file_input_ = gr.Audio(label="导入相关音频")
            with gr.Column():
                message_box = gr.Textbox(label="生成结果提示")
                Video4 = gr.Video(label="生成结果")
                inbtw = gr.Button("生成")
                inbtw.click(
                    vc_fn, inputs=[file_input_video, file_input_],
                    outputs=[message_box, Video4]
                )

        # --- Tab 3: fine-tune a custom voice ---------------------------
        with gr.Tab("自定义训练音色"):
            with gr.Row():
                with gr.Column():
                    dataset_path = gr.Textbox(
                        label="训练数据地址",
                        info="wav音频文件，建议填写绝对路径",
                        lines=1,
                        placeholder="F:\\Code\\VITS_fast_finetune\\raw_audio",
                    )
                    dataset_name = gr.Textbox(
                        label="模型名（角色名）",
                        info="声音模型训练保存的名字",
                        lines=1,
                        placeholder="cyh",
                    )
                    continue_train = gr.Radio(
                        ["是", "否"], value="是", label="是否重新训练",
                        info="重新训练选择是，接着已经保存的模型继续训练选择否",
                    )
                    whisper_model_size = gr.Radio(
                        ["tiny", "base", "small", "medium", "large"],
                        value="medium", label="语音识别模型",
                        info="8G显存选medium，8G以上选large",
                    )
                    max_epochs = gr.Slider(
                        2, 1000, value=200, label="训练epochs次数",
                        info="迭代训练的轮次，默认200",
                    )
                    batch_size = gr.Slider(
                        2, 256, step=2, value=4, label="batch_size大小",
                        info="越大训练越快，显存消耗越大",
                    )

                with gr.Column():
                    # Renamed from text_output/btn so this tab no longer
                    # silently rebinds the first tab's widget variables.
                    train_output = gr.TextArea(
                        label="输出结果",
                        lines=23,
                    )
                    train_start_btn = gr.Button("开始训练")
                    # NOTE(review): train_btn is not defined anywhere in this
                    # file — as written this raises NameError at startup;
                    # presumably it should be imported from the training
                    # module. TODO confirm.
                    train_start_btn.click(
                        train_btn,
                        inputs=[dataset_path, dataset_name, continue_train,
                                max_epochs, whisper_model_size, batch_size],
                        outputs=train_output,
                    )

    print("推理页面已开启!")
    # NOTE(review): hard-coded credentials in source; move to an env var or
    # config file before deploying. share=True exposes a public link.
    app.launch(
        share=True,
        auth=("111", "111"),
    )
