# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Liu Yue)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import time

import gradio as gr
import random
import librosa
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append('{}/third_party/Matcha-TTS'.format(ROOT_DIR))


# Inference modes shown in the UI radio group (pretrained voice, 3s fast clone,
# cross-lingual clone, natural-language instruct control).
inference_mode_list = ['预训练音色', '3s极速复刻', '跨语种复刻', '自然语言控制']
# Per-mode usage instructions displayed in the "操作步骤" (steps) text box.
instruct_dict = {'预训练音色': '1. 选择预训练音色\n2. 点击生成音频按钮',
                 '3s极速复刻': '1. 选择prompt音频文件，或录入prompt音频，注意不超过30s，若同时提供，优先选择prompt音频文件\n2. 输入prompt文本\n3. 点击生成音频按钮',
                 '跨语种复刻': '1. 选择prompt音频文件，或录入prompt音频，注意不超过30s，若同时提供，优先选择prompt音频文件\n2. 点击生成音频按钮',
                 '自然语言控制': '1. 选择预训练音色\n2. 输入instruct文本\n3. 点击生成音频按钮'}
# (label, value) pairs for the streaming-inference radio: 否 = no, 是 = yes.
stream_mode_list = [('否', False), ('是', True)]
# Display name -> local path (or modelscope repo id) for selectable models.
# NOTE(review): name contains a typo ("avilable"); kept as-is because other
# functions in this file reference it by this exact name.
local_avilable_modes={
    'CosyVoice2-0.5B': 'iic/CosyVoice2-0.5B',
    'CosyVoice-300M': 'pretrained_models/CosyVoice-300M',
    'CosyVoice-300M-25Hz': 'pretrained_models/CosyVoice-300M-25Hz',
    'CosyVoice-300M-SFT': 'pretrained_models/CosyVoice-300M-SFT',
    'CosyVoice-300M-Instruct': 'pretrained_models/CosyVoice-300M-Instruct',

}
# Peak amplitude ceiling used by postprocess() for normalization.
max_val = 0.8
# Default HTTP port; overwritten from --port in the __main__ guard.
server_port:int=8000
# Default model path; see --model_dir in the __main__ guard.
model_dir = 'pretrained_models/CosyVoice2-0.5B'
def generate_seed():
    """Draw a fresh random seed and wrap it as a Gradio component update dict."""
    new_seed = random.randint(1, 100000000)
    return {"__type__": "update", "value": new_seed}


def postprocess(speech, top_db=60, hop_length=220, win_length=440):
    """Trim leading/trailing silence from *speech* and cap its peak amplitude.

    Uses librosa's energy-based trim, then rescales so the absolute peak does
    not exceed the module-level ``max_val``.
    NOTE(review): ``speech.abs()`` implies a torch tensor input — confirm with callers.
    """
    trimmed, _ = librosa.effects.trim(
        trimmed if False else speech,  # trim against the raw input
        top_db=top_db, frame_length=win_length, hop_length=hop_length)
    peak = trimmed.abs().max()
    if peak > max_val:
        trimmed = trimmed / peak * max_val
    return trimmed


def change_instruction(mode_checkbox_group):
    """Look up the step-by-step instructions for the selected inference mode."""
    instructions = instruct_dict[mode_checkbox_group]
    return instructions


def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text,
                   seed, stream, speed):
    """Placeholder synthesis generator that emits three progress updates.

    Yields ``(audio, progress_label)`` tuples. The audio payload is always
    ``None`` here — this is a stub that only simulates progress reporting;
    the one-second sleeps between yields mimic synthesis latency.
    All parameters are accepted (to match the Gradio wiring) but unused.

    Fix: removed the unused local ``timeout`` from the original.
    """
    yield None, 'jindu.wav 1/3'
    time.sleep(1)
    yield None, 'jindu.wav 2/3'
    time.sleep(1)
    yield None, 'jindu.wav 3/3'

def generate_srf(prompt_text, prompt_wav_upload, spk_id):
    """Register a new speaker (stub) and return the updated speaker list.

    NOTE(review): the commented-out code below sketches the intended
    implementation (building a zero-shot speaker embedding from the prompt
    audio via cosyvoice). Currently a random placeholder name is appended to
    the module-level ``sft_spk`` list instead.
    """
    # logging.info('get srf inference request')
    # if prompt_wav_upload is not None:
    #     prompt_wav = prompt_wav_upload
    #     prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
    #     cosyvoice.add_zero_shot_spk_from_wav(prompt_text,prompt_speech_16k,spk_id)
    # gr.Info('srf生成成功')
    # sft_spk=cosyvoice.list_available_spks()
    placeholder_name = f"tst_{random.randint(1,100)}"
    sft_spk.append(placeholder_name)
    print(sft_spk)
    return sft_spk

def handle_local_model(local_model_dropdown):
    """Record the selected local model's path and acknowledge in the UI.

    Fix: the original assigned ``model_dir`` to a function-local name, which
    silently had no effect on the module-level ``model_dir`` — declared it
    ``global`` so the selection is actually stored.

    Returns a placeholder speaker list (``["11"]``) for the voice dropdown;
    presumably real speaker enumeration is still to be implemented — confirm.
    """
    global model_dir
    model_dir = local_avilable_modes[local_model_dropdown]

    gr.Info('本地模型加载成功')
    return ["11"]

def dynamic_stf_component(sft_spks):
    """Build the pretrained-voice dropdown from the given speaker list.

    Falls back to a single empty choice when no speakers are available.
    """
    if not sft_spks:
        return gr.Dropdown(choices=[''], label='选择预训练音色', value='', scale=2)
    return gr.Dropdown(choices=sft_spks, label='选择预训练音色', value=sft_spks[0], scale=2)
def main():
    """Build the Gradio Blocks UI, wire up event handlers, and launch the server.

    Relies on module-level globals: ``sft_spk`` (set in the ``__main__`` guard
    before this is called), ``inference_mode_list``, ``instruct_dict``,
    ``stream_mode_list``, ``local_avilable_modes``, and ``server_port``.
    """
    with gr.Blocks() as demo:
        # Header: project blurb and links to code / pretrained models.
        gr.Markdown("### 通义实验室语音团队全新推出的生成式语音大模型，提供舒适自然的语音合成能力。")
        gr.Markdown("### 代码库 [CosyVoice](https://github.com/FunAudioLLM/CosyVoice) \
                    预训练模型 [CosyVoice-300M](https://www.modelscope.cn/models/iic/CosyVoice-300M) \
                    [CosyVoice-300M-Instruct](https://www.modelscope.cn/models/iic/CosyVoice-300M-Instruct) \
                    [CosyVoice-300M-SFT](https://www.modelscope.cn/models/iic/CosyVoice-300M-SFT)")
        gr.Markdown("#### 请输入需要合成的文本，选择推理模式，并按照提示步骤进行操作")
        # Text inputs: the prompt transcript and the text to synthesize.
        prompt_text = gr.Textbox(label="输入prompt文本", lines=1, placeholder="请输入prompt文本，需与prompt音频内容一致，暂时不支持自动识别...", value='')
        tts_text = gr.Textbox(label="输入合成文本", lines=1, value="我是通义实验室语音团队全新推出的生成式语音大模型，提供舒适自然的语音合成能力。")
        # Control row: mode selector, per-mode instructions, voice/model pickers,
        # streaming toggle, speed, and the random-seed widget.
        with gr.Row():
            mode_checkbox_group = gr.Radio(choices=inference_mode_list, label='选择推理模式', value=inference_mode_list[0])
            instruction_text = gr.Text(label="操作步骤", value=instruct_dict[inference_mode_list[0]], scale=5)
            # NOTE(review): ``sft_spk`` must be non-empty here or indexing [0] raises.
            sft_dropdown =gr.Dropdown(choices=sft_spk, label='选择预训练音色', value=sft_spk[0], scale=2)
            local_model_dropdown = gr.Dropdown(choices=list(local_avilable_modes.keys()), label='选择本地模型', value=list(local_avilable_modes.keys())[0], scale=2)
            stream = gr.Radio(choices=stream_mode_list, label='是否流式推理', value=stream_mode_list[0][1])
            speed = gr.Number(value=1, label="速度调节(仅支持非流式推理)", minimum=0.5, maximum=2.0, step=0.1)
            with gr.Column(scale=2):
                seed_button = gr.Button(value="\U0001F3B2")  # 🎲 dice button re-rolls the seed
                seed = gr.Number(value=0, label="随机推理种子")
        # Prompt audio can come from an uploaded file or a microphone recording.
        with gr.Row():
            prompt_wav_upload = gr.Audio(sources='upload', type='filepath',
                                         label='选择prompt音频文件，注意采样率不低于16khz')
            prompt_wav_record = gr.Audio(sources='microphone', type='filepath', label='录制prompt音频文件')

        # "Generate sft" panel: registers a new speaker and refreshes the voice dropdown.
        with gr.Row():
            pre_srf_spk_text = gr.Textbox(label="预训练sft名称", lines=1, placeholder="请输入拼音.", value='')
            pre_srf_button = gr.Button("生成sft")
            @pre_srf_button.click(
                inputs=[prompt_text, prompt_wav_upload, pre_srf_spk_text],
                outputs=[sft_dropdown]
            )
            def _generate_srf(prompttext, promptwav_upload, presrfspktext):
                # Delegate to generate_srf, then rebuild the dropdown with the new list.
                all_srfs=generate_srf(prompttext, promptwav_upload, presrfspktext)
                return gr.Dropdown(choices=all_srfs, label='选择预训练音色', value=all_srfs[0], scale=2)

        instruct_text = gr.Textbox(label="输入instruct文本", lines=1, placeholder="请输入instruct文本.", value='')
        generate_button = gr.Button("生成音频")
        # Output row: progress label + streaming audio player.
        with gr.Row():
            process_label = gr.Label(label="当前进度")
            audio_output = gr.Audio(label="合成音频", autoplay=True, streaming=True)
        # Event wiring: seed re-roll, synthesis, mode-instruction refresh, model switch.
        seed_button.click(generate_seed, inputs=[], outputs=seed)
        generate_button.click(generate_audio,
                              inputs=[tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text,
                                      seed, stream, speed],
                              outputs=[audio_output,process_label])
        mode_checkbox_group.change(fn=change_instruction, inputs=[mode_checkbox_group], outputs=[instruction_text])
        local_model_dropdown.change(fn=handle_local_model, inputs=[local_model_dropdown], outputs=[sft_dropdown])
    # Bounded request queue; listen on all interfaces at the configured port.
    demo.queue(max_size=4, default_concurrency_limit=2)
    demo.launch(server_name='0.0.0.0', server_port=server_port)


if __name__ == '__main__':
    # Parse CLI options, seed the module-level globals, and start the demo.
    parser = argparse.ArgumentParser()
    parser.add_argument('--port',
                        type=int,
                        default=8000)
    parser.add_argument('--model_dir',
                        type=str,
                        default='iic/CosyVoice2-0.5B',
                        help='local path or modelscope repo id')
    args = parser.parse_args()
    server_port = args.port
    # Fix: the original parsed --model_dir but never used it; record it in the
    # module-level ``model_dir`` so the selection takes effect.
    model_dir = args.model_dir
    # Initial placeholder speaker list read by main() when building the UI.
    sft_spk = ['1.', '2']
    main()
