import gradio as gr
import json
import os, sys
from request_send import load_character_emotions, read_aloud, get_redis_message, get_bot_message, \
    save_bot_message, speech_recognition, default_chat_data, default_asr_data, default_read_data, \
    change_character_list, change_batch_size, stopAudioPlay, character_info_endpoint
import time

# Make the working directory and the GPT_SoVITS package importable regardless
# of where the script is launched from.
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append(os.path.join(now_dir, "GPT_SoVITS"))

# SECURITY(review): live-looking API keys are hard-coded and committed in
# source. They must be revoked/rotated immediately and loaded from the
# environment or an untracked secrets file instead of being set here.
os.environ["OPENAI_API_KEY"] = "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"
os.environ["SERPAPI_API_KEY"] = "9200796cba4b2569d70549a440e2ee16c690401256613ec3cdeffc46edb47652"

# Redis/chat session identifier used for all backend requests in this UI.
session_id = "foo"

# Defaults, applied when config.json is missing or omits a key.
# BUG FIX: the original only bound default_batch_size / is_share inside the
# `if os.path.exists(...)` branch, so a missing config.json caused a NameError
# later (batch_size slider and app.launch(share=...)).
port = 5000
default_batch_size = 10
is_share = False

# Load overrides from config.json located next to this script.
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "config.json")
if os.path.exists(config_path):
    with open(config_path, "r", encoding="utf-8") as f:
        _config = json.load(f)
    port = _config.get("tts_port", 5000)
    default_batch_size = _config.get("batch_size", 10)
    # Accept either a JSON boolean or the strings "true"/"false" — the
    # original crashed on a real boolean because bool has no .lower().
    is_share = str(_config.get("is_share", "false")).lower() == "true"

# Build the Gradio UI: chat column (left, scale=2) and TTS controls (right).
with (gr.Blocks() as app):
    with gr.Row():
        with gr.Column(scale=2):
            # Holds the latest bot reply text that the TTS buttons read aloud.
            text_for_speech = gr.State("你好")


            def print_like_dislike(x: gr.LikeData):
                """Log the index, content, and liked-flag of a reacted chat message."""
                index, value, liked = x.index, x.value, x.liked
                print(index, value, liked)


            def user_chat(user_message, history):
                """Append the user's message as a new turn with the bot reply pending.

                Returns ("", updated_history): the empty string clears the textbox.
                """
                # Timing trace kept for latency debugging.
                print(f"input begin:{time.time()}")
                updated_history = [*history, [user_message, None]]
                return "", updated_history


            def file_upload(file, history):
                """Record a file upload as a user turn awaiting a bot reply."""
                upload_turn = [f"上传了文件{file}", None]
                return history + [upload_turn]


            def ban_input():
                """Disable the textbox, upload button, and audio-submit button
                while a request is in flight (one gr.update per control)."""
                return tuple(
                    gr.update(elem_id=eid, interactive=False)
                    for eid in ("text_input", "upload", "submit_audio")
                )


            def restore_input():
                """Re-enable the textbox, upload button, and audio-submit button
                after a request finishes (one gr.update per control)."""
                return tuple(
                    gr.update(elem_id=eid, interactive=True)
                    for eid in ("text_input", "upload", "submit_audio")
                )


            # Chat display, seeded with any history stored in Redis for this session.
            chatbot = gr.Chatbot(
                value=get_redis_message(session_id),
                elem_id="chatbot",
                bubble_full_width=False,
                avatar_images=(None, (os.path.join(os.path.dirname(__file__), "avatar.jpg"))),
                height="70vh"
            )

            # Two input modes: typed text (with file upload) and recorded audio.
            with gr.Tabs():
                tab_text = gr.Tab(label="对话框")
                tab_audio = gr.Tab(label="音频")
                with tab_text:
                    with gr.Row():
                        txt = gr.Textbox(
                            elem_id="text_input",
                            scale=4,
                            show_label=False,
                            placeholder="Enter text and press enter, or upload an image",
                            container=False,
                        )
                        # File-upload button beside the textbox.
                        btn = gr.UploadButton(elem_id="upload", label="📁", file_types=["file"])

                    # Log like/dislike reactions on chat messages to stdout.
                    chatbot.like(print_like_dislike, None, None)

                    clear = gr.Button("清除")

                with tab_audio:
                    # Microphone recording, delivered to handlers as a file path.
                    audio = gr.Audio(sources=["microphone"], type="filepath")
                    with gr.Row():
                        audio_submit = gr.Button(elem_id="audio_submit", value="Submit")
                        audio_clear = gr.Button("Clear Audio")

                # Show the chat log on the text tab; hide it on the audio tab.
                tab_text.select(lambda: gr.update(visible=True), None, [chatbot], queue=False)
                tab_audio.select(lambda: gr.update(visible=False), None, [chatbot], queue=False)

        # Right-hand column: TTS character selection, synthesis parameters, and
        # the audio output panels.
        with gr.Column():
            with gr.Row():
                with gr.Column(scale=1):
                    # Language of the text handed to the TTS backend.
                    text_language = gr.Dropdown(["多语种混合", "中文", "英文", "日文", "中英混合", "日英混合"],
                                                value="多语种混合", label="文本语言")
                    # Query the backend for available characters and their emotions.
                    cha_name, character_emotion, characters_and_emotions_ = change_character_list(
                        character_info_endpoint)
                    characters_and_emotions = gr.State(characters_and_emotions_)
                with gr.Column(scale=1):
                    # Synthesis / sampling parameters.
                    speed_factor = gr.Slider(minimum=0.25, maximum=4, value=1, label="语速", step=0.05)
                    batch_size = gr.Slider(minimum=1, maximum=35, value=default_batch_size,
                                           label="batch_size，1代表不并行，越大越快，但是越可能爆", step=1)
                    top_k = gr.Slider(minimum=1, maximum=30, value=6, label="Top K", step=1)
                    top_p = gr.Slider(minimum=0, maximum=1, value=0.8, label="Top P")
                    temperature = gr.Slider(minimum=0, maximum=1, value=0.8, label="Temperature")
                    read_mode = gr.Checkbox(label="对话模式（自动生成音频并播放）")
                    # Push the new batch size to the backend when the slider is released.
                    batch_size.release(change_batch_size, inputs=[batch_size])
            with gr.Tabs():
                # Streaming playback tab.
                with gr.Tab(label="流式音频"):
                    with gr.Row():
                        sendStreamRequest = gr.Button("发送并开始播放", variant="primary", interactive=True)
                        stopStreamButton = gr.Button("停止播放", variant="secondary")
                    with gr.Row():
                        audioStreamRecieve = gr.Audio(None, label="音频输出", interactive=False)
                # One-shot (non-streaming) request tab.
                with gr.Tab(label="请求音频"):
                    with gr.Row():
                        sendRequest = gr.Button("发送请求", variant="primary")
                        audioRecieve = gr.Audio(None, label="音频输出", type="filepath", streaming=False)

        # Text-submit pipeline: lock inputs -> echo the user turn -> stream the
        # bot reply into the chatbot -> persist the reply (which also yields the
        # text for TTS) -> unlock inputs -> synthesize/play the reply.
        # NOTE(review): booleans are passed to read_aloud as the strings
        # "True"/"False" — presumably parsed downstream; confirm in request_send.
        txt_msg = txt.submit(ban_input, None, [txt, btn, audio_submit], queue=False
                             ).then(user_chat, [txt, chatbot], [txt, chatbot], queue=False
                                    ).then(get_bot_message,
                                           [gr.State(default_chat_data), gr.State(session_id), gr.State("text"),
                                            chatbot], chatbot
                                           ).then(save_bot_message,
                                                  [gr.State(default_chat_data), gr.State(session_id), chatbot],
                                                  text_for_speech
                                                  ).then(restore_input, None, [txt, btn, audio_submit],
                                                         queue=False
                                                         ).then(read_aloud,
                                                                inputs=[
                                                                    gr.State(default_read_data),
                                                                    text_for_speech,
                                                                    cha_name,
                                                                    text_language,
                                                                    batch_size,
                                                                    speed_factor,
                                                                    top_k,
                                                                    top_p,
                                                                    temperature,
                                                                    character_emotion,
                                                                    read_mode,
                                                                    gr.State("True")
                                                                ],
                                                                outputs=[audioStreamRecieve],
                                                                )

        # File-upload pipeline: identical to the text pipeline, but the user turn
        # is the upload notice and get_bot_message is invoked in "file" mode.
        file_msg = btn.upload(ban_input, None, [txt, btn, audio_submit], queue=False
                              ).then(file_upload, [btn, chatbot], [chatbot], queue=False
                                     ).then(get_bot_message,
                                            [gr.State(default_chat_data), gr.State(session_id), gr.State("file"),
                                             chatbot], chatbot
                                            ).then(save_bot_message,
                                                   [gr.State(default_chat_data), gr.State(session_id), chatbot],
                                                   text_for_speech
                                                   ).then(restore_input, None, [txt, btn, audio_submit], queue=False
                                                          ).then(read_aloud,
                                                                 inputs=[
                                                                     gr.State(default_read_data),
                                                                     text_for_speech,
                                                                     cha_name,
                                                                     text_language,
                                                                     batch_size,
                                                                     speed_factor,
                                                                     top_k,
                                                                     top_p,
                                                                     temperature,
                                                                     character_emotion,
                                                                     read_mode,
                                                                     gr.State("True")
                                                                 ],
                                                                 outputs=[audioStreamRecieve],
                                                                 )
        # Wipe the on-screen chat history (Redis history is not cleared here).
        clear.click(lambda: None, None, chatbot, queue=False)

        # Audio pipeline: lock inputs -> transcribe the recording into the chat
        # -> get/persist the bot reply -> clear the recorder -> unlock inputs
        # -> synthesize/play the reply.
        audio_submit.click(ban_input, None, [txt, btn, audio_submit], queue=False
                           ).then(speech_recognition,
                                  [gr.State(default_asr_data), gr.State(session_id), audio, chatbot],
                                  [chatbot]
                                  ).then(get_bot_message,
                                         [gr.State(default_chat_data), gr.State(session_id), gr.State("text"),
                                          chatbot], chatbot
                                         ).then(save_bot_message,
                                                [gr.State(default_chat_data), gr.State(session_id), chatbot],
                                                text_for_speech
                                                ).then(lambda: None, None, audio, queue=False
                                                       ).then(restore_input, None, [txt, btn, audio_submit],
                                                              queue=False
                                                              ).then(read_aloud,
                                                                     inputs=[
                                                                         gr.State(default_read_data),
                                                                         text_for_speech,
                                                                         cha_name,
                                                                         text_language,
                                                                         batch_size,
                                                                         speed_factor,
                                                                         top_k,
                                                                         top_p,
                                                                         temperature,
                                                                         character_emotion,
                                                                         read_mode,
                                                                         gr.State("True")
                                                                     ],
                                                                     outputs=[audioStreamRecieve],
                                                                     )
        # Discard the current microphone recording.
        audio_clear.click(lambda: None, None, audio, queue=False)

        # One-shot TTS request: disable the button while synthesizing, write the
        # resulting file to the non-streaming audio output, then re-enable.
        sendRequest.click(lambda: gr.update(interactive=False), None, [sendRequest]).then(
            read_aloud,
            inputs=[
                gr.State(default_read_data),
                text_for_speech,
                cha_name,
                text_language,
                batch_size,
                speed_factor,
                top_k,
                top_p,
                temperature,
                character_emotion,
                # NOTE(review): flags passed as strings "False"/"False" here vs
                # "True"/"True" in the streaming path — verify meaning in read_aloud.
                gr.State("False"),
                gr.State("False")
            ],
            outputs=[audioRecieve],
        ).then(lambda: gr.update(interactive=True), None, [sendRequest])

        # Streaming TTS request: same parameters as the one-shot path but routed
        # to the streaming audio output.
        sendStreamRequest.click(
            lambda: gr.update(interactive=False), None, [sendStreamRequest]
        ).then(
            read_aloud,
            inputs=[
                gr.State(default_read_data),
                text_for_speech,
                cha_name,
                text_language,
                batch_size,
                speed_factor,
                top_k,
                top_p,
                temperature,
                character_emotion,
                gr.State("True"),
                gr.State("True")
            ],
            outputs=[audioStreamRecieve],
        ).then(
            lambda: gr.update(interactive=True), None, [sendStreamRequest]
        )
        # Ask the backend to stop any in-progress audio playback.
        stopStreamButton.click(stopAudioPlay, inputs=[])
        # Refresh the emotion dropdown when a different character is picked.
        cha_name.change(
            load_character_emotions,
            inputs=[cha_name, characters_and_emotions],
            outputs=[character_emotion],
        )

# Serve the UI. NOTE(review): the UI port is hard-coded to 9867 — the `port`
# value read from config.json is presumably the TTS backend's port, not this
# server's; confirm and consider making 9867 configurable too.
app.queue().launch(server_port=9867, show_error=True, share=is_share)
