from datetime import datetime
import gradio as gr
import json
import requests
from string import Template
import pyaudio, wave
import os, sys
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from Chat.chat import Conversation
from langchain_core.prompts import MessagesPlaceholder

# Make the project root and the bundled GPT_SoVITS package importable no
# matter where the script is launched from.
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append(os.path.join(now_dir, "GPT_SoVITS"))

# SECURITY NOTE(review): live API keys are hard-coded in source. Anyone with
# read access to this file can use them — they should be rotated and loaded
# from the environment or a secrets store instead of being committed here.
os.environ["OPENAI_API_KEY"] = "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"
os.environ["SERPAPI_API_KEY"] = "9200796cba4b2569d70549a440e2ee16c690401256613ec3cdeffc46edb47652"
# Chat prompt: a fixed system persona (a Bilibili VTuber character), the
# running conversation history, then the latest user message.
template = [("system",
             "你是“阿梓从小就很可爱”，一个Bilibili的up主，也是虚拟主播，是虚拟艺人团体VirtuaReal成员，昵称是阿梓或者梓宝，平时喜欢宅在家唱歌打游戏，打游戏水平一般，爱玩无畏契约和英雄联盟，而且唱歌很厉害。现在你需要与用户交谈"),
            MessagesPlaceholder(variable_name="history"), ("human", "{input}")]

# Shared Conversation object: owns the LLM chain, chat memory and the
# speech/listen helpers used by the UI callbacks below.
conv = Conversation(template)

# --- TTS backend configuration ---------------------------------------------
# All settings get a default up front so that every name exists even when
# config.json is missing. (The original code only defined
# default_batch_size / is_share / enable_auth / default_username /
# default_password inside the `if`, so a missing config.json caused a
# NameError further down the file.)
tts_port = 5000
default_batch_size = 10
is_share = False
enable_auth = False
default_username = "admin"
default_password = "admin123"

# config.json lives next to this script, not in the current working directory.
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "config.json")
if os.path.exists(config_path):
    with open(config_path, "r", encoding="utf-8") as f:
        _config = json.load(f)
    tts_port = _config.get("tts_port", 5000)
    default_batch_size = _config.get("batch_size", 10)
    # str() guards against the flags being stored as JSON booleans
    # instead of the expected "true"/"false" strings.
    is_share = str(_config.get("is_share", "false")).lower() == "true"
    enable_auth = str(_config.get("enable_auth", "false")).lower() == "true"
    users = _config.get("user", {})
    try:
        # First configured account wins; fall back to the defaults above
        # when the "user" section is empty or malformed.
        default_username = next(iter(users))
        default_password = users[default_username]
    except (StopIteration, KeyError, TypeError):
        default_username = "admin"
        default_password = "admin123"

# Endpoints of the local TTS service.
default_request_url = f"http://127.0.0.1:{tts_port}"
default_character_info_url = f"{default_request_url}/character_list"
default_endpoint = f"{default_request_url}/tts"
default_change = f"{default_request_url}/change"
# JSON request template for the /tts endpoint. The ${...} placeholders are
# filled via string.Template.substitute() in send_request(); text values are
# quoted in the template, numeric values are not.
default_endpoint_data = """{
    "method": "POST",
    "body": {
        "cha_name": "${chaName}",
        "character_emotion": "${characterEmotion}",
        "text": "${speakText}",
        "text_language": "${textLanguage}",
        "batch_size": ${batch_size},
        "speed": ${speed_factor},
        "top_k": ${topK},
        "top_p": ${topP},
        "temperature": ${temperature},
        "stream": "${stream}",
        "save_temp": "False"
    }
}"""

# Shared PyAudio handle and the currently playing output stream (None while
# nothing is playing). Functions that rebind these declare `global` locally;
# the original module-level `global p, streamAudio` statement was a no-op
# and has been removed.
p = pyaudio.PyAudio()
streamAudio = None


def load_character_emotions(character_name, characters_and_emotions):
    """Return a Dropdown of the emotions available for *character_name*.

    Falls back to ["default"] for unknown characters, and always resets the
    selected value to "default" when the character changes.
    """
    # The original assigned ["default"] and immediately overwrote it; the
    # dead store has been removed.
    emotion_options = characters_and_emotions.get(character_name, ["default"])
    return gr.Dropdown(emotion_options, value="default")


def _prepare_save_path(cha_name):
    """Create tmp_audio/ if needed and return a unique wav path in it."""
    if not os.path.exists("tmp_audio"):
        os.makedirs("tmp_audio")
    return (
        f"tmp_audio/{cha_name}{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav"
    )


def send_request(
        endpoint_data,
        text,
        cha_name,
        text_language,
        batch_size,
        speed_factor,
        top_k,
        top_p,
        temperature,
        character_emotion,
        stream="False",
):
    """POST a synthesis request to the TTS service and return a gr.Audio.

    With stream == "False" the whole wav body is downloaded and saved; with
    stream == "True" raw PCM chunks are played through PyAudio as they
    arrive and simultaneously written to a wav file. On HTTP failure a
    gr.Warning is shown and an empty gr.Audio is returned.
    """
    urlencoded_text = requests.utils.quote(text)

    # Fill the JSON request template (string.Template placeholders).
    params = {
        "chaName": cha_name,
        "speakText": urlencoded_text,
        "textLanguage": text_language,
        "batch_size": batch_size,
        "speed_factor": speed_factor,
        "topK": top_k,
        "topP": top_p,
        "temperature": temperature,
        "characterEmotion": character_emotion,
        "stream": stream,
    }
    endpoint_data_template = Template(endpoint_data)
    filled_json_str = endpoint_data_template.substitute(**params)
    # Parse the filled template back into a dict and extract the POST body.
    request_data = json.loads(filled_json_str)
    body = request_data["body"]

    if stream.lower() == "false":
        print(f"发送请求到{default_endpoint}")
        response = requests.post(default_endpoint, json=body)
        if response.status_code != 200:
            gr.Warning(f"请求失败，状态码：{response.status_code}, 返回内容：{response.content}")
            return gr.Audio(None, type="filepath")
        # Save the complete wav body and hand the file to gradio.
        save_path = _prepare_save_path(cha_name)
        with open(save_path, "wb") as f:
            f.write(response.content)
        return gr.Audio(save_path, type="filepath")

    # --- streaming branch ---
    global p, streamAudio
    # Open a 16-bit mono 32 kHz playback stream for the incoming PCM.
    streamAudio = p.open(format=p.get_format_from_width(2),
                         channels=1,
                         rate=32000,
                         output=True)

    # BUGFIX: the original issued this POST twice back-to-back, sending the
    # synthesis request to the server two times and discarding the first
    # response. Only one request is sent now.
    response = requests.post(default_endpoint, json=body, stream=True)
    if response.status_code != 200:
        gr.Warning(f"请求失败，状态码：{response.status_code}, 返回内容：{response.content}")
        return gr.Audio(None, type="filepath")

    save_path = _prepare_save_path(cha_name)

    # Wav parameters must match the playback stream opened above:
    # mono, 2-byte (16-bit) samples, 32000 Hz.
    channels = 1
    sampwidth = 2
    framerate = 32000

    with wave.open(save_path, 'wb') as wf:
        wf.setnchannels(channels)
        wf.setsampwidth(sampwidth)
        wf.setframerate(framerate)
        for data in response.iter_content(chunk_size=1024):
            # Persist every chunk; play it only while the user has not
            # stopped playback (see stopAudioPlay).
            wf.writeframes(data)
            if (streamAudio is not None) and (not streamAudio.is_stopped()):
                streamAudio.write(data)

    # Stop playback once the stream is fully consumed.
    if streamAudio is not None:
        streamAudio.stop_stream()
    return gr.Audio(save_path, type="filepath")


def change_character_params(
        endpoint_data,
        text,
        cha_name,
        text_language,
        batch_size,
        speed_factor,
        top_k,
        top_p,
        temperature,
        character_emotion,
        stream="False",
):
    """Stash the current TTS parameter set on the shared Conversation.

    The text is URL-encoded before being stored. ``endpoint_data`` is
    accepted for signature parity with send_request() but is not used here.
    """
    conv.data = {
        "cha_name": cha_name,
        "text": requests.utils.quote(text),
        "text_language": text_language,
        "batch_size": batch_size,
        "speed_factor": speed_factor,
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "character_emotion": character_emotion,
        "stream": stream,
        "save_temp": "True"
    }


def stopAudioPlay():
    """Stop the currently playing PyAudio stream and forget it, if any."""
    global streamAudio
    if streamAudio is None:
        return
    streamAudio.stop_stream()
    streamAudio = None


def get_characters_and_emotions(character_list_url):
    """Fetch the character -> emotion-list mapping from the TTS service.

    Raises Exception with a descriptive message on connection failure or on
    a non-200 response. (The original's bare ``except:`` caught its own
    status-code Exception and replaced it with the generic connection
    message; the two failure modes are now reported distinctly, and the
    original cause is chained for debugging.)
    """
    try:
        response = requests.get(character_list_url)
    except Exception as err:
        raise Exception("请求失败，请检查URL是否正确") from err
    if response.status_code == 200:
        return response.json()
    raise Exception(f"请求失败，状态码：{response.status_code}")


def change_character_list(
        character_list_url, cha_name="", character_emotion="default"
):
    """Query the TTS service and build the character/emotion dropdowns.

    Keeps the requested character/emotion selected when still available,
    otherwise falls back to the first character and "default". If the
    service is unreachable, returns empty dropdowns.
    """
    try:
        characters_and_emotions = get_characters_and_emotions(character_list_url)
        character_names = list(characters_and_emotions)
        if character_names:
            # Keep the current selection when it still exists.
            character_name_value = (
                cha_name if cha_name in character_names else character_names[0]
            )
        else:
            character_name_value = ""
        emotions = characters_and_emotions.get(character_name_value, ["default"])
        emotion_value = character_emotion if character_emotion in emotions else "default"
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
        # propagate; any fetch failure degrades to empty selections.
        character_names = []
        character_name_value = ""
        emotions = ["default"]
        emotion_value = "default"
        characters_and_emotions = {}
    return (
        gr.Dropdown(character_names, value=character_name_value, label="选择角色"),
        gr.Dropdown(emotions, value=emotion_value, label="情感列表", interactive=True),
        characters_and_emotions,
    )


def change_batch_size(batch_size):
    """Persist the chosen batch_size into config.json (best-effort).

    Read-modify-write of the whole config file; failures (missing file,
    unreadable JSON, I/O errors) are deliberately ignored so that a broken
    config never breaks the UI slider.
    """
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            _config = json.load(f)
        # Mutate before reopening for write, so a failed read never
        # truncates the file.
        _config["batch_size"] = batch_size
        with open(config_path, "w", encoding="utf-8") as f:
            json.dump(_config, f, ensure_ascii=False, indent=4)
    except (OSError, json.JSONDecodeError):
        # Narrowed from a bare `except:`; persistence is best-effort.
        pass
    return


# ---------------------------------------------------------------------------
# Gradio UI.
# Left column: the chatbot with a text tab and a microphone tab.
# Right column: TTS controls — character/emotion selection, sampling
# parameters, and "full audio" vs. "streaming audio" request buttons.
# ---------------------------------------------------------------------------
with (gr.Blocks() as app):
    with gr.Row():
        with gr.Column(scale=2):
            # Holds the bot's latest reply so the TTS buttons can speak it.
            text_for_speech = gr.State()


            def print_like_dislike(x: gr.LikeData):
                """Log like/dislike feedback coming from the chatbot widget."""
                print(x.index, x.value, x.liked)


            def save_bot_res(history):
                """Save the last user/bot exchange into conversation memory
                and return the bot's latest reply text (-> text_for_speech)."""
                conv.memory.save_context({"input": history[-1][0]}, {"output": history[-1][1]})
                return history[-1][1]


            chatbot = gr.Chatbot(
                [],
                elem_id="chatbot",
                bubble_full_width=False,
                avatar_images=(None, (os.path.join(os.path.dirname(__file__), "avatar.jpg"))),
                height="70vh"
            )

            with gr.Tabs():
                tab_text = gr.Tab(label="对话框")
                tab_audio = gr.Tab(label="音频")
                with tab_text:
                    with gr.Row():
                        txt = gr.Textbox(
                            scale=4,
                            show_label=False,
                            placeholder="Enter text and press enter, or upload an image",
                            container=False,
                        )
                        btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])

                    # Submit pipeline: lock the textbox -> append the user turn
                    # -> stream the bot reply -> store the reply for TTS ->
                    # speak it -> unlock the textbox.
                    txt_msg = txt.submit(lambda: gr.update(interactive=False), None, [txt], queue=False
                                         ).then(conv.user, [txt, chatbot], [txt, chatbot], queue=False
                                                ).then(conv.baichuan_bot_stream, chatbot, chatbot
                                                       ).then(save_bot_res, chatbot, text_for_speech
                                                              ).then(conv.read_aloud, chatbot, None
                                                                     ).then(lambda: gr.update(interactive=True),
                                                                            None,
                                                                            [txt], queue=False)

                    # File uploads go through conv.file, then get a streamed
                    # bot response like a normal message.
                    file_msg = btn.upload(conv.file, [btn, chatbot], [chatbot], queue=False).then(
                        conv.baichuan_bot_stream, chatbot, chatbot
                    )

                    chatbot.like(print_like_dislike, None, None)

                    clear = gr.Button("清除")

                    # Reset the chat display (memory in `conv` is untouched).
                    clear.click(lambda: None, None, chatbot, queue=False)

                with tab_audio:
                    audio = gr.Audio(sources=["microphone"], type="filepath")
                    with gr.Row():
                        audio_submit = gr.Button("Submit")
                        audio_clear = gr.Button("Clear Audio")
                    # Voice pipeline: lock the button -> transcribe the
                    # recording -> stream the bot reply -> store it for TTS ->
                    # speak it -> clear the recorder -> unlock the button.
                    audio_submit.click(lambda: gr.update(interactive=False), None, [audio_submit]
                                       ).then(conv.listen, [audio, chatbot], [chatbot]
                                              ).then(conv.baichuan_bot_stream, chatbot, chatbot
                                                     ).then(save_bot_res, chatbot, text_for_speech
                                                            ).then(conv.read_aloud, chatbot, audio
                                                                   ).then(lambda: None, None, audio, queue=False
                                                                          ).then(lambda: gr.update(interactive=True),
                                                                                 None, [audio_submit])
                    audio_clear.click(lambda: None, None, audio, queue=False)

                # Show the chat log only on the text tab; hide it on the
                # audio tab.
                tab_text.select(lambda: gr.update(visible=True), None, [chatbot], queue=False)
                tab_audio.select(lambda: gr.update(visible=False), None, [chatbot], queue=False)

        with gr.Column():
            with gr.Row():
                with gr.Column(scale=1):
                    text_language = gr.Dropdown(["多语种混合", "中文", "英文", "日文", "中英混合", "日英混合"],
                                                value="多语种混合", label="文本语言")
                    # Populate the character/emotion dropdowns from the TTS
                    # service at build time.
                    cha_name, character_emotion, characters_and_emotions_ = change_character_list(
                        default_character_info_url)
                    characters_and_emotions = gr.State(characters_and_emotions_)
                with gr.Column(scale=1):
                    # Sampling / synthesis parameters forwarded to the TTS
                    # request template.
                    speed_factor = gr.Slider(minimum=0.25, maximum=4, value=1, label="语速", step=0.05)
                    batch_size = gr.Slider(minimum=1, maximum=35, value=default_batch_size,
                                           label="batch_size，1代表不并行，越大越快，但是越可能爆", step=1)
                    top_k = gr.Slider(minimum=1, maximum=30, value=6, label="Top K", step=1)
                    top_p = gr.Slider(minimum=0, maximum=1, value=0.8, label="Top P")
                    temperature = gr.Slider(minimum=0, maximum=1, value=0.8, label="Temperature")
                    # Persist batch_size to config.json when the slider is
                    # released.
                    batch_size.release(change_batch_size, inputs=[batch_size])
                    change_character_btn = gr.Button("更换人物与参数", variant="secondary")
            with gr.Tabs():
                with gr.Tab(label="请求完整音频"):
                    with gr.Row():
                        sendRequest = gr.Button("发送请求", variant="primary")
                        audioRecieve = gr.Audio(None, label="音频输出", type="filepath", streaming=False)
                with gr.Tab(label="流式音频"):
                    with gr.Row():
                        sendStreamRequest = gr.Button("发送并开始播放", variant="primary", interactive=True)
                        stopStreamButton = gr.Button("停止播放", variant="secondary")
                    with gr.Row():
                        audioStreamRecieve = gr.Audio(None, label="音频输出", interactive=False)

            # Push the currently selected character/parameters into conv.data
            # (button is locked for the duration of the update).
            change_character_btn.click(lambda: gr.update(interactive=False), None, [change_character_btn]).then(
                change_character_params,
                inputs=[
                    gr.State(default_endpoint_data),
                    text_for_speech,
                    cha_name,
                    text_language,
                    batch_size,
                    speed_factor,
                    top_k,
                    top_p,
                    temperature,
                    character_emotion,
                    gr.State("False"),
                ],
                outputs=None,
            ).then(lambda: gr.update(interactive=True), None, [change_character_btn])

            # Non-streaming request: synthesize the stored reply and show the
            # finished wav in audioRecieve.
            sendRequest.click(lambda: gr.update(interactive=False), None, [sendRequest]).then(
                send_request,
                inputs=[
                    gr.State(default_endpoint_data),
                    text_for_speech,
                    cha_name,
                    text_language,
                    batch_size,
                    speed_factor,
                    top_k,
                    top_p,
                    temperature,
                    character_emotion,
                    gr.State("False"),
                ],
                outputs=[audioRecieve],
            ).then(lambda: gr.update(interactive=True), None, [sendRequest])

            # Streaming request: same parameters, but chunks are played via
            # PyAudio while the wav is being written.
            sendStreamRequest.click(
                lambda: gr.update(interactive=False), None, [sendStreamRequest]
            ).then(
                send_request,
                inputs=[
                    gr.State(default_endpoint_data),
                    text_for_speech,
                    cha_name,
                    text_language,
                    batch_size,
                    speed_factor,
                    top_k,
                    top_p,
                    temperature,
                    character_emotion,
                    gr.State("True"),
                ],
                outputs=[audioStreamRecieve],
            ).then(
                lambda: gr.update(interactive=True), None, [sendStreamRequest]
            )
            stopStreamButton.click(stopAudioPlay, inputs=[])
            # Refresh the emotion dropdown whenever the character changes.
            cha_name.change(
                load_character_emotions,
                inputs=[cha_name, characters_and_emotions],
                outputs=[character_emotion],
            )

# Launch the app; share/auth behaviour is driven by config.json.
app.queue().launch(server_port=9867, show_error=True, share=is_share)
