import io
import os
import random
import sys
import tempfile
import wave

import gradio as gr
import numpy as np
import requests
from gradio_client import Client

from src.gradio_demo import SadTalker

# Initialize the remote Bert-VITS2 TTS client (Hugging Face space).
client = Client("https://xzjosh-kobe-bert-vits2-2-3.hf.space/--replicas/9fhp9/")

# Index of the next canned reply (and its language) consumed by the chat handler.
arri = 0

# Detect whether we are running embedded inside the stable-diffusion webui.
try:
    import webui  # in webui

    in_webui = True
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
    # propagate; any failure to import webui simply means "standalone mode".
    in_webui = False

# Language code for each canned reply, parallel to the reply list in chat().
talkarr = ["ZH", "EN", "ZH", "ZH"]


def toggle_audio_file(choice):
    """Toggle which of two audio-source widgets is visible.

    Args:
        choice: Checkbox state; falsy shows the first widget, truthy the second.

    Returns:
        A pair of ``gr.update`` objects with complementary ``visible`` flags.
    """
    # Idiomatic truthiness test instead of comparing `== False`.
    if not choice:
        return gr.update(visible=True), gr.update(visible=False)
    return gr.update(visible=False), gr.update(visible=True)


def ref_video_fn(path_of_ref_video):
    """Tick the reference-video checkbox exactly when a video path is present."""
    has_reference = path_of_ref_video is not None
    return gr.update(value=has_reference)


# def chat(message, chat_history):
#     url = "http://localhost:8000/chatg/"+str(message)
#     response = requests.get(url)
#     bot_message =" 数字人：" +response.text
#     chat_history.append((message, bot_message))
#     return "",chat_history


def sadtalker_demo(checkpoint_path='checkpoints', config_path='src/config', warpfn=None):
    """Build the Gradio Blocks UI that makes an uploaded portrait talk.

    Pipeline per chat message: pick a canned reply, synthesize speech for it
    via the remote Bert-VITS2 client, amplify the WAV, then drive SadTalker
    to render a lip-synced video of the uploaded image.

    Args:
        checkpoint_path: Directory holding SadTalker model checkpoints.
        config_path: Directory holding SadTalker config files.
        warpfn: Unused; kept for interface compatibility with callers.

    Returns:
        The assembled ``gr.Blocks`` interface (caller runs .queue()/.launch()).
    """
    sad_talker = SadTalker(checkpoint_path, config_path, lazy_load=True)

    with gr.Blocks(analytics_enabled=False) as sadtalker_interface:
        gr.Markdown("<div align='center'>"
                    " <h1>劳大复活计划</h1>"
                    "<h3>作者：AKA凯哥</h3>"
                    "</div>")

        with gr.Row(variant='panel'):
            with gr.Tabs(elem_id="sadtalker_genearted"):
                with gr.Row():
                    # Left: generated video output and the source portrait upload.
                    gen_video = gr.Video(format="mp4", autoplay=True, include_audio=False, show_label=False,
                                         show_download_button=False).style(width=300, height=300)
                    source_image = gr.Image(source="upload", type="filepath",
                                            elem_id="img2img_image").style(width=300, height=300)
            with gr.Tabs():
                gr.Markdown("<div align='center'>"
                            "<br>"
                            "<br>"
                            " <h1>简介</h1>"
                            "<h3>为什么做这个的原因是因为，我懒得写这个简介了...</h3>"
                            "<h3>上传数字人图片即可开口说话</h3>"
                            "<h3>说话习惯是大预言模型根据数字人习惯学习的</h3>"
                            "<h3>声音也可也定制</h3>"
                            "</div>")

        with gr.Column():
            with gr.Tabs():
                # Chat transcript window.
                chatbot = gr.Chatbot().style(color_map=("green", "pink"), width=640, height=300)

            with gr.Tabs():
                with gr.Row():
                    input_chat = gr.Textbox()
                    btn_chat = gr.Button()

        def _double_volume(input_wav_path, output_wav_path):
            """Copy a WAV file to *output_wav_path* with its amplitude doubled.

            BUG FIX: the original wrote the frames back unchanged despite the
            name. Samples are now scaled by 2 with clipping; only 16-bit PCM
            is amplified (other widths are copied verbatim).
            """
            with wave.open(input_wav_path, 'rb') as wav_file:
                n_channels = wav_file.getnchannels()
                sample_width = wav_file.getsampwidth()
                framerate = wav_file.getframerate()
                n_frames = wav_file.getnframes()
                audio_data = wav_file.readframes(n_frames)
            if sample_width == 2:
                samples = np.frombuffer(audio_data, dtype=np.int16).astype(np.int32)
                samples = np.clip(samples * 2, -32768, 32767).astype(np.int16)
                audio_data = samples.tobytes()
            with wave.open(output_wav_path, 'wb') as out_wav_file:
                out_wav_file.setnchannels(n_channels)
                out_wav_file.setsampwidth(sample_width)
                out_wav_file.setframerate(framerate)
                # writeframes updates the frame count in the header itself.
                out_wav_file.writeframes(audio_data)

        def chat(message, chat_history, img):
            """Handle one chat turn: reply, synthesize speech, render video."""
            global arri
            arriarr = ["这并不好笑孩子们", "what can i say man ba out", "这并不孩子，好笑们",
                       "我现在所做的一切，都是为了追求更加完美"]

            # BUG FIX: wrap the counter so the 5th message no longer raises
            # IndexError; idx selects both the reply and its language code.
            idx = arri % len(arriarr)
            arri += 1
            response1 = arriarr[idx]

            bot_message = " 劳大：" + response1
            chat_history.append((message, bot_message))
            # Request TTS synthesis from the remote space.
            response = client.predict(
                response1,  # str  in '输入文本内容' Textbox component
                "科比",  # str (Option from: [('科比', '科比')]) in 'Speaker' Dropdown component
                0.5,  # int | float (numeric value between 0 and 1) in 'SDP Ratio' Slider component
                0.5,  # int | float (numeric value between 0.1 and 2) in 'Noise' Slider component
                0.9,  # int | float (numeric value between 0.1 and 2) in 'Noise_W' Slider component
                1,  # int | float (numeric value between 0.1 and 2) in 'Length' Slider component
                talkarr[idx % len(talkarr)],
                # str (Option from: [('ZH', 'ZH'), ('JP', 'JP'), ('EN', 'EN'), ('auto', 'auto'), ('mix', 'mix')]) in 'Language' Dropdown component
                "",  # str (filepath on your computer (or URL) of file) in 'Audio prompt' Audio component
                "Happy",  # str  in 'Text prompt' Textbox component
                "Text prompt",  # str  in 'Prompt Mode' Radio component
                "",  # str  in '辅助文本' Textbox component
                0.7,  # int | float (numeric value between 0 and 1) in 'Weight' Slider component
                fn_index=0
            )

            # response[1] is presumably the synthesized WAV path — confirm
            # against the space's API. Portable temp path replaces the old
            # hard-coded 'D:/EdgeDownload/talk.wav'.
            input_wav_path = str(response[1])
            output_wav_path = os.path.join(tempfile.gettempdir(), 'talk.wav')
            _double_volume(input_wav_path, output_wav_path)
            wav_path = sad_talker.test(img, output_wav_path)
            return "", chat_history, wav_path

        btn_chat.click(
            fn=chat,
            inputs=[input_chat, chatbot, source_image],
            outputs=[input_chat, chatbot, gen_video]
        )

    return sadtalker_interface


if __name__ == "__main__":
    # Build the interface, enable request queuing, then serve it.
    ui = sadtalker_demo()
    ui.queue()
    #ui.launch(auth=("admin", "pass1234"))
    ui.launch()
