import spaces
import os
import random
import argparse

import torch
import gradio as gr
import numpy as np

import ChatTTS

# Load the ChatTTS synthesis model once at import time (weights are cached
# by ChatTTS itself).
print("loading ChatTTS model...")
chat = ChatTTS.Chat()
chat.load_models()
# print("loading Translate model...")
# import Translate



from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Local Helsinki-NLP opus-mt checkpoints for Chinese<->English translation.
# NOTE(review): paths assume the checkpoints were pre-downloaded under
# hf-models/ — confirm against the deployment layout.
model_name_zh_en = "hf-models/opus-mt-zh-en"
model_name_en_zh = "hf-models/opus-mt-en-zh"

# zh -> en translation pipeline pieces.
tokenizer_zh_en = AutoTokenizer.from_pretrained(model_name_zh_en)
model_zh_en = AutoModelForSeq2SeqLM.from_pretrained(model_name_zh_en)

# en -> zh translation pipeline pieces.
tokenizer_en_zh = AutoTokenizer.from_pretrained(model_name_en_zh)
model_en_zh = AutoModelForSeq2SeqLM.from_pretrained(model_name_en_zh)



def fy(data, tokenizer, model):
    """Translate a single string with a seq2seq tokenizer/model pair.

    The text is tokenized as a one-item batch, run through
    ``model.generate``, and the first decoded hypothesis is returned.
    """
    batch = tokenizer([data], return_tensors="pt")
    generated = model.generate(**batch)
    decoded = tokenizer.batch_decode(generated, skip_special_tokens=True)
    return decoded[0]

def fy_en_zh(data):
    """Translate English text to Chinese via the preloaded opus-mt pair."""
    return fy(data, tokenizer_en_zh, model_en_zh)

def fy_zh_en(data):
    """Translate Chinese text to English via the preloaded opus-mt pair."""
    return fy(data, tokenizer_zh_en, model_zh_en)



# Languages offered in both UI dropdowns.
ALL_LIST = ["汉语","english"]
# Dropdown labels: "please select input language" / "please select output language".
TEXT_FY_INPUT = "请选择输入语言"
TEXT_FY_OUTPUT = "请选择输出语言"

# Maps a (source_language, target_language) pair to its translation function.
DICT_FY_FUNC = {
    ("汉语","english"): fy_zh_en,
    ("english","汉语"): fy_en_zh,
}

def sl_get_remain(all: list, sl: str) -> list:
    """Return a copy of *all* with the currently selected language removed.

    Bug fix (annotation only): gradio event handlers receive the dropdown's
    selected *value* — a string — not the component, so ``sl`` is annotated
    ``str`` instead of ``gr.Dropdown``.  Parameter names are kept as-is
    (``all`` shadows the builtin, but renaming would break keyword callers).

    Args:
        all: candidate language names.
        sl: currently selected language value; coerced via ``str`` in case a
            non-string sneaks through.

    Returns:
        A new list containing every entry of *all* except ``str(sl)``; the
        input list is never mutated.
    """
    remaining = all.copy()
    if str(sl) in remaining:
        remaining.remove(str(sl))
    return remaining

def sl1_update(sl1):
    """Rebuild the output-language dropdown when the input language changes.

    Fixes: removes a leftover debug ``print`` and delegates the "all minus
    selected" computation to :func:`sl_get_remain` instead of duplicating it.

    Args:
        sl1: newly selected input-language value.

    Returns:
        A replacement ``gr.Dropdown`` restricted to the remaining languages,
        with its value reset to the first remaining choice.
    """
    remaining = sl_get_remain(ALL_LIST, sl1)
    return gr.Dropdown(label=TEXT_FY_OUTPUT, choices=remaining, interactive=True, value=remaining[0])

def func_fy(data, sl1, sl2):
    """Translate *data* according to the selected (input, output) pair.

    Unregistered pairs fall back to Chinese→English.
    """
    translate = DICT_FY_FUNC.get((sl1, sl2), fy_zh_en)
    return translate(data)

def sl_swap(sl1, sl2, text1, text2):
    """Swap the input/output language dropdowns and their text boxes.

    Bug fix: the output dropdown's choices must exclude the *new* input
    language (``sl2``) — the original excluded ``sl1``, which is exactly the
    value being assigned to the output dropdown, leaving that value absent
    from its own choices list.

    Args:
        sl1, sl2: current input/output language values.
        text1, text2: current input/output text-box contents.

    Returns:
        Updated (sl1, sl2, input text, output text) components/values.
    """
    _sl1 = gr.Dropdown(label=TEXT_FY_INPUT, interactive=True, choices=ALL_LIST, value=sl2)
    _sl2 = gr.Dropdown(label=TEXT_FY_OUTPUT, interactive=True, choices=sl_get_remain(ALL_LIST, sl2), value=sl1)
    return _sl1, _sl2, text2, text1

# Number of visible lines in the translation text boxes.
TEXTBOX_LINES = 4






def generate_seed():
    """Roll a fresh random seed in [1, 100000000] as a gradio update dict."""
    return {"__type__": "update", "value": random.randint(1, 100000000)}

@spaces.GPU
def generate_audio(text, temperature, top_P, top_K, audio_seed_input, text_seed_input, refine_text_flag):
    """Synthesize speech for *text* with ChatTTS.

    Args:
        text: input text (str; becomes a list after the refinement pass).
        temperature: sampling temperature for the audio code model.
        top_P: nucleus-sampling threshold.
        top_K: top-k sampling cutoff.
        audio_seed_input: seed fixing the random speaker embedding (voice).
        text_seed_input: seed fixing the text-refinement sampling.
        refine_text_flag: when True, run ChatTTS's refine-text pass first.

    Returns:
        ``[(sample_rate, audio_data), text_data]`` — a (rate, ndarray) tuple
        for ``gr.Audio`` plus the (possibly refined) text for display.
    """

    # Seed, then draw the speaker embedding: the same audio seed always
    # reproduces the same voice.
    # NOTE(review): 768 is assumed to be ChatTTS's speaker-embedding size —
    # confirm against the installed ChatTTS version.
    torch.manual_seed(audio_seed_input)
    rand_spk = torch.randn(768)
    params_infer_code = {
        'spk_emb': rand_spk, 
        'temperature': temperature,
        'top_P': top_P,
        'top_K': top_K,
        }
    params_refine_text = {'prompt': '[oral_2][laugh_0][break_6]'}
    
    # Re-seed so the text-refinement sampling is reproducible independently
    # of the speaker-embedding draw above.
    torch.manual_seed(text_seed_input)

    if refine_text_flag:
        # First pass: refine the text only (inserts oral/laugh/break tokens).
        text = chat.infer(text, 
                          skip_refine_text=False,
                          refine_text_only=True,
                          params_refine_text=params_refine_text,
                          params_infer_code=params_infer_code
                          )
    
    # Second pass: synthesize audio from the (possibly refined) text.
    wav = chat.infer(text, 
                     skip_refine_text=True, 
                     params_refine_text=params_refine_text, 
                     params_infer_code=params_infer_code
                     )
    
    audio_data = np.array(wav[0]).flatten()
    sample_rate = 24000  # ChatTTS outputs 24 kHz audio
    # chat.infer returns a list when refining; unwrap for display.
    text_data = text[0] if isinstance(text, list) else text

    return [(sample_rate, audio_data), text_data]


with gr.Blocks() as demo:
    # --- Translation section ---
    gr.Markdown("# 文本翻译")

    with gr.Row():
        fy_text_input = gr.Textbox(label="输入文本", lines=TEXTBOX_LINES, placeholder="请在这里输入文本", value="")
        fy_text_output = gr.Textbox(label="输出文本", lines=TEXTBOX_LINES)
    with gr.Row():
        # Bug fix: the original passed value=0, which is not one of the
        # string choices; default to the first language instead.
        sl1 = gr.Dropdown(label=TEXT_FY_INPUT, choices=ALL_LIST, value=ALL_LIST[0], scale=4)
        # Derive the output choices from the default input language instead
        # of hard-coding ["english"], so extending ALL_LIST stays consistent.
        _remaining = sl_get_remain(ALL_LIST, ALL_LIST[0])
        sl2 = gr.Dropdown(label=TEXT_FY_OUTPUT, choices=_remaining, value=_remaining[0], scale=4)
        translate_swap = gr.Button("交换", scale=2)
    translate_button = gr.Button("翻译")

    translate_button.click(func_fy,
                           inputs=[fy_text_input, sl1, sl2],
                           outputs=[fy_text_output])
    translate_swap.click(sl_swap,
                         inputs=[sl1, sl2, fy_text_input, fy_text_output],
                         outputs=[sl1, sl2, fy_text_input, fy_text_output])
    # Keep the output dropdown consistent whenever the input language changes.
    sl1.change(sl1_update, inputs=[sl1], outputs=[sl2])

    # --- Text-to-speech section ---
    gr.Markdown("# 语音播放")

    with gr.Row():
        refine_text_checkbox = gr.Checkbox(label="Refine text", value=True)
        temperature_slider = gr.Slider(minimum=0.00001, maximum=1.0, step=0.00001, value=0.3, label="Audio temperature")
        top_p_slider = gr.Slider(minimum=0.1, maximum=0.9, step=0.05, value=0.7, label="top_P")
        top_k_slider = gr.Slider(minimum=1, maximum=20, step=1, value=20, label="top_K")

    with gr.Row():
        audio_seed_input = gr.Number(value=42, label="Audio Seed")
        generate_audio_seed = gr.Button("\U0001F3B2")  # 🎲 re-roll seed
        text_seed_input = gr.Number(value=42, label="Text Seed")
        generate_text_seed = gr.Button("\U0001F3B2")

    generate_button = gr.Button("生成语音")

    text_output = gr.Textbox(label="Output Text", interactive=False)
    audio_output = gr.Audio(label="Output Audio")

    generate_audio_seed.click(generate_seed,
                              inputs=[],
                              outputs=audio_seed_input)

    generate_text_seed.click(generate_seed,
                             inputs=[],
                             outputs=text_seed_input)

    # TTS reads the *translated* text box as its input.
    generate_button.click(generate_audio,
                          inputs=[fy_text_output, temperature_slider, top_p_slider, top_k_slider, audio_seed_input, text_seed_input, refine_text_checkbox],
                          outputs=[audio_output, text_output])

if __name__ == '__main__':
    # Parse CLI args only when run as a script, so importing this module
    # does not consume a foreign sys.argv.
    parser = argparse.ArgumentParser(description='ChatTTS demo Launch')
    parser.add_argument('--server_name', type=str, default='0.0.0.0', help='Server name')
    parser.add_argument('--server_port', type=int, default=8080, help='Server port')
    args = parser.parse_args()
    # Bug fix: the parsed arguments were previously ignored — demo.launch()
    # was called with no server_name/server_port.
    demo.launch(server_name=args.server_name, server_port=args.server_port)