import gradio as gr
import datetime
import base64
import numpy as np
import dashscope
import os
from dotenv import load_dotenv

# Load environment variables from a .env file (if present) into os.environ.
load_dotenv()

# DashScope API key; must be supplied via the environment or the .env file.
API_KEY = os.environ.get('API_KEY')
if not API_KEY:
    # Fail fast at import time rather than on the first API call.
    raise ValueError("请设置 API_KEY 环境变量或在 .env 文件中配置")

# Voice presets offered in the UI dropdown for the qwen3-tts-flash model.
VOICE_OPTIONS = (
    "Cherry Ethan Jennifer Ryan Katerina Nofish Elias Li Marcus "
    "Roy Peter Eric Rocky Kiki Sunny Jada Dylan"
).split()

# Text-language choices for the UI dropdown; "Auto" lets the service detect.
LANGUAGE_OPTIONS = (
    "Auto English Chinese German Italian Portuguese Spanish "
    "Japanese Korean French Russian"
).split()

def tts_interface(text, voice_name, language):
    """Synthesize `text` to speech via the qwen3-tts-flash streaming API.

    Args:
        text: Text to synthesize.
        voice_name: Voice preset name (one of VOICE_OPTIONS).
        language: Text language (one of LANGUAGE_OPTIONS; "Auto" = detect).

    Returns:
        A `(sample_rate, np.ndarray)` tuple suitable for `gr.Audio`, or
        `None` when the request failed or produced no audio.
    """
    print(f"text: {text}, {voice_name}, {language} time: {datetime.datetime.now()}\n")

    sample_rate = 24000  # presumably the service's PCM rate — matches original code; TODO confirm
    audio_frames = []

    try:
        responses = dashscope.MultiModalConversation.call(
            api_key=API_KEY,
            model="qwen3-tts-flash",
            text=text,
            voice=voice_name,
            stream=True,
            language_type=language,
        )

        for chunk in responses:
            # Some chunks (e.g. status/terminal chunks) carry no audio
            # payload; skip them instead of decoding an empty string.
            try:
                audio_string = chunk.output.audio.data
            except AttributeError:
                print(f"Skipping non-audio chunk: {chunk}")
                continue
            if not audio_string:
                continue
            wav_bytes = base64.b64decode(audio_string)
            # 16-bit signed PCM -> float32 in [-1.0, 1.0)
            audio_np = np.frombuffer(wav_bytes, dtype=np.int16).astype(np.float32) / 32768.0
            audio_frames.append(audio_np)

        if not audio_frames:
            # No decodable audio: return None so gr.Audio shows an empty
            # widget instead of receiving a (rate, None) tuple.
            return None

        return (sample_rate, np.concatenate(audio_frames))

    except Exception as e:
        # Top-level boundary for the UI callback: log and return None so
        # an API/network failure does not crash the Gradio interface.
        print(f"Error: {e}")
        return None

# Build the demo UI: text input plus voice/language selectors on the left,
# the synthesized audio player on the right.
with gr.Blocks(title="Qwen3 TTS Demo") as demo:
    gr.Markdown("# 🎤 Qwen3-TTS Demo")

    with gr.Row():
        with gr.Column():
            input_textbox = gr.Textbox(
                label="输入文本",
                placeholder="请输入要合成为语音的文本...",
                lines=4,
            )

            voice_dropdown = gr.Dropdown(
                label="选择发音人",
                choices=VOICE_OPTIONS,
                value="Cherry",
            )

            language_dropdown = gr.Dropdown(
                label="选择文本语言",
                choices=LANGUAGE_OPTIONS,
                value="Auto",
            )

            synth_button = gr.Button("生成语音", variant="primary")

        with gr.Column():
            audio_player = gr.Audio(label="生成的语音")

    # Clickable sample inputs shown below the form.
    gr.Examples(
        examples=[
            ["你好，我是通义千问，很高兴认识你。", "Cherry", "Chinese"],
            ["Hello, this is a text-to-speech demo", "Jennifer", "English"],
            ["こんにちは、これはデモです", "Cherry", "Japanese"],
        ],
        inputs=[input_textbox, voice_dropdown, language_dropdown],
        label="示例文本",
    )

    synth_button.click(
        fn=tts_interface,
        inputs=[input_textbox, voice_dropdown, language_dropdown],
        outputs=audio_player,
    )

if __name__ == "__main__":
    # NOTE(review): share=True publishes a public Gradio tunnel and
    # 0.0.0.0 binds on all interfaces — confirm this exposure is intended.
    demo.launch(share=True, server_name="0.0.0.0", server_port=7860)
