import gradio as gr
import datetime
import base64
import numpy as np
import dashscope
import os
from dotenv import load_dotenv

# Load environment variables from a local .env file (expects API_KEY).
load_dotenv()

# DashScope credential; the app refuses to start without it.
API_KEY = os.environ.get('API_KEY')
if not API_KEY:
    raise ValueError("请设置 API_KEY 环境变量或在 .env 文件中配置")

# Confirm the key is present WITHOUT echoing any part of the secret to
# logs (the previous version printed the first 10 characters of the key).
print("API_KEY 已设置")

# Voice presets offered in the UI dropdown (order defines dropdown order).
VOICE_OPTIONS = [
    "Cherry", "Ethan", "Jennifer", "Ryan", "Katerina", "Nofish",
    "Elias", "Li", "Marcus", "Roy", "Peter", "Eric", "Rocky",
    "Kiki", "Sunny", "Jada", "Dylan",
]

# Text-language hints forwarded to the API; "Auto" delegates detection
# to the service.
LANGUAGE_OPTIONS = [
    "Auto", "English", "Chinese", "German", "Italian", "Portuguese",
    "Spanish", "Japanese", "Korean", "French", "Russian",
]

def tts_interface(text, voice_name, language, sample_rate=24000):
    """Synthesize `text` to speech via DashScope's qwen3-tts-flash model.

    Args:
        text: Text to synthesize; empty/whitespace-only input is rejected.
        voice_name: Voice preset, passed as the model's `voice` argument.
        language: Language hint, passed as `language_type`.
        sample_rate: Output sample rate in Hz. Defaults to 24000
            (assumed to be qwen3-tts-flash's native rate — confirm
            against the DashScope API documentation).

    Returns:
        A `(sample_rate, float32 ndarray)` tuple — the format gr.Audio
        accepts, samples normalized to [-1, 1) — or None on any failure.
    """
    print(f"开始处理: text='{text}', voice='{voice_name}', language='{language}'")

    # Guard clause: reject missing or whitespace-only text early.
    if not text or not text.strip():
        print("错误: 文本为空")
        return None

    try:
        print("调用 dashscope API...")
        responses = dashscope.MultiModalConversation.call(
            api_key=API_KEY,
            model="qwen3-tts-flash",
            text=text.strip(),
            voice=voice_name,
            stream=True,
            language_type=language
        )

        print("API 调用成功，开始处理音频数据...")
        audio_frames = []

        for chunk in responses:
            try:
                # getattr chain replaces the nested hasattr checks: a chunk
                # missing output/audio/data is skipped instead of crashing.
                audio = getattr(getattr(chunk, 'output', None), 'audio', None)
                if audio is None or not hasattr(audio, 'data'):
                    print(f"跳过无效块: {chunk}")
                    continue
                audio_string = audio.data
                if audio_string:
                    # Payload is base64-encoded raw 16-bit PCM (no WAV
                    # header); normalize to float32 in [-1, 1) for Gradio.
                    pcm_bytes = base64.b64decode(audio_string)
                    audio_np = np.frombuffer(pcm_bytes, dtype=np.int16).astype(np.float32) / 32768.0
                    audio_frames.append(audio_np)
                    print(f"处理音频块，大小: {len(audio_np)}")
            except Exception as e:
                # Best-effort streaming: skip a malformed chunk, keep the rest.
                print(f"处理音频块时出错: {e}")
                continue

        if audio_frames:
            full_audio = np.concatenate(audio_frames)
            print(f"音频生成成功，总长度: {len(full_audio)}")
            return (sample_rate, full_audio)
        else:
            print("错误: 没有生成音频数据")
            return None

    except Exception as e:
        # Top-level boundary: log the full traceback, report failure as None.
        print(f"API 调用失败: {e}")
        import traceback
        traceback.print_exc()
        return None

# Build the Gradio UI: input controls (text, voice, language) in the left
# column, generated audio in the right. Component-creation order defines
# the rendered layout.
with gr.Blocks(title="Qwen3 TTS Demo") as demo:
    gr.Markdown("# 🎤 Qwen3-TTS Demo (调试版本)")

    with gr.Row():
        with gr.Column():
            # Text to synthesize; pre-filled with a short sample sentence.
            text_input = gr.Textbox(
                label="输入文本",
                placeholder="请输入要合成为语音的文本...",
                lines=4,
                value="你好，这是一个测试。"
            )

            # Voice preset, forwarded to tts_interface as `voice_name`.
            voice_select = gr.Dropdown(
                label="选择发音人",
                choices=VOICE_OPTIONS,
                value="Cherry"
            )

            # Language hint, forwarded to tts_interface as `language`.
            language_select = gr.Dropdown(
                label="选择文本语言",
                choices=LANGUAGE_OPTIONS,
                value="Chinese"
            )

            generate_btn = gr.Button("生成语音", variant="primary")

            # Read-only status line reflecting the outcome of the last run.
            status_output = gr.Textbox(
                label="状态信息",
                lines=3,
                interactive=False
            )

        with gr.Column():
            audio_output = gr.Audio(label="生成的语音")

    def process_with_status(text, voice_name, language):
        """Wrap tts_interface, returning (audio, status) for the two outputs.

        `audio` is the (sample_rate, ndarray) tuple produced by
        tts_interface, or None on failure; `status` is a human-readable
        success/failure message shown in status_output.
        """
        print(f"用户输入: {text}")
        result = tts_interface(text, voice_name, language)
        if result:
            status = "✅ 音频生成成功！"
        else:
            status = "❌ 音频生成失败，请检查控制台输出"
        return result, status

    # Wire the button: three input components map to tts_interface's
    # positional arguments; outputs fill the audio player and status box.
    generate_btn.click(
        fn=process_with_status,
        inputs=[text_input, voice_select, language_select],
        outputs=[audio_output, status_output]
    )

# Script entry point: start the Gradio server.
if __name__ == "__main__":
    print("启动 Gradio 应用...")
    # NOTE(review): share=True requests a public Gradio tunnel and
    # server_name="0.0.0.0" binds all interfaces — this demo is reachable
    # from outside the local host; confirm that exposure is intended.
    demo.launch(share=True, server_name="0.0.0.0", server_port=7860)
