import os
from openai import OpenAI
from dotenv import load_dotenv, find_dotenv
import base64
import numpy as np
import soundfile as sf
from pydantic import BaseModel

# Load environment variables from the nearest .env file; override=True lets the
# .env values take precedence over variables already set in the process env.
load_dotenv(find_dotenv(), override=True)
# NOTE(review): these module-level values duplicate the fallbacks inside
# QwenOmniTurbo.__init__ and are not referenced anywhere else in this file —
# consider removing them or having the class read them.
api_key = os.getenv("DASHSCOPE_API_KEY")
base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"


class QwenOmniTurbo:
    """Client wrapper for the Qwen-Omni-Turbo multimodal model served through
    DashScope's OpenAI-compatible endpoint.

    Responses are always streamed (``stream=True``); text deltas are echoed to
    stdout as they arrive and any returned audio can be saved to disk.
    """

    def __init__(self, api_key=None, base_url=None):
        """Initialize the OpenAI-compatible client.

        Args:
            api_key: DashScope API key. Falls back to the DASHSCOPE_API_KEY
                environment variable.
            base_url: Endpoint URL. Falls back to DashScope's
                OpenAI-compatible base URL.

        Raises:
            ValueError: If no API key is provided or found in the environment.
        """
        self.api_key = api_key or os.getenv("DASHSCOPE_API_KEY")
        self.base_url = base_url or "https://dashscope.aliyuncs.com/compatible-mode/v1"

        if not self.api_key:
            raise ValueError(
                "API key is not provided and DASHSCOPE_API_KEY environment variable is not set."
            )

        self.client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url,
        )

    def chat_completion(
        self,
        messages,
        modalities=None,
        audio_params=None,
        output_audio_path=None,
        model="qwen-omni-turbo-0119",
    ):
        """Run a streaming chat completion, printing text deltas to stdout and
        optionally saving returned audio.

        Args:
            messages: Chat messages in OpenAI message format.
            modalities: Requested output modalities. Defaults to
                ``["text", "audio"]``.
            audio_params: Voice/format options for audio output. Defaults to
                ``{"voice": "Cherry", "format": "wav"}``.
            output_audio_path: If given and audio is returned, the decoded
                audio is written to this path (directories created as needed).
            model: Model name to invoke (kept backward compatible with the
                previously hard-coded "qwen-omni-turbo-0119").

        Returns:
            dict with "text" and "usage" keys; additionally "audio_file" when
            audio was saved, or "audio_data"/"error" when saving failed.
        """
        # Mutable containers must not be parameter defaults (they would be
        # shared across calls); resolve the effective defaults per call.
        if modalities is None:
            modalities = ["text", "audio"]
        if audio_params is None:
            audio_params = {"voice": "Cherry", "format": "wav"}

        completion = self.client.chat.completions.create(
            model=model,
            messages=messages,
            modalities=modalities,
            audio=audio_params,
            stream=True,  # Omni models only support streaming responses.
            stream_options={"include_usage": True},
        )

        audio_string = ""
        text_response = ""
        usage_data = None

        for chunk in completion:
            if chunk.choices:
                delta = chunk.choices[0].delta
                if hasattr(delta, "audio") and delta.audio and "data" in delta.audio:
                    # Audio arrives as incremental base64 fragments.
                    audio_string += delta.audio["data"]
                elif hasattr(delta, "content") and delta.content:
                    text_response += delta.content
                    print(delta.content, end="")  # Stream text output
            elif hasattr(chunk, "usage"):
                # The final chunk (empty choices) carries the token usage.
                usage_data = chunk.usage
                print(f"\nUsage: {usage_data}")

        if audio_string and output_audio_path:
            try:
                output_dir = os.path.dirname(output_audio_path)
                if output_dir:
                    # exist_ok avoids the exists()/makedirs() race.
                    os.makedirs(output_dir, exist_ok=True)

                # NOTE(review): the decoded payload is treated as raw 16-bit
                # PCM at 24 kHz regardless of audio_params["format"] — confirm
                # this matches the API's streaming audio encoding before
                # requesting formats other than wav.
                wav_bytes = base64.b64decode(audio_string)
                audio_np = np.frombuffer(wav_bytes, dtype=np.int16)
                sf.write(output_audio_path, audio_np, samplerate=24000)
                print(f"\nAudio saved to {output_audio_path}")
                return {
                    "text": text_response,
                    "audio_file": output_audio_path,
                    "usage": usage_data,
                }
            except Exception as e:
                # Best effort: surface the error but still hand the caller the
                # raw base64 payload so it can be persisted another way.
                print(f"\nError saving audio: {e}")
                return {
                    "text": text_response,
                    "audio_data": audio_string,
                    "error": str(e),
                    "usage": usage_data,
                }

        return {"text": text_response, "usage": usage_data}


def _run_demo():
    """Exercise QwenOmniTurbo with three representative request shapes."""
    qwen_turbo = QwenOmniTurbo()

    # 1. Audio input -> text and audio output
    print("\n--- Test Case 1: Audio Input, Text and Audio Output ---")
    audio_part = {
        "type": "input_audio",
        "input_audio": {
            "data": "https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20250211/tixcef/cherry.wav",
            "format": "wav",
        },
    }
    text_part = {
        "type": "text",
        "text": "这段音频在说什么？请用甜美的声音回答我。",
    }
    qwen_turbo.chat_completion(
        messages=[{"role": "user", "content": [audio_part, text_part]}],
        modalities=["text", "audio"],
        audio_params={"voice": "Cherry", "format": "wav"},
        output_audio_path=".output/test_audio_output_from_class.wav",
    )

    # 2. Plain text input -> text-only output
    print("\n\n--- Test Case 2: Text Input, Text Output ---")
    qwen_turbo.chat_completion(
        messages=[{"role": "user", "content": "你好，请介绍一下你自己。"}],
        modalities=["text"],
    )

    # 3. Text input -> text plus audio output (TTS; depends on model support)
    print("\n\n--- Test Case 3: Text Input, Audio Output (TTS) ---")
    qwen_turbo.chat_completion(
        messages=[
            {"role": "user", "content": "把这句话读出来：欢迎使用通义千问大模型。"}
        ],
        modalities=["text", "audio"],
        audio_params={"voice": "Ethan", "format": "mp3"},
        output_audio_path=".output/test_tts_output_from_class.mp3",
    )


if __name__ == "__main__":
    try:
        _run_demo()
    except ValueError as e:
        print(f"Configuration Error: {e}")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
