# 运行前的准备工作:
# 运行下列命令安装第三方依赖
# pip install numpy soundfile openai pyaudio

import base64
import os
import time

import numpy as np
import pyaudio
import soundfile as sf
from openai import OpenAI

# API client. SECURITY: never commit API keys in source code — read the key
# from the DASHSCOPE_API_KEY environment variable instead.
client = OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

# Playback parameters: the model streams 16-bit mono PCM at 24 kHz.
sample_rate = 24000
pa = pyaudio.PyAudio()
stream = pa.open(
    format=pyaudio.paInt16,
    channels=1,
    rate=sample_rate,
    output=True,
)

# Base64 encoding helper
def encode_image(image_path):
    """Return the file at *image_path* as a base64 string (UTF-8 decoded).

    Despite the name, this works for any binary file — it is also used
    below to embed wav audio and mp4 video into data: URLs.
    """
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")

# Baseline timestamp and first-chunk flags used below to measure
# time-to-first-token / time-to-first-audio streaming latency.
start=time.time()
first_text_receice=False  # set True once the first text delta arrives (name typo kept; read by the loop below)
first_audio_receice=False  # set True once the first audio delta arrives

# # 文本输入，输出文本+音频
# completion = client.chat.completions.create(
#     model="qwen3-omni-flash", # 模型为Qwen3-Omni-Flash时，请在非思考模式下运行
#     messages=[
#         {
#             "role": "user",
#             "content": [
#                 {"type": "text", "text": "你是谁"},
#             ],
#         },
#     ],
#     # 设置输出数据的模态，当前支持两种：["text","audio"]、["text"]
#     modalities=["text", "audio"],
#     audio={"voice": "Cherry", "format": "wav"},
#     # stream 必须设置为 True，否则会报错
#     stream=True,
#     stream_options={
#         "include_usage": True
#     }
# )

# # 文本+图片输入，输出文本+音频
# image_file = "pig.jpg"
# completion = client.chat.completions.create(
#     model="qwen3-omni-flash", # 模型为Qwen3-Omni-Flash时，请在非思考模式下运行
#     messages=[
#         {
#             "role": "user",
#             "content": [
#                 {
#                     "type": "image_url",
#                     "image_url": {
#                          "url": f"data:image/jpeg;base64,{encode_image(image_file)}",
#                     },
#                 },
#                 {"type": "text", "text": "图中描绘的是什么景象？"},
#             ],
#         },
#     ],
#     # 设置输出数据的模态，当前支持两种：["text","audio"]、["text"]
#     modalities=["text", "audio"],
#     audio={"voice": "Cherry", "format": "wav"},
#     # stream 必须设置为 True，否则会报错
#     stream=True,
#     stream_options={
#         "include_usage": True
#     }
# )

# # 文本+音频输入，输出文本+音频
# wav_file="pig.wav"
# completion = client.chat.completions.create(
#     model="qwen3-omni-flash", # 模型为Qwen3-Omni-Flash时，请在非思考模式下运行
#     messages=[
#         {
#             "role": "user",
#             "content": [
#                 {
#                     "type": "input_audio",
#                     "input_audio": {
#                          "data": f"data:;base64,{encode_image(wav_file)}",
#                          "format": "wav",
#                     },
#                 },
#                 {"type": "text", "text": "这段音频在说什么？"},
#             ],
#         },
#     ],
#     # 设置输出数据的模态，当前支持两种：["text","audio"]、["text"]
#     modalities=["text", "audio"],
#     audio={"voice": "Cherry", "format": "wav"},
#     # stream 必须设置为 True，否则会报错
#     stream=True,
#     stream_options={
#         "include_usage": True
#     }
# )

# Text + video input, producing text + audio output.
# NOTE(review): the video is embedded as a base64 data URL with no MIME type
# ("data:;base64,...") — presumably the service sniffs the container format;
# confirm against the DashScope docs if the upload is rejected.
video_file="dance.mp4"
completion = client.chat.completions.create(
    model="qwen3-omni-flash", # for Qwen3-Omni-Flash, run in non-thinking mode
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "video_url",
                    "video_url": {
                         "url": f"data:;base64,{encode_image(video_file)}",
                    },
                },
                {"type": "text", "text": "这段视频内容是什么？"},
            ],
        },
    ],
    # Output modalities: two options are supported — ["text","audio"] or ["text"]
    modalities=["text", "audio"],
    audio={"voice": "Cherry", "format": "wav"},
    # stream must be True for audio output; the request errors otherwise
    stream=True,
    stream_options={
        "include_usage": True
    }
)

print("模型回复：")
audio_base64_string = ""
for chunk in completion:
    # Usage-only chunks (from include_usage) carry no choices — skip them.
    if not chunk.choices:
        continue
    delta = chunk.choices[0].delta

    # Text part: echo tokens to stdout as they stream in.
    if delta.content:
        print(delta.content, end="")
        if not first_text_receice:
            end = time.time()
            print(f"\n首包文本接收时间：{(end - start) * 1000:.2f} ms")
            first_text_receice = True

    # Audio part: accumulate base64 for saving, and play the PCM immediately.
    if getattr(delta, "audio", None):
        piece = delta.audio.get("data", "")
        audio_base64_string += piece

        if not first_audio_receice:
            end = time.time()
            print(f"\n首包音频接收时间：{(end - start) * 1000:.2f} ms")
            first_audio_receice = True

        stream.write(base64.b64decode(piece))


# Stop playback and release the PortAudio resources.
stream.stop_stream()
stream.close()
pa.terminate()

# Save the collected audio to a WAV file.
if audio_base64_string:
    # NOTE(review): this assumes the per-chunk base64 strings concatenate into
    # one decodable string (i.e. chunks carry no interior '=' padding) —
    # confirm against the streaming API's chunking guarantees.
    wav_bytes = base64.b64decode(audio_base64_string)
    audio_np = np.frombuffer(wav_bytes, dtype=np.int16)
    # Reuse the shared sample_rate constant instead of repeating the magic 24000.
    sf.write("audio_assistant.wav", audio_np, samplerate=sample_rate)
    print("\n音频文件已保存至：audio_assistant.wav")