# 依赖：dashscope >= 1.23.9，pyaudio。
import base64
import os
import sys
import threading
import time

import pyaudio

import dashscope
from dashscope.audio.qwen_omni import *

# Prefer the DASHSCOPE_API_KEY environment variable; replace the fallback
# with your own key only for local testing.
# SECURITY: a live API key must never be committed to source control —
# rotate the key below and remove the literal.
dashscope.api_key = os.getenv('DASHSCOPE_API_KEY', 'sk-9ad5ca53493f4d7e822aa384e84c51fe')
# Voice used for synthesized replies.
voice = 'Cherry'

class MyCallback(OmniRealtimeCallback):
    """Minimal realtime callback: opens the speaker when the connection is
    established and plays returned audio chunks as they arrive.

    Reads the module-level globals ``first_audio_received`` and ``start``
    (set by the main loop) to measure first-audio-packet latency.
    """
    def __init__(self, ctx):
        # ctx: dict shared with the main loop, holding 'pya' (PyAudio
        # instance), 'out' (speaker stream), 'conv' (conversation handle)
        # and 'resp_done' (threading.Event signalling end of a response).
        super().__init__()
        self.ctx = ctx

    def on_open(self) -> None:
        # Once the connection opens, initialize PyAudio and a speaker
        # stream at 24 kHz / mono / 16-bit — matching the session's
        # PCM_24000HZ_MONO_16BIT output format configured in the main block.
        print('connection opened')
        try:
            self.ctx['pya'] = pyaudio.PyAudio()
            self.ctx['out'] = self.ctx['pya'].open(
                format=pyaudio.paInt16,
                channels=1,
                rate=24000,
                output=True
            )
            print('audio output initialized')
        except Exception as e:
            print('[Error] audio init failed: {}'.format(e))

    def on_close(self, close_status_code, close_msg) -> None:
        # NOTE(review): sys.exit raises SystemExit only in the thread that
        # runs this callback; whether it ends the process depends on the
        # SDK's threading model — confirm before relying on it for shutdown.
        print('connection closed with code: {}, msg: {}'.format(close_status_code, close_msg))
        sys.exit(0)

    def on_event(self, response: dict) -> None:
        # Dispatch server events by their 'type' field. `response` is the
        # already-parsed event payload (a dict — the body indexes it by key).
        try:
            t = response['type']
            handlers = {
                'session.created': lambda r: print('start session: {}'.format(r['session']['id'])),
                'conversation.item.input_audio_transcription.completed': lambda r: print('question: {}'.format(r['transcript'])),
                'response.audio_transcript.delta': lambda r: print('llm text: {}'.format(r['delta'])),
                'response.audio.delta': self._play_audio,
                'response.done': self._response_done,
            }
            h = handlers.get(t)
            if h:
                h(response)
        except Exception as e:
            print('[Error] {}'.format(e))

    def _play_audio(self, response):
        # On the first audio chunk of a turn, report the first-packet
        # latency relative to `start` (stamped when audio was sent).
        global first_audio_received, start
        if first_audio_received:
            end = time.time()
            first_audio_received = False
            print('音频首包延迟: {} ms'.format(int((end - start) * 1000)))

        # Decode the base64 PCM payload and write it straight to the speaker.
        if self.ctx['out'] is None:
            return
        try:
            data = base64.b64decode(response['delta'])
            self.ctx['out'].write(data)
        except Exception as e:
            print('[Error] audio playback failed: {}'.format(e))

    def _response_done(self, response):
        # Mark the current turn as finished so the main loop's wait() returns.
        if self.ctx['conv'] is not None:
            print('[Metric] response: {}, first text delay: {}, first audio delay: {}'.format(
                self.ctx['conv'].get_last_response_id(),
                self.ctx['conv'].get_last_first_text_delay(),
                self.ctx['conv'].get_last_first_audio_delay(),
            ))
        if self.ctx['resp_done'] is not None:
            self.ctx['resp_done'].set()

def shutdown_ctx(ctx):
    """Best-effort release of the speaker stream and the PyAudio instance.

    Closes ``ctx['out']`` first, then terminates ``ctx['pya']``; each handle
    is reset to ``None`` on success and any error is swallowed so shutdown
    never raises.
    """
    teardown = (
        ('out', lambda handle: handle.close()),
        ('pya', lambda handle: handle.terminate()),
    )
    for key, release in teardown:
        try:
            handle = ctx[key]
            if handle is not None:
                release(handle)
                ctx[key] = None
        except Exception:
            pass


def record_until_enter(pya_inst: pyaudio.PyAudio, sample_rate=16000, chunk_size=3200):
    """Record microphone audio until the user presses Enter; return PCM bytes.

    Args:
        pya_inst: an initialized PyAudio instance used to open the input stream.
        sample_rate: capture rate in Hz (16 kHz matches the session's
            PCM_16000HZ_MONO_16BIT input format).
        chunk_size: frames per read (note: PyAudio counts frames, not bytes).

    Returns:
        Raw 16-bit mono PCM bytes; empty if nothing was captured.
    """
    frames = []
    stop_evt = threading.Event()

    stream = pya_inst.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=sample_rate,
        input=True,
        frames_per_buffer=chunk_size
    )

    def _reader():
        # Pull audio off the device until told to stop; overflow is tolerated
        # so a momentarily slow consumer never aborts the recording.
        while not stop_evt.is_set():
            try:
                frames.append(stream.read(chunk_size, exception_on_overflow=False))
            except Exception:
                break

    t = threading.Thread(target=_reader, daemon=True)
    t.start()
    input()  # second Enter press stops the recording
    stop_evt.set()
    t.join(timeout=1.0)
    # Fix: stop the stream before closing it — PyAudio's documented teardown
    # order; closing a still-active stream can abort mid-buffer on some
    # backends. Both calls are best-effort so teardown never raises.
    try:
        stream.stop_stream()
    except Exception:
        pass
    try:
        stream.close()
    except Exception:
        pass
    return b''.join(frames)


if __name__ == '__main__':
    # (Removed the original `global first_audio_received, start` here: `global`
    # at module level is a no-op — the plain assignments below already create
    # the module-level names that MyCallback._play_audio reads.)
    print('Initializing ...')
    # Runtime context shared with the callback: audio handles + session state.
    ctx = {'pya': None, 'out': None, 'conv': None, 'resp_done': threading.Event()}
    callback = MyCallback(ctx)
    conversation = OmniRealtimeConversation(
        model='qwen3-omni-flash-realtime',
        callback=callback,
        # Beijing-region endpoint; for the Singapore region use:
        # wss://dashscope-intl.aliyuncs.com/api-ws/v1/realtime
        url="wss://dashscope.aliyuncs.com/api-ws/v1/realtime",
    )
    try:
        conversation.connect()
    except Exception as e:
        print('[Error] connect failed: {}'.format(e))
        sys.exit(1)

    ctx['conv'] = conversation
    # Session config: text + audio output; server-side VAD disabled so the
    # user controls turn boundaries manually (push-to-talk via Enter).
    conversation.update_session(
        output_modalities=[MultiModality.AUDIO, MultiModality.TEXT],
        voice=voice,
        input_audio_format=AudioFormat.PCM_16000HZ_MONO_16BIT,
        output_audio_format=AudioFormat.PCM_24000HZ_MONO_16BIT,
        enable_input_audio_transcription=True,
        # Only gummy-realtime-v1 is supported for input transcription.
        input_audio_transcription_model='gummy-realtime-v1',
        enable_turn_detection=False,
        instructions="你是个人助理小云，请你准确且友好地解答用户的问题，始终以乐于助人的态度回应。"
    )

    try:
        turn = 1
        while True:
            # Arm the first-audio-latency marker before this turn's audio.
            first_audio_received = True

            print(f"\n--- 第 {turn} 轮对话 ---")
            print("按 Enter 开始录音（输入 q 回车退出）...")
            user_input = input()
            if user_input.strip().lower() in ['q', 'quit']:
                print("用户请求退出...")
                break
            print("录音中... 再次按 Enter 停止录音。")
            if ctx['pya'] is None:
                ctx['pya'] = pyaudio.PyAudio()
            recorded = record_until_enter(ctx['pya'])
            if not recorded:
                print("未录制到有效音频，请重试。")
                continue
            print(f"成功录制音频: {len(recorded)} 字节，发送中...")

            # Timestamp read by MyCallback._play_audio for latency reporting.
            start = time.time()

            # Send in 3200-byte chunks (100 ms of 16 kHz / 16-bit mono PCM).
            chunk_size = 3200
            for i in range(0, len(recorded), chunk_size):
                chunk = recorded[i:i+chunk_size]
                conversation.append_audio(base64.b64encode(chunk).decode('ascii'))

            print("发送完成，等待模型响应...")
            ctx['resp_done'].clear()
            conversation.commit()
            conversation.create_response()
            ctx['resp_done'].wait()
            print('播放音频完成')
            turn += 1
    except KeyboardInterrupt:
        print("\n程序被用户中断")
    finally:
        # Fix: close the realtime session (best effort) so the websocket is
        # not leaked; the original never closed the connection on exit.
        try:
            conversation.close()
        except Exception:
            pass
        shutdown_ctx(ctx)
        print("程序退出")