import pyaudio
import dashscope
from dashscope.audio.asr import *
import os
from openai import OpenAI

from tests import config

# Configure the DashScope API key and the OpenAI-compatible chat client
# (both credentials come from the shared test config module).
dashscope.api_key = config.modeConfig["api_key"]
openai_client = OpenAI(
    api_key=config.modeConfig["api_key"],
    base_url=config.modeConfig["base_url"],
)

# PyAudio handles; created in Callback.on_open and released in Callback.on_close.
mic = None
stream = None
# Latest transcript text received from the recognizer.
recognized_text = ""


class Callback(TranslationRecognizerCallback):
    """Bridges the speech recognizer to the chat model.

    Lifecycle: ``on_open`` acquires the microphone, ``on_event`` records
    each (partial) transcript into the module-level ``recognized_text``
    and dispatches it to the LLM when voice activity ends, ``on_close``
    releases the audio resources.
    """

    def on_open(self) -> None:
        """Open the microphone when the recognizer session starts."""
        global mic
        global stream
        print("TranslationRecognizerCallback open.")
        mic = pyaudio.PyAudio()
        # 16 kHz mono 16-bit PCM — must match the recognizer's
        # format="pcm", sample_rate=16000 configuration.
        stream = mic.open(
            format=pyaudio.paInt16, channels=1, rate=16000, input=True
        )

    def on_close(self) -> None:
        """Release the audio stream and the PyAudio instance on session end."""
        global mic
        global stream
        print("TranslationRecognizerCallback close.")
        if stream:
            stream.stop_stream()
            stream.close()
        if mic:
            mic.terminate()
        stream = None
        mic = None

    def on_event(
            self,
            request_id,
            transcription_result: TranscriptionResult,
            translation_result: TranslationResult,
            usage,
    ) -> None:
        """Record the latest transcript; on VAD end-of-speech, call the LLM.

        Partial transcripts overwrite ``recognized_text`` so it always
        holds the most complete text seen so far for the utterance.
        """
        global recognized_text
        print("\nrequest id: ", request_id)
        print("usage: ", usage)

        # Keep the most recent transcript (later partials supersede earlier ones).
        if transcription_result is not None:
            print("sentence id: ", transcription_result.sentence_id)
            print("识别结果: ", transcription_result.text)
            recognized_text = transcription_result.text

        # Voice-activity detector signals the utterance is ending: process once.
        if transcription_result and transcription_result.vad_pre_end:
            print("\n语音输入结束，正在调用AI处理...")
            self.call_openai_api(recognized_text)
            # Clear the buffer so a later VAD event cannot re-submit stale text.
            recognized_text = ""

    def call_openai_api(self, text: str) -> None:
        """Send *text* to the chat model and print the reply.

        Returns silently on empty/whitespace-only input so a spurious VAD
        event never triggers an API call with no content.
        """
        if not text or not text.strip():
            return
        try:
            # Single-turn prompt: system persona + the recognized user speech.
            messages = [
                {"role": "system", "content": "你是一个智能助手，会根据用户的问题提供帮助。"},
                {"role": "user", "content": text}
            ]

            completion = openai_client.chat.completions.create(
                model=config.modeConfig["model"],
                messages=messages
            )

            response = completion.choices[0].message.content
            print(f"\nAI回复: {response}")

        except Exception as e:
            # Best-effort boundary: report the failure but keep the session alive.
            print(f"\nAPI调用错误: {e}")
            print("请参考文档：https://help.aliyun.com/zh/model-studio/developer-reference/error-code")


# Build the recognizer and stream microphone audio until the chat turn ends.
callback = Callback()
translator = TranslationRecognizerChat(
    model="gummy-chat-v1",
    format="pcm",
    sample_rate=16000,
    transcription_enabled=True,
    translation_enabled=True,
    translation_target_languages=["en"],
    callback=callback,
)

try:
    translator.start()
    print("请您通过麦克风讲话，说完后将自动处理...")
    # `stream` is opened by Callback.on_open; keep pumping audio while it exists.
    while stream:
        frame = stream.read(3200, exception_on_overflow=False)
        if not translator.send_audio_frame(frame):
            print("处理结束，停止发送音频")
            break
except KeyboardInterrupt:
    print("\n用户中断程序")
finally:
    translator.stop()
    print("程序已退出")
