import asyncio
import logging
import queue
import sys
import time

import pyaudio
import websocket
from dashscope.audio.asr import (Recognition, RecognitionCallback,
                                 RecognitionResult)

# Configure logging to write INFO-and-above to stdout.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)],
)
from fastapi.websockets import WebSocket

class AudioRecognition:
    """Streams live microphone audio to DashScope real-time speech recognition.

    Lifecycle: ``start_recognition()`` opens the recognition session; the
    service invokes ``on_open``, which starts the microphone. The blocking
    loop in ``start_recognition()`` then forwards raw PCM frames until the
    user interrupts. Recognized sentences are pushed onto ``self.queue``
    for an external consumer to drain.
    """

    def __init__(self, model: str, format: str, sample_rate: int,
                 chunk_size: int = 3200) -> None:
        """Create a recognizer.

        Args:
            model: DashScope ASR model name (e.g. 'paraformer-realtime-v2').
            format: Audio format identifier expected by the service (e.g. 'pcm').
            sample_rate: Capture sample rate in Hz.
            chunk_size: Frames read from the microphone per loop iteration.
        """
        self.mic = None      # pyaudio.PyAudio handle, created lazily in start_mic_stream
        self.stream = None   # open input stream while capturing, else None
        self.model = model
        self.format = format
        self.sample_rate = sample_rate
        self.chunk_size = chunk_size
        self.callback = self.RecognitionCallback(self)
        self.recognition = Recognition(
            model=self.model,
            format=self.format,
            sample_rate=self.sample_rate,
            callback=self.callback,
        )
        # Recognized sentences accumulate here; this class never consumes them.
        self.queue = queue.Queue()

    # NOTE: the base class below resolves to the *module-level* dashscope
    # RecognitionCallback import -- the nested name is not yet bound when the
    # base list is evaluated, so this works, but the shadowing is deliberate
    # and must not be "simplified" away.
    class RecognitionCallback(RecognitionCallback):
        """Bridges DashScope session events back to the owning recognizer."""

        def __init__(self, parent: "AudioRecognition") -> None:
            super().__init__()
            self.parent = parent

        def on_open(self) -> None:
            # Session is live; only now is it safe to start capturing audio.
            logging.info("RecognitionCallback: WebSocket 连接已建立 (on_open)")
            self.parent.start_mic_stream()

        def on_event(self, result: RecognitionResult) -> None:
            sentence = result.get_sentence()
            logging.info(f"RecognitionCallback: 收到回复: {sentence} (on_event)")
            # Hand the sentence to whatever consumes parent.queue.
            self.parent.queue.put(sentence)

        def on_close(self) -> None:
            logging.info("RecognitionCallback: WebSocket 连接已关闭 (on_close)")
            self.parent.stop_mic_stream()

    def start_mic_stream(self) -> None:
        """Open the default input device for 16-bit mono capture."""
        try:
            self.mic = pyaudio.PyAudio()
            self.stream = self.mic.open(
                format=pyaudio.paInt16,
                channels=1,
                rate=self.sample_rate,
                input=True,
                frames_per_buffer=self.chunk_size,
            )
            logging.info("Microphone stream started.")
        except Exception as e:
            logging.error(f"Failed to start microphone stream: {e}")
            # Release anything partially acquired (e.g. a PyAudio handle
            # without a stream) so a retry starts from a clean state.
            self.stop_mic_stream()

    def stop_mic_stream(self) -> None:
        """Close the input stream and release PyAudio; safe to call repeatedly.

        Teardown is best-effort: a failure to close one resource must not
        prevent releasing the other, and must never propagate to callers.
        """
        if self.stream:
            try:
                self.stream.stop_stream()
                self.stream.close()
            except Exception as e:
                logging.warning(f"Error while closing audio stream: {e}")
            self.stream = None
        if self.mic:
            try:
                self.mic.terminate()
            except Exception as e:
                logging.warning(f"Error while terminating PyAudio: {e}")
            self.mic = None
        logging.info("Microphone stream stopped.")

    def start_recognition(self) -> None:
        """Run the blocking capture/send loop until Ctrl-C or an error.

        Always stops the session and the microphone on the way out.
        """
        logging.info("Starting recognition...")
        self.recognition.start()

        try:
            while True:
                if self.stream:
                    data = self.stream.read(self.chunk_size,
                                            exception_on_overflow=False)
                    self.recognition.send_audio_frame(data)
                else:
                    # The stream only exists between on_open and on_close;
                    # sleep briefly instead of busy-waiting at 100% CPU.
                    time.sleep(0.01)
        except KeyboardInterrupt:
            logging.info("Keyboard interrupt detected. Stopping recognition...")
        except Exception as e:
            logging.error(f"An error occurred during recognition: {e}")
        finally:
            self.stop_recognition()

    def stop_recognition(self) -> None:
        """Stop the recognition session and release the microphone."""
        logging.info("Stopping recognition...")
        self.recognition.stop()
        self.stop_mic_stream()

if __name__ == '__main__':
    import os

    import dashscope

    # Read the credential from the environment instead of hard-coding it in
    # source control. (A literal key was previously committed here; it should
    # be considered leaked and rotated.)
    api_key = os.environ.get('DASHSCOPE_API_KEY')
    if not api_key:
        logging.error("DASHSCOPE_API_KEY environment variable is not set.")
        sys.exit(1)
    dashscope.api_key = api_key

    logging.info("Audio recognition started.")
    audio_recognition = AudioRecognition(
        model='paraformer-realtime-v2',
        format='pcm',
        sample_rate=16000,
        chunk_size=3200,
    )
    audio_recognition.start_recognition()
