import pyaudio
import wave
import numpy as np
import dashscope
from dashscope.audio.asr import Recognition, RecognitionCallback, RecognitionResult
import os
import time
import threading
import queue

class STTCallback(RecognitionCallback):
    """DashScope recognition callback that funnels recognized text into a queue.

    Lifecycle hooks (`on_open`/`on_close`/`on_complete`/`on_error`) just log;
    `on_event` extracts sentence text and hands it to consumers via a
    thread-safe queue.
    """

    def __init__(self):
        super().__init__()
        # Thread-safe hand-off between the recognition thread and readers.
        self.text_queue = queue.Queue()

    def on_open(self) -> None:
        print('Recognition service connected.')

    def on_close(self) -> None:
        print('Recognition service disconnected.')

    def on_complete(self) -> None:
        print('Recognition completed.')

    def on_error(self, message) -> None:
        print(f'Recognition error: {message.message}')

    def on_event(self, result: RecognitionResult) -> None:
        """Push any recognized sentence text onto the queue."""
        sentence = result.get_sentence()
        if 'text' not in sentence:
            return
        recognized = sentence['text']
        print(f'Recognized text: {recognized}')
        self.text_queue.put(recognized)

class LocalSTTClient:
    """Streams microphone audio to DashScope realtime ASR and exposes results.

    Audio format is fixed to what the paraformer realtime model expects:
    16 kHz, mono, 16-bit PCM.
    """

    def __init__(self, api_key=None):
        """Configure credentials, audio parameters, and the recognizer.

        Args:
            api_key: Explicit DashScope API key. If not given, the key is
                read from the DASHSCOPE_API_KEY environment variable.

        Raises:
            ValueError: If no API key is available from either source.
        """
        if api_key:
            dashscope.api_key = api_key
        elif 'DASHSCOPE_API_KEY' in os.environ:
            # BUG FIX: previously a hard-coded secret key was assigned here;
            # use the environment variable's value instead. Never commit
            # credentials to source.
            dashscope.api_key = os.environ['DASHSCOPE_API_KEY']
        else:
            raise ValueError("DashScope API key not provided")

        # Audio recording parameters (match the model's expected input).
        self.CHUNK = 1024
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 1
        self.RATE = 16000  # 16kHz
        self.RECORD_SECONDS = 5  # Recording duration in seconds

        # Initialize PyAudio
        self.p = pyaudio.PyAudio()

        # Initialize recognition with a queue-backed callback.
        self.callback = STTCallback()
        self.recognition = Recognition(
            model='paraformer-realtime-v2',
            format='pcm',
            sample_rate=self.RATE,
            semantic_punctuation_enabled=True,
            callback=self.callback
        )

    def record_and_recognize(self):
        """Capture microphone audio and stream it to the recognizer.

        Runs until interrupted (Ctrl+C) or an error occurs; always releases
        the audio stream, the recognition session, and PyAudio.
        """
        # Defined before the try block so the finally clause cannot hit an
        # UnboundLocalError when start()/open() fails before assignment.
        stream = None
        try:
            # Start recognition service
            self.recognition.start()

            # Open audio stream
            stream = self.p.open(
                format=self.FORMAT,
                channels=self.CHANNELS,
                rate=self.RATE,
                input=True,
                frames_per_buffer=self.CHUNK
            )

            print("* Recording started. Press Ctrl+C to stop.")

            # Record and send audio data
            while True:
                data = stream.read(self.CHUNK)
                self.recognition.send_audio_frame(data)

        except KeyboardInterrupt:
            print("\n* Recording stopped.")
        except Exception as e:
            print(f"Error: {str(e)}")
        finally:
            # Clean up — guard against a stream that was never opened.
            if stream is not None:
                stream.stop_stream()
                stream.close()
            self.recognition.stop()
            self.p.terminate()

    def get_recognized_text(self):
        """Return the next recognized text, or None if the queue is empty.

        Non-blocking; safe to poll from another thread.
        """
        try:
            return self.callback.text_queue.get_nowait()
        except queue.Empty:
            return None

def main():
    """Record from the microphone in a background thread and print results."""
    # SECURITY FIX: the API key was hard-coded here; pull it from the
    # environment instead so no secret lives in source control.
    client = LocalSTTClient(api_key=os.environ.get("DASHSCOPE_API_KEY"))

    # Daemon thread so a Ctrl+C in the main thread lets the process exit
    # even though the recording loop runs forever.
    record_thread = threading.Thread(
        target=client.record_and_recognize, daemon=True
    )
    record_thread.start()

    try:
        while True:
            # Poll for newly recognized text and echo it.
            text = client.get_recognized_text()
            if text:
                print(f"Latest recognition: {text}")
            time.sleep(0.1)
    except KeyboardInterrupt:
        print("\nExiting...")
        # Tearing down PyAudio unblocks the recorder thread's stream.read().
        client.p.terminate()

if __name__ == "__main__":
    main()