#!/usr/bin/env python3
"""
Real-time Microphone Transcription using FunASR
Supports both Chinese and English speech recognition with streaming capability
"""

import numpy as np
import pyaudio
import threading
import queue
import time
import sys
from typing import Optional
import signal

try:
    from funasr import AutoModel
except ImportError:
    print("Please install FunASR: pip install funasr")
    sys.exit(1)

class MicrophoneStreamer:
    """Capture microphone audio via PyAudio and expose it as float32 chunks.

    Samples are normalized to [-1.0, 1.0) and buffered in a thread-safe
    queue that consumers poll with get_audio_chunk().
    """

    def __init__(self,
                 sample_rate: int = 16000,
                 chunk_duration: float = 0.6,  # 600ms chunks
                 channels: int = 1,
                 format: int = pyaudio.paInt16):
        """
        Initialize microphone streamer

        Args:
            sample_rate: Audio sample rate (16kHz for FunASR)
            chunk_duration: Duration of each audio chunk in seconds
            channels: Number of audio channels (1 for mono)
            format: Audio format
        """
        self.sample_rate = sample_rate
        self.chunk_duration = chunk_duration
        self.chunk_size = int(sample_rate * chunk_duration)  # 9600 samples for 600ms @ 16kHz
        self.channels = channels
        self.format = format

        # Audio streaming
        self.audio = pyaudio.PyAudio()
        self.stream: Optional[pyaudio.Stream] = None
        self.audio_queue = queue.Queue()
        self.is_recording = False
        # True once PyAudio has been terminated. Guards against a second
        # terminate() when stop_recording() runs more than once (e.g. once
        # from start_recording()'s error path and again from a finally block).
        self._terminated = False

        # Threading
        self.record_thread: Optional[threading.Thread] = None

    def list_audio_devices(self):
        """List available audio input devices"""
        print("Available audio input devices:")
        for i in range(self.audio.get_device_count()):
            device_info = self.audio.get_device_info_by_index(i)
            if device_info['maxInputChannels'] > 0:
                print(f"  Device {i}: {device_info['name']} "
                      f"(Channels: {device_info['maxInputChannels']}, "
                      f"Sample Rate: {device_info['defaultSampleRate']})")

    def start_recording(self, device_index: Optional[int] = None):
        """Start recording from microphone

        Args:
            device_index: PyAudio input device index, or None for the default.
        """
        try:
            self.stream = self.audio.open(
                format=self.format,
                channels=self.channels,
                rate=self.sample_rate,
                input=True,
                input_device_index=device_index,
                frames_per_buffer=self.chunk_size,
                stream_callback=self._audio_callback
            )

            self.is_recording = True
            self.stream.start_stream()
            print(f"🎤 Recording started (Sample Rate: {self.sample_rate}Hz, Chunk: {self.chunk_duration}s)")

        except Exception as e:
            print(f"❌ Error starting recording: {e}")
            self.stop_recording()

    def _audio_callback(self, in_data, frame_count, time_info, status):
        """Callback function for audio stream; runs on PyAudio's internal thread."""
        if status:
            print(f"⚠️  Audio callback status: {status}")

        # Convert int16 PCM bytes to float32 normalized to [-1.0, 1.0)
        audio_data = np.frombuffer(in_data, dtype=np.int16).astype(np.float32) / 32768.0
        self.audio_queue.put(audio_data)

        return (None, pyaudio.paContinue)

    def get_audio_chunk(self) -> Optional[np.ndarray]:
        """Get the next audio chunk from the queue, or None after a 1s timeout."""
        try:
            return self.audio_queue.get(timeout=1.0)
        except queue.Empty:
            return None

    def stop_recording(self):
        """Stop recording and release audio resources.

        Idempotent: safe to call more than once. Previously a second call
        re-closed the stream and re-terminated PyAudio, which raises.
        """
        self.is_recording = False

        if self.stream is not None:
            self.stream.stop_stream()
            self.stream.close()
            self.stream = None  # drop the handle so repeated calls are no-ops

        if not self._terminated:
            self.audio.terminate()
            self._terminated = True
            print("🛑 Recording stopped")

class FunASRTranscriber:
    """Wraps a FunASR streaming model and tracks per-session decoder state."""

    def __init__(self,
                 model_name: str = "paraformer-zh-streaming",
                 language: str = "zh",
                 device: str = "cpu"):
        """
        Initialize FunASR transcriber

        Args:
            model_name: Model to use for transcription
            language: Language for transcription ("zh" or "en")
            device: Device to run on ("cpu" or "cuda")
        """
        self.model_name = model_name
        self.language = language
        self.device = device

        # Streaming parameters based on FunASR documentation:
        # chunk_size [0, 10, 5] -> 600ms real-time chunks, 300ms lookahead.
        self.chunk_size = [0, 10, 5]
        self.encoder_chunk_look_back = 4
        self.decoder_chunk_look_back = 1
        # 10 units * 960 samples/unit (60ms @ 16kHz) = 600ms stride
        self.chunk_stride = self.chunk_size[1] * 960

        # Load the model up front so failures surface immediately.
        print(f"🔄 Loading model: {model_name}")
        try:
            self.model = AutoModel(model=model_name, device=device)
        except Exception as e:
            print(f"❌ Error loading model: {e}")
            raise
        print(f"✅ Model loaded successfully on {device}")

        # Streaming state carried between chunks
        self.cache = {}
        self.chunk_count = 0

    def transcribe_chunk(self, audio_chunk: np.ndarray, is_final: bool = False) -> str:
        """
        Transcribe a single audio chunk

        Args:
            audio_chunk: Audio data as numpy array
            is_final: Whether this is the final chunk

        Returns:
            Transcribed text ("" on error or when nothing was recognized)
        """
        try:
            results = self.model.generate(
                input=audio_chunk,
                cache=self.cache,
                is_final=is_final,
                chunk_size=self.chunk_size,
                encoder_chunk_look_back=self.encoder_chunk_look_back,
                decoder_chunk_look_back=self.decoder_chunk_look_back,
            )

            self.chunk_count += 1

            # The model returns a list of result dicts; take the first text.
            first = results[0] if results else None
            if first is not None and 'text' in first:
                return first['text'].strip()
            return ""

        except Exception as e:
            print(f"❌ Transcription error: {e}")
            return ""

    def reset_cache(self):
        """Reset the streaming cache (call after a silence boundary)."""
        self.cache = {}
        self.chunk_count = 0

class RealTimeTranscriber:
    """Glue layer: pipes microphone chunks through FunASR and prints results."""

    def __init__(self,
                 model_name: str = "paraformer-zh-streaming",
                 language: str = "zh",
                 device: str = "cpu",
                 microphone_device: Optional[int] = None):
        """
        Initialize real-time transcriber

        Args:
            model_name: FunASR model name
            language: Language for transcription
            device: Device to run model on
            microphone_device: Microphone device index
        """
        self.microphone = MicrophoneStreamer()
        self.transcriber = FunASRTranscriber(model_name, language, device)
        self.microphone_device = microphone_device
        self.is_running = False

    def list_microphones(self):
        """List available microphone devices"""
        self.microphone.list_audio_devices()

    def start(self):
        """Run the capture/transcribe loop until interrupted (Ctrl+C)."""
        print("🚀 Starting real-time transcription...")
        print("Press Ctrl+C to stop")

        # Install a SIGINT handler so Ctrl+C flips the run flag instead of
        # tearing the loop down mid-chunk.
        signal.signal(signal.SIGINT, self._signal_handler)

        try:
            self.microphone.start_recording(self.microphone_device)
            self.is_running = True

            pending_text = ""
            quiet_chunks = 0

            while self.is_running:
                chunk = self.microphone.get_audio_chunk()

                if chunk is None:
                    # Queue timed out; just idle briefly and poll again.
                    time.sleep(0.01)
                    continue

                # Crude energy-based VAD: mean absolute amplitude threshold.
                if np.mean(np.abs(chunk)) > 0.01:
                    quiet_chunks = 0

                    recognized = self.transcriber.transcribe_chunk(chunk)
                    if recognized and recognized != pending_text:
                        print(f"🗣️  [{time.strftime('%H:%M:%S')}] {recognized}")
                        pending_text = recognized
                else:
                    quiet_chunks += 1

                    # After ~6 seconds of silence, flush the last utterance
                    # and reset the streaming decoder state.
                    if quiet_chunks > 10:
                        if pending_text:
                            print(f"📝 Final: {pending_text}")
                            pending_text = ""
                        self.transcriber.reset_cache()
                        quiet_chunks = 0

                time.sleep(0.01)  # small delay to keep CPU usage down

        except KeyboardInterrupt:
            pass
        finally:
            self.stop()

    def _signal_handler(self, signum, frame):
        """Handle Ctrl+C gracefully"""
        print("\n🛑 Stopping transcription...")
        self.is_running = False

    def stop(self):
        """Stop transcription and cleanup"""
        self.is_running = False
        self.microphone.stop_recording()
        print("✅ Transcription stopped")

def main():
    """Entry point: configure, list devices, and launch live transcription."""
    print("🎯 FunASR Real-time Microphone Transcription")
    print("=" * 50)

    # Configuration — adjust to taste:
    #   model/language: "paraformer-en-streaming" / "en" for English
    #   device: set to "cpu" if no CUDA-capable GPU is available
    MODEL_NAME = "paraformer-zh-streaming"
    LANGUAGE = "zh"
    DEVICE = "cuda"

    try:
        transcriber = RealTimeTranscriber(
            model_name=MODEL_NAME,
            language=LANGUAGE,
            device=DEVICE,
        )

        # Show the user what inputs are available before starting.
        print("\n📱 Available microphones:")
        transcriber.list_microphones()

        print(f"\n🎤 Using default microphone (or specify device index)")
        print("Starting in 3 seconds...")
        time.sleep(3)

        transcriber.start()

    except Exception as e:
        print(f"❌ Error: {e}")
        sys.exit(1)

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main() 