#!/usr/bin/env python3
"""
Voice Activity Detection (VAD) for Jetson Edge Device

This module provides efficient VAD functionality optimized for Jetson devices
running Ubuntu 20.04. It detects human voice in real-time and returns boolean
results indicating whether speech is detected.

Usage:
    python3 vad_detect.py --silero-vad-model silero_vad.onnx

Download the model from:
https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
"""

import argparse
import os
import sys
import time
import threading
from pathlib import Path
from typing import Optional, Callable

try:
    import sounddevice as sd
except ImportError:
    print("Please install sounddevice first. You can use:")
    print("  pip install sounddevice")
    print("to install it")
    sys.exit(-1)

import numpy as np
import sherpa_onnx


class VADDetector:
    """
    Voice Activity Detection class optimized for Jetson edge devices.

    Wraps a sherpa-onnx silero VAD model and provides:
      * real-time microphone detection in a background thread
        (``start_detection`` / ``stop_detection`` / ``is_speech_detected``)
      * one-shot detection over an audio file (``detect_from_file``)
    """

    def __init__(self,
                 vad_model_path: str,
                 sample_rate: int = 16000,
                 threshold: float = 0.5,
                 min_silence_duration: float = 0.25,
                 min_speech_duration: float = 0.25,
                 callback: Optional[Callable[[bool], None]] = None):
        """
        Initialize VAD detector.

        Args:
            vad_model_path: Path to silero_vad.onnx model file
            sample_rate: Audio sample rate (default: 16000 Hz)
            threshold: VAD threshold (default: 0.5)
            min_silence_duration: Minimum silence duration in seconds
            min_speech_duration: Minimum speech duration in seconds
            callback: Optional callback function called when speech status changes

        Raises:
            FileNotFoundError: If the model file does not exist.
        """
        if not Path(vad_model_path).is_file():
            raise FileNotFoundError(
                f"{vad_model_path} does not exist. Please download it from "
                "https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx"
            )

        self.sample_rate = sample_rate
        self.callback = callback
        self.is_running = False
        self.speech_detected = False

        # Configure VAD model
        config = sherpa_onnx.VadModelConfig()
        config.silero_vad.model = vad_model_path
        config.silero_vad.threshold = threshold
        config.silero_vad.min_silence_duration = min_silence_duration
        config.silero_vad.min_speech_duration = min_speech_duration
        config.sample_rate = sample_rate

        # Initialize VAD with a modest internal buffer suitable for edge devices
        self.vad = sherpa_onnx.VoiceActivityDetector(config, buffer_size_in_seconds=10)

        # Read microphone audio in 100 ms chunks
        self.samples_per_read = int(0.1 * sample_rate)
        self.mic_sample_rate = sample_rate

        # Allow overriding the microphone capture rate via environment variable
        mic_rate = os.environ.get("SHERPA_ONNX_MIC_SAMPLE_RATE")
        if mic_rate is not None:
            self.mic_sample_rate = int(mic_rate)
            print(f"Using microphone sample rate: {self.mic_sample_rate} Hz")

    def _setup_audio_device(self) -> Optional[int]:
        """
        Select the audio input device.

        Honors the SHERPA_ONNX_MIC_DEVICE environment variable; otherwise
        uses the system default input device.

        Returns:
            Device index if successful, None if no devices are available.
        """
        devices = sd.query_devices()
        if len(devices) == 0:
            print("No microphone devices found")
            print("If you are using Linux and you are sure there is a microphone ")
            print("on your system, please check ALSA configuration")
            return None

        env_device = os.environ.get("SHERPA_ONNX_MIC_DEVICE")
        if env_device is not None:
            device_idx = int(env_device)
            sd.default.device[0] = device_idx
            print(f'Using selected device: {devices[device_idx]["name"]}')
        else:
            device_idx = sd.default.device[0]
            print(f'Using default device: {devices[device_idx]["name"]}')

        return device_idx

    def is_speech_detected(self) -> bool:
        """
        Check if speech is currently detected.

        Returns:
            True if speech is detected, False otherwise
        """
        return self.speech_detected

    def start_detection(self) -> bool:
        """
        Start real-time voice detection in a daemon thread.

        Returns:
            True if started successfully (or already running), False otherwise
        """
        if self.is_running:
            print("Detection is already running")
            return True

        device_idx = self._setup_audio_device()
        if device_idx is None:
            return False

        self.is_running = True
        # Daemon thread so a hung audio read cannot keep the process alive on exit.
        self.detection_thread = threading.Thread(target=self._detection_loop)
        self.detection_thread.daemon = True
        self.detection_thread.start()

        print("VAD detection started. Press Ctrl+C to stop.")
        print("Audio input is being processed...")
        print("Will print 'true' when human voice is detected.")
        return True

    def stop_detection(self):
        """
        Stop voice detection and wait briefly for the worker thread to exit.
        """
        self.is_running = False
        thread = getattr(self, "detection_thread", None)
        if thread is not None:
            thread.join(timeout=1.0)
            if thread.is_alive():
                # Thread is blocked (likely in stream.read); being a daemon it
                # will not prevent interpreter shutdown.
                print("Warning: detection thread did not stop within timeout")
        print("VAD detection stopped.")

    @staticmethod
    def _to_mono(samples: np.ndarray) -> np.ndarray:
        """Return a flat mono array, taking the first channel of multi-channel input."""
        if samples.ndim > 1:
            samples = samples[:, 0]
        return samples.reshape(-1)

    def _resample(self, samples: np.ndarray, orig_sr: int) -> np.ndarray:
        """
        Resample *samples* from orig_sr to self.sample_rate (best effort).

        Uses librosa when available; otherwise falls back to naive 3:1
        decimation for the common 48 kHz -> 16 kHz case (no anti-alias
        filtering — acceptable for VAD), or returns the input unchanged.
        """
        if orig_sr == self.sample_rate:
            return samples
        try:
            import librosa
            return librosa.resample(
                samples,
                orig_sr=orig_sr,
                target_sr=self.sample_rate,
            )
        except ImportError:
            print("Warning: librosa not available for resampling")
            if orig_sr == 48000 and self.sample_rate == 16000:
                return samples[::3]  # Simple 3:1 downsampling
            return samples

    def _process_chunk(self, samples: np.ndarray) -> None:
        """Feed one mono chunk to the VAD and fire the callback on state changes."""
        self.vad.accept_waveform(samples)

        current_detection = self.vad.is_speech_detected()
        if current_detection != self.speech_detected:
            self.speech_detected = current_detection
            if self.callback:
                self.callback(self.speech_detected)

            # Print only when speech is detected; silence stays quiet.
            if self.speech_detected:
                print("true")

        # Drop processed speech segments to bound memory on the edge device.
        while not self.vad.empty():
            self.vad.pop()

    def _detection_loop(self):
        """
        Main detection loop running in a separate thread.
        """
        audio_chunks_processed = 0
        last_status_print = time.time()

        try:
            # Get device info to determine proper channel configuration
            devices = sd.query_devices()
            device_idx = sd.default.device[0]
            device_info = devices[device_idx]

            # VAD needs mono input regardless of device capabilities.
            max_channels = device_info['max_input_channels']
            channels_to_use = 1

            print(f"Device info: {device_info['name']}")
            print(f"Max channels: {max_channels}, Using: {channels_to_use}")

            with sd.InputStream(
                channels=channels_to_use,  # Force mono
                dtype="float32",
                samplerate=self.mic_sample_rate,
                device=device_idx
            ) as stream:
                print(f"Audio stream opened successfully at {self.mic_sample_rate} Hz")

                while self.is_running:
                    samples, _ = stream.read(self.samples_per_read)
                    samples = self._to_mono(samples)

                    audio_chunks_processed += 1

                    # Periodic heartbeat to confirm audio is flowing.
                    current_time = time.time()
                    if current_time - last_status_print > 5.0:  # Every 5 seconds
                        print(f"[DEBUG] Audio processing active - {audio_chunks_processed} chunks processed")
                        last_status_print = current_time

                    samples = self._resample(samples, self.mic_sample_rate)
                    self._process_chunk(samples)

        except Exception as e:
            print(f"Error in detection loop: {e}")
            print(f"Trying fallback device configuration...")
            # Try with different channel configuration
            self._fallback_detection_loop()

    def _fallback_detection_loop(self):
        """
        Fallback detection loop with alternative device configurations.
        """
        print("Trying fallback configurations...")

        # Try different device configurations.
        # Fixed bug: the last entry previously hard-coded device index 27,
        # which only exists on one specific machine; use the actual default
        # input device index instead.
        fallback_configs = [
            {'device': None, 'channels': 1},                  # Default device, mono
            {'device': 'default', 'channels': 1},             # Explicit default
            {'device': sd.default.device[0], 'channels': 1},  # Default device by index
        ]

        for i, config in enumerate(fallback_configs, start=1):
            try:
                print(f"Trying config {i}: device={config['device']}, channels={config['channels']}")

                with sd.InputStream(
                    channels=config['channels'],
                    dtype="float32",
                    samplerate=self.mic_sample_rate,
                    device=config['device']
                ) as stream:
                    print(f"✅ Fallback config {i} successful!")

                    # Run detection with this configuration
                    while self.is_running:
                        samples, _ = stream.read(self.samples_per_read)
                        samples = self._to_mono(samples)
                        samples = self._resample(samples, self.mic_sample_rate)
                        self._process_chunk(samples)

                    return  # Success, exit function

            except Exception as e:
                print(f"❌ Config {i} failed: {e}")
                continue

        print("❌ All fallback configurations failed!")

    def detect_from_file(self, audio_file: str) -> bool:
        """
        Detect speech from audio file.

        Args:
            audio_file: Path to audio file

        Returns:
            True if speech is detected in the file, False otherwise

        Raises:
            FileNotFoundError: If the audio file does not exist.
        """
        if not Path(audio_file).is_file():
            raise FileNotFoundError(f"Audio file {audio_file} not found")

        try:
            import soundfile as sf
            samples, file_sample_rate = sf.read(audio_file, dtype='float32')

            # Convert to mono if stereo
            if len(samples.shape) > 1:
                samples = samples[:, 0]

            # Resample if necessary
            if file_sample_rate != self.sample_rate:
                try:
                    import librosa
                    samples = librosa.resample(
                        samples,
                        orig_sr=file_sample_rate,
                        target_sr=self.sample_rate
                    )
                except ImportError:
                    print("Warning: librosa not available for resampling")
                    return False

            # Fixed bug: start from a clean VAD state so a prior live session
            # or an earlier call to this method cannot leak detections here.
            if hasattr(self.vad, "reset"):  # reset() exists in recent sherpa-onnx
                self.vad.reset()
            while not self.vad.empty():
                self.vad.pop()

            # Process audio in model-sized windows
            window_size = self.vad.config.silero_vad.window_size
            speech_found = False

            # Fixed off-by-one: use >= so the final full window is processed.
            while len(samples) >= window_size:
                self.vad.accept_waveform(samples[:window_size])
                samples = samples[window_size:]

                if self.vad.is_speech_detected():
                    speech_found = True
                    break

                # Clean up processed segments
                while not self.vad.empty():
                    self.vad.pop()

            return speech_found

        except ImportError:
            print("Please install soundfile: pip install soundfile")
            return False
        except Exception as e:
            print(f"Error processing file: {e}")
            return False


def get_args():
    """Build the CLI parser and return the parsed arguments.

    Returns:
        argparse.Namespace with model path, audio parameters, VAD tuning
        values, and an optional test-file path.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Voice Activity Detection for Jetson Edge Device"
    )

    parser.add_argument(
        "--silero-vad-model",
        type=str,
        default="/home/unitree/softwares/jetson/vad-sherpa-onnx/silero_vad.onnx",
        help="Path to silero_vad.onnx model file"
    )

    # Numeric tuning knobs share the same registration shape; declare them
    # as a table and register in one pass.
    numeric_options = (
        ("--sample-rate", int, 16000, "Audio sample rate in Hz"),
        ("--threshold", float, 0.5, "VAD detection threshold (0.0-1.0)"),
        ("--min-silence-duration", float, 0.25, "Minimum silence duration in seconds"),
        ("--min-speech-duration", float, 0.25, "Minimum speech duration in seconds"),
    )
    for flag, value_type, default_value, help_text in numeric_options:
        parser.add_argument(flag, type=value_type, default=default_value, help=help_text)

    parser.add_argument(
        "--test-file",
        type=str,
        help="Test VAD on audio file instead of microphone"
    )

    return parser.parse_args()


def speech_callback(is_speech: bool):
    """Report a speech-status transition.

    Prints "true" on a transition into speech; silence transitions
    produce no output.

    Args:
        is_speech: True if speech detected, False if silence
    """
    if not is_speech:
        return  # stay quiet on silence
    print("true")


def main():
    """Command-line entry point: run VAD on a file or live from the mic."""
    args = get_args()

    try:
        vad = VADDetector(
            vad_model_path=args.silero_vad_model,
            sample_rate=args.sample_rate,
            threshold=args.threshold,
            min_silence_duration=args.min_silence_duration,
            min_speech_duration=args.min_speech_duration,
            callback=speech_callback
        )

        if args.test_file:
            # One-shot mode: analyze a recorded file instead of the microphone.
            print(f"Testing VAD on file: {args.test_file}")
            has_speech = vad.detect_from_file(args.test_file)
            print(f"Speech detected in file: {has_speech}")
            return

        # Live mode: run until Ctrl+C.
        if not vad.start_detection():
            print("Failed to start detection")
            sys.exit(1)

        try:
            while True:
                # Idle poll; integrators may call vad.is_speech_detected()
                # here to feed other systems.
                time.sleep(0.1)
        except KeyboardInterrupt:
            print("\nStopping detection...")
        finally:
            vad.stop_detection()

    except FileNotFoundError as e:
        print(f"Error: {e}")
        print("\nTo download the model:")
        print("wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx")
        sys.exit(1)
    except Exception as e:
        print(f"Unexpected error: {e}")
        sys.exit(1)


# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()