import torch
import numpy as np
import pyaudio
import wave
import time
import os
from collections import deque

class SileroVAD:
    """Voice-activity detection on live 16 kHz microphone audio using the Silero VAD model.

    Loads the Silero VAD model via torch.hub (preferring a project-local
    cached copy), then exposes one-shot detection, continuous listening,
    and saving of captured speech chunks to a WAV file.
    """

    def __init__(self):
        # Point torch.hub's caches at a project-local models directory to
        # avoid path/permission problems with the default cache location.
        cache_dir = os.path.join(os.path.dirname(__file__), '..', 'models')
        os.environ['TORCH_HOME'] = cache_dir
        os.environ['XDG_CACHE_HOME'] = cache_dir

        try:
            # Prefer a previously downloaded local copy of the model.
            model_path = os.path.join(cache_dir, 'hub', 'snakers4_silero-vad_master')
            if os.path.exists(model_path):
                self.model, self.utils = torch.hub.load(repo_or_dir=model_path,
                                                        model='silero_vad',
                                                        source='local')
            else:
                # No local copy yet: fetch the model from the hub.
                self.model, self.utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',
                                                        model='silero_vad',
                                                        force_reload=False,
                                                        trust_repo=True)
        except Exception as e:
            print(f"❌ 加载Silero VAD模型失败: {e}")
            print("🔄 尝试从GitHub直接下载...")
            # Last resort: bypass torch.hub's forked-repo validation and retry.
            torch.hub._validate_not_a_forked_repo = lambda a, b, c: True
            self.model, self.utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',
                                                    model='silero_vad',
                                                    force_reload=False,
                                                    trust_repo=True,
                                                    skip_validation=True)

        # Unpack the helper utilities once, whichever load path succeeded
        # (the original duplicated this 5-tuple unpacking in both branches).
        (self.get_speech_timestamps,
         self.save_audio,
         self.read_audio,
         self.VADIterator,
         self.collect_chunks) = self.utils

        # Audio capture parameters.
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 1
        self.RATE = 16000  # Silero VAD requires a 16 kHz sample rate
        self.CHUNK = 512   # samples per chunk fed to the VAD
        self.VAD_THRESHOLD = 0.5  # speech-probability threshold

        # Streaming VAD iterator (keeps internal state between chunks).
        self.vad_iterator = self.VADIterator(self.model, threshold=self.VAD_THRESHOLD)

        # Rolling buffer of recent raw chunks, plus chunks flagged as speech.
        self.audio_buffer = deque(maxlen=20)
        self.speech_buffer = []

    def detect_voice_activity(self, timeout=5):
        """Listen on the default microphone for up to `timeout` seconds.

        Returns True as soon as the VAD reports a speech event, False on
        timeout or error. Detected chunks are appended to `speech_buffer`.

        NOTE(review): `vad_iterator` state persists across calls — a stale
        in-speech state from a previous call could affect results; confirm
        whether `reset_states()` should be called here.
        """
        p = pyaudio.PyAudio()
        stream = None

        try:
            stream = p.open(format=self.FORMAT,
                            channels=self.CHANNELS,
                            rate=self.RATE,
                            input=True,
                            frames_per_buffer=self.CHUNK)

            print(" 开始语音检测...")
            start_time = time.time()
            speech_detected = False

            # Discard detections from the first second of audio, to avoid
            # triggering on echo/residual playback.
            ignore_duration = 1.0  # seconds of audio to ignore
            ignore_start_time = time.time()

            while time.time() - start_time < timeout:
                audio_data = stream.read(self.CHUNK, exception_on_overflow=False)

                # int16 PCM -> float32 normalized to [-1, 1], as the model expects.
                audio_np = np.frombuffer(audio_data, dtype=np.int16)
                audio_float = audio_np.astype(np.float32) / 32768.0

                # Feed the chunk even during the ignore window so the
                # streaming VAD's internal state tracks the real audio.
                speech_dict = self.vad_iterator(audio_float, return_seconds=True)

                if time.time() - ignore_start_time < ignore_duration:
                    continue  # drop any detection produced in the ignore window

                if speech_dict:
                    print(f"️  检测到语音: {speech_dict}")
                    self.speech_buffer.append(audio_data)
                    speech_detected = True
                    break

                self.audio_buffer.append(audio_data)

                # Brief sleep to reduce CPU usage.
                time.sleep(0.01)

            return speech_detected

        except Exception as e:
            print(f" VAD检测异常: {e}")
            return False
        finally:
            # Always release audio resources, including on error paths
            # (the original leaked the open stream when an exception occurred).
            if stream is not None:
                stream.stop_stream()
                stream.close()
            p.terminate()

    def continuous_listen(self, callback=None):
        """Listen indefinitely; invoke `callback(raw_chunk)` on each speech event.

        Runs until KeyboardInterrupt (Ctrl+C). Errors are printed, not raised.
        """
        p = pyaudio.PyAudio()
        stream = None  # guard: p.open may raise before stream exists

        try:
            stream = p.open(format=self.FORMAT,
                            channels=self.CHANNELS,
                            rate=self.RATE,
                            input=True,
                            frames_per_buffer=self.CHUNK)

            print(" 持续监听中... (按Ctrl+C停止)")

            while True:
                audio_data = stream.read(self.CHUNK, exception_on_overflow=False)
                audio_np = np.frombuffer(audio_data, dtype=np.int16)
                audio_float = audio_np.astype(np.float32) / 32768.0

                speech_dict = self.vad_iterator(audio_float, return_seconds=True)

                if speech_dict:
                    print(f"️  检测到语音段: {speech_dict}")
                    if callback:
                        callback(audio_data)

                time.sleep(0.001)

        except KeyboardInterrupt:
            print("\n 停止监听")
        except Exception as e:
            print(f" 监听异常: {e}")
        finally:
            # Original referenced `stream` unconditionally here, raising
            # NameError if p.open() failed; guard before cleanup.
            if stream is not None:
                stream.stop_stream()
                stream.close()
            p.terminate()

    def save_detected_speech(self, filename="detected_speech.wav"):
        """Write all buffered speech chunks to `filename` as a mono 16-bit WAV."""
        if not self.speech_buffer:
            print("️  没有检测到语音数据")
            return

        # Module-level helper returns the byte width for the format
        # (the original instantiated a PyAudio() it never terminate()d).
        sample_width = pyaudio.get_sample_size(self.FORMAT)

        # Context manager guarantees the file is closed even on write errors.
        with wave.open(filename, 'wb') as wf:
            wf.setnchannels(self.CHANNELS)
            wf.setsampwidth(sample_width)
            wf.setframerate(self.RATE)
            wf.writeframes(b''.join(self.speech_buffer))
        # Original printed the literal "(unknown)" — broken format string.
        print(f" 语音已保存到: {filename}")