import json
import os
import traceback

import librosa
import librosa.display
import matplotlib.pyplot as plt
import noisereduce as nr
import numpy as np
import soundfile as sf
import torch
import torch.nn as nn
import webrtcvad
from scipy.signal import butter, filtfilt
from speechbrain.inference.speaker import EncoderClassifier

# Tuned configuration for aviation radio audio (16 kHz mono).
CONFIG = dict(
    sample_rate=16000,
    vad_aggressiveness=1,       # WebRTC VAD sensitivity: 0 (least) .. 3 (most)
    min_speech_duration=0.2,    # shortest segment kept, in seconds
    similarity_threshold=0.65,  # cosine-similarity cutoff for a speaker match
    max_merge_duration=1.0,     # max gap (s) when merging same-speaker segments
    frame_duration=30,          # VAD frame length in ms (10, 20 or 30 allowed)
    noise_reduction=0.6,        # noisereduce prop_decrease (0..1)
    energy_threshold=0.02,      # threshold for the energy-based fallback VAD
    device="cpu",
)

class AviationSpeakerRecognizer:
    """Pilot-vs-tower speaker diarization for aviation radio recordings.

    Pipeline: audio preprocessing (denoise, band-pass, normalize) ->
    WebRTC VAD segmentation (with an energy-based fallback) -> per-segment
    speaker embeddings -> online two-class speaker assignment ->
    merge / verify / boundary refinement.
    """

    def __init__(self):
        self.sample_rate = CONFIG["sample_rate"]

        # Frame-level voice activity detector (aggressiveness 0-3).
        self.webrtc_vad = webrtcvad.Vad(CONFIG["vad_aggressiveness"])

        # Running reference embeddings for the two expected speakers;
        # lazily initialized from the first classified segment.
        self.pilot_features = None
        self.tower_features = None

        # Pretrained ECAPA speaker-embedding model; fall back to a small
        # CNN if it cannot be downloaded/loaded (e.g. running offline).
        try:
            self.speaker_model = EncoderClassifier.from_hparams(
                source="speechbrain/spkrec-ecapa-voxceleb",
                savedir="pretrained_models/spkrec-ecapa-voxceleb",
                # Use the configured device instead of a hard-coded "cpu".
                run_opts={"device": CONFIG["device"]}
            )
            print("ECAPA声纹模型加载成功")
        except Exception as e:
            print(f"无法加载ECAPA声纹模型: {e}")
            print("将使用轻量级替代模型")
            self.speaker_model = self._create_lightweight_model()

    def _create_lightweight_model(self):
        """Build a small CNN fallback mapping raw audio to a 64-d embedding.

        AdaptiveAvgPool1d fixes the time dimension at 24 so the final Linear
        layer accepts segments of ANY length (the original fixed-size Flatten
        only worked for one exact input length and crashed otherwise).
        """
        return nn.Sequential(
            nn.Conv1d(1, 8, 5, padding=2),
            nn.ReLU(),
            nn.MaxPool1d(4),
            nn.Conv1d(8, 16, 5, padding=2),
            nn.ReLU(),
            nn.MaxPool1d(4),
            nn.AdaptiveAvgPool1d(24),  # length-independent: always 16x24 features
            nn.Flatten(),
            nn.Linear(16 * 24, 64)
        )

    def preprocess_audio(self, audio_path):
        """Load and clean an audio file.

        Resamples to ``self.sample_rate`` mono, denoises, band-pass filters
        and normalizes. Writes debug copies (original_audio.wav /
        preprocessed_audio.wav). Returns a float waveform; on any failure
        returns 10 seconds of silence so downstream code still runs.
        """
        try:
            print(f"加载音频文件: {audio_path}")
            # librosa handles resampling and mono downmix in one call.
            y, sr = librosa.load(audio_path, sr=self.sample_rate, mono=True)
            print(f"原始音频时长: {len(y)/self.sample_rate:.2f}秒")

            # Keep a copy of the raw input for debugging.
            sf.write("original_audio.wav", y, self.sample_rate)

            # Spectral noise reduction.
            y = self._advanced_noise_reduction(y)

            # Keep only the voice band (300-3000 Hz).
            y = self._bandpass_filter(y)

            # Loudness normalization.
            y = self._normalize_audio(y)

            # Save the processed signal for inspection.
            sf.write("preprocessed_audio.wav", y, self.sample_rate)
            print(f"预处理后音频时长: {len(y)/self.sample_rate:.2f}秒")

            return y
        except Exception as e:
            print(f"音频处理错误: {e}")
            traceback.print_exc()
            return np.zeros(self.sample_rate * 10)

    def _advanced_noise_reduction(self, audio):
        """Apply stationary spectral-gating noise reduction (noisereduce)."""
        print("应用噪声减少...")
        return nr.reduce_noise(
            y=audio,
            sr=self.sample_rate,
            stationary=True,
            prop_decrease=CONFIG["noise_reduction"]
        )

    def _bandpass_filter(self, y):
        """Zero-phase 4th-order Butterworth band-pass, 300-3000 Hz."""
        print("应用带通滤波...")
        nyq = 0.5 * self.sample_rate
        low = 300 / nyq
        high = 3000 / nyq
        b, a = butter(4, [low, high], btype='band')
        # filtfilt = forward+backward pass, no phase distortion.
        return filtfilt(b, a, y)

    def _normalize_audio(self, y):
        """Adaptive gain normalization.

        Gain is limited both by peak (no clipping past 0.9) and by 3x RMS,
        so short loud transients do not dominate. Silence is returned as-is.
        """
        print("应用音量标准化...")
        max_amp = np.max(np.abs(y))
        if max_amp < 1e-6:  # effectively silent: avoid division by zero
            return y

        rms = np.sqrt(np.mean(y**2))

        gain = 0.9 / max(rms * 3, max_amp)
        return y * gain

    def detect_voice_activity(self, audio):
        """Segment *audio* into (start, end) sample ranges via WebRTC VAD.

        Frames of CONFIG["frame_duration"] ms are classified speech/non-speech;
        runs of speech frames become a segment if they last at least
        CONFIG["min_speech_duration"] seconds.
        """
        print("开始语音活动检测(VAD)...")

        # WebRTC VAD consumes 16-bit PCM bytes.
        int_audio = (audio * 32767).astype(np.int16)
        frame_length = int(self.sample_rate * CONFIG["frame_duration"] / 1000)

        # Guard against a degenerate config value; 480 samples = 30 ms at
        # 16 kHz (WebRTC only accepts 10/20/30 ms frames).
        if frame_length < 10:
            frame_length = 480

        segments = []
        current_start = None
        speech_count = 0
        total_frames = 0

        for i in range(0, len(int_audio) - frame_length, frame_length):
            frame = int_audio[i:i+frame_length]

            try:
                is_speech = self.webrtc_vad.is_speech(frame.tobytes(), self.sample_rate)
            except Exception as e:
                # Bad frame size / rate combination: treat as non-speech.
                print(f"VAD检测错误: {e}")
                is_speech = False

            total_frames += 1
            if is_speech:
                speech_count += 1

            if is_speech and current_start is None:
                # Transition silence -> speech: open a segment.
                current_start = i
            elif not is_speech and current_start is not None:
                # Transition speech -> silence: close it if long enough.
                duration = (i - current_start) / self.sample_rate
                if duration >= CONFIG["min_speech_duration"]:
                    segments.append((current_start, i))
                current_start = None

        # Close a segment still open at end-of-audio.
        if current_start is not None:
            end_idx = len(int_audio)
            duration = (end_idx - current_start) / self.sample_rate
            if duration >= CONFIG["min_speech_duration"]:
                segments.append((current_start, end_idx))

        print(f"VAD检测完成: 总帧数={total_frames}, 语音帧={speech_count}, 检测到{len(segments)}个语音段")
        return segments

    def extract_features(self, audio_segment):
        """Extract a speaker embedding for one segment.

        Returns a 1-D numpy vector; a 64-d zero vector for segments that are
        too short or on any model failure.
        """
        if len(audio_segment) < 800:  # under 0.05 s at 16 kHz: too short to embed
            return np.zeros(64)

        # Keep amplitudes in the models' expected range.
        audio_segment = np.clip(audio_segment, -1.0, 1.0)

        try:
            with torch.no_grad():
                # NOTE: SpeechBrain's EncoderClassifier is itself an nn.Module,
                # so dispatch on encode_batch instead of isinstance (the old
                # isinstance check always took the wrong branch for ECAPA).
                if hasattr(self.speaker_model, "encode_batch"):
                    # encode_batch expects (batch, time).
                    audio_tensor = torch.tensor(audio_segment).float().unsqueeze(0)
                    features = self.speaker_model.encode_batch(audio_tensor)
                else:
                    # Lightweight CNN expects (batch, channels, time).
                    audio_tensor = torch.tensor(audio_segment).float().unsqueeze(0).unsqueeze(0)
                    features = self.speaker_model(audio_tensor)

            return features.squeeze().cpu().numpy()
        except Exception as e:
            print(f"特征提取错误: {e}")
            return np.zeros(64)

    def classify_speaker(self, features):
        """Assign an embedding to "pilot" or "tower".

        The first segment seeds the pilot reference (tower is seeded with the
        opposite direction so initial cosine comparisons are meaningful). The
        matched reference is updated with an exponential moving average.
        """
        if self.pilot_features is None:
            self.pilot_features = features
            self.tower_features = -features  # seed tower in the opposite direction
            print("初始化飞行员特征")
            return "pilot"

        def cosine_sim(a, b):
            # Cosine similarity with a zero-vector guard.
            norm_a = np.linalg.norm(a)
            norm_b = np.linalg.norm(b)
            if norm_a < 1e-6 or norm_b < 1e-6:
                return 0.0
            return np.dot(a, b) / (norm_a * norm_b)

        pilot_sim = cosine_sim(features, self.pilot_features)
        tower_sim = cosine_sim(features, self.tower_features)

        print(f"相似度 - 飞行员: {pilot_sim:.3f}, 塔台: {tower_sim:.3f}")

        # Confident match: similarity must beat both the other speaker and
        # the absolute threshold.
        if pilot_sim > max(tower_sim, CONFIG["similarity_threshold"]):
            self.pilot_features = 0.8 * self.pilot_features + 0.2 * features
            return "pilot"
        elif tower_sim > max(pilot_sim, CONFIG["similarity_threshold"]):
            self.tower_features = 0.8 * self.tower_features + 0.2 * features
            return "tower"
        else:
            # Below threshold: fall back to the nearer reference.
            if pilot_sim > tower_sim:
                self.pilot_features = 0.8 * self.pilot_features + 0.2 * features
                return "pilot"
            else:
                self.tower_features = 0.8 * self.tower_features + 0.2 * features
                return "tower"

    def process_audio(self, audio_path):
        """Full pipeline for one file.

        Returns a list of dicts: {"start": s, "end": s, "speaker": str,
        "confidence": float} with times in seconds.
        """
        audio = self.preprocess_audio(audio_path)

        segments = self.detect_voice_activity(audio)

        if not segments:
            print("警告: 没有检测到任何语音段!")
            # Fall back to simple energy thresholding.
            segments = self._energy_based_vad(audio)
            if not segments:
                print("严重错误: 所有VAD方法均未检测到语音段")
                return []

        # Persist a VAD visualization for debugging.
        self.debug_vad(audio, segments, "vad_debug.png")

        results = []
        for i, (start, end) in enumerate(segments):
            segment_audio = audio[start:end]
            duration = (end - start) / self.sample_rate
            print(f"处理段 {i+1}: 开始={start}, 结束={end}, 时长={duration:.2f}秒")

            if duration < 0.05:  # under 50 ms: skip
                print(f"跳过短段: {duration:.3f}秒")
                continue

            features = self.extract_features(segment_audio)
            speaker = self.classify_speaker(features)

            # Confidence = best cosine similarity against either reference.
            pilot_sim = np.dot(features, self.pilot_features) / (
                np.linalg.norm(features) * np.linalg.norm(self.pilot_features) + 1e-8)
            tower_sim = np.dot(features, self.tower_features) / (
                np.linalg.norm(features) * np.linalg.norm(self.tower_features) + 1e-8)
            confidence = max(pilot_sim, tower_sim)

            results.append({
                "start": start / self.sample_rate,
                "end": end / self.sample_rate,
                "speaker": speaker,
                # Cast numpy scalar -> builtin float so json.dump can
                # serialize the result (np.float32 is not JSON-serializable).
                "confidence": float(confidence)
            })

        return self.optimize_results(results, audio)

    def _energy_based_vad(self, audio):
        """Fallback VAD: mark 30 ms frames above an energy threshold."""
        print("使用备选能量VAD...")
        frame_length = int(0.03 * self.sample_rate)  # 30 ms frames
        hop_length = frame_length // 2
        segments = []
        current_start = None

        for i in range(0, len(audio) - frame_length, hop_length):
            frame = audio[i:i+frame_length]
            energy = np.sum(frame ** 2) / len(frame)

            if energy > CONFIG["energy_threshold"]:
                if current_start is None:
                    current_start = i
            elif current_start is not None:
                segments.append((current_start, i + frame_length))
                current_start = None

        if current_start is not None:
            segments.append((current_start, len(audio)))

        print(f"能量VAD检测到 {len(segments)} 个段")
        return segments

    def optimize_results(self, results, audio):
        """Post-process raw segments: merge, verify, refine boundaries."""
        if not results:
            return []

        # 1. Merge adjacent same-speaker segments.
        merged = self.merge_segments(results)

        # 2. Keep only segments that look like real speech, with boundaries
        #    snapped to the actual speech energy.
        verified = []
        for seg in merged:
            start_idx = int(seg["start"] * self.sample_rate)
            end_idx = int(seg["end"] * self.sample_rate)
            seg_audio = audio[start_idx:end_idx]

            if self.is_real_speech(seg_audio):
                adj_start, adj_end = self.adjust_boundaries(seg_audio, start_idx, end_idx)
                seg["start"] = adj_start / self.sample_rate
                seg["end"] = adj_end / self.sample_rate
                verified.append(seg)

        return verified

    def merge_segments(self, results):
        """Merge consecutive same-speaker segments separated by a small gap."""
        if not results:
            return []

        merged = [results[0]]
        for seg in results[1:]:
            last = merged[-1]
            gap = seg["start"] - last["end"]

            # Merge only same speaker within the configured gap.
            if (seg["speaker"] == last["speaker"] and
                gap < CONFIG["max_merge_duration"]):
                last["end"] = seg["end"]
                last["confidence"] = max(last["confidence"], seg["confidence"])
            else:
                merged.append(seg)

        return merged

    def is_real_speech(self, audio_segment):
        """Heuristic check that a segment is speech rather than noise.

        Combines an SNR estimate (noise taken from the first 100 ms of the
        segment — presumably leading silence; TODO confirm against typical
        VAD output) with the zero-crossing rate.
        """
        rms_signal = np.sqrt(np.mean(audio_segment**2))

        # Noise floor estimate from the first 100 ms (1600 samples @ 16 kHz).
        noise_est = np.sqrt(np.mean(audio_segment[:1600]**2)) if len(audio_segment) > 1600 else 0

        if noise_est < 1e-6:
            snr = 30  # silent noise floor: assume high SNR
        else:
            # Amplitude (RMS) ratio in dB uses 20*log10 (the original
            # 10*log10 is the power-ratio formula, halving every SNR).
            snr = 20 * np.log10(rms_signal / noise_est)

        # Zero-crossing rate: speech is typically below ~0.3.
        zero_crossings = np.sum(np.diff(np.sign(audio_segment)) != 0) / len(audio_segment)

        is_speech = snr > 10 and zero_crossings < 0.3
        print(f"语音验证: SNR={snr:.1f}dB, 过零率={zero_crossings:.3f} -> {'是语音' if is_speech else '可能是噪声'}")

        return is_speech

    def adjust_boundaries(self, audio_segment, start_idx, end_idx):
        """Snap segment boundaries to the first/last energetic 10 ms frame."""
        frame_size = 160  # 10 ms at 16 kHz
        new_start = start_idx
        # Scan forward for the first frame with meaningful energy.
        for i in range(0, len(audio_segment)-frame_size, frame_size):
            frame = audio_segment[i:i+frame_size]
            energy = np.sum(frame**2) / len(frame)
            if energy > CONFIG["energy_threshold"] * 0.3:
                new_start = start_idx + i
                break

        # Scan backward for the last frame with meaningful energy.
        new_end = end_idx
        for i in range(len(audio_segment)-frame_size, 0, -frame_size):
            frame = audio_segment[i:i+frame_size]
            energy = np.sum(frame**2) / len(frame)
            if energy > CONFIG["energy_threshold"] * 0.3:
                new_end = start_idx + i + frame_size
                break

        # Guard: never return an inverted range (possible when the energetic
        # frames found by the two scans do not overlap).
        if new_end <= new_start:
            new_end = min(end_idx, new_start + frame_size)

        print(f"边界调整: {start_idx}->{new_start}, {end_idx}->{new_end}")
        return new_start, new_end

    def debug_vad(self, audio, segments, output_file="vad_debug.png"):
        """Save a two-panel PNG: waveform + spectrogram with VAD spans."""
        plt.figure(figsize=(15, 8))

        # Top panel: waveform with detected speech spans in green.
        plt.subplot(2, 1, 1)
        time_axis = np.arange(len(audio)) / self.sample_rate
        plt.plot(time_axis, audio, alpha=0.7, label="Audio Waveform")

        for start, end in segments:
            start_time = start / self.sample_rate
            end_time = end / self.sample_rate
            plt.axvspan(start_time, end_time, color='green', alpha=0.3)

        plt.title("VAD Detection Result")
        plt.xlabel("Time (seconds)")
        plt.ylabel("Amplitude")
        plt.legend()

        # Bottom panel: log-frequency spectrogram with spans in red.
        plt.subplot(2, 1, 2)
        S = librosa.amplitude_to_db(np.abs(librosa.stft(audio)), ref=np.max)
        # Requires "import librosa.display" at module level.
        librosa.display.specshow(S, sr=self.sample_rate, x_axis='time', y_axis='log')
        plt.colorbar(format='%+2.0f dB')
        plt.title('Spectrogram with VAD Segments')

        for start, end in segments:
            start_time = start / self.sample_rate
            end_time = end / self.sample_rate
            plt.axvspan(start_time, end_time, color='red', alpha=0.3)

        plt.tight_layout()
        plt.savefig(output_file)
        plt.close()
        print(f"详细VAD调试图已保存: {output_file}")

if __name__ == "__main__":
    import sys
    # Audio file from the command line, or a generated demo clip.
    if len(sys.argv) > 1:
        audio_file = sys.argv[1]
    else:
        audio_file = "whisper_test.mp3"
        print(f"未提供音频文件，将使用示例文件: {audio_file}")
        # Synthesize a test clip: 1 s silence + 5 s A4 tone + 1 s silence.
        if not os.path.exists(audio_file):
            duration = 5.0  # tone length, seconds
            sample_rate = 16000
            t = np.linspace(0, duration, int(duration * sample_rate), endpoint=False)
            audio = 0.5 * np.sin(2 * np.pi * 440 * t)  # 440 Hz (A4) tone
            silence = np.zeros(int(1.0 * sample_rate))  # 1 s of silence
            full_audio = np.concatenate([silence, audio, silence])
            # NOTE(review): writing .mp3 via soundfile requires libsndfile
            # with MP3 support (>= 1.1.0) — confirm, or use a .wav name.
            sf.write(audio_file, full_audio, sample_rate)
            print(f"已创建测试音频文件: {audio_file}")

    recognizer = AviationSpeakerRecognizer()

    print("\n开始处理音频...")
    segments = recognizer.process_audio(audio_file)

    if not segments:
        print("\n警告: 未检测到任何语音段!")
        print("可能原因:")
        print("1. 音频音量过低或静音")
        print("2. VAD参数设置过于严格")
        print("3. 音频预处理过度去除了语音")
        print("建议检查预处理后的音频文件: preprocessed_audio.wav")
    else:
        # Print the diarization result.
        print("\n说话人分段结果:")
        for i, seg in enumerate(segments):
            print(f"{i+1}. [{seg['start']:.2f}-{seg['end']:.2f}s] {seg['speaker']} (置信度: {seg['confidence']:.2f})")

        # default=float: confidences may be numpy scalars (e.g. float32),
        # which the json module cannot serialize natively.
        with open("speaker_segments.json", "w", encoding="utf-8") as f:
            json.dump(segments, f, indent=2, ensure_ascii=False, default=float)
        print("分段结果已保存为 speaker_segments.json")

        # Render the final segmentation over the preprocessed audio.
        recognizer.debug_vad(
            recognizer.preprocess_audio(audio_file),
            [(int(seg['start']*recognizer.sample_rate), int(seg['end']*recognizer.sample_rate)) for seg in segments],
            "final_result.png"
        )
        print("最终结果可视化已保存为 final_result.png")